/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <linux/libbpf.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bpf-program.h"
#include "fd-util.h"
#include "ip-address-access.h"
#include "unit.h"
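
/* Keys into the per-direction accounting array maps, and the verdict bits the compiled access
 * checkers OR into R8. The numeric values here are reconstructed assumptions: MAP_KEY_PACKETS
 * must be 0 and MAP_KEY_BYTES 1 to match the "r0 = 0"/"r0 = 1" comments on the accounting
 * instructions below, and the two ACCESS_* values must be distinct bits so that a packet with
 * both bits set (R8 == 3) compares unequal to ACCESS_DENIED and is allowed to pass. */

enum {
        MAP_KEY_PACKETS,
        MAP_KEY_BYTES,
};

enum {
        ACCESS_ALLOWED = 1,
        ACCESS_DENIED  = 2,
};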
/* Compile instructions for one list of addresses, one direction and one specific verdict on matches. */

static int add_lookup_instructions(
                BPFProgram *p,
                int map_fd,
                int protocol,
                bool is_ingress,
                int verdict) {

        int r, addr_offset, addr_size;

        assert(p);
        assert(map_fd >= 0);

        switch (protocol) {

        case ETH_P_IP:
                addr_size = sizeof(uint32_t);
                addr_offset = is_ingress ?
                              offsetof(struct iphdr, saddr) :
                              offsetof(struct iphdr, daddr);
                break;

        case ETH_P_IPV6:
                addr_size = 4 * sizeof(uint32_t);
                addr_offset = is_ingress ?
                              offsetof(struct ip6_hdr, ip6_src.s6_addr) :
                              offsetof(struct ip6_hdr, ip6_dst.s6_addr);
                break;

        default:
                return -EAFNOSUPPORT;
        }

        do {
                /* Compare the address in 32-bit words (one for IPv4, four for IPv6) */
                struct bpf_insn insn[] = {
                        /* If skb->protocol != protocol, skip this whole block. The offset will be set later. */
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(protocol), 0),

                        /*
                         * Call into BPF_FUNC_skb_load_bytes to load the dst/src IP address
                         *
                         * R1: Pointer to the skb
                         * R2: Data offset
                         * R3: Destination buffer on the stack (r10 - addr_size)
                         * R4: Number of bytes to read (addr_size)
                         */

                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
                        BPF_MOV32_IMM(BPF_REG_2, addr_offset),

                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -addr_size),

                        BPF_MOV32_IMM(BPF_REG_4, addr_size),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),

                        /*
                         * Call into BPF_FUNC_map_lookup_elem to see if the address matches any entry in the
                         * LPM trie map. For this to work, the prefixlen field of 'struct bpf_lpm_trie_key'
                         * has to be set to the maximum possible value.
                         *
                         * On success, the looked up value is stored in R0. For this application, the actual
                         * value doesn't matter, however; we just set the bit in @verdict in R8 if we found any
                         * matching value.
                         */

                        BPF_LD_MAP_FD(BPF_REG_1, map_fd),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -addr_size - sizeof(uint32_t)),
                        BPF_ST_MEM(BPF_W, BPF_REG_2, 0, addr_size * 8),

                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
                        BPF_ALU32_IMM(BPF_OR, BPF_REG_8, verdict),
                };

                /* Jump label fixup */
                insn[0].off = ELEMENTSOF(insn) - 1;

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;

        } while (false);

        return 0;
}
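
/*
 * For reference, the stack layout the instructions above build up, derived from the offsets
 * used in the code:
 *
 *   fp - addr_size - 4:  u32 prefixlen       (stored as addr_size * 8, i.e. an exact match)
 *   fp - addr_size:      u8  data[addr_size] (filled in by BPF_FUNC_skb_load_bytes)
 *
 * Together these two fields form the 'struct bpf_lpm_trie_key' whose address is passed to
 * BPF_FUNC_map_lookup_elem in R2.
 */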
static int bpf_firewall_compile_bpf(
                Unit *u,
                bool is_ingress,
                BPFProgram **ret) {

        struct bpf_insn pre_insn[] = {
                /*
                 * When the eBPF program is entered, R1 contains the address of the skb.
                 * However, R1-R5 are scratch registers that are not preserved when calling
                 * into kernel functions, so we need to save anything that's supposed to
                 * stay around to R6-R9. Save the skb to R6.
                 */
                BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),

                /*
                 * Although we cannot access the skb data directly from eBPF programs used in this
                 * scenario, the kernel has prepared some fields for us to access through struct __sk_buff.
                 * Load the protocol (IPv4, IPv6) used by the packet in flight once and cache it in R7
                 * for later use.
                 */
                BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct __sk_buff, protocol)),

                /*
                 * R8 is used to keep track of whether any address check has explicitly allowed or denied the packet
                 * through ACCESS_DENIED or ACCESS_ALLOWED bits. Reset them both to 0 in the beginning.
                 */
                BPF_MOV32_IMM(BPF_REG_8, 0),
        };

        /*
         * The access checkers compiled for the configured allowance and denial lists
         * write to R8 at runtime. The following code prepares for an early exit that
         * skips the accounting if the packet is denied.
         *
         * R0 = 1
         * if (R8 == ACCESS_DENIED)
         *     R0 = 0
         *
         * This means that if both ACCESS_DENIED and ACCESS_ALLOWED are set, the packet
         * is allowed to pass.
         */
        struct bpf_insn post_insn[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_JMP_IMM(BPF_JNE, BPF_REG_8, ACCESS_DENIED, 1),
                BPF_MOV64_IMM(BPF_REG_0, 0),
        };

        _cleanup_(bpf_program_unrefp) BPFProgram *p = NULL;
        int accounting_map_fd, r;
        bool access_enabled;

        assert(u);
        assert(ret);

        accounting_map_fd = is_ingress ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;

        access_enabled =
                u->ipv4_allow_map_fd >= 0 ||
                u->ipv6_allow_map_fd >= 0 ||
                u->ipv4_deny_map_fd >= 0 ||
                u->ipv6_deny_map_fd >= 0;

        if (accounting_map_fd < 0 && !access_enabled) {
                *ret = NULL;
                return 0;
        }

        r = bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &p);
        if (r < 0)
                return r;

        r = bpf_program_add_instructions(p, pre_insn, ELEMENTSOF(pre_insn));
        if (r < 0)
                return r;

        if (access_enabled) {
                /*
                 * The simple rule this function translates into eBPF instructions is:
                 *
                 * - Access will be granted when an address matches an entry in @list_allow
                 * - Otherwise, access will be denied when an address matches an entry in @list_deny
                 * - Otherwise, access will be granted
                 */

                if (u->ipv4_deny_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv4_deny_map_fd, ETH_P_IP, is_ingress, ACCESS_DENIED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv6_deny_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv6_deny_map_fd, ETH_P_IPV6, is_ingress, ACCESS_DENIED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv4_allow_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv4_allow_map_fd, ETH_P_IP, is_ingress, ACCESS_ALLOWED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv6_allow_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv6_allow_map_fd, ETH_P_IPV6, is_ingress, ACCESS_ALLOWED);
                        if (r < 0)
                                return r;
                }
        }

        r = bpf_program_add_instructions(p, post_insn, ELEMENTSOF(post_insn));
        if (r < 0)
                return r;

        if (accounting_map_fd >= 0) {
                struct bpf_insn insn[] = {
                        /*
                         * If R0 == 0, the packet will be denied; skip the accounting instructions in this case.
                         * The jump label will be fixed up later.
                         */
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0),

                        /* Count packets */
                        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
                        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
                        BPF_LD_MAP_FD(BPF_REG_1, accounting_map_fd), /* load map fd to r1 */
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                        BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
                        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

                        /* Count bytes */
                        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
                        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
                        BPF_LD_MAP_FD(BPF_REG_1, accounting_map_fd),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
                        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

                        /* Allow the packet to pass */
                        BPF_MOV64_IMM(BPF_REG_0, 1),
                };

                /* Jump label fixup */
                insn[0].off = ELEMENTSOF(insn) - 1;

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;
        }

        do {
                /*
                 * Exit from the eBPF program, R0 contains the verdict.
                 * 0 means the packet is denied, 1 means the packet may pass.
                 */
                struct bpf_insn insn[] = {
                        BPF_EXIT_INSN()
                };

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;
        } while (false);

        *ret = p;
        p = NULL;

        return 0;
}
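
/*
 * The resulting instruction stream, in order (a sketch of what the code above emits):
 *
 *   pre_insn:   save skb to R6, cache skb->protocol in R7, clear R8
 *   lookups:    one block per configured access map, OR-ing ACCESS_* bits into R8
 *   post_insn:  R0 = (R8 == ACCESS_DENIED) ? 0 : 1
 *   accounting: if R0 != 0, bump the packet and byte counters (only if accounting is on)
 *   exit:       return R0 as the verdict
 */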
static int bpf_firewall_count_access_items(IPAddressAccessItem *list, size_t *n_ipv4, size_t *n_ipv6) {
        IPAddressAccessItem *a;

        assert(n_ipv4);
        assert(n_ipv6);

        LIST_FOREACH(items, a, list) {
                switch (a->family) {

                case AF_INET:
                        (*n_ipv4)++;
                        break;

                case AF_INET6:
                        (*n_ipv6)++;
                        break;

                default:
                        return -EAFNOSUPPORT;
                }
        }

        return 0;
}
static int bpf_firewall_add_access_items(
                IPAddressAccessItem *list,
                int ipv4_map_fd,
                int ipv6_map_fd,
                int verdict) {

        struct bpf_lpm_trie_key *key_ipv4, *key_ipv6;
        uint64_t value = verdict;
        IPAddressAccessItem *a;
        int r;

        key_ipv4 = alloca0(offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t));
        key_ipv6 = alloca0(offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t) * 4);

        LIST_FOREACH(items, a, list) {
                switch (a->family) {

                case AF_INET:
                        key_ipv4->prefixlen = a->prefixlen;
                        memcpy(key_ipv4->data, &a->address, sizeof(uint32_t));

                        r = bpf_map_update_element(ipv4_map_fd, key_ipv4, &value);
                        if (r < 0)
                                return r;

                        break;

                case AF_INET6:
                        key_ipv6->prefixlen = a->prefixlen;
                        memcpy(key_ipv6->data, &a->address, 4 * sizeof(uint32_t));

                        r = bpf_map_update_element(ipv6_map_fd, key_ipv6, &value);
                        if (r < 0)
                                return r;

                        break;

                default:
                        return -EAFNOSUPPORT;
                }
        }

        return 0;
}
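
/*
 * Worked example: an IPAddressDeny=10.0.0.0/8 entry becomes an LPM trie key with
 * prefixlen = 8 and data = { 10, 0, 0, 0 }. Any packet whose relevant address falls
 * within that prefix then matches the lookup compiled by add_lookup_instructions()
 * and sets ACCESS_DENIED in R8. Note the stored value is the verdict, but it is
 * never read back at runtime; only the existence of a matching entry matters.
 */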
static int bpf_firewall_prepare_access_maps(
                Unit *u,
                int verdict,
                int *ret_ipv4_map_fd,
                int *ret_ipv6_map_fd) {

        _cleanup_close_ int ipv4_map_fd = -1, ipv6_map_fd = -1;
        size_t n_ipv4 = 0, n_ipv6 = 0;
        Unit *p;
        int r;

        assert(ret_ipv4_map_fd);
        assert(ret_ipv6_map_fd);

        for (p = u; p; p = UNIT_DEREF(p->slice)) {
                CGroupContext *cc;

                cc = unit_get_cgroup_context(p);
                if (!cc)
                        continue;

                bpf_firewall_count_access_items(verdict == ACCESS_ALLOWED ? cc->ip_address_allow : cc->ip_address_deny, &n_ipv4, &n_ipv6);
        }

        if (n_ipv4 > 0) {
                ipv4_map_fd = bpf_map_new(
                                BPF_MAP_TYPE_LPM_TRIE,
                                offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t),
                                sizeof(uint64_t),
                                n_ipv4,
                                BPF_F_NO_PREALLOC);
                if (ipv4_map_fd < 0)
                        return ipv4_map_fd;
        }

        if (n_ipv6 > 0) {
                ipv6_map_fd = bpf_map_new(
                                BPF_MAP_TYPE_LPM_TRIE,
                                offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t)*4,
                                sizeof(uint64_t),
                                n_ipv6,
                                BPF_F_NO_PREALLOC);
                if (ipv6_map_fd < 0)
                        return ipv6_map_fd;
        }

        for (p = u; p; p = UNIT_DEREF(p->slice)) {
                CGroupContext *cc;

                cc = unit_get_cgroup_context(p);
                if (!cc)
                        continue;

                r = bpf_firewall_add_access_items(verdict == ACCESS_ALLOWED ? cc->ip_address_allow : cc->ip_address_deny,
                                                  ipv4_map_fd, ipv6_map_fd, verdict);
                if (r < 0)
                        return r;
        }

        *ret_ipv4_map_fd = ipv4_map_fd;
        *ret_ipv6_map_fd = ipv6_map_fd;

        ipv4_map_fd = ipv6_map_fd = -1;
        return 0;
}
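
/*
 * On success the map fds are handed over to the caller; resetting the local variables
 * to -1 right before returning keeps the _cleanup_close_ handlers from closing them.
 * Note also that the maps merge the rules of the unit and all its parent slices, which
 * is why both loops above walk up the slice tree via UNIT_DEREF(p->slice).
 */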
static int bpf_firewall_prepare_accounting_maps(Unit *u, bool enabled, int *fd_ingress, int *fd_egress) {
        int r;

        assert(u);
        assert(fd_ingress);
        assert(fd_egress);

        if (enabled) {
                if (*fd_ingress < 0) {
                        r = bpf_map_new(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
                        if (r < 0)
                                return r;

                        *fd_ingress = r;
                }

                if (*fd_egress < 0) {
                        r = bpf_map_new(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
                        if (r < 0)
                                return r;

                        *fd_egress = r;
                }
        } else {
                *fd_ingress = safe_close(*fd_ingress);
                *fd_egress = safe_close(*fd_egress);

                zero(u->ip_accounting_extra);
        }

        return 0;
}
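
/*
 * Each accounting map is a two-element array indexed by MAP_KEY_PACKETS and MAP_KEY_BYTES.
 * A minimal sketch of reading it back, using the helper defined further down:
 *
 *     uint64_t bytes, packets;
 *     r = bpf_firewall_read_accounting(u->ip_accounting_ingress_map_fd, &bytes, &packets);
 */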
int bpf_firewall_compile(Unit *u) {
        CGroupContext *cc;
        int r, supported;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return -EINVAL;

        supported = bpf_firewall_supported();
        if (supported < 0)
                return supported;
        if (supported == BPF_FIREWALL_UNSUPPORTED) {
                log_unit_debug(u, "BPF firewalling not supported on this manager, proceeding without.");
                return -EOPNOTSUPP;
        }
        if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
                /* If BPF_F_ALLOW_MULTI is not supported we don't support any BPF magic on inner nodes (i.e. on slice
                 * units), since that would mean leaf nodes couldn't do any BPF anymore at all. Under the assumption
                 * that BPF is more interesting on leaf nodes we hence avoid it on inner nodes in that case. This is
                 * consistent with old systemd behaviour from before v238, where BPF wasn't supported in inner nodes at
                 * all, either. */
                log_unit_debug(u, "BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
                return -EOPNOTSUPP;
        }

        /* Note that when we compile a new firewall we first flush out the access maps and the BPF programs themselves,
         * but we reuse the accounting maps. That way the firewall in effect always maps to the actual
         * configuration, but we don't flush out the accounting unnecessarily. */

        u->ip_bpf_ingress = bpf_program_unref(u->ip_bpf_ingress);
        u->ip_bpf_egress = bpf_program_unref(u->ip_bpf_egress);

        u->ipv4_allow_map_fd = safe_close(u->ipv4_allow_map_fd);
        u->ipv4_deny_map_fd = safe_close(u->ipv4_deny_map_fd);

        u->ipv6_allow_map_fd = safe_close(u->ipv6_allow_map_fd);
        u->ipv6_deny_map_fd = safe_close(u->ipv6_deny_map_fd);

        if (u->type != UNIT_SLICE) {
                /* In inner nodes we only do accounting, we do not actually bother with access control. However, leaf
                 * nodes will incorporate all IP access rules set on all their parent nodes. This has the benefit that
                 * they can optionally cancel out system-wide rules. Since inner nodes can't contain processes this
                 * means that all configured IP access rules *will* take effect on processes, even though we never
                 * compile them for inner nodes. */

                r = bpf_firewall_prepare_access_maps(u, ACCESS_ALLOWED, &u->ipv4_allow_map_fd, &u->ipv6_allow_map_fd);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Preparation of eBPF allow maps failed: %m");

                r = bpf_firewall_prepare_access_maps(u, ACCESS_DENIED, &u->ipv4_deny_map_fd, &u->ipv6_deny_map_fd);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Preparation of eBPF deny maps failed: %m");
        }

        r = bpf_firewall_prepare_accounting_maps(u, cc->ip_accounting, &u->ip_accounting_ingress_map_fd, &u->ip_accounting_egress_map_fd);
        if (r < 0)
                return log_unit_error_errno(u, r, "Preparation of eBPF accounting maps failed: %m");

        r = bpf_firewall_compile_bpf(u, true, &u->ip_bpf_ingress);
        if (r < 0)
                return log_unit_error_errno(u, r, "Compilation for ingress BPF program failed: %m");

        r = bpf_firewall_compile_bpf(u, false, &u->ip_bpf_egress);
        if (r < 0)
                return log_unit_error_errno(u, r, "Compilation for egress BPF program failed: %m");

        return 0;
}
int bpf_firewall_install(Unit *u) {
        _cleanup_free_ char *path = NULL;
        CGroupContext *cc;
        int r, supported;
        uint32_t flags;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return -EINVAL;
        if (!u->cgroup_realized)
                return -EINVAL;

        supported = bpf_firewall_supported();
        if (supported < 0)
                return supported;
        if (supported == BPF_FIREWALL_UNSUPPORTED) {
                log_unit_debug(u, "BPF firewalling not supported on this manager, proceeding without.");
                return -EOPNOTSUPP;
        }
        if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
                log_unit_debug(u, "BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
                return -EOPNOTSUPP;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to determine cgroup path: %m");

        flags = (supported == BPF_FIREWALL_SUPPORTED_WITH_MULTI &&
                 (u->type == UNIT_SLICE || unit_cgroup_delegate(u))) ? BPF_F_ALLOW_MULTI : 0;

        /* Unref the old BPF program (which will implicitly detach it) right before attaching the new program, to
         * minimize the time window when we don't account for IP traffic. */
        u->ip_bpf_egress_installed = bpf_program_unref(u->ip_bpf_egress_installed);
        u->ip_bpf_ingress_installed = bpf_program_unref(u->ip_bpf_ingress_installed);

        if (u->ip_bpf_egress) {
                r = bpf_program_cgroup_attach(u->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, flags);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Attaching egress BPF program to cgroup %s failed: %m", path);

                /* Remember that this BPF program is installed now. */
                u->ip_bpf_egress_installed = bpf_program_ref(u->ip_bpf_egress);
        }

        if (u->ip_bpf_ingress) {
                r = bpf_program_cgroup_attach(u->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, flags);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Attaching ingress BPF program to cgroup %s failed: %m", path);

                u->ip_bpf_ingress_installed = bpf_program_ref(u->ip_bpf_ingress);
        }

        return 0;
}
int bpf_firewall_read_accounting(int map_fd, uint64_t *ret_bytes, uint64_t *ret_packets) {
        uint64_t key, packets;
        int r;

        if (map_fd < 0)
                return -EBADF;

        if (ret_packets) {
                key = MAP_KEY_PACKETS;
                r = bpf_map_lookup_element(map_fd, &key, &packets);
                if (r < 0)
                        return r;
        }

        if (ret_bytes) {
                key = MAP_KEY_BYTES;
                r = bpf_map_lookup_element(map_fd, &key, ret_bytes);
                if (r < 0)
                        return r;
        }

        if (ret_packets)
                *ret_packets = packets;

        return 0;
}
int bpf_firewall_reset_accounting(int map_fd) {
        uint64_t key, value = 0;
        int r;

        if (map_fd < 0)
                return -EBADF;

        key = MAP_KEY_PACKETS;
        r = bpf_map_update_element(map_fd, &key, &value);
        if (r < 0)
                return r;

        key = MAP_KEY_BYTES;
        return bpf_map_update_element(map_fd, &key, &value);
}
int bpf_firewall_supported(void) {
        struct bpf_insn trivial[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN()
        };

        _cleanup_(bpf_program_unrefp) BPFProgram *program = NULL;
        static int supported = -1;
        union bpf_attr attr;
        int fd, r;

        /* Checks whether BPF firewalling is supported. For this, we check five things:
         *
         * a) whether we are privileged
         * b) whether the unified hierarchy is being used
         * c) the BPF implementation in the kernel supports BPF LPM TRIE maps, which we require
         * d) the BPF implementation in the kernel supports BPF_PROG_TYPE_CGROUP_SKB programs, which we require
         * e) the BPF implementation in the kernel supports the BPF_PROG_DETACH call, which we require
         */

        if (supported >= 0)
                return supported;

        if (geteuid() != 0) {
                log_debug("Not enough privileges, BPF firewalling is not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Can't determine whether the unified hierarchy is used: %m");
        if (r == 0) {
                log_debug("Not running with unified cgroups, BPF firewalling is not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        fd = bpf_map_new(BPF_MAP_TYPE_LPM_TRIE,
                         offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint64_t),
                         sizeof(uint64_t),
                         1,
                         BPF_F_NO_PREALLOC);
        if (fd < 0) {
                log_debug_errno(fd, "Can't allocate BPF LPM TRIE map, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        safe_close(fd);

        r = bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &program);
        if (r < 0) {
                log_debug_errno(r, "Can't allocate CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = bpf_program_add_instructions(program, trivial, ELEMENTSOF(trivial));
        if (r < 0) {
                log_debug_errno(r, "Can't add trivial instructions to CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = bpf_program_load_kernel(program, NULL, 0);
        if (r < 0) {
                log_debug_errno(r, "Can't load kernel CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        /* Unfortunately the kernel allows us to create BPF_PROG_TYPE_CGROUP_SKB programs even when CONFIG_CGROUP_BPF
         * is turned off at kernel compilation time. This sucks of course: why does it allow us to create a cgroup BPF
         * program if we can't do a thing with it later?
         *
         * We detect this case by issuing the BPF_PROG_DETACH bpf() call with invalid file descriptors: if
         * CONFIG_CGROUP_BPF is turned off, then the call will fail early with EINVAL. If it is turned on the
         * parameters are validated however, and that'll fail with EBADF then. */

        attr = (union bpf_attr) {
                .attach_type = BPF_CGROUP_INET_EGRESS,
                .target_fd = -1,
                .attach_bpf_fd = -1,
        };

        if (bpf(BPF_PROG_DETACH, &attr, sizeof(attr)) < 0) {
                if (errno != EBADF) {
                        log_debug_errno(errno, "Didn't get EBADF from BPF_PROG_DETACH, BPF firewalling is not supported: %m");
                        return supported = BPF_FIREWALL_UNSUPPORTED;
                }

                /* EBADF is the expected outcome: the parameters were validated, hence CONFIG_CGROUP_BPF is on. */
        } else {
                log_debug("Wut? Kernel accepted our invalid BPF_PROG_DETACH call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        /* So now we know that the BPF program is generally available, let's see if BPF_F_ALLOW_MULTI is also supported
         * (which was added in kernel 4.15). We use a similar logic as before, but this time we use the BPF_PROG_ATTACH
         * bpf() call and the BPF_F_ALLOW_MULTI flags value. Since the flags are checked early in the system call we'll
         * get EINVAL if it's not supported, and EBADF as before if it is available. */

        attr = (union bpf_attr) {
                .attach_type = BPF_CGROUP_INET_EGRESS,
                .target_fd = -1,
                .attach_bpf_fd = -1,
                .attach_flags = BPF_F_ALLOW_MULTI,
        };

        if (bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0) {
                if (errno == EBADF) {
                        log_debug_errno(errno, "Got EBADF when using BPF_F_ALLOW_MULTI, which indicates it is supported. Yay!");
                        return supported = BPF_FIREWALL_SUPPORTED_WITH_MULTI;
                }

                if (errno == EINVAL)
                        log_debug_errno(errno, "Got EINVAL error when using BPF_F_ALLOW_MULTI, which indicates it's not supported.");
                else
                        log_debug_errno(errno, "Got unexpected error when using BPF_F_ALLOW_MULTI, assuming it's not supported: %m");

                return supported = BPF_FIREWALL_SUPPORTED;
        }

        log_debug("Wut? Kernel accepted our invalid BPF_PROG_ATTACH+BPF_F_ALLOW_MULTI call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
        return supported = BPF_FIREWALL_UNSUPPORTED;
}