/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2016 Daniel Mack
***/

#include <linux/libbpf.h>
#include <net/ethernet.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bpf-program.h"
#include "ip-address-access.h"

enum {
        MAP_KEY_PACKETS,
        MAP_KEY_BYTES,
};

enum {
        ACCESS_ALLOWED = 1,
        ACCESS_DENIED  = 2,
};

/* Compile instructions for one list of addresses, one direction and one specific verdict on matches. */
static int add_lookup_instructions(
                BPFProgram *p,
                int map_fd,
                int protocol,
                bool is_ingress,
                int verdict) {

        int r, addr_offset, addr_size;

        assert(p);
        assert(map_fd >= 0);

        switch (protocol) {

        case ETH_P_IP:
                addr_size = sizeof(uint32_t);
                addr_offset = is_ingress ?
                        offsetof(struct iphdr, saddr) :
                        offsetof(struct iphdr, daddr);
                break;

        case ETH_P_IPV6:
                addr_size = 4 * sizeof(uint32_t);
                addr_offset = is_ingress ?
                        offsetof(struct ip6_hdr, ip6_src.s6_addr) :
                        offsetof(struct ip6_hdr, ip6_dst.s6_addr);
                break;

        default:
                return -EAFNOSUPPORT;
        }
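
        /* For illustration: with ETH_P_IP and is_ingress=true this selects addr_offset =
         * offsetof(struct iphdr, saddr) = 12 and addr_size = 4, i.e. we match the source
         * address of incoming packets; for ETH_P_IPV6 on egress it selects the 16-byte
         * destination address at offset 24 of the IPv6 header. */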

        do {
                /* Compare IPv4 with one word instruction (32bit) */
                struct bpf_insn insn[] = {
                        /* If skb->protocol != ETH_P_IP, skip this whole block. The offset will be set later. */
                        BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(protocol), 0),

                        /*
                         * Call into BPF_FUNC_skb_load_bytes to load the dst/src IP address
                         *
                         * R1: Pointer to the skb
                         * R2: Data offset
                         * R3: Destination buffer on the stack (r10 - 4)
                         * R4: Number of bytes to read (4)
                         */

                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
                        BPF_MOV32_IMM(BPF_REG_2, addr_offset),

                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -addr_size),

                        BPF_MOV32_IMM(BPF_REG_4, addr_size),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),

                        /*
                         * Call into BPF_FUNC_map_lookup_elem to see if the address matches any entry in the
                         * LPM trie map. For this to work, the prefixlen field of 'struct bpf_lpm_trie_key'
                         * has to be set to the maximum possible value.
                         *
                         * On success, the looked up value is stored in R0. For this application, the actual
                         * value doesn't matter, however; we just set the bit in @verdict in R8 if we found any
                         * matching value.
                         */

                        BPF_LD_MAP_FD(BPF_REG_1, map_fd),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -addr_size - sizeof(uint32_t)),
                        BPF_ST_MEM(BPF_W, BPF_REG_2, 0, addr_size * 8),

                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
                        BPF_ALU32_IMM(BPF_OR, BPF_REG_8, verdict),
                };

                /* Jump label fixup */
                insn[0].off = ELEMENTSOF(insn) - 1;

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;

        } while (false);

        return 0;
}
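
/* A rough C equivalent of what the instruction sequence above computes at runtime (a sketch
 * for orientation only; 'key' is a hypothetical name, the real buffer lives on the eBPF stack
 * below R10):
 *
 *     struct {
 *             uint32_t prefixlen;        // at r10 - addr_size - 4, set to addr_size * 8
 *             uint8_t data[addr_size];   // at r10 - addr_size, filled via skb_load_bytes()
 *     } key;                             // same layout as struct bpf_lpm_trie_key
 *
 *     if (map_lookup_elem(map_fd, &key) != NULL)
 *             r8 |= verdict;
 */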

static int bpf_firewall_compile_bpf(
                Unit *u,
                bool is_ingress,
                BPFProgram **ret) {

        struct bpf_insn pre_insn[] = {
                /*
                 * When the eBPF program is entered, R1 contains the address of the skb.
                 * However, R1-R5 are scratch registers that are not preserved when calling
                 * into kernel functions, so we need to save anything that's supposed to
                 * stay around to R6-R9. Save the skb to R6.
                 */
                BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),

                /*
                 * Although we cannot access the skb data directly from eBPF programs used in this
                 * scenario, the kernel has prepared some fields for us to access through struct __sk_buff.
                 * Load the protocol (IPv4, IPv6) used by the packet in flight once and cache it in R7
                 * for later use.
                 */
                BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct __sk_buff, protocol)),

                /*
                 * R8 is used to keep track of whether any address check has explicitly allowed or denied the packet
                 * through ACCESS_DENIED or ACCESS_ALLOWED bits. Reset them both to 0 in the beginning.
                 */
                BPF_MOV32_IMM(BPF_REG_8, 0),
        };

        /*
         * The access checkers compiled for the configured allowance and denial lists
         * write to R8 at runtime. The following code prepares for an early exit that
         * skips the accounting if the packet is denied:
         *
         * R0 = 1
         * if (R8 == ACCESS_DENIED)
         *         R0 = 0
         *
         * This means that if both ACCESS_DENIED and ACCESS_ALLOWED are set, the packet
         * is allowed to pass.
         */
        struct bpf_insn post_insn[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_JMP_IMM(BPF_JNE, BPF_REG_8, ACCESS_DENIED, 1),
                BPF_MOV64_IMM(BPF_REG_0, 0),
        };

        _cleanup_(bpf_program_unrefp) BPFProgram *p = NULL;
        int accounting_map_fd, r;
        bool access_enabled;

        assert(u);
        assert(ret);

        accounting_map_fd = is_ingress ?
                u->ip_accounting_ingress_map_fd :
                u->ip_accounting_egress_map_fd;

        access_enabled =
                u->ipv4_allow_map_fd >= 0 ||
                u->ipv6_allow_map_fd >= 0 ||
                u->ipv4_deny_map_fd >= 0 ||
                u->ipv6_deny_map_fd >= 0;

        if (accounting_map_fd < 0 && !access_enabled) {
                *ret = NULL;
                return 0;
        }

        r = bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &p);
        if (r < 0)
                return r;

        r = bpf_program_add_instructions(p, pre_insn, ELEMENTSOF(pre_insn));
        if (r < 0)
                return r;

        if (access_enabled) {
                /*
                 * The simple rule this function translates into eBPF instructions is:
                 *
                 * - Access will be granted when an address matches an entry in @list_allow
                 * - Otherwise, access will be denied when an address matches an entry in @list_deny
                 * - Otherwise, access will be granted
                 */

                if (u->ipv4_deny_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv4_deny_map_fd, ETH_P_IP, is_ingress, ACCESS_DENIED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv6_deny_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv6_deny_map_fd, ETH_P_IPV6, is_ingress, ACCESS_DENIED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv4_allow_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv4_allow_map_fd, ETH_P_IP, is_ingress, ACCESS_ALLOWED);
                        if (r < 0)
                                return r;
                }

                if (u->ipv6_allow_map_fd >= 0) {
                        r = add_lookup_instructions(p, u->ipv6_allow_map_fd, ETH_P_IPV6, is_ingress, ACCESS_ALLOWED);
                        if (r < 0)
                                return r;
                }
        }

        r = bpf_program_add_instructions(p, post_insn, ELEMENTSOF(post_insn));
        if (r < 0)
                return r;

        if (accounting_map_fd >= 0) {
                struct bpf_insn insn[] = {
                        /*
                         * If R0 == 0, the packet will be denied; skip the accounting instructions in this case.
                         * The jump label will be fixed up later.
                         */
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0),

                        /* Count packets */
                        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
                        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
                        BPF_LD_MAP_FD(BPF_REG_1, accounting_map_fd), /* load map fd to r1 */
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                        BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
                        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

                        /* Count bytes */
                        BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
                        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
                        BPF_LD_MAP_FD(BPF_REG_1, accounting_map_fd),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                        BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
                        BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */

                        /* Allow the packet to pass */
                        BPF_MOV64_IMM(BPF_REG_0, 1),
                };

                /* Jump label fixup */
                insn[0].off = ELEMENTSOF(insn) - 1;

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;
        }

        do {
                /*
                 * Exit from the eBPF program, R0 contains the verdict.
                 * 0 means the packet is denied, 1 means the packet may pass.
                 */
                struct bpf_insn insn[] = {
                        BPF_EXIT_INSN()
                };

                r = bpf_program_add_instructions(p, insn, ELEMENTSOF(insn));
                if (r < 0)
                        return r;
        } while (false);

        *ret = p;
        p = NULL;

        return 0;
}
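
/* Putting the pieces together, the emitted program is roughly equivalent to this sketch
 * (illustrative pseudo-C, not what the verifier actually sees):
 *
 *     r6 = skb; r7 = skb->protocol; r8 = 0;              // pre_insn
 *     r8 |= ACCESS_DENIED/ACCESS_ALLOWED per map hit;    // add_lookup_instructions() blocks
 *     r0 = (r8 == ACCESS_DENIED) ? 0 : 1;                // post_insn
 *     if (r0) { packets++; bytes += skb->len; }          // accounting block, if enabled
 *     return r0;                                         // exit
 */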

static int bpf_firewall_count_access_items(IPAddressAccessItem *list, size_t *n_ipv4, size_t *n_ipv6) {
        IPAddressAccessItem *a;

        assert(n_ipv4);
        assert(n_ipv6);

        LIST_FOREACH(items, a, list) {
                switch (a->family) {

                case AF_INET:
                        (*n_ipv4)++;
                        break;

                case AF_INET6:
                        (*n_ipv6)++;
                        break;

                default:
                        return -EAFNOSUPPORT;
                }
        }

        return 0;
}

static int bpf_firewall_add_access_items(
                IPAddressAccessItem *list,
                int ipv4_map_fd,
                int ipv6_map_fd,
                int verdict) {

        struct bpf_lpm_trie_key *key_ipv4, *key_ipv6;
        uint64_t value = verdict;
        IPAddressAccessItem *a;
        int r;

        key_ipv4 = alloca0(offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t));
        key_ipv6 = alloca0(offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t) * 4);

        LIST_FOREACH(items, a, list) {
                switch (a->family) {

                case AF_INET:
                        key_ipv4->prefixlen = a->prefixlen;
                        memcpy(key_ipv4->data, &a->address, sizeof(uint32_t));

                        r = bpf_map_update_element(ipv4_map_fd, key_ipv4, &value);
                        if (r < 0)
                                return r;

                        break;

                case AF_INET6:
                        key_ipv6->prefixlen = a->prefixlen;
                        memcpy(key_ipv6->data, &a->address, 4 * sizeof(uint32_t));

                        r = bpf_map_update_element(ipv6_map_fd, key_ipv6, &value);
                        if (r < 0)
                                return r;

                        break;

                default:
                        return -EAFNOSUPPORT;
                }
        }

        return 0;
}
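
/* Worked example: an entry for 192.168.0.0/16 becomes key_ipv4->prefixlen = 16 and
 * key_ipv4->data = { 192, 168, 0, 0 } (the address is copied in network byte order),
 * stored with @value. Any packet whose address shares those first 16 bits will match
 * the entry in the LPM trie lookup generated by add_lookup_instructions(). */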

static int bpf_firewall_prepare_access_maps(
                Unit *u,
                int verdict,
                int *ret_ipv4_map_fd,
                int *ret_ipv6_map_fd) {

        _cleanup_close_ int ipv4_map_fd = -1, ipv6_map_fd = -1;
        size_t n_ipv4 = 0, n_ipv6 = 0;
        Unit *p;
        int r;

        assert(ret_ipv4_map_fd);
        assert(ret_ipv6_map_fd);

        for (p = u; p; p = UNIT_DEREF(p->slice)) {
                CGroupContext *cc;

                cc = unit_get_cgroup_context(p);
                if (!cc)
                        continue;

                bpf_firewall_count_access_items(verdict == ACCESS_ALLOWED ? cc->ip_address_allow : cc->ip_address_deny, &n_ipv4, &n_ipv6);
        }

        if (n_ipv4 > 0) {
                ipv4_map_fd = bpf_map_new(
                                BPF_MAP_TYPE_LPM_TRIE,
                                offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t),
                                sizeof(uint64_t),
                                n_ipv4,
                                BPF_F_NO_PREALLOC);
                if (ipv4_map_fd < 0)
                        return ipv4_map_fd;
        }

        if (n_ipv6 > 0) {
                ipv6_map_fd = bpf_map_new(
                                BPF_MAP_TYPE_LPM_TRIE,
                                offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint32_t)*4,
                                sizeof(uint64_t),
                                n_ipv6,
                                BPF_F_NO_PREALLOC);
                if (ipv6_map_fd < 0)
                        return ipv6_map_fd;
        }

        for (p = u; p; p = UNIT_DEREF(p->slice)) {
                CGroupContext *cc;

                cc = unit_get_cgroup_context(p);
                if (!cc)
                        continue;

                r = bpf_firewall_add_access_items(verdict == ACCESS_ALLOWED ? cc->ip_address_allow : cc->ip_address_deny,
                                                  ipv4_map_fd, ipv6_map_fd, verdict);
                if (r < 0)
                        return r;
        }

        *ret_ipv4_map_fd = ipv4_map_fd;
        *ret_ipv6_map_fd = ipv6_map_fd;

        ipv4_map_fd = ipv6_map_fd = -1;
        return 0;
}

static int bpf_firewall_prepare_accounting_maps(Unit *u, bool enabled, int *fd_ingress, int *fd_egress) {
        int r;

        assert(u);
        assert(fd_ingress);
        assert(fd_egress);

        if (enabled) {
                if (*fd_ingress < 0) {
                        r = bpf_map_new(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
                        if (r < 0)
                                return r;

                        *fd_ingress = r;
                }

                if (*fd_egress < 0) {
                        r = bpf_map_new(BPF_MAP_TYPE_ARRAY, sizeof(int), sizeof(uint64_t), 2, 0);
                        if (r < 0)
                                return r;

                        *fd_egress = r;
                }
        } else {
                *fd_ingress = safe_close(*fd_ingress);
                *fd_egress = safe_close(*fd_egress);

                zero(u->ip_accounting_extra);
        }

        return 0;
}
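
/* Note for readers: each accounting map is a plain BPF_MAP_TYPE_ARRAY with two uint64_t
 * slots, MAP_KEY_PACKETS (0) and MAP_KEY_BYTES (1). The compiled program bumps them with
 * atomic BPF_XADD instructions, and bpf_firewall_read_accounting() below reads them back. */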

int bpf_firewall_compile(Unit *u) {
        CGroupContext *cc;
        int r, supported;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return -EINVAL;

        supported = bpf_firewall_supported();
        if (supported < 0)
                return supported;
        if (supported == BPF_FIREWALL_UNSUPPORTED) {
                log_debug("BPF firewalling not supported on this manager, proceeding without.");
                return -EOPNOTSUPP;
        }
        if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
                /* If BPF_F_ALLOW_MULTI is not supported we don't support any BPF magic on inner nodes (i.e. on slice
                 * units), since that would mean leaf nodes couldn't do any BPF anymore at all. Under the assumption
                 * that BPF is more interesting on leaf nodes we hence avoid it on inner nodes in that case. This is
                 * consistent with old systemd behaviour from before v238, where BPF wasn't supported in inner nodes at
                 * all, either. */
                log_debug("BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
                return -EOPNOTSUPP;
        }

        /* Note that when we compile a new firewall we first flush out the access maps and the BPF programs themselves,
         * but we reuse the accounting maps. That way the firewall in effect always maps to the actual
         * configuration, but we don't flush out the accounting unnecessarily */

        u->ip_bpf_ingress = bpf_program_unref(u->ip_bpf_ingress);
        u->ip_bpf_egress = bpf_program_unref(u->ip_bpf_egress);

        u->ipv4_allow_map_fd = safe_close(u->ipv4_allow_map_fd);
        u->ipv4_deny_map_fd = safe_close(u->ipv4_deny_map_fd);

        u->ipv6_allow_map_fd = safe_close(u->ipv6_allow_map_fd);
        u->ipv6_deny_map_fd = safe_close(u->ipv6_deny_map_fd);

        if (u->type != UNIT_SLICE) {
                /* In inner nodes we only do accounting, we do not actually bother with access control. However, leaf
                 * nodes will incorporate all IP access rules set on all their parent nodes. This has the benefit that
                 * they can optionally cancel out system-wide rules. Since inner nodes can't contain processes this
                 * means that all configured IP access rules *will* take effect on processes, even though we never
                 * compile them for inner nodes. */

                r = bpf_firewall_prepare_access_maps(u, ACCESS_ALLOWED, &u->ipv4_allow_map_fd, &u->ipv6_allow_map_fd);
                if (r < 0)
                        return log_error_errno(r, "Preparation of eBPF allow maps failed: %m");

                r = bpf_firewall_prepare_access_maps(u, ACCESS_DENIED, &u->ipv4_deny_map_fd, &u->ipv6_deny_map_fd);
                if (r < 0)
                        return log_error_errno(r, "Preparation of eBPF deny maps failed: %m");
        }

        r = bpf_firewall_prepare_accounting_maps(u, cc->ip_accounting, &u->ip_accounting_ingress_map_fd, &u->ip_accounting_egress_map_fd);
        if (r < 0)
                return log_error_errno(r, "Preparation of eBPF accounting maps failed: %m");

        r = bpf_firewall_compile_bpf(u, true, &u->ip_bpf_ingress);
        if (r < 0)
                return log_error_errno(r, "Compilation for ingress BPF program failed: %m");

        r = bpf_firewall_compile_bpf(u, false, &u->ip_bpf_egress);
        if (r < 0)
                return log_error_errno(r, "Compilation for egress BPF program failed: %m");

        return 0;
}

int bpf_firewall_install(Unit *u) {
        _cleanup_free_ char *path = NULL;
        CGroupContext *cc;
        int r, supported;
        uint32_t flags;

        assert(u);

        cc = unit_get_cgroup_context(u);
        if (!cc)
                return -EINVAL;
        if (!u->cgroup_path)
                return -EINVAL;
        if (!u->cgroup_realized)
                return -EINVAL;

        supported = bpf_firewall_supported();
        if (supported < 0)
                return supported;
        if (supported == BPF_FIREWALL_UNSUPPORTED) {
                log_debug("BPF firewalling not supported on this manager, proceeding without.");
                return -EOPNOTSUPP;
        }
        if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
                log_debug("BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
                return -EOPNOTSUPP;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Failed to determine cgroup path: %m");

        flags = (supported == BPF_FIREWALL_SUPPORTED_WITH_MULTI &&
                 (u->type == UNIT_SLICE || unit_cgroup_delegate(u))) ? BPF_F_ALLOW_MULTI : 0;

        /* Unref the old BPF program (which will implicitly detach it) right before attaching the new program, to
         * minimize the time window when we don't account for IP traffic. */
        u->ip_bpf_egress_installed = bpf_program_unref(u->ip_bpf_egress_installed);
        u->ip_bpf_ingress_installed = bpf_program_unref(u->ip_bpf_ingress_installed);

        if (u->ip_bpf_egress) {
                r = bpf_program_cgroup_attach(u->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, flags);
                if (r < 0)
                        return log_error_errno(r, "Attaching egress BPF program to cgroup %s failed: %m", path);

                /* Remember that this BPF program is installed now. */
                u->ip_bpf_egress_installed = bpf_program_ref(u->ip_bpf_egress);
        }

        if (u->ip_bpf_ingress) {
                r = bpf_program_cgroup_attach(u->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, flags);
                if (r < 0)
                        return log_error_errno(r, "Attaching ingress BPF program to cgroup %s failed: %m", path);

                u->ip_bpf_ingress_installed = bpf_program_ref(u->ip_bpf_ingress);
        }

        return 0;
}

int bpf_firewall_read_accounting(int map_fd, uint64_t *ret_bytes, uint64_t *ret_packets) {
        uint64_t key, packets;
        int r;

        if (map_fd < 0)
                return -EBADF;

        if (ret_packets) {
                key = MAP_KEY_PACKETS;
                r = bpf_map_lookup_element(map_fd, &key, &packets);
                if (r < 0)
                        return r;
        }

        if (ret_bytes) {
                key = MAP_KEY_BYTES;
                r = bpf_map_lookup_element(map_fd, &key, ret_bytes);
                if (r < 0)
                        return r;
        }

        if (ret_packets)
                *ret_packets = packets;

        return 0;
}
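
/* Example use: fetch both counters for one direction of a unit (names as used above):
 *
 *     uint64_t bytes, packets;
 *     r = bpf_firewall_read_accounting(u->ip_accounting_ingress_map_fd, &bytes, &packets);
 *
 * followed, if a fresh accounting epoch is wanted, by bpf_firewall_reset_accounting() on
 * the same fd, which re-zeroes both slots. */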

int bpf_firewall_reset_accounting(int map_fd) {
        uint64_t key, value = 0;
        int r;

        if (map_fd < 0)
                return -EBADF;

        key = MAP_KEY_PACKETS;
        r = bpf_map_update_element(map_fd, &key, &value);
        if (r < 0)
                return r;

        key = MAP_KEY_BYTES;
        return bpf_map_update_element(map_fd, &key, &value);
}

int bpf_firewall_supported(void) {
        struct bpf_insn trivial[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN()
        };

        _cleanup_(bpf_program_unrefp) BPFProgram *program = NULL;
        static int supported = -1;
        union bpf_attr attr;
        _cleanup_close_ int fd = -1;
        int r;

        /* Checks whether BPF firewalling is supported. For this, we check five things:
         *
         * a) whether we are privileged
         * b) whether the unified hierarchy is being used
         * c) the BPF implementation in the kernel supports BPF LPM TRIE maps, which we require
         * d) the BPF implementation in the kernel supports BPF_PROG_TYPE_CGROUP_SKB programs, which we require
         * e) the BPF implementation in the kernel supports the BPF_PROG_ATTACH call, which we require
         */

        if (supported >= 0)
                return supported;

        if (geteuid() != 0) {
                log_debug("Not enough privileges, BPF firewalling is not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Can't determine whether the unified hierarchy is used: %m");
        if (r == 0) {
                log_debug("Not running with unified cgroups, BPF firewalling is not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        fd = bpf_map_new(BPF_MAP_TYPE_LPM_TRIE,
                         offsetof(struct bpf_lpm_trie_key, data) + sizeof(uint64_t),
                         sizeof(uint64_t),
                         1,
                         BPF_F_NO_PREALLOC);
        if (fd < 0) {
                log_debug_errno(fd, "Can't allocate BPF LPM TRIE map, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &program);
        if (r < 0) {
                log_debug_errno(r, "Can't allocate CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = bpf_program_add_instructions(program, trivial, ELEMENTSOF(trivial));
        if (r < 0) {
                log_debug_errno(r, "Can't add trivial instructions to CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        r = bpf_program_load_kernel(program, NULL, 0);
        if (r < 0) {
                log_debug_errno(r, "Can't load kernel CGROUP SKB BPF program, BPF firewalling is not supported: %m");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        /* Unfortunately the kernel allows us to create BPF_PROG_TYPE_CGROUP_SKB programs even when CONFIG_CGROUP_BPF
         * is turned off at kernel compilation time. This sucks of course: why does it allow us to create a cgroup BPF
         * program if we can't do a thing with it later?
         *
         * We detect this case by issuing the BPF_PROG_ATTACH bpf() call with invalid file descriptors: if
         * CONFIG_CGROUP_BPF is turned off, then the call will fail early with EINVAL. If it is turned on the
         * parameters are validated however, and that'll fail with EBADF then. */

        attr = (union bpf_attr) {
                .attach_type = BPF_CGROUP_INET_EGRESS,
                .target_fd = -1,
                .attach_bpf_fd = -1,
        };

        r = bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
        if (r < 0) {
                if (errno != EBADF) {
                        log_debug_errno(errno, "Didn't get EBADF from BPF_PROG_ATTACH, BPF firewalling is not supported: %m");
                        return supported = BPF_FIREWALL_UNSUPPORTED;
                }

                /* YAY! */
        } else {
                log_debug("Wut? Kernel accepted our invalid BPF_PROG_ATTACH call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }

        /* So now we know that the BPF program is generally available, let's see if BPF_F_ALLOW_MULTI is also supported
         * (which was added in kernel 4.15). We use a similar logic as before, but this time we use
         * BPF_F_ALLOW_MULTI. Since the flags are checked early in the system call we'll get EINVAL if it's not
         * supported, and EBADF as before if it is available. */

        attr = (union bpf_attr) {
                .attach_type = BPF_CGROUP_INET_EGRESS,
                .target_fd = -1,
                .attach_bpf_fd = -1,
                .attach_flags = BPF_F_ALLOW_MULTI,
        };

        r = bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
        if (r < 0) {
                if (errno == EBADF) {
                        log_debug_errno(errno, "Got EBADF when using BPF_F_ALLOW_MULTI, which indicates it is supported. Yay!");
                        return supported = BPF_FIREWALL_SUPPORTED_WITH_MULTI;
                }

                if (errno == EINVAL)
                        log_debug_errno(errno, "Got EINVAL error when using BPF_F_ALLOW_MULTI, which indicates it's not supported.");
                else
                        log_debug_errno(errno, "Got unexpected error when using BPF_F_ALLOW_MULTI, assuming it's not supported: %m");

                return supported = BPF_FIREWALL_SUPPORTED;
        } else {
                log_debug("Wut? Kernel accepted our invalid BPF_PROG_ATTACH+BPF_F_ALLOW_MULTI call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
                return supported = BPF_FIREWALL_UNSUPPORTED;
        }
}