/* Extracted from a git web view: thirdparty/systemd.git, blob src/basic/bpf-program.c,
 * commit ab7562c17bc2b46951ce1ae07360ee61c9acc7f6. */
/* SPDX-License-Identifier: LGPL-2.1+ */
/* Copyright 2016 Daniel Mack */
#include <errno.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <string.h>
#include <unistd.h>

#include "alloc-util.h"
#include "bpf-program.h"
#include "path-util.h"
19 int bpf_program_new(uint32_t prog_type
, BPFProgram
**ret
) {
20 _cleanup_(bpf_program_unrefp
) BPFProgram
*p
= NULL
;
22 p
= new0(BPFProgram
, 1);
27 p
->prog_type
= prog_type
;
35 BPFProgram
*bpf_program_ref(BPFProgram
*p
) {
45 BPFProgram
*bpf_program_unref(BPFProgram
*p
) {
55 /* Unfortunately, the kernel currently doesn't implicitly detach BPF programs from their cgroups when the last
56 * fd to the BPF program is closed. This has nasty side-effects since this means that abnormally terminated
57 * programs that attached one of their BPF programs to a cgroup will leave this programs pinned for good with
58 * zero chance of recovery, until the cgroup is removed. This is particularly problematic if the cgroup in
59 * question is the root cgroup (or any other cgroup belonging to a service that cannot be restarted during
60 * operation, such as dbus), as the memory for the BPF program can only be reclaimed through a reboot. To
61 * counter this, we track closely to which cgroup a program was attached to and will detach it on our own
62 * whenever we close the BPF fd. */
63 (void) bpf_program_cgroup_detach(p
);
65 safe_close(p
->kernel_fd
);
66 free(p
->instructions
);
67 free(p
->attached_path
);
72 int bpf_program_add_instructions(BPFProgram
*p
, const struct bpf_insn
*instructions
, size_t count
) {
76 if (p
->kernel_fd
>= 0) /* don't allow modification after we uploaded things to the kernel */
79 if (!GREEDY_REALLOC(p
->instructions
, p
->allocated
, p
->n_instructions
+ count
))
82 memcpy(p
->instructions
+ p
->n_instructions
, instructions
, sizeof(struct bpf_insn
) * count
);
83 p
->n_instructions
+= count
;
88 int bpf_program_load_kernel(BPFProgram
*p
, char *log_buf
, size_t log_size
) {
93 if (p
->kernel_fd
>= 0) { /* make this idempotent */
94 memzero(log_buf
, log_size
);
98 attr
= (union bpf_attr
) {
99 .prog_type
= p
->prog_type
,
100 .insns
= PTR_TO_UINT64(p
->instructions
),
101 .insn_cnt
= p
->n_instructions
,
102 .license
= PTR_TO_UINT64("GPL"),
103 .log_buf
= PTR_TO_UINT64(log_buf
),
104 .log_level
= !!log_buf
,
105 .log_size
= log_size
,
108 p
->kernel_fd
= bpf(BPF_PROG_LOAD
, &attr
, sizeof(attr
));
109 if (p
->kernel_fd
< 0)
115 int bpf_program_cgroup_attach(BPFProgram
*p
, int type
, const char *path
, uint32_t flags
) {
116 _cleanup_free_
char *copy
= NULL
;
117 _cleanup_close_
int fd
= -1;
125 if (!IN_SET(flags
, 0, BPF_F_ALLOW_OVERRIDE
, BPF_F_ALLOW_MULTI
))
128 /* We need to track which cgroup the program is attached to, and we can only track one attachment, hence let's
129 * refuse this early. */
130 if (p
->attached_path
) {
131 if (!path_equal(p
->attached_path
, path
))
133 if (p
->attached_type
!= type
)
135 if (p
->attached_flags
!= flags
)
138 /* Here's a shortcut: if we previously attached this program already, then we don't have to do so
139 * again. Well, with one exception: if we are in BPF_F_ALLOW_OVERRIDE mode then someone else might have
140 * replaced our program since the last time, hence let's reattach it again, just to be safe. In flags
141 * == 0 mode this is not an issue since nobody else can replace our program in that case, and in flags
142 * == BPF_F_ALLOW_MULTI mode any other's program would be installed in addition to ours hence ours
143 * would remain in effect. */
144 if (flags
!= BPF_F_ALLOW_OVERRIDE
)
148 /* Ensure we have a kernel object for this. */
149 r
= bpf_program_load_kernel(p
, NULL
, 0);
157 fd
= open(path
, O_DIRECTORY
|O_RDONLY
|O_CLOEXEC
);
161 attr
= (union bpf_attr
) {
164 .attach_bpf_fd
= p
->kernel_fd
,
165 .attach_flags
= flags
,
168 if (bpf(BPF_PROG_ATTACH
, &attr
, sizeof(attr
)) < 0)
171 free_and_replace(p
->attached_path
, copy
);
172 p
->attached_type
= type
;
173 p
->attached_flags
= flags
;
178 int bpf_program_cgroup_detach(BPFProgram
*p
) {
179 _cleanup_close_
int fd
= -1;
183 if (!p
->attached_path
)
186 fd
= open(p
->attached_path
, O_DIRECTORY
|O_RDONLY
|O_CLOEXEC
);
191 /* If the cgroup does not exist anymore, then we don't have to explicitly detach, it got detached
192 * implicitly by the removal, hence don't complain */
197 attr
= (union bpf_attr
) {
198 .attach_type
= p
->attached_type
,
200 .attach_bpf_fd
= p
->kernel_fd
,
203 if (bpf(BPF_PROG_DETACH
, &attr
, sizeof(attr
)) < 0)
207 p
->attached_path
= mfree(p
->attached_path
);
/* Creates a new BPF map via BPF_MAP_CREATE with the given type, key/value
 * sizes, capacity and flags. Returns the new map fd (>= 0) on success,
 * -errno on failure.
 *
 * NOTE(review): the .map_type/.map_flags designators, the fd declaration and
 * the return path were lost in extraction; reconstructed — confirm against
 * upstream. */
int bpf_map_new(enum bpf_map_type type, size_t key_size, size_t value_size, size_t max_entries, uint32_t flags) {
        union bpf_attr attr = {
                .map_type = type,
                .key_size = key_size,
                .value_size = value_size,
                .max_entries = max_entries,
                .map_flags = flags,
        };
        int fd;

        fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
        if (fd < 0)
                return -errno;

        return fd;
}
/* Stores 'value' under 'key' in the BPF map referenced by 'fd', via
 * BPF_MAP_UPDATE_ELEM. key/value must point to buffers of the sizes the map
 * was created with. Returns 0 on success, -errno on failure.
 *
 * NOTE(review): the .map_fd designator and return statements were lost in
 * extraction; reconstructed. */
int bpf_map_update_element(int fd, const void *key, void *value) {
        union bpf_attr attr = {
                .map_fd = fd,
                .key = PTR_TO_UINT64(key),
                .value = PTR_TO_UINT64(value),
        };

        if (bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)) < 0)
                return -errno;

        return 0;
}
/* Looks up 'key' in the BPF map referenced by 'fd' via BPF_MAP_LOOKUP_ELEM,
 * copying the stored value into the caller-supplied 'value' buffer (which must
 * be as large as the map's value size). Returns 0 on success, -errno on
 * failure (e.g. -ENOENT if the key is absent).
 *
 * NOTE(review): the .map_fd designator and return statements were lost in
 * extraction; reconstructed. */
int bpf_map_lookup_element(int fd, const void *key, void *value) {
        union bpf_attr attr = {
                .map_fd = fd,
                .key = PTR_TO_UINT64(key),
                .value = PTR_TO_UINT64(value),
        };

        if (bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) < 0)
                return -errno;

        return 0;
}