/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once

#include "forward.h"

#define SYSTEMD_CGROUP_CONTROLLER_LEGACY "name=systemd"
#define SYSTEMD_CGROUP_CONTROLLER_HYBRID "name=unified"
#define SYSTEMD_CGROUP_CONTROLLER "_systemd"

/* An enum of well known cgroup controllers */
typedef enum CGroupController {
        /* Original cgroup controllers */
        CGROUP_CONTROLLER_CPU,
        CGROUP_CONTROLLER_CPUACCT,    /* v1 only */
        CGROUP_CONTROLLER_CPUSET,     /* v2 only */
        CGROUP_CONTROLLER_IO,         /* v2 only */
        CGROUP_CONTROLLER_BLKIO,      /* v1 only */
        CGROUP_CONTROLLER_MEMORY,
        CGROUP_CONTROLLER_DEVICES,    /* v1 only */
        CGROUP_CONTROLLER_PIDS,

        /* BPF-based pseudo-controllers, v2 only */
        CGROUP_CONTROLLER_BPF_FIREWALL,
        CGROUP_CONTROLLER_BPF_DEVICES,
        CGROUP_CONTROLLER_BPF_FOREIGN,
        CGROUP_CONTROLLER_BPF_SOCKET_BIND,
        CGROUP_CONTROLLER_BPF_RESTRICT_NETWORK_INTERFACES,
        /* The BPF hook implementing RestrictFileSystems= is not defined here.
         * It's applied as late as possible in exec_invoke() so we don't block
         * our own unit setup code. */

        _CGROUP_CONTROLLER_MAX,
        _CGROUP_CONTROLLER_INVALID = -EINVAL,
} CGroupController;

#define CGROUP_CONTROLLER_TO_MASK(c) (1U << (c))

/* A bit mask of well known cgroup controllers */
typedef enum CGroupMask {
        CGROUP_MASK_CPU = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPU),
        CGROUP_MASK_CPUACCT = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPUACCT),
        CGROUP_MASK_CPUSET = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_CPUSET),
        CGROUP_MASK_IO = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_IO),
        CGROUP_MASK_BLKIO = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BLKIO),
        CGROUP_MASK_MEMORY = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_MEMORY),
        CGROUP_MASK_DEVICES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_DEVICES),
        CGROUP_MASK_PIDS = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_PIDS),
        CGROUP_MASK_BPF_FIREWALL = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_FIREWALL),
        CGROUP_MASK_BPF_DEVICES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_DEVICES),
        CGROUP_MASK_BPF_FOREIGN = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_FOREIGN),
        CGROUP_MASK_BPF_SOCKET_BIND = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_SOCKET_BIND),
        CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES = CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_BPF_RESTRICT_NETWORK_INTERFACES),

        /* All real cgroup v1 controllers */
        CGROUP_MASK_V1 = CGROUP_MASK_CPU|CGROUP_MASK_CPUACCT|CGROUP_MASK_BLKIO|CGROUP_MASK_MEMORY|CGROUP_MASK_DEVICES|CGROUP_MASK_PIDS,

        /* All real cgroup v2 controllers */
        CGROUP_MASK_V2 = CGROUP_MASK_CPU|CGROUP_MASK_CPUSET|CGROUP_MASK_IO|CGROUP_MASK_MEMORY|CGROUP_MASK_PIDS,

        /* All controllers we want to delegate in case of Delegate=yes. Which are pretty much the v2
         * controllers only, as delegation on v1 is not safe, and bpf stuff isn't a real controller */
        CGROUP_MASK_DELEGATE = CGROUP_MASK_V2,

        /* All cgroup v2 BPF pseudo-controllers */
        CGROUP_MASK_BPF = CGROUP_MASK_BPF_FIREWALL|CGROUP_MASK_BPF_DEVICES|CGROUP_MASK_BPF_FOREIGN|CGROUP_MASK_BPF_SOCKET_BIND|CGROUP_MASK_BPF_RESTRICT_NETWORK_INTERFACES,

        _CGROUP_MASK_ALL = CGROUP_CONTROLLER_TO_MASK(_CGROUP_CONTROLLER_MAX) - 1,
} CGroupMask;
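
/* Illustrative sketch (not part of the API; the variable names are example-only): since each controller
 * occupies one bit, masks combine and test with plain bit operations:
 *
 *     CGroupMask wanted = CGROUP_MASK_MEMORY | CGROUP_MASK_PIDS;
 *
 *     bool has_memory = (wanted & CGROUP_CONTROLLER_TO_MASK(CGROUP_CONTROLLER_MEMORY)) != 0;
 *     bool v2_only    = (wanted & ~CGROUP_MASK_V2) == 0;
 */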

/* Special values for all weight knobs on unified hierarchy */
#define CGROUP_WEIGHT_INVALID UINT64_MAX
#define CGROUP_WEIGHT_IDLE UINT64_C(0)
#define CGROUP_WEIGHT_MIN UINT64_C(1)
#define CGROUP_WEIGHT_MAX UINT64_C(10000)
#define CGROUP_WEIGHT_DEFAULT UINT64_C(100)

#define CGROUP_LIMIT_MIN UINT64_C(0)
#define CGROUP_LIMIT_MAX UINT64_MAX

static inline bool CGROUP_WEIGHT_IS_OK(uint64_t x) {
        return
            x == CGROUP_WEIGHT_INVALID ||
            (x >= CGROUP_WEIGHT_MIN && x <= CGROUP_WEIGHT_MAX);
}

/* IO limits on unified hierarchy */
typedef enum CGroupIOLimitType {
        CGROUP_IO_RBPS_MAX,
        CGROUP_IO_WBPS_MAX,
        CGROUP_IO_RIOPS_MAX,
        CGROUP_IO_WIOPS_MAX,

        _CGROUP_IO_LIMIT_TYPE_MAX,
        _CGROUP_IO_LIMIT_TYPE_INVALID = -EINVAL,
} CGroupIOLimitType;

extern const uint64_t cgroup_io_limit_defaults[_CGROUP_IO_LIMIT_TYPE_MAX];

const char* cgroup_io_limit_type_to_string(CGroupIOLimitType t) _const_;
CGroupIOLimitType cgroup_io_limit_type_from_string(const char *s) _pure_;

/* Special values for the io.bfq.weight attribute */
#define CGROUP_BFQ_WEIGHT_INVALID UINT64_MAX
#define CGROUP_BFQ_WEIGHT_MIN UINT64_C(1)
#define CGROUP_BFQ_WEIGHT_MAX UINT64_C(1000)
#define CGROUP_BFQ_WEIGHT_DEFAULT UINT64_C(100)

/* Convert the normal io.weight value to io.bfq.weight */
static inline uint64_t BFQ_WEIGHT(uint64_t io_weight) {
        return
            io_weight <= CGROUP_WEIGHT_DEFAULT ?
            CGROUP_BFQ_WEIGHT_DEFAULT - (CGROUP_WEIGHT_DEFAULT - io_weight) * (CGROUP_BFQ_WEIGHT_DEFAULT - CGROUP_BFQ_WEIGHT_MIN) / (CGROUP_WEIGHT_DEFAULT - CGROUP_WEIGHT_MIN) :
            CGROUP_BFQ_WEIGHT_DEFAULT + (io_weight - CGROUP_WEIGHT_DEFAULT) * (CGROUP_BFQ_WEIGHT_MAX - CGROUP_BFQ_WEIGHT_DEFAULT) / (CGROUP_WEIGHT_MAX - CGROUP_WEIGHT_DEFAULT);
}
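
/* Worked examples of the conversion above, using only the constants defined here: below the default,
 * both scales cover [1, 100] and the value maps to itself, e.g. BFQ_WEIGHT(1) == 1 and
 * BFQ_WEIGHT(50) == 50; above the default, [100, 10000] is compressed linearly into [100, 1000], so
 * BFQ_WEIGHT(100) == 100, BFQ_WEIGHT(1100) == 190 (integer division) and BFQ_WEIGHT(10000) == 1000. */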

typedef enum CGroupUnified {
        CGROUP_UNIFIED_UNKNOWN = -1,
        CGROUP_UNIFIED_NONE = 0,        /* Both systemd and controllers on legacy */
        CGROUP_UNIFIED_SYSTEMD = 1,     /* Only systemd on unified */
        CGROUP_UNIFIED_ALL = 2,         /* Both systemd and controllers on unified */
} CGroupUnified;

/*
 * General rules:
 *
 * We accept named hierarchies in the syntax "foo" and "name=foo".
 *
 * We expect that named hierarchies do not conflict in name with a
 * kernel hierarchy, modulo the "name=" prefix.
 *
 * We always generate "normalized" controller names, i.e. without the
 * "name=" prefix.
 *
 * We require absolute cgroup paths. When returning, we will always
 * generate paths with multiple adjacent / removed.
 */

int cg_path_open(const char *controller, const char *path);
int cg_cgroupid_open(int cgroupfs_fd, uint64_t id);

int cg_path_from_cgroupid(int cgroupfs_fd, uint64_t id, char **ret);
int cg_get_cgroupid_at(int dfd, const char *path, uint64_t *ret);
static inline int cg_path_get_cgroupid(const char *path, uint64_t *ret) {
        return cg_get_cgroupid_at(AT_FDCWD, path, ret);
}
static inline int cg_fd_get_cgroupid(int fd, uint64_t *ret) {
        return cg_get_cgroupid_at(fd, NULL, ret);
}
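
/* Illustrative sketch (an assumption, not taken from the sources): resolve a cgroup path to its numeric
 * ID and map it back to a path again. "cgroupfs_fd" is a hypothetical file descriptor referring to a
 * cgroupfs mount; error handling is elided.
 *
 *     uint64_t id;
 *     char *p = NULL;
 *     if (cg_path_get_cgroupid("/sys/fs/cgroup/system.slice", &id) >= 0)
 *             (void) cg_path_from_cgroupid(cgroupfs_fd, id, &p);
 */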

typedef enum CGroupFlags {
        CGROUP_SIGCONT            = 1 << 0,
        CGROUP_IGNORE_SELF        = 1 << 1,
        CGROUP_DONT_SKIP_UNMAPPED = 1 << 2,
} CGroupFlags;

int cg_enumerate_processes(const char *controller, const char *path, FILE **ret);
int cg_read_pid(FILE *f, pid_t *ret, CGroupFlags flags);
int cg_read_pidref(FILE *f, PidRef *ret, CGroupFlags flags);

int cg_enumerate_subgroups(const char *controller, const char *path, DIR **ret);
int cg_read_subgroup(DIR *d, char **ret);

typedef int (*cg_kill_log_func_t)(const PidRef *pid, int sig, void *userdata);

int cg_kill(const char *path, int sig, CGroupFlags flags, Set *killed_pids, cg_kill_log_func_t log_kill, void *userdata);
int cg_kill_kernel_sigkill(const char *path);
int cg_kill_recursive(const char *path, int sig, CGroupFlags flags, Set *killed_pids, cg_kill_log_func_t log_kill, void *userdata);
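
/* Illustrative sketch (an assumption, not taken from the sources): a logging callback matching
 * cg_kill_log_func_t, handed to cg_kill_recursive() so each signalled process gets reported. The
 * function name, cgroup path and log call are example-only.
 *
 *     static int log_kill(const PidRef *pid, int sig, void *userdata) {
 *             log_info("Sending signal %i to PID " PID_FMT ".", sig, pid->pid);
 *             return 1;
 *     }
 *
 *     (void) cg_kill_recursive("/system.slice/foo.service", SIGTERM, CGROUP_IGNORE_SELF,
 *                              NULL, log_kill, NULL);
 */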

int cg_split_spec(const char *spec, char **ret_controller, char **ret_path);
int cg_mangle_path(const char *path, char **ret);
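
/* Illustrative sketch (an assumption): split a "controller:path" style specification into its parts,
 * following the general rules above; returned paths have adjacent slashes collapsed. The spec string is
 * example-only.
 *
 *     char *c = NULL, *p = NULL;
 *     if (cg_split_spec("memory:/system.slice//foo.service", &c, &p) >= 0) {
 *             // c: "memory", p: "/system.slice/foo.service"
 *     }
 */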

int cg_get_path(const char *controller, const char *path, const char *suffix, char **ret);
int cg_get_path_and_check(const char *controller, const char *path, const char *suffix, char **ret);

int cg_pid_get_path(const char *controller, pid_t pid, char **ret);
int cg_pidref_get_path(const char *controller, const PidRef *pidref, char **ret);

int cg_is_threaded(const char *path);

int cg_is_delegated(const char *path);
int cg_is_delegated_fd(int fd);

int cg_has_coredump_receive(const char *path);

int cg_set_attribute(const char *controller, const char *path, const char *attribute, const char *value);
int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret);
int cg_get_keyed_attribute(const char *controller, const char *path, const char *attribute, char * const *keys, char **values);

int cg_get_attribute_as_uint64(const char *controller, const char *path, const char *attribute, uint64_t *ret);
int cg_get_attribute_as_bool(const char *controller, const char *path, const char *attribute);
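
/* Illustrative sketch (an assumption, not taken from the sources): write an attribute of a cgroup and
 * read it back as an integer via the helpers above. The controller, path and attribute strings are
 * example-only.
 *
 *     uint64_t v;
 *     (void) cg_set_attribute("memory", "/system.slice/foo.service", "memory.max", "1073741824");
 *     if (cg_get_attribute_as_uint64("memory", "/system.slice/foo.service", "memory.max", &v) >= 0)
 *             log_debug("memory.max=%" PRIu64, v);
 */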

int cg_get_owner(const char *path, uid_t *ret_uid);

int cg_set_xattr(const char *path, const char *name, const void *value, size_t size, int flags);
int cg_get_xattr(const char *path, const char *name, char **ret, size_t *ret_size);
/* Returns negative on error, and 0 or 1 on success for the bool value */
int cg_get_xattr_bool(const char *path, const char *name);
int cg_remove_xattr(const char *path, const char *name);

int cg_is_empty(const char *controller, const char *path);

int cg_get_root_path(char **path);

int cg_path_get_session(const char *path, char **ret_session);
int cg_path_get_owner_uid(const char *path, uid_t *ret_uid);
int cg_path_get_unit(const char *path, char **ret_unit);
int cg_path_get_unit_path(const char *path, char **ret_unit);
int cg_path_get_user_unit(const char *path, char **ret_unit);
int cg_path_get_machine_name(const char *path, char **ret_machine);
int cg_path_get_slice(const char *path, char **ret_slice);
int cg_path_get_user_slice(const char *path, char **ret_slice);

int cg_shift_path(const char *cgroup, const char *cached_root, const char **ret_shifted);
int cg_pid_get_path_shifted(pid_t pid, const char *cached_root, char **ret_cgroup);

int cg_pid_get_session(pid_t pid, char **ret_session);
int cg_pidref_get_session(const PidRef *pidref, char **ret);
int cg_pid_get_owner_uid(pid_t pid, uid_t *ret_uid);
int cg_pidref_get_owner_uid(const PidRef *pidref, uid_t *ret);
int cg_pid_get_unit(pid_t pid, char **ret_unit);
int cg_pidref_get_unit(const PidRef *pidref, char **ret);
int cg_pid_get_user_unit(pid_t pid, char **ret_unit);
int cg_pid_get_machine_name(pid_t pid, char **ret_machine);
int cg_pid_get_slice(pid_t pid, char **ret_slice);
int cg_pid_get_user_slice(pid_t pid, char **ret_slice);

int cg_path_decode_unit(const char *cgroup, char **ret_unit);

bool cg_needs_escape(const char *p);
int cg_escape(const char *p, char **ret);
char* cg_unescape(const char *p) _pure_;

bool cg_controller_is_valid(const char *p);

int cg_slice_to_path(const char *unit, char **ret);

int cg_mask_supported(CGroupMask *ret);
int cg_mask_supported_subtree(const char *root, CGroupMask *ret);
int cg_mask_from_string(const char *s, CGroupMask *ret);
int cg_mask_to_string(CGroupMask mask, char **ret);

bool cg_kill_supported(void);

int cg_all_unified(void);
int cg_hybrid_unified(void);
int cg_unified_controller(const char *controller);
int cg_unified_cached(bool flush);
static inline int cg_unified(void) {
        return cg_unified_cached(true);
}
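
/* Illustrative sketch (an assumption, not taken from the sources): the cg_*_unified() probes return a
 * negative errno on failure and a boolean otherwise, so callers typically branch three ways:
 *
 *     int r = cg_all_unified();
 *     if (r < 0)
 *             return log_error_errno(r, "Failed to determine cgroup hierarchy: %m");
 *     if (r > 0) {
 *             // pure cgroup v2 (unified) code path
 *     } else {
 *             // legacy or hybrid code path
 *     }
 */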

const char* cgroup_controller_to_string(CGroupController c) _const_;
CGroupController cgroup_controller_from_string(const char *s) _pure_;

typedef enum ManagedOOMMode {
        MANAGED_OOM_AUTO,
        MANAGED_OOM_KILL,
        _MANAGED_OOM_MODE_MAX,
        _MANAGED_OOM_MODE_INVALID = -EINVAL,
} ManagedOOMMode;

const char* managed_oom_mode_to_string(ManagedOOMMode m) _const_;
ManagedOOMMode managed_oom_mode_from_string(const char *s) _pure_;

typedef enum ManagedOOMPreference {
        MANAGED_OOM_PREFERENCE_NONE = 0,
        MANAGED_OOM_PREFERENCE_AVOID = 1,
        MANAGED_OOM_PREFERENCE_OMIT = 2,
        _MANAGED_OOM_PREFERENCE_MAX,
        _MANAGED_OOM_PREFERENCE_INVALID = -EINVAL,
} ManagedOOMPreference;

const char* managed_oom_preference_to_string(ManagedOOMPreference a) _const_;
ManagedOOMPreference managed_oom_preference_from_string(const char *s) _pure_;