Source: src/core/cgroup.h (systemd), via the git.ipfire.org mirror of thirdparty/systemd.git,
at the commit "core/cgroup: introduce MemoryZSwapWriteback setting".
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2 #pragma once
3
4 #include <stdbool.h>
5
6 #include "sd-event.h"
7
8 #include "bpf-program.h"
9 #include "bpf-restrict-fs.h"
10 #include "cgroup-util.h"
11 #include "cpu-set-util.h"
12 #include "firewall-util.h"
13 #include "list.h"
14 #include "pidref.h"
15 #include "time-util.h"
16
/* Upper bound on the number of tasks in a cgroup, expressed either as an
 * absolute count (scale == 0) or as the fraction value/scale of the system
 * maximum. See tasks_max_resolve() for how the fraction is turned into an
 * absolute number. */
typedef struct CGroupTasksMax {
        uint64_t value;
        uint64_t scale;
} CGroupTasksMax;

/* Sentinel meaning "no TasksMax= configured" */
#define CGROUP_TASKS_MAX_UNSET ((CGroupTasksMax) { .value = UINT64_MAX, .scale = 0 })

static inline bool cgroup_tasks_max_isset(const CGroupTasksMax *tasks_max) {
        /* Anything that differs from the UNSET sentinel counts as configured. */
        return !(tasks_max->value == UINT64_MAX && tasks_max->scale == 0);
}

uint64_t cgroup_tasks_max_resolve(const CGroupTasksMax *tasks_max);
31
/* Forward declarations for the cgroup configuration/runtime structures defined below. */
typedef struct CGroupContext CGroupContext;
typedef struct CGroupDeviceAllow CGroupDeviceAllow;
typedef struct CGroupIODeviceWeight CGroupIODeviceWeight;
typedef struct CGroupIODeviceLimit CGroupIODeviceLimit;
typedef struct CGroupIODeviceLatency CGroupIODeviceLatency;
typedef struct CGroupBlockIODeviceWeight CGroupBlockIODeviceWeight;
typedef struct CGroupBlockIODeviceBandwidth CGroupBlockIODeviceBandwidth;
typedef struct CGroupBPFForeignProgram CGroupBPFForeignProgram;
typedef struct CGroupSocketBindItem CGroupSocketBindItem;
typedef struct CGroupRuntime CGroupRuntime;
42
/* Policy for device node access control (see cgroup_device_policy_to_string()/_from_string() below). */
typedef enum CGroupDevicePolicy {
        /* When devices listed, will allow those, plus built-in ones, if none are listed will allow
         * everything. */
        CGROUP_DEVICE_POLICY_AUTO,

        /* Everything forbidden, except built-in ones and listed ones. */
        CGROUP_DEVICE_POLICY_CLOSED,

        /* Everything forbidden, except for the listed devices */
        CGROUP_DEVICE_POLICY_STRICT,

        _CGROUP_DEVICE_POLICY_MAX,
        _CGROUP_DEVICE_POLICY_INVALID = -EINVAL,
} CGroupDevicePolicy;
57
/* Freeze/thaw operations handled by unit_cgroup_freezer_action(). NOTE(review): the PARENT_
 * variants appear to mark requests propagated from a parent unit rather than issued on the unit
 * directly — inferred from naming, confirm in cgroup.c. */
typedef enum FreezerAction {
        FREEZER_FREEZE,
        FREEZER_PARENT_FREEZE,
        FREEZER_THAW,
        FREEZER_PARENT_THAW,

        _FREEZER_ACTION_MAX,
        _FREEZER_ACTION_INVALID = -EINVAL,
} FreezerAction;
67
/* Bit mask of device access permissions, combinable with '|'. */
typedef enum CGroupDevicePermissions {
        /* We reuse the same bit meanings the kernel's BPF_DEVCG_ACC_xyz definitions use */
        CGROUP_DEVICE_MKNOD                = 1 << 0,
        CGROUP_DEVICE_READ                 = 1 << 1,
        CGROUP_DEVICE_WRITE                = 1 << 2,
        _CGROUP_DEVICE_PERMISSIONS_MAX     = 1 << 3,
        /* All three permission bits set */
        _CGROUP_DEVICE_PERMISSIONS_ALL     = _CGROUP_DEVICE_PERMISSIONS_MAX - 1,
        _CGROUP_DEVICE_PERMISSIONS_INVALID = -EINVAL,
} CGroupDevicePermissions;
77
/* One entry of CGroupContext's device_allow list: a device (node) path plus the
 * permissions granted for it. */
struct CGroupDeviceAllow {
        LIST_FIELDS(CGroupDeviceAllow, device_allow);
        char *path;
        CGroupDevicePermissions permissions;
};
83
/* Per-device IO weight; entry of CGroupContext's io_device_weights list (unified hierarchy). */
struct CGroupIODeviceWeight {
        LIST_FIELDS(CGroupIODeviceWeight, device_weights);
        char *path;
        uint64_t weight;
};
89
/* Per-device IO limits; one value per IO limit type (bandwidth/IOPS, read/write —
 * see the CGroupIOLimitType definitions pulled in via cgroup-util.h). */
struct CGroupIODeviceLimit {
        LIST_FIELDS(CGroupIODeviceLimit, device_limits);
        char *path;
        uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
};
95
/* Per-device IO latency target; entry of CGroupContext's io_device_latencies list. */
struct CGroupIODeviceLatency {
        LIST_FIELDS(CGroupIODeviceLatency, device_latencies);
        char *path;
        usec_t target_usec;
};
101
/* Per-device block-IO weight; used by the legacy (cgroup v1) settings, see the
 * "For legacy hierarchies" section of CGroupContext below. */
struct CGroupBlockIODeviceWeight {
        LIST_FIELDS(CGroupBlockIODeviceWeight, device_weights);
        char *path;
        uint64_t weight;
};
107
/* Per-device read/write bandwidth caps (bytes per second); legacy (cgroup v1) setting. */
struct CGroupBlockIODeviceBandwidth {
        LIST_FIELDS(CGroupBlockIODeviceBandwidth, device_bandwidths);
        char *path;
        uint64_t rbps;  /* read bytes per second */
        uint64_t wbps;  /* write bytes per second */
};
114
/* A BPF program managed outside of systemd, referenced by its bpffs pin path and the
 * cgroup attach type it shall be attached with. */
struct CGroupBPFForeignProgram {
        LIST_FIELDS(CGroupBPFForeignProgram, programs);
        uint32_t attach_type;
        char *bpffs_path;
};
120
/* One allow/deny rule for socket bind() filtering: address family, IP protocol, and a
 * port range of nr_ports ports starting at port_min (presumably; confirm against the
 * bind4/bind6 BPF hook implementation). */
struct CGroupSocketBindItem {
        LIST_FIELDS(CGroupSocketBindItem, socket_bind_items);
        int address_family;
        int ip_protocol;
        uint16_t nr_ports;
        uint16_t port_min;
};
128
/* Whether to set up a PSI memory pressure watch for the unit's payload. */
typedef enum CGroupPressureWatch {
        CGROUP_PRESSURE_WATCH_OFF,      /* → tells the service payload explicitly not to watch for memory pressure */
        CGROUP_PRESSURE_WATCH_AUTO,     /* → on if memory accounting is on anyway for the unit, otherwise off */
        CGROUP_PRESSURE_WATCH_ON,
        CGROUP_PRESSURE_WATCH_SKIP,     /* → doesn't set up memory pressure watch, but also doesn't explicitly tell payload to avoid it */
        _CGROUP_PRESSURE_WATCH_MAX,
        _CGROUP_PRESSURE_WATCH_INVALID = -EINVAL,
} CGroupPressureWatch;
137
/* The user-supplied cgroup-related configuration options. This remains mostly immutable while the service
 * manager is running (except for an occasional SetProperty() configuration change), outside of reload
 * cycles. When adding members make sure to update cgroup_context_copy() accordingly. */
struct CGroupContext {
        /* Which per-resource accounting to enable for the unit */
        bool cpu_accounting;
        bool io_accounting;
        bool blockio_accounting;
        bool memory_accounting;
        bool tasks_accounting;
        bool ip_accounting;

        /* Configures the memory.oom.group attribute (on unified) */
        bool memory_oom_group;

        /* Controller delegation to the unit's payload */
        bool delegate;
        CGroupMask delegate_controllers;
        CGroupMask disable_controllers;
        char *delegate_subgroup;

        /* For unified hierarchy */
        uint64_t cpu_weight;
        uint64_t startup_cpu_weight;
        usec_t cpu_quota_per_sec_usec;
        usec_t cpu_quota_period_usec;

        CPUSet cpuset_cpus;
        CPUSet startup_cpuset_cpus;
        CPUSet cpuset_mems;
        CPUSet startup_cpuset_mems;

        uint64_t io_weight;
        uint64_t startup_io_weight;
        LIST_HEAD(CGroupIODeviceWeight, io_device_weights);
        LIST_HEAD(CGroupIODeviceLimit, io_device_limits);
        LIST_HEAD(CGroupIODeviceLatency, io_device_latencies);

        /* Memory protection and limit settings (unified hierarchy) */
        uint64_t default_memory_min;
        uint64_t default_memory_low;
        uint64_t default_startup_memory_low;
        uint64_t memory_min;
        uint64_t memory_low;
        uint64_t startup_memory_low;
        uint64_t memory_high;
        uint64_t startup_memory_high;
        uint64_t memory_max;
        uint64_t startup_memory_max;
        uint64_t memory_swap_max;
        uint64_t startup_memory_swap_max;
        uint64_t memory_zswap_max;
        uint64_t startup_memory_zswap_max;

        /* NOTE(review): these bit-fields appear to record whether the corresponding
         * setting above was explicitly assigned — inferred from naming, confirm in
         * cgroup.c/load-fragment.c. */
        bool default_memory_min_set:1;
        bool default_memory_low_set:1;
        bool default_startup_memory_low_set:1;
        bool memory_min_set:1;
        bool memory_low_set:1;
        bool startup_memory_low_set:1;
        bool startup_memory_high_set:1;
        bool startup_memory_max_set:1;
        bool startup_memory_swap_max_set:1;
        bool startup_memory_zswap_max_set:1;

        bool memory_zswap_writeback;

        Set *ip_address_allow;
        Set *ip_address_deny;
        /* These two flags indicate that redundant entries have been removed from
         * ip_address_allow/ip_address_deny, i.e. in_addr_prefixes_reduce() has already been called. */
        bool ip_address_allow_reduced;
        bool ip_address_deny_reduced;

        char **ip_filters_ingress;
        char **ip_filters_egress;
        LIST_HEAD(CGroupBPFForeignProgram, bpf_foreign_programs);

        Set *restrict_network_interfaces;
        bool restrict_network_interfaces_is_allow_list;

        /* For legacy hierarchies */
        uint64_t cpu_shares;
        uint64_t startup_cpu_shares;

        uint64_t blockio_weight;
        uint64_t startup_blockio_weight;
        LIST_HEAD(CGroupBlockIODeviceWeight, blockio_device_weights);
        LIST_HEAD(CGroupBlockIODeviceBandwidth, blockio_device_bandwidths);

        uint64_t memory_limit;

        CGroupDevicePolicy device_policy;
        LIST_HEAD(CGroupDeviceAllow, device_allow);

        LIST_HEAD(CGroupSocketBindItem, socket_bind_allow);
        LIST_HEAD(CGroupSocketBindItem, socket_bind_deny);

        /* Common */
        CGroupTasksMax tasks_max;

        /* Settings for systemd-oomd */
        ManagedOOMMode moom_swap;
        ManagedOOMMode moom_mem_pressure;
        uint32_t moom_mem_pressure_limit; /* Normalized to 2^32-1 == 100% */
        ManagedOOMPreference moom_preference;

        /* Memory pressure logic */
        CGroupPressureWatch memory_pressure_watch;
        usec_t memory_pressure_threshold_usec;
        /* NB: For now we don't make the period configurable, nor the type, nor do we allow multiple
         * triggers, nor triggers for non-memory pressure. We might add that later. */

        NFTSetContext nft_set_context;

        /* Forward coredumps for processes that crash within this cgroup.
         * Requires 'delegate' to also be true. */
        bool coredump_receive;
};
254
/* Used when querying IP accounting data (see unit_get_ip_accounting()) */
typedef enum CGroupIPAccountingMetric {
        CGROUP_IP_INGRESS_BYTES,
        CGROUP_IP_INGRESS_PACKETS,
        CGROUP_IP_EGRESS_BYTES,
        CGROUP_IP_EGRESS_PACKETS,
        _CGROUP_IP_ACCOUNTING_METRIC_MAX,
        _CGROUP_IP_ACCOUNTING_METRIC_INVALID = -EINVAL,
} CGroupIPAccountingMetric;
264
/* Used when querying IO accounting data (see unit_get_io_accounting()) */
typedef enum CGroupIOAccountingMetric {
        CGROUP_IO_READ_BYTES,
        CGROUP_IO_WRITE_BYTES,
        CGROUP_IO_READ_OPERATIONS,
        CGROUP_IO_WRITE_OPERATIONS,
        _CGROUP_IO_ACCOUNTING_METRIC_MAX,
        _CGROUP_IO_ACCOUNTING_METRIC_INVALID = -EINVAL,
} CGroupIOAccountingMetric;
274
/* Memory accounting metrics queried via unit_get_memory_accounting(). The ordering matters:
 * everything up to and including _CACHED_LAST is cached in CGroupRuntime.memory_accounting_last. */
typedef enum CGroupMemoryAccountingMetric {
        CGROUP_MEMORY_PEAK,
        CGROUP_MEMORY_SWAP_PEAK,
        /* We cache the above attributes, so that they can be fetched even after the cgroup is gone, e.g.
         * when systemd-run exits. */
        _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST = CGROUP_MEMORY_SWAP_PEAK,

        /* These attributes are transient, so no need for caching. */
        CGROUP_MEMORY_SWAP_CURRENT,
        CGROUP_MEMORY_ZSWAP_CURRENT,

        _CGROUP_MEMORY_ACCOUNTING_METRIC_MAX,
        _CGROUP_MEMORY_ACCOUNTING_METRIC_INVALID = -EINVAL,
} CGroupMemoryAccountingMetric;
289
/* Used for limits whose value sets have infimum (see unit_get_effective_limit()) */
typedef enum CGroupLimitType {
        CGROUP_LIMIT_MEMORY_MAX,
        CGROUP_LIMIT_MEMORY_HIGH,
        CGROUP_LIMIT_TASKS_MAX,
        _CGROUP_LIMIT_TYPE_MAX,
        _CGROUP_LIMIT_INVALID = -EINVAL,
} CGroupLimitType;
298
/* The dynamic, regularly updated information about a unit that has a realized cgroup. This is only
 * allocated when a unit is first realized. */
typedef struct CGroupRuntime {
        /* Where the cpu.stat or cpuacct.usage was at the time the unit was started */
        nsec_t cpu_usage_base;
        nsec_t cpu_usage_last; /* the most recently read value */

        /* Most recently read value of memory accounting metrics (one slot per cached
         * CGroupMemoryAccountingMetric) */
        uint64_t memory_accounting_last[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1];

        /* The current counter of OOM kills initiated by systemd-oomd */
        uint64_t managed_oom_kill_last;

        /* The current counter of the oom_kill field in the memory.events cgroup attribute */
        uint64_t oom_kill_last;

        /* Where the io.stat data was at the time the unit was started */
        uint64_t io_accounting_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX];
        uint64_t io_accounting_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX]; /* the most recently read value */

        /* Counterparts in the cgroup filesystem */
        char *cgroup_path;
        uint64_t cgroup_id;
        CGroupMask cgroup_realized_mask;           /* In which hierarchies does this unit's cgroup exist? (only relevant on cgroup v1) */
        CGroupMask cgroup_enabled_mask;            /* Which controllers are enabled (or more correctly: enabled for the children) for this unit's cgroup? (only relevant on cgroup v2) */
        CGroupMask cgroup_invalidated_mask;        /* A mask specifying controllers which shall be considered invalidated, and require re-realization */
        CGroupMask cgroup_members_mask;            /* A cache for the controllers required by all children of this cgroup (only relevant for slice units) */

        /* Inotify watch descriptors for watching cgroup.events and memory.events on cgroupv2 */
        int cgroup_control_inotify_wd;
        int cgroup_memory_inotify_wd;

        /* Device Controller BPF program */
        BPFProgram *bpf_device_control_installed;

        /* IP BPF Firewalling/accounting */
        int ip_accounting_ingress_map_fd;
        int ip_accounting_egress_map_fd;
        uint64_t ip_accounting_extra[_CGROUP_IP_ACCOUNTING_METRIC_MAX];

        int ipv4_allow_map_fd;
        int ipv6_allow_map_fd;
        int ipv4_deny_map_fd;
        int ipv6_deny_map_fd;
        BPFProgram *ip_bpf_ingress, *ip_bpf_ingress_installed;
        BPFProgram *ip_bpf_egress, *ip_bpf_egress_installed;

        Set *ip_bpf_custom_ingress;
        Set *ip_bpf_custom_ingress_installed;
        Set *ip_bpf_custom_egress;
        Set *ip_bpf_custom_egress_installed;

        /* BPF programs managed (e.g. loaded to kernel) by an entity external to systemd,
         * attached to unit cgroup by provided program fd and attach type. */
        Hashmap *bpf_foreign_by_key;

        FDSet *initial_socket_bind_link_fds;
#if BPF_FRAMEWORK
        /* BPF links to BPF programs attached to cgroup/bind{4|6} hooks and
         * responsible for allowing or denying a unit to bind(2) to a socket
         * address. */
        struct bpf_link *ipv4_socket_bind_link;
        struct bpf_link *ipv6_socket_bind_link;
#endif

        FDSet *initial_restrict_ifaces_link_fds;
#if BPF_FRAMEWORK
        struct bpf_link *restrict_ifaces_ingress_bpf_link;
        struct bpf_link *restrict_ifaces_egress_bpf_link;
#endif

        bool cgroup_realized:1;
        bool cgroup_members_mask_valid:1;

        /* Reset cgroup accounting next time we fork something off */
        bool reset_accounting:1;

        /* Whether we warned about clamping the CPU quota period */
        bool warned_clamping_cpu_quota_period:1;
} CGroupRuntime;
378
typedef struct Unit Unit;
typedef struct Manager Manager;
typedef enum ManagerState ManagerState;

/* Returns the CPU weight to apply for the given manager state (presumably picking the
 * startup_cpu_weight during startup states — confirm in cgroup.c). */
uint64_t cgroup_context_cpu_weight(CGroupContext *c, ManagerState state);

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period);

/* CGroupContext lifecycle and debug dumping */
void cgroup_context_init(CGroupContext *c);
int cgroup_context_copy(CGroupContext *dst, const CGroupContext *src);
void cgroup_context_done(CGroupContext *c);
void cgroup_context_dump(Unit *u, FILE* f, const char *prefix);
void cgroup_context_dump_socket_bind_item(const CGroupSocketBindItem *item, FILE *f);
void cgroup_context_dump_socket_bind_items(const CGroupSocketBindItem *items, FILE *f);

/* Per-list removal helpers for the entry types owned by a CGroupContext */
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a);
void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w);
void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l);
void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l);
void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w);
void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b);
void cgroup_context_remove_bpf_foreign_program(CGroupContext *c, CGroupBPFForeignProgram *p);
void cgroup_context_remove_socket_bind(CGroupSocketBindItem **head);
402
403 static inline bool cgroup_context_want_memory_pressure(const CGroupContext *c) {
404 assert(c);
405
406 return c->memory_pressure_watch == CGROUP_PRESSURE_WATCH_ON ||
407 (c->memory_pressure_watch == CGROUP_PRESSURE_WATCH_AUTO && c->memory_accounting);
408 }
409
int cgroup_context_add_device_allow(CGroupContext *c, const char *dev, CGroupDevicePermissions p);
int cgroup_context_add_or_update_device_allow(CGroupContext *c, const char *dev, CGroupDevicePermissions p);
int cgroup_context_add_bpf_foreign_program(CGroupContext *c, uint32_t attach_type, const char *path);
/* Convenience wrapper: add a copy of an existing foreign-program entry to 'c'. */
static inline int cgroup_context_add_bpf_foreign_program_dup(CGroupContext *c, const CGroupBPFForeignProgram *p) {
        return cgroup_context_add_bpf_foreign_program(c, p->attach_type, p->bpffs_path);
}
/* *_dup helpers: add a copy of the given entry to the corresponding list in 'c'
 * (used by cgroup_context_copy()). */
int cgroup_context_add_io_device_limit_dup(CGroupContext *c, const CGroupIODeviceLimit *l);
int cgroup_context_add_io_device_weight_dup(CGroupContext *c, const CGroupIODeviceWeight *w);
int cgroup_context_add_io_device_latency_dup(CGroupContext *c, const CGroupIODeviceLatency *l);
int cgroup_context_add_block_io_device_weight_dup(CGroupContext *c, const CGroupBlockIODeviceWeight *w);
int cgroup_context_add_block_io_device_bandwidth_dup(CGroupContext *c, const CGroupBlockIODeviceBandwidth *b);
int cgroup_context_add_device_allow_dup(CGroupContext *c, const CGroupDeviceAllow *a);
int cgroup_context_add_socket_bind_item_allow_dup(CGroupContext *c, const CGroupSocketBindItem *i);
int cgroup_context_add_socket_bind_item_deny_dup(CGroupContext *c, const CGroupSocketBindItem *i);
424
void unit_modify_nft_set(Unit *u, bool add);

/* Controller mask queries for a unit and its relatives */
CGroupMask unit_get_own_mask(Unit *u);
CGroupMask unit_get_delegate_mask(Unit *u);
CGroupMask unit_get_members_mask(Unit *u);
CGroupMask unit_get_siblings_mask(Unit *u);
CGroupMask unit_get_ancestor_disable_mask(Unit *u);

CGroupMask unit_get_target_mask(Unit *u);
CGroupMask unit_get_enable_mask(Unit *u);

void unit_invalidate_cgroup_members_masks(Unit *u);

void unit_add_family_to_cgroup_realize_queue(Unit *u);

/* Cgroup path management and (re-)realization */
const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask);
int unit_default_cgroup_path(const Unit *u, char **ret);
int unit_set_cgroup_path(Unit *u, const char *path);
int unit_pick_cgroup_path(Unit *u);

int unit_realize_cgroup(Unit *u);
void unit_prune_cgroup(Unit *u);
int unit_watch_cgroup(Unit *u);
int unit_watch_cgroup_memory(Unit *u);
void unit_add_to_cgroup_realize_queue(Unit *u);

int unit_cgroup_is_empty(Unit *u);
void unit_release_cgroup(Unit *u);
/* Releases the cgroup only if it is recursively empty.
 * Returns true if the cgroup was released, false otherwise. */
bool unit_maybe_release_cgroup(Unit *u);

void unit_add_to_cgroup_empty_queue(Unit *u);
int unit_check_oomd_kill(Unit *u);
int unit_check_oom(Unit *u);
460
int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path);

/* Manager-level cgroup setup/teardown */
int manager_setup_cgroup(Manager *m);
void manager_shutdown_cgroup(Manager *m, bool delete);

unsigned manager_dispatch_cgroup_realize_queue(Manager *m);

/* Reverse lookups: cgroup path or PID → owning unit */
Unit *manager_get_unit_by_cgroup(Manager *m, const char *cgroup);
Unit *manager_get_unit_by_pidref_cgroup(Manager *m, const PidRef *pid);
Unit *manager_get_unit_by_pidref_watching(Manager *m, const PidRef *pid);
Unit* manager_get_unit_by_pidref(Manager *m, const PidRef *pid);
Unit* manager_get_unit_by_pid(Manager *m, pid_t pid);

uint64_t unit_get_ancestor_memory_min(Unit *u);
uint64_t unit_get_ancestor_memory_low(Unit *u);
uint64_t unit_get_ancestor_startup_memory_low(Unit *u);

int unit_search_main_pid(Unit *u, PidRef *ret);
int unit_watch_all_pids(Unit *u);

int unit_synthesize_cgroup_empty_event(Unit *u);

/* Accounting getters; the result is returned via *ret */
int unit_get_memory_available(Unit *u, uint64_t *ret);
int unit_get_memory_current(Unit *u, uint64_t *ret);
int unit_get_memory_accounting(Unit *u, CGroupMemoryAccountingMetric metric, uint64_t *ret);
int unit_get_tasks_current(Unit *u, uint64_t *ret);
int unit_get_cpu_usage(Unit *u, nsec_t *ret);
int unit_get_io_accounting(Unit *u, CGroupIOAccountingMetric metric, bool allow_cache, uint64_t *ret);
int unit_get_ip_accounting(Unit *u, CGroupIPAccountingMetric metric, uint64_t *ret);
int unit_get_effective_limit(Unit *u, CGroupLimitType type, uint64_t *ret);

/* Accounting reset helpers */
int unit_reset_cpu_accounting(Unit *u);
void unit_reset_memory_accounting_last(Unit *u);
int unit_reset_ip_accounting(Unit *u);
void unit_reset_io_accounting_last(Unit *u);
int unit_reset_io_accounting(Unit *u);
int unit_reset_accounting(Unit *u);
498
/* Evaluates to the cgroup context field 'name' of unit 'u', or false if the unit has no
 * cgroup context. */
#define UNIT_CGROUP_BOOL(u, name)                                       \
        ({                                                              \
        CGroupContext *cc = unit_get_cgroup_context(u);                 \
        cc ? cc->name : false;                                          \
        })

bool manager_owns_host_root_cgroup(Manager *m);
bool unit_has_host_root_cgroup(Unit *u);

bool unit_has_startup_cgroup_constraints(Unit *u);

int manager_notify_cgroup_empty(Manager *m, const char *group);

void unit_invalidate_cgroup(Unit *u, CGroupMask m);
void unit_invalidate_cgroup_bpf(Unit *u);

void manager_invalidate_startup_units(Manager *m);

/* Enum ↔ string converters */
const char* cgroup_device_policy_to_string(CGroupDevicePolicy i) _const_;
CGroupDevicePolicy cgroup_device_policy_from_string(const char *s) _pure_;

void unit_cgroup_catchup(Unit *u);

bool unit_cgroup_delegate(Unit *u);

int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name);
int unit_cgroup_freezer_action(Unit *u, FreezerAction action);

const char* freezer_action_to_string(FreezerAction a) _const_;
FreezerAction freezer_action_from_string(const char *s) _pure_;

/* CGroupRuntime lifecycle and (de)serialization */
CGroupRuntime *cgroup_runtime_new(void);
CGroupRuntime *cgroup_runtime_free(CGroupRuntime *crt);
DEFINE_TRIVIAL_CLEANUP_FUNC(CGroupRuntime*, cgroup_runtime_free);

int cgroup_runtime_serialize(Unit *u, FILE *f, FDSet *fds);
int cgroup_runtime_deserialize_one(Unit *u, const char *key, const char *value, FDSet *fds);

const char* cgroup_pressure_watch_to_string(CGroupPressureWatch a) _const_;
CGroupPressureWatch cgroup_pressure_watch_from_string(const char *s) _pure_;

const char *cgroup_device_permissions_to_string(CGroupDevicePermissions p) _const_;
CGroupDevicePermissions cgroup_device_permissions_from_string(const char *s) _pure_;

const char* cgroup_ip_accounting_metric_to_string(CGroupIPAccountingMetric m) _const_;
CGroupIPAccountingMetric cgroup_ip_accounting_metric_from_string(const char *s) _pure_;

const char* cgroup_io_accounting_metric_to_string(CGroupIOAccountingMetric m) _const_;
CGroupIOAccountingMetric cgroup_io_accounting_metric_from_string(const char *s) _pure_;

const char* cgroup_effective_limit_type_to_string(CGroupLimitType m) _const_;
CGroupLimitType cgroup_effective_limit_type_from_string(const char *s) _pure_;

const char* cgroup_memory_accounting_metric_to_string(CGroupMemoryAccountingMetric m) _const_;
CGroupMemoryAccountingMetric cgroup_memory_accounting_metric_from_string(const char *s) _pure_;