/* SPDX-License-Identifier: LGPL-2.1+ */
#pragma once

#include <stdbool.h>

#include "cgroup-util.h"
#include "cpu-set-util.h"
#include "ip-address-access.h"
#include "list.h"
#include "time-util.h"

typedef struct TasksMax {
        /* If scale == 0, just use value; otherwise, value / scale.
         * See tasks_max_resolve(). */
        uint64_t value;
        uint64_t scale;
} TasksMax;

#define TASKS_MAX_UNSET ((TasksMax) { .value = UINT64_MAX, .scale = 0 })

static inline bool tasks_max_isset(const TasksMax *tasks_max) {
        return tasks_max->value != UINT64_MAX || tasks_max->scale != 0;
}

uint64_t tasks_max_resolve(const TasksMax *tasks_max);

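/* Illustrative only, not part of the upstream header: a minimal sketch of how the
 * value/scale encoding described above can be consumed. The helper name
 * example_tasks_max_effective() and the "pool" parameter are hypothetical; the
 * real resolution against the system-wide maximum is done by tasks_max_resolve(). */
static inline uint64_t example_tasks_max_effective(const TasksMax *t, uint64_t pool) {
        if (!tasks_max_isset(t))
                return UINT64_MAX;                 /* unset: no limit configured */
        if (t->scale == 0)
                return t->value;                   /* absolute task count */
        return pool * t->value / t->scale;         /* fraction of the pool, e.g. 50% of 4096 */
}
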
typedef struct CGroupContext CGroupContext;
typedef struct CGroupDeviceAllow CGroupDeviceAllow;
typedef struct CGroupIODeviceWeight CGroupIODeviceWeight;
typedef struct CGroupIODeviceLimit CGroupIODeviceLimit;
typedef struct CGroupIODeviceLatency CGroupIODeviceLatency;
typedef struct CGroupBlockIODeviceWeight CGroupBlockIODeviceWeight;
typedef struct CGroupBlockIODeviceBandwidth CGroupBlockIODeviceBandwidth;

typedef enum CGroupDevicePolicy {
        /* When devices are listed, allow those plus the built-in ones; if none are listed,
         * allow everything. */
        CGROUP_DEVICE_POLICY_AUTO,

        /* Everything forbidden, except built-in ones and listed ones. */
        CGROUP_DEVICE_POLICY_CLOSED,

        /* Everything forbidden, except for the listed devices */
        CGROUP_DEVICE_POLICY_STRICT,

        _CGROUP_DEVICE_POLICY_MAX,
        _CGROUP_DEVICE_POLICY_INVALID = -1
} CGroupDevicePolicy;

typedef enum FreezerAction {
        FREEZER_FREEZE,
        FREEZER_THAW,

        _FREEZER_ACTION_MAX,
        _FREEZER_ACTION_INVALID = -1,
} FreezerAction;

struct CGroupDeviceAllow {
        LIST_FIELDS(CGroupDeviceAllow, device_allow);
        char *path;
        bool r:1;
        bool w:1;
        bool m:1;
};

struct CGroupIODeviceWeight {
        LIST_FIELDS(CGroupIODeviceWeight, device_weights);
        char *path;
        uint64_t weight;
};

struct CGroupIODeviceLimit {
        LIST_FIELDS(CGroupIODeviceLimit, device_limits);
        char *path;
        uint64_t limits[_CGROUP_IO_LIMIT_TYPE_MAX];
};

struct CGroupIODeviceLatency {
        LIST_FIELDS(CGroupIODeviceLatency, device_latencies);
        char *path;
        usec_t target_usec;
};

struct CGroupBlockIODeviceWeight {
        LIST_FIELDS(CGroupBlockIODeviceWeight, device_weights);
        char *path;
        uint64_t weight;
};

struct CGroupBlockIODeviceBandwidth {
        LIST_FIELDS(CGroupBlockIODeviceBandwidth, device_bandwidths);
        char *path;
        uint64_t rbps;
        uint64_t wbps;
};

struct CGroupContext {
        bool cpu_accounting;
        bool io_accounting;
        bool blockio_accounting;
        bool memory_accounting;
        bool tasks_accounting;
        bool ip_accounting;

        /* Configures the memory.oom.group attribute (on unified) */
        bool memory_oom_group;

        bool delegate;
        CGroupMask delegate_controllers;
        CGroupMask disable_controllers;

        /* For unified hierarchy */
        uint64_t cpu_weight;
        uint64_t startup_cpu_weight;
        usec_t cpu_quota_per_sec_usec;
        usec_t cpu_quota_period_usec;

        CPUSet cpuset_cpus;
        CPUSet cpuset_mems;

        uint64_t io_weight;
        uint64_t startup_io_weight;
        LIST_HEAD(CGroupIODeviceWeight, io_device_weights);
        LIST_HEAD(CGroupIODeviceLimit, io_device_limits);
        LIST_HEAD(CGroupIODeviceLatency, io_device_latencies);

        uint64_t default_memory_min;
        uint64_t default_memory_low;
        uint64_t memory_min;
        uint64_t memory_low;
        uint64_t memory_high;
        uint64_t memory_max;
        uint64_t memory_swap_max;

        bool default_memory_min_set;
        bool default_memory_low_set;
        bool memory_min_set;
        bool memory_low_set;

        LIST_HEAD(IPAddressAccessItem, ip_address_allow);
        LIST_HEAD(IPAddressAccessItem, ip_address_deny);

        char **ip_filters_ingress;
        char **ip_filters_egress;

        /* For legacy hierarchies */
        uint64_t cpu_shares;
        uint64_t startup_cpu_shares;

        uint64_t blockio_weight;
        uint64_t startup_blockio_weight;
        LIST_HEAD(CGroupBlockIODeviceWeight, blockio_device_weights);
        LIST_HEAD(CGroupBlockIODeviceBandwidth, blockio_device_bandwidths);

        uint64_t memory_limit;

        CGroupDevicePolicy device_policy;
        LIST_HEAD(CGroupDeviceAllow, device_allow);

        /* Common */
        TasksMax tasks_max;
};

/* Used when querying IP accounting data */
typedef enum CGroupIPAccountingMetric {
        CGROUP_IP_INGRESS_BYTES,
        CGROUP_IP_INGRESS_PACKETS,
        CGROUP_IP_EGRESS_BYTES,
        CGROUP_IP_EGRESS_PACKETS,
        _CGROUP_IP_ACCOUNTING_METRIC_MAX,
        _CGROUP_IP_ACCOUNTING_METRIC_INVALID = -1,
} CGroupIPAccountingMetric;

/* Used when querying IO accounting data */
typedef enum CGroupIOAccountingMetric {
        CGROUP_IO_READ_BYTES,
        CGROUP_IO_WRITE_BYTES,
        CGROUP_IO_READ_OPERATIONS,
        CGROUP_IO_WRITE_OPERATIONS,
        _CGROUP_IO_ACCOUNTING_METRIC_MAX,
        _CGROUP_IO_ACCOUNTING_METRIC_INVALID = -1,
} CGroupIOAccountingMetric;

typedef struct Unit Unit;
typedef struct Manager Manager;

usec_t cgroup_cpu_adjust_period(usec_t period, usec_t quota, usec_t resolution, usec_t max_period);

void cgroup_context_init(CGroupContext *c);
void cgroup_context_done(CGroupContext *c);
void cgroup_context_dump(Unit *u, FILE* f, const char *prefix);

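/* Illustrative only, not part of the upstream header: a minimal sketch of the
 * CGroupContext lifecycle using the functions declared above. The field values
 * and the helper name are hypothetical. */
static inline void example_cgroup_context_lifecycle(void) {
        CGroupContext c;

        cgroup_context_init(&c);
        c.memory_accounting = true;
        c.memory_max = 1024ULL * 1024 * 1024;            /* hypothetical 1 GiB cap */
        c.tasks_max = (TasksMax) { .value = 4096 };      /* scale == 0: absolute pids limit */
        cgroup_context_done(&c);                         /* releases any allocated list entries */
}
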
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a);
void cgroup_context_free_io_device_weight(CGroupContext *c, CGroupIODeviceWeight *w);
void cgroup_context_free_io_device_limit(CGroupContext *c, CGroupIODeviceLimit *l);
void cgroup_context_free_io_device_latency(CGroupContext *c, CGroupIODeviceLatency *l);
void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w);
void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b);

int cgroup_add_device_allow(CGroupContext *c, const char *dev, const char *mode);

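/* Illustrative only, not part of the upstream header: a sketch of whitelisting a
 * device node under a restrictive device policy, using cgroup_add_device_allow()
 * declared above. The device path, the "rwm" access mode and the helper name are
 * hypothetical example values. */
static inline int example_allow_null_device(CGroupContext *c) {
        c->device_policy = CGROUP_DEVICE_POLICY_CLOSED;          /* only built-in and listed devices */
        return cgroup_add_device_allow(c, "/dev/null", "rwm");   /* read, write, mknod */
}
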
CGroupMask unit_get_own_mask(Unit *u);
CGroupMask unit_get_delegate_mask(Unit *u);
CGroupMask unit_get_members_mask(Unit *u);
CGroupMask unit_get_siblings_mask(Unit *u);
CGroupMask unit_get_subtree_mask(Unit *u);
CGroupMask unit_get_disable_mask(Unit *u);
CGroupMask unit_get_ancestor_disable_mask(Unit *u);

CGroupMask unit_get_target_mask(Unit *u);
CGroupMask unit_get_enable_mask(Unit *u);

void unit_invalidate_cgroup_members_masks(Unit *u);

void unit_add_family_to_cgroup_realize_queue(Unit *u);

const char *unit_get_realized_cgroup_path(Unit *u, CGroupMask mask);
char *unit_default_cgroup_path(const Unit *u);
int unit_set_cgroup_path(Unit *u, const char *path);
int unit_pick_cgroup_path(Unit *u);

int unit_realize_cgroup(Unit *u);
void unit_release_cgroup(Unit *u);
void unit_prune_cgroup(Unit *u);
int unit_watch_cgroup(Unit *u);
int unit_watch_cgroup_memory(Unit *u);

void unit_add_to_cgroup_empty_queue(Unit *u);
int unit_check_oom(Unit *u);

int unit_attach_pids_to_cgroup(Unit *u, Set *pids, const char *suffix_path);

int manager_setup_cgroup(Manager *m);
void manager_shutdown_cgroup(Manager *m, bool delete);

unsigned manager_dispatch_cgroup_realize_queue(Manager *m);

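/* Illustrative only, not part of the upstream header: a sketch of the "family"
 * realize flow, where a unit together with its related units is queued and the
 * queue is later drained by the manager. Both functions are declared above; the
 * call sequence itself is a hypothetical illustration, not core's actual
 * control flow. */
static inline void example_queue_and_realize_family(Manager *m, Unit *u) {
        unit_add_family_to_cgroup_realize_queue(u);              /* enqueue u and its relatives */
        (void) manager_dispatch_cgroup_realize_queue(m);         /* realize all queued cgroups */
}
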
Unit *manager_get_unit_by_cgroup(Manager *m, const char *cgroup);
Unit *manager_get_unit_by_pid_cgroup(Manager *m, pid_t pid);
Unit* manager_get_unit_by_pid(Manager *m, pid_t pid);

uint64_t unit_get_ancestor_memory_min(Unit *u);
uint64_t unit_get_ancestor_memory_low(Unit *u);

int unit_search_main_pid(Unit *u, pid_t *ret);
int unit_watch_all_pids(Unit *u);

int unit_synthesize_cgroup_empty_event(Unit *u);

int unit_get_memory_current(Unit *u, uint64_t *ret);
int unit_get_tasks_current(Unit *u, uint64_t *ret);
int unit_get_cpu_usage(Unit *u, nsec_t *ret);
int unit_get_io_accounting(Unit *u, CGroupIOAccountingMetric metric, bool allow_cache, uint64_t *ret);
int unit_get_ip_accounting(Unit *u, CGroupIPAccountingMetric metric, uint64_t *ret);

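/* Illustrative only, not part of the upstream header: a sketch of polling a
 * unit's resource counters via the accessors declared above. Error handling is
 * reduced to early returns, and the allow_cache choice is a hypothetical value. */
static inline int example_query_unit_accounting(Unit *u) {
        uint64_t memory, tasks, read_bytes, ingress;
        nsec_t cpu;
        int r;

        r = unit_get_memory_current(u, &memory);
        if (r < 0)
                return r;
        r = unit_get_tasks_current(u, &tasks);
        if (r < 0)
                return r;
        r = unit_get_cpu_usage(u, &cpu);
        if (r < 0)
                return r;
        r = unit_get_io_accounting(u, CGROUP_IO_READ_BYTES, /* allow_cache= */ false, &read_bytes);
        if (r < 0)
                return r;
        return unit_get_ip_accounting(u, CGROUP_IP_INGRESS_BYTES, &ingress);
}
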
int unit_reset_cpu_accounting(Unit *u);
int unit_reset_ip_accounting(Unit *u);
int unit_reset_io_accounting(Unit *u);
int unit_reset_accounting(Unit *u);

#define UNIT_CGROUP_BOOL(u, name)                                       \
        ({                                                              \
        CGroupContext *cc = unit_get_cgroup_context(u);                 \
        cc ? cc->name : false;                                          \
        })

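/* Illustrative only: UNIT_CGROUP_BOOL() looks up the unit's CGroupContext and
 * evaluates to false when the unit has none, so it can be used directly in
 * conditions, e.g.:
 *
 *     if (UNIT_CGROUP_BOOL(u, delegate))
 *             ...;
 *
 * (This assumes unit_get_cgroup_context() from unit.h is in scope at the point
 * of use; "delegate" names a real CGroupContext member, the check itself is a
 * hypothetical usage sketch.) */
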
bool manager_owns_host_root_cgroup(Manager *m);
bool unit_has_host_root_cgroup(Unit *u);

int manager_notify_cgroup_empty(Manager *m, const char *group);

void unit_invalidate_cgroup(Unit *u, CGroupMask m);
void unit_invalidate_cgroup_bpf(Unit *u);

void manager_invalidate_startup_units(Manager *m);

const char* cgroup_device_policy_to_string(CGroupDevicePolicy i) _const_;
CGroupDevicePolicy cgroup_device_policy_from_string(const char *s) _pure_;

bool unit_cgroup_delegate(Unit *u);

int compare_job_priority(const void *a, const void *b);

int unit_get_cpuset(Unit *u, CPUSet *cpus, const char *name);
int unit_cgroup_freezer_action(Unit *u, FreezerAction action);

const char* freezer_action_to_string(FreezerAction a) _const_;
FreezerAction freezer_action_from_string(const char *s) _pure_;
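
/* Illustrative only, not part of the upstream header: a sketch of freezing a
 * unit's cgroup via the helpers declared above. The helper name and the bare
 * return of the result are hypothetical usage, not core's actual call path. */
static inline int example_freeze_unit(Unit *u) {
        FreezerAction a = FREEZER_FREEZE;
        const char *name = freezer_action_to_string(a);  /* textual action name, e.g. for logging */

        (void) name;  /* a real caller would log this; omitted here to stay self-contained */

        return unit_cgroup_freezer_action(u, a);
}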