]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
242309e47f0b0972cccfad194b72dad91d3049ff
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bus-common-errors.h"
15 #include "bus-util.h"
16 #include "cgroup-setup.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "fileio.h"
26 #include "format-util.h"
27 #include "fs-util.h"
28 #include "id128-util.h"
29 #include "io-util.h"
30 #include "install.h"
31 #include "load-dropin.h"
32 #include "load-fragment.h"
33 #include "log.h"
34 #include "macro.h"
35 #include "missing_audit.h"
36 #include "mkdir.h"
37 #include "parse-util.h"
38 #include "path-util.h"
39 #include "process-util.h"
40 #include "rm-rf.h"
41 #include "serialize.h"
42 #include "set.h"
43 #include "signal-util.h"
44 #include "sparse-endian.h"
45 #include "special.h"
46 #include "specifier.h"
47 #include "stat-util.h"
48 #include "stdio-util.h"
49 #include "string-table.h"
50 #include "string-util.h"
51 #include "strv.h"
52 #include "terminal-util.h"
53 #include "tmpfile-util.h"
54 #include "umask-util.h"
55 #include "unit-name.h"
56 #include "unit.h"
57 #include "user-util.h"
58 #include "virt.h"
59
60 /* Thresholds for logging at INFO level about resource consumption */
61 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
62 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
63 #define MENTIONWORTHY_IP_BYTES (0ULL)
64
65 /* Thresholds for logging at INFO level about resource consumption */
66 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
67 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
68 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
69
70 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
71 [UNIT_SERVICE] = &service_vtable,
72 [UNIT_SOCKET] = &socket_vtable,
73 [UNIT_TARGET] = &target_vtable,
74 [UNIT_DEVICE] = &device_vtable,
75 [UNIT_MOUNT] = &mount_vtable,
76 [UNIT_AUTOMOUNT] = &automount_vtable,
77 [UNIT_SWAP] = &swap_vtable,
78 [UNIT_TIMER] = &timer_vtable,
79 [UNIT_PATH] = &path_vtable,
80 [UNIT_SLICE] = &slice_vtable,
81 [UNIT_SCOPE] = &scope_vtable,
82 };
83
84 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
85
86 Unit *unit_new(Manager *m, size_t size) {
87 Unit *u;
88
89 assert(m);
90 assert(size >= sizeof(Unit));
91
92 u = malloc0(size);
93 if (!u)
94 return NULL;
95
96 u->names = set_new(&string_hash_ops);
97 if (!u->names)
98 return mfree(u);
99
100 u->manager = m;
101 u->type = _UNIT_TYPE_INVALID;
102 u->default_dependencies = true;
103 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
104 u->unit_file_preset = -1;
105 u->on_failure_job_mode = JOB_REPLACE;
106 u->cgroup_control_inotify_wd = -1;
107 u->cgroup_memory_inotify_wd = -1;
108 u->job_timeout = USEC_INFINITY;
109 u->job_running_timeout = USEC_INFINITY;
110 u->ref_uid = UID_INVALID;
111 u->ref_gid = GID_INVALID;
112 u->cpu_usage_last = NSEC_INFINITY;
113 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
114 u->failure_action_exit_status = u->success_action_exit_status = -1;
115
116 u->ip_accounting_ingress_map_fd = -1;
117 u->ip_accounting_egress_map_fd = -1;
118 u->ipv4_allow_map_fd = -1;
119 u->ipv6_allow_map_fd = -1;
120 u->ipv4_deny_map_fd = -1;
121 u->ipv6_deny_map_fd = -1;
122
123 u->last_section_private = -1;
124
125 u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
126 u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };
127
128 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
129 u->io_accounting_last[i] = UINT64_MAX;
130
131 return u;
132 }
133
134 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 _cleanup_(unit_freep) Unit *u = NULL;
136 int r;
137
138 u = unit_new(m, size);
139 if (!u)
140 return -ENOMEM;
141
142 r = unit_add_name(u, name);
143 if (r < 0)
144 return r;
145
146 *ret = TAKE_PTR(u);
147
148 return r;
149 }
150
151 bool unit_has_name(const Unit *u, const char *name) {
152 assert(u);
153 assert(name);
154
155 return set_contains(u->names, (char*) name);
156 }
157
158 static void unit_init(Unit *u) {
159 CGroupContext *cc;
160 ExecContext *ec;
161 KillContext *kc;
162
163 assert(u);
164 assert(u->manager);
165 assert(u->type >= 0);
166
167 cc = unit_get_cgroup_context(u);
168 if (cc) {
169 cgroup_context_init(cc);
170
171 /* Copy in the manager defaults into the cgroup
172 * context, _before_ the rest of the settings have
173 * been initialized */
174
175 cc->cpu_accounting = u->manager->default_cpu_accounting;
176 cc->io_accounting = u->manager->default_io_accounting;
177 cc->blockio_accounting = u->manager->default_blockio_accounting;
178 cc->memory_accounting = u->manager->default_memory_accounting;
179 cc->tasks_accounting = u->manager->default_tasks_accounting;
180 cc->ip_accounting = u->manager->default_ip_accounting;
181
182 if (u->type != UNIT_SLICE)
183 cc->tasks_max = u->manager->default_tasks_max;
184 }
185
186 ec = unit_get_exec_context(u);
187 if (ec) {
188 exec_context_init(ec);
189
190 if (MANAGER_IS_SYSTEM(u->manager))
191 ec->keyring_mode = EXEC_KEYRING_SHARED;
192 else {
193 ec->keyring_mode = EXEC_KEYRING_INHERIT;
194
195 /* User manager might have its umask redefined by PAM or UMask=. In this
196 * case let the units it manages inherit this value by default. They can
197 * still tune this value through their own unit file */
198 (void) get_process_umask(getpid_cached(), &ec->umask);
199 }
200 }
201
202 kc = unit_get_kill_context(u);
203 if (kc)
204 kill_context_init(kc);
205
206 if (UNIT_VTABLE(u)->init)
207 UNIT_VTABLE(u)->init(u);
208 }
209
210 int unit_add_name(Unit *u, const char *text) {
211 _cleanup_free_ char *s = NULL, *i = NULL;
212 UnitType t;
213 int r;
214
215 assert(u);
216 assert(text);
217
218 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
219
220 if (!u->instance)
221 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
222 "instance is not set when adding name '%s': %m", text);
223
224 r = unit_name_replace_instance(text, u->instance, &s);
225 if (r < 0)
226 return log_unit_debug_errno(u, r,
227 "failed to build instance name from '%s': %m", text);
228 } else {
229 s = strdup(text);
230 if (!s)
231 return -ENOMEM;
232 }
233
234 if (set_contains(u->names, s))
235 return 0;
236 if (hashmap_contains(u->manager->units, s))
237 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
238 "unit already exist when adding name '%s': %m", text);
239
240 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
241 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
242 "name '%s' is invalid: %m", text);
243
244 t = unit_name_to_type(s);
245 if (t < 0)
246 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
247 "failed to to derive unit type from name '%s': %m", text);
248
249 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
250 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
251 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
252 u->type, t, text);
253
254 r = unit_name_to_instance(s, &i);
255 if (r < 0)
256 return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", text);
257
258 if (i && !unit_type_may_template(t))
259 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", text);
260
261 /* Ensure that this unit is either instanced or not instanced,
262 * but not both. Note that we do allow names with different
263 * instance names however! */
264 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
265 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
266 "instance is illegal: u->type(%d), u->instance(%s) and i(%s) for name '%s': %m",
267 u->type, u->instance, i, text);
268
269 if (!unit_type_may_alias(t) && !set_isempty(u->names))
270 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST), "symlinks are not allowed for name '%s': %m", text);
271
272 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
273 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(E2BIG), "too many units: %m");
274
275 r = set_put(u->names, s);
276 if (r < 0)
277 return r;
278 assert(r > 0);
279
280 r = hashmap_put(u->manager->units, s, u);
281 if (r < 0) {
282 (void) set_remove(u->names, s);
283 return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);
284 }
285
286 if (u->type == _UNIT_TYPE_INVALID) {
287 u->type = t;
288 u->id = s;
289 u->instance = TAKE_PTR(i);
290
291 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
292
293 unit_init(u);
294 }
295
296 s = NULL;
297
298 unit_add_to_dbus_queue(u);
299 return 0;
300 }
301
302 int unit_choose_id(Unit *u, const char *name) {
303 _cleanup_free_ char *t = NULL;
304 char *s, *i;
305 int r;
306
307 assert(u);
308 assert(name);
309
310 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
311
312 if (!u->instance)
313 return -EINVAL;
314
315 r = unit_name_replace_instance(name, u->instance, &t);
316 if (r < 0)
317 return r;
318
319 name = t;
320 }
321
322 /* Selects one of the names of this unit as the id */
323 s = set_get(u->names, (char*) name);
324 if (!s)
325 return -ENOENT;
326
327 /* Determine the new instance from the new id */
328 r = unit_name_to_instance(s, &i);
329 if (r < 0)
330 return r;
331
332 u->id = s;
333
334 free(u->instance);
335 u->instance = i;
336
337 unit_add_to_dbus_queue(u);
338
339 return 0;
340 }
341
342 int unit_set_description(Unit *u, const char *description) {
343 int r;
344
345 assert(u);
346
347 r = free_and_strdup(&u->description, empty_to_null(description));
348 if (r < 0)
349 return r;
350 if (r > 0)
351 unit_add_to_dbus_queue(u);
352
353 return 0;
354 }
355
356 bool unit_may_gc(Unit *u) {
357 UnitActiveState state;
358 int r;
359
360 assert(u);
361
362 /* Checks whether the unit is ready to be unloaded for garbage collection.
363 * Returns true when the unit may be collected, and false if there's some
364 * reason to keep it loaded.
365 *
366 * References from other units are *not* checked here. Instead, this is done
367 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
368 */
369
370 if (u->job)
371 return false;
372
373 if (u->nop_job)
374 return false;
375
376 state = unit_active_state(u);
377
378 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
379 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
380 UNIT_VTABLE(u)->release_resources)
381 UNIT_VTABLE(u)->release_resources(u);
382
383 if (u->perpetual)
384 return false;
385
386 if (sd_bus_track_count(u->bus_track) > 0)
387 return false;
388
389 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
390 switch (u->collect_mode) {
391
392 case COLLECT_INACTIVE:
393 if (state != UNIT_INACTIVE)
394 return false;
395
396 break;
397
398 case COLLECT_INACTIVE_OR_FAILED:
399 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
400 return false;
401
402 break;
403
404 default:
405 assert_not_reached("Unknown garbage collection mode");
406 }
407
408 if (u->cgroup_path) {
409 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
410 * around. Units with active processes should never be collected. */
411
412 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
413 if (r < 0)
414 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
415 if (r <= 0)
416 return false;
417 }
418
419 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
420 return false;
421
422 return true;
423 }
424
425 void unit_add_to_load_queue(Unit *u) {
426 assert(u);
427 assert(u->type != _UNIT_TYPE_INVALID);
428
429 if (u->load_state != UNIT_STUB || u->in_load_queue)
430 return;
431
432 LIST_PREPEND(load_queue, u->manager->load_queue, u);
433 u->in_load_queue = true;
434 }
435
436 void unit_add_to_cleanup_queue(Unit *u) {
437 assert(u);
438
439 if (u->in_cleanup_queue)
440 return;
441
442 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
443 u->in_cleanup_queue = true;
444 }
445
446 void unit_add_to_gc_queue(Unit *u) {
447 assert(u);
448
449 if (u->in_gc_queue || u->in_cleanup_queue)
450 return;
451
452 if (!unit_may_gc(u))
453 return;
454
455 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
456 u->in_gc_queue = true;
457 }
458
459 void unit_add_to_dbus_queue(Unit *u) {
460 assert(u);
461 assert(u->type != _UNIT_TYPE_INVALID);
462
463 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
464 return;
465
466 /* Shortcut things if nobody cares */
467 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
468 sd_bus_track_count(u->bus_track) <= 0 &&
469 set_isempty(u->manager->private_buses)) {
470 u->sent_dbus_new_signal = true;
471 return;
472 }
473
474 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
475 u->in_dbus_queue = true;
476 }
477
478 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
479 assert(u);
480
481 if (u->in_stop_when_unneeded_queue)
482 return;
483
484 if (!u->stop_when_unneeded)
485 return;
486
487 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
488 return;
489
490 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
491 u->in_stop_when_unneeded_queue = true;
492 }
493
494 static void bidi_set_free(Unit *u, Hashmap *h) {
495 Unit *other;
496 Iterator i;
497 void *v;
498
499 assert(u);
500
501 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
502
503 HASHMAP_FOREACH_KEY(v, other, h, i) {
504 UnitDependency d;
505
506 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
507 hashmap_remove(other->dependencies[d], u);
508
509 unit_add_to_gc_queue(other);
510 }
511
512 hashmap_free(h);
513 }
514
515 static void unit_remove_transient(Unit *u) {
516 char **i;
517
518 assert(u);
519
520 if (!u->transient)
521 return;
522
523 if (u->fragment_path)
524 (void) unlink(u->fragment_path);
525
526 STRV_FOREACH(i, u->dropin_paths) {
527 _cleanup_free_ char *p = NULL, *pp = NULL;
528
529 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
530 if (!p)
531 continue;
532
533 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
534 if (!pp)
535 continue;
536
537 /* Only drop transient drop-ins */
538 if (!path_equal(u->manager->lookup_paths.transient, pp))
539 continue;
540
541 (void) unlink(*i);
542 (void) rmdir(p);
543 }
544 }
545
546 static void unit_free_requires_mounts_for(Unit *u) {
547 assert(u);
548
549 for (;;) {
550 _cleanup_free_ char *path;
551
552 path = hashmap_steal_first_key(u->requires_mounts_for);
553 if (!path)
554 break;
555 else {
556 char s[strlen(path) + 1];
557
558 PATH_FOREACH_PREFIX_MORE(s, path) {
559 char *y;
560 Set *x;
561
562 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
563 if (!x)
564 continue;
565
566 (void) set_remove(x, u);
567
568 if (set_isempty(x)) {
569 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
570 free(y);
571 set_free(x);
572 }
573 }
574 }
575 }
576
577 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
578 }
579
580 static void unit_done(Unit *u) {
581 ExecContext *ec;
582 CGroupContext *cc;
583
584 assert(u);
585
586 if (u->type < 0)
587 return;
588
589 if (UNIT_VTABLE(u)->done)
590 UNIT_VTABLE(u)->done(u);
591
592 ec = unit_get_exec_context(u);
593 if (ec)
594 exec_context_done(ec);
595
596 cc = unit_get_cgroup_context(u);
597 if (cc)
598 cgroup_context_done(cc);
599 }
600
601 void unit_free(Unit *u) {
602 UnitDependency d;
603 Iterator i;
604 char *t;
605
606 if (!u)
607 return;
608
609 if (UNIT_ISSET(u->slice)) {
610 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
611 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
612
613 /* And make sure the parent is realized again, updating cgroup memberships */
614 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
615 }
616
617 u->transient_file = safe_fclose(u->transient_file);
618
619 if (!MANAGER_IS_RELOADING(u->manager))
620 unit_remove_transient(u);
621
622 bus_unit_send_removed_signal(u);
623
624 unit_done(u);
625
626 unit_dequeue_rewatch_pids(u);
627
628 sd_bus_slot_unref(u->match_bus_slot);
629 sd_bus_track_unref(u->bus_track);
630 u->deserialized_refs = strv_free(u->deserialized_refs);
631
632 unit_free_requires_mounts_for(u);
633
634 SET_FOREACH(t, u->names, i)
635 hashmap_remove_value(u->manager->units, t, u);
636
637 if (!sd_id128_is_null(u->invocation_id))
638 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
639
640 if (u->job) {
641 Job *j = u->job;
642 job_uninstall(j);
643 job_free(j);
644 }
645
646 if (u->nop_job) {
647 Job *j = u->nop_job;
648 job_uninstall(j);
649 job_free(j);
650 }
651
652 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
653 bidi_set_free(u, u->dependencies[d]);
654
655 if (u->on_console)
656 manager_unref_console(u->manager);
657
658 unit_release_cgroup(u);
659
660 if (!MANAGER_IS_RELOADING(u->manager))
661 unit_unlink_state_files(u);
662
663 unit_unref_uid_gid(u, false);
664
665 (void) manager_update_failed_units(u->manager, u, false);
666 set_remove(u->manager->startup_units, u);
667
668 unit_unwatch_all_pids(u);
669
670 unit_ref_unset(&u->slice);
671 while (u->refs_by_target)
672 unit_ref_unset(u->refs_by_target);
673
674 if (u->type != _UNIT_TYPE_INVALID)
675 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
676
677 if (u->in_load_queue)
678 LIST_REMOVE(load_queue, u->manager->load_queue, u);
679
680 if (u->in_dbus_queue)
681 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
682
683 if (u->in_gc_queue)
684 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
685
686 if (u->in_cgroup_realize_queue)
687 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
688
689 if (u->in_cgroup_empty_queue)
690 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
691
692 if (u->in_cleanup_queue)
693 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
694
695 if (u->in_target_deps_queue)
696 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
697
698 if (u->in_stop_when_unneeded_queue)
699 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
700
701 safe_close(u->ip_accounting_ingress_map_fd);
702 safe_close(u->ip_accounting_egress_map_fd);
703
704 safe_close(u->ipv4_allow_map_fd);
705 safe_close(u->ipv6_allow_map_fd);
706 safe_close(u->ipv4_deny_map_fd);
707 safe_close(u->ipv6_deny_map_fd);
708
709 bpf_program_unref(u->ip_bpf_ingress);
710 bpf_program_unref(u->ip_bpf_ingress_installed);
711 bpf_program_unref(u->ip_bpf_egress);
712 bpf_program_unref(u->ip_bpf_egress_installed);
713
714 set_free(u->ip_bpf_custom_ingress);
715 set_free(u->ip_bpf_custom_egress);
716 set_free(u->ip_bpf_custom_ingress_installed);
717 set_free(u->ip_bpf_custom_egress_installed);
718
719 bpf_program_unref(u->bpf_device_control_installed);
720
721 condition_free_list(u->conditions);
722 condition_free_list(u->asserts);
723
724 free(u->description);
725 strv_free(u->documentation);
726 free(u->fragment_path);
727 free(u->source_path);
728 strv_free(u->dropin_paths);
729 free(u->instance);
730
731 free(u->job_timeout_reboot_arg);
732
733 set_free_free(u->names);
734
735 free(u->reboot_arg);
736
737 free(u);
738 }
739
740 UnitActiveState unit_active_state(Unit *u) {
741 assert(u);
742
743 if (u->load_state == UNIT_MERGED)
744 return unit_active_state(unit_follow_merge(u));
745
746 /* After a reload it might happen that a unit is not correctly
747 * loaded but still has a process around. That's why we won't
748 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
749
750 return UNIT_VTABLE(u)->active_state(u);
751 }
752
753 const char* unit_sub_state_to_string(Unit *u) {
754 assert(u);
755
756 return UNIT_VTABLE(u)->sub_state_to_string(u);
757 }
758
759 static int set_complete_move(Set **s, Set **other) {
760 assert(s);
761 assert(other);
762
763 if (!other)
764 return 0;
765
766 if (*s)
767 return set_move(*s, *other);
768 else
769 *s = TAKE_PTR(*other);
770
771 return 0;
772 }
773
774 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
775 assert(s);
776 assert(other);
777
778 if (!*other)
779 return 0;
780
781 if (*s)
782 return hashmap_move(*s, *other);
783 else
784 *s = TAKE_PTR(*other);
785
786 return 0;
787 }
788
789 static int merge_names(Unit *u, Unit *other) {
790 char *t;
791 Iterator i;
792 int r;
793
794 assert(u);
795 assert(other);
796
797 r = set_complete_move(&u->names, &other->names);
798 if (r < 0)
799 return r;
800
801 set_free_free(other->names);
802 other->names = NULL;
803 other->id = NULL;
804
805 SET_FOREACH(t, u->names, i)
806 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
807
808 return 0;
809 }
810
811 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
812 unsigned n_reserve;
813
814 assert(u);
815 assert(other);
816 assert(d < _UNIT_DEPENDENCY_MAX);
817
818 /*
819 * If u does not have this dependency set allocated, there is no need
820 * to reserve anything. In that case other's set will be transferred
821 * as a whole to u by complete_move().
822 */
823 if (!u->dependencies[d])
824 return 0;
825
826 /* merge_dependencies() will skip a u-on-u dependency */
827 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
828
829 return hashmap_reserve(u->dependencies[d], n_reserve);
830 }
831
832 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
833 Iterator i;
834 Unit *back;
835 void *v;
836 int r;
837
838 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
839
840 assert(u);
841 assert(other);
842 assert(d < _UNIT_DEPENDENCY_MAX);
843
844 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
845 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
846 UnitDependency k;
847
848 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
849 * pointers back, and let's fix them up, to instead point to 'u'. */
850
851 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
852 if (back == u) {
853 /* Do not add dependencies between u and itself. */
854 if (hashmap_remove(back->dependencies[k], other))
855 maybe_warn_about_dependency(u, other_id, k);
856 } else {
857 UnitDependencyInfo di_u, di_other, di_merged;
858
859 /* Let's drop this dependency between "back" and "other", and let's create it between
860 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
861 * and any such dependency which might already exist */
862
863 di_other.data = hashmap_get(back->dependencies[k], other);
864 if (!di_other.data)
865 continue; /* dependency isn't set, let's try the next one */
866
867 di_u.data = hashmap_get(back->dependencies[k], u);
868
869 di_merged = (UnitDependencyInfo) {
870 .origin_mask = di_u.origin_mask | di_other.origin_mask,
871 .destination_mask = di_u.destination_mask | di_other.destination_mask,
872 };
873
874 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
875 if (r < 0)
876 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
877 assert(r >= 0);
878
879 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
880 }
881 }
882
883 }
884
885 /* Also do not move dependencies on u to itself */
886 back = hashmap_remove(other->dependencies[d], u);
887 if (back)
888 maybe_warn_about_dependency(u, other_id, d);
889
890 /* The move cannot fail. The caller must have performed a reservation. */
891 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
892
893 other->dependencies[d] = hashmap_free(other->dependencies[d]);
894 }
895
896 int unit_merge(Unit *u, Unit *other) {
897 UnitDependency d;
898 const char *other_id = NULL;
899 int r;
900
901 assert(u);
902 assert(other);
903 assert(u->manager == other->manager);
904 assert(u->type != _UNIT_TYPE_INVALID);
905
906 other = unit_follow_merge(other);
907
908 if (other == u)
909 return 0;
910
911 if (u->type != other->type)
912 return -EINVAL;
913
914 if (!u->instance != !other->instance)
915 return -EINVAL;
916
917 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
918 return -EEXIST;
919
920 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
921 return -EEXIST;
922
923 if (other->job)
924 return -EEXIST;
925
926 if (other->nop_job)
927 return -EEXIST;
928
929 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
930 return -EEXIST;
931
932 if (other->id)
933 other_id = strdupa(other->id);
934
935 /* Make reservations to ensure merge_dependencies() won't fail */
936 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
937 r = reserve_dependencies(u, other, d);
938 /*
939 * We don't rollback reservations if we fail. We don't have
940 * a way to undo reservations. A reservation is not a leak.
941 */
942 if (r < 0)
943 return r;
944 }
945
946 /* Merge names */
947 r = merge_names(u, other);
948 if (r < 0)
949 return r;
950
951 /* Redirect all references */
952 while (other->refs_by_target)
953 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
954
955 /* Merge dependencies */
956 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
957 merge_dependencies(u, other, other_id, d);
958
959 other->load_state = UNIT_MERGED;
960 other->merged_into = u;
961
962 /* If there is still some data attached to the other node, we
963 * don't need it anymore, and can free it. */
964 if (other->load_state != UNIT_STUB)
965 if (UNIT_VTABLE(other)->done)
966 UNIT_VTABLE(other)->done(other);
967
968 unit_add_to_dbus_queue(u);
969 unit_add_to_cleanup_queue(other);
970
971 return 0;
972 }
973
974 int unit_merge_by_name(Unit *u, const char *name) {
975 _cleanup_free_ char *s = NULL;
976 Unit *other;
977 int r;
978
979 /* Either add name to u, or if a unit with name already exists, merge it with u.
980 * If name is a template, do the same for name@instance, where instance is u's instance. */
981
982 assert(u);
983 assert(name);
984
985 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
986 if (!u->instance)
987 return -EINVAL;
988
989 r = unit_name_replace_instance(name, u->instance, &s);
990 if (r < 0)
991 return r;
992
993 name = s;
994 }
995
996 other = manager_get_unit(u->manager, name);
997 if (other)
998 return unit_merge(u, other);
999
1000 return unit_add_name(u, name);
1001 }
1002
1003 Unit* unit_follow_merge(Unit *u) {
1004 assert(u);
1005
1006 while (u->load_state == UNIT_MERGED)
1007 assert_se(u = u->merged_into);
1008
1009 return u;
1010 }
1011
1012 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
1013 ExecDirectoryType dt;
1014 char **dp;
1015 int r;
1016
1017 assert(u);
1018 assert(c);
1019
1020 if (c->working_directory && !c->working_directory_missing_ok) {
1021 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1022 if (r < 0)
1023 return r;
1024 }
1025
1026 if (c->root_directory) {
1027 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1028 if (r < 0)
1029 return r;
1030 }
1031
1032 if (c->root_image) {
1033 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1034 if (r < 0)
1035 return r;
1036 }
1037
1038 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1039 if (!u->manager->prefix[dt])
1040 continue;
1041
1042 STRV_FOREACH(dp, c->directories[dt].paths) {
1043 _cleanup_free_ char *p;
1044
1045 p = path_join(u->manager->prefix[dt], *dp);
1046 if (!p)
1047 return -ENOMEM;
1048
1049 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1050 if (r < 0)
1051 return r;
1052 }
1053 }
1054
1055 if (!MANAGER_IS_SYSTEM(u->manager))
1056 return 0;
1057
1058 /* For the following three directory types we need write access, and /var/ is possibly on the root
1059 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1060 if (!strv_isempty(c->directories[EXEC_DIRECTORY_STATE].paths) ||
1061 !strv_isempty(c->directories[EXEC_DIRECTORY_CACHE].paths) ||
1062 !strv_isempty(c->directories[EXEC_DIRECTORY_LOGS].paths)) {
1063 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
1064 if (r < 0)
1065 return r;
1066 }
1067
1068 if (c->private_tmp) {
1069 const char *p;
1070
1071 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1072 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1073 if (r < 0)
1074 return r;
1075 }
1076
1077 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1078 if (r < 0)
1079 return r;
1080 }
1081
1082 if (c->root_image) {
1083 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1084 * implicit dependency on udev */
1085
1086 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
1087 if (r < 0)
1088 return r;
1089 }
1090
1091 if (!IN_SET(c->std_output,
1092 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1093 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1094 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1095 !IN_SET(c->std_error,
1096 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1097 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1098 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1099 !c->log_namespace)
1100 return 0;
1101
1102 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1103 * is run first. */
1104
1105 if (c->log_namespace) {
1106 _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;
1107
1108 r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
1109 if (r < 0)
1110 return r;
1111
1112 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
1113 if (r < 0)
1114 return r;
1115
1116 r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
1117 if (r < 0)
1118 return r;
1119
1120 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
1121 if (r < 0)
1122 return r;
1123 } else
1124 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1125 if (r < 0)
1126 return r;
1127
1128 return 0;
1129 }
1130
1131 const char *unit_description(Unit *u) {
1132 assert(u);
1133
1134 if (u->description)
1135 return u->description;
1136
1137 return strna(u->id);
1138 }
1139
1140 const char *unit_status_string(Unit *u) {
1141 assert(u);
1142
1143 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1144 return u->id;
1145
1146 return unit_description(u);
1147 }
1148
1149 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1150 const struct {
1151 UnitDependencyMask mask;
1152 const char *name;
1153 } table[] = {
1154 { UNIT_DEPENDENCY_FILE, "file" },
1155 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1156 { UNIT_DEPENDENCY_DEFAULT, "default" },
1157 { UNIT_DEPENDENCY_UDEV, "udev" },
1158 { UNIT_DEPENDENCY_PATH, "path" },
1159 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1160 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1161 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1162 };
1163 size_t i;
1164
1165 assert(f);
1166 assert(kind);
1167 assert(space);
1168
1169 for (i = 0; i < ELEMENTSOF(table); i++) {
1170
1171 if (mask == 0)
1172 break;
1173
1174 if (FLAGS_SET(mask, table[i].mask)) {
1175 if (*space)
1176 fputc(' ', f);
1177 else
1178 *space = true;
1179
1180 fputs(kind, f);
1181 fputs("-", f);
1182 fputs(table[i].name, f);
1183
1184 mask &= ~table[i].mask;
1185 }
1186 }
1187
1188 assert(mask == 0);
1189 }
1190
1191 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1192 char *t, **j;
1193 UnitDependency d;
1194 Iterator i;
1195 const char *prefix2;
1196 char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1197 Unit *following;
1198 _cleanup_set_free_ Set *following_set = NULL;
1199 const char *n;
1200 CGroupMask m;
1201 int r;
1202
1203 assert(u);
1204 assert(u->type >= 0);
1205
1206 prefix = strempty(prefix);
1207 prefix2 = strjoina(prefix, "\t");
1208
1209 fprintf(f,
1210 "%s-> Unit %s:\n",
1211 prefix, u->id);
1212
1213 SET_FOREACH(t, u->names, i)
1214 if (!streq(t, u->id))
1215 fprintf(f, "%s\tAlias: %s\n", prefix, t);
1216
1217 fprintf(f,
1218 "%s\tDescription: %s\n"
1219 "%s\tInstance: %s\n"
1220 "%s\tUnit Load State: %s\n"
1221 "%s\tUnit Active State: %s\n"
1222 "%s\tState Change Timestamp: %s\n"
1223 "%s\tInactive Exit Timestamp: %s\n"
1224 "%s\tActive Enter Timestamp: %s\n"
1225 "%s\tActive Exit Timestamp: %s\n"
1226 "%s\tInactive Enter Timestamp: %s\n"
1227 "%s\tMay GC: %s\n"
1228 "%s\tNeed Daemon Reload: %s\n"
1229 "%s\tTransient: %s\n"
1230 "%s\tPerpetual: %s\n"
1231 "%s\tGarbage Collection Mode: %s\n"
1232 "%s\tSlice: %s\n"
1233 "%s\tCGroup: %s\n"
1234 "%s\tCGroup realized: %s\n",
1235 prefix, unit_description(u),
1236 prefix, strna(u->instance),
1237 prefix, unit_load_state_to_string(u->load_state),
1238 prefix, unit_active_state_to_string(unit_active_state(u)),
1239 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1240 prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1241 prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1242 prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1243 prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1244 prefix, yes_no(unit_may_gc(u)),
1245 prefix, yes_no(unit_need_daemon_reload(u)),
1246 prefix, yes_no(u->transient),
1247 prefix, yes_no(u->perpetual),
1248 prefix, collect_mode_to_string(u->collect_mode),
1249 prefix, strna(unit_slice_name(u)),
1250 prefix, strna(u->cgroup_path),
1251 prefix, yes_no(u->cgroup_realized));
1252
1253 if (u->cgroup_realized_mask != 0) {
1254 _cleanup_free_ char *s = NULL;
1255 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1256 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1257 }
1258
1259 if (u->cgroup_enabled_mask != 0) {
1260 _cleanup_free_ char *s = NULL;
1261 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1262 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1263 }
1264
1265 m = unit_get_own_mask(u);
1266 if (m != 0) {
1267 _cleanup_free_ char *s = NULL;
1268 (void) cg_mask_to_string(m, &s);
1269 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1270 }
1271
1272 m = unit_get_members_mask(u);
1273 if (m != 0) {
1274 _cleanup_free_ char *s = NULL;
1275 (void) cg_mask_to_string(m, &s);
1276 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1277 }
1278
1279 m = unit_get_delegate_mask(u);
1280 if (m != 0) {
1281 _cleanup_free_ char *s = NULL;
1282 (void) cg_mask_to_string(m, &s);
1283 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1284 }
1285
1286 if (!sd_id128_is_null(u->invocation_id))
1287 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1288 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1289
1290 STRV_FOREACH(j, u->documentation)
1291 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1292
1293 following = unit_following(u);
1294 if (following)
1295 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1296
1297 r = unit_following_set(u, &following_set);
1298 if (r >= 0) {
1299 Unit *other;
1300
1301 SET_FOREACH(other, following_set, i)
1302 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1303 }
1304
1305 if (u->fragment_path)
1306 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1307
1308 if (u->source_path)
1309 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1310
1311 STRV_FOREACH(j, u->dropin_paths)
1312 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1313
1314 if (u->failure_action != EMERGENCY_ACTION_NONE)
1315 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1316 if (u->failure_action_exit_status >= 0)
1317 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1318 if (u->success_action != EMERGENCY_ACTION_NONE)
1319 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1320 if (u->success_action_exit_status >= 0)
1321 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1322
1323 if (u->job_timeout != USEC_INFINITY)
1324 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1325
1326 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1327 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1328
1329 if (u->job_timeout_reboot_arg)
1330 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1331
1332 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1333 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1334
1335 if (dual_timestamp_is_set(&u->condition_timestamp))
1336 fprintf(f,
1337 "%s\tCondition Timestamp: %s\n"
1338 "%s\tCondition Result: %s\n",
1339 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1340 prefix, yes_no(u->condition_result));
1341
1342 if (dual_timestamp_is_set(&u->assert_timestamp))
1343 fprintf(f,
1344 "%s\tAssert Timestamp: %s\n"
1345 "%s\tAssert Result: %s\n",
1346 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1347 prefix, yes_no(u->assert_result));
1348
1349 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1350 UnitDependencyInfo di;
1351 Unit *other;
1352
1353 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1354 bool space = false;
1355
1356 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1357
1358 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1359 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1360
1361 fputs(")\n", f);
1362 }
1363 }
1364
1365 if (!hashmap_isempty(u->requires_mounts_for)) {
1366 UnitDependencyInfo di;
1367 const char *path;
1368
1369 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1370 bool space = false;
1371
1372 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1373
1374 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1375 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1376
1377 fputs(")\n", f);
1378 }
1379 }
1380
1381 if (u->load_state == UNIT_LOADED) {
1382
1383 fprintf(f,
1384 "%s\tStopWhenUnneeded: %s\n"
1385 "%s\tRefuseManualStart: %s\n"
1386 "%s\tRefuseManualStop: %s\n"
1387 "%s\tDefaultDependencies: %s\n"
1388 "%s\tOnFailureJobMode: %s\n"
1389 "%s\tIgnoreOnIsolate: %s\n",
1390 prefix, yes_no(u->stop_when_unneeded),
1391 prefix, yes_no(u->refuse_manual_start),
1392 prefix, yes_no(u->refuse_manual_stop),
1393 prefix, yes_no(u->default_dependencies),
1394 prefix, job_mode_to_string(u->on_failure_job_mode),
1395 prefix, yes_no(u->ignore_on_isolate));
1396
1397 if (UNIT_VTABLE(u)->dump)
1398 UNIT_VTABLE(u)->dump(u, f, prefix2);
1399
1400 } else if (u->load_state == UNIT_MERGED)
1401 fprintf(f,
1402 "%s\tMerged into: %s\n",
1403 prefix, u->merged_into->id);
1404 else if (u->load_state == UNIT_ERROR)
1405 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1406
1407 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1408 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1409
1410 if (u->job)
1411 job_dump(u->job, f, prefix2);
1412
1413 if (u->nop_job)
1414 job_dump(u->nop_job, f, prefix2);
1415 }
1416
1417 /* Common implementation for multiple backends */
1418 int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
1419 int r;
1420
1421 assert(u);
1422
1423 /* Load a .{service,socket,...} file */
1424 r = unit_load_fragment(u);
1425 if (r < 0)
1426 return r;
1427
1428 if (u->load_state == UNIT_STUB) {
1429 if (fragment_required)
1430 return -ENOENT;
1431
1432 u->load_state = UNIT_LOADED;
1433 }
1434
1435 /* Load drop-in directory data. If u is an alias, we might be reloading the
1436 * target unit needlessly. But we cannot be sure which drops-ins have already
1437 * been loaded and which not, at least without doing complicated book-keeping,
1438 * so let's always reread all drop-ins. */
1439 return unit_load_dropin(unit_follow_merge(u));
1440 }
1441
1442 void unit_add_to_target_deps_queue(Unit *u) {
1443 Manager *m = u->manager;
1444
1445 assert(u);
1446
1447 if (u->in_target_deps_queue)
1448 return;
1449
1450 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1451 u->in_target_deps_queue = true;
1452 }
1453
1454 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1455 assert(u);
1456 assert(target);
1457
1458 if (target->type != UNIT_TARGET)
1459 return 0;
1460
1461 /* Only add the dependency if both units are loaded, so that
1462 * that loop check below is reliable */
1463 if (u->load_state != UNIT_LOADED ||
1464 target->load_state != UNIT_LOADED)
1465 return 0;
1466
1467 /* If either side wants no automatic dependencies, then let's
1468 * skip this */
1469 if (!u->default_dependencies ||
1470 !target->default_dependencies)
1471 return 0;
1472
1473 /* Don't create loops */
1474 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1475 return 0;
1476
1477 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1478 }
1479
1480 static int unit_add_slice_dependencies(Unit *u) {
1481 UnitDependencyMask mask;
1482 assert(u);
1483
1484 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1485 return 0;
1486
1487 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1488 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1489 relationship). */
1490 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1491
1492 if (UNIT_ISSET(u->slice))
1493 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1494
1495 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1496 return 0;
1497
1498 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1499 }
1500
1501 static int unit_add_mount_dependencies(Unit *u) {
1502 UnitDependencyInfo di;
1503 const char *path;
1504 Iterator i;
1505 int r;
1506
1507 assert(u);
1508
1509 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1510 char prefix[strlen(path) + 1];
1511
1512 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1513 _cleanup_free_ char *p = NULL;
1514 Unit *m;
1515
1516 r = unit_name_from_path(prefix, ".mount", &p);
1517 if (r < 0)
1518 return r;
1519
1520 m = manager_get_unit(u->manager, p);
1521 if (!m) {
1522 /* Make sure to load the mount unit if
1523 * it exists. If so the dependencies
1524 * on this unit will be added later
1525 * during the loading of the mount
1526 * unit. */
1527 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1528 continue;
1529 }
1530 if (m == u)
1531 continue;
1532
1533 if (m->load_state != UNIT_LOADED)
1534 continue;
1535
1536 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1537 if (r < 0)
1538 return r;
1539
1540 if (m->fragment_path) {
1541 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1542 if (r < 0)
1543 return r;
1544 }
1545 }
1546 }
1547
1548 return 0;
1549 }
1550
1551 static int unit_add_startup_units(Unit *u) {
1552 CGroupContext *c;
1553 int r;
1554
1555 c = unit_get_cgroup_context(u);
1556 if (!c)
1557 return 0;
1558
1559 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1560 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1561 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1562 return 0;
1563
1564 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1565 if (r < 0)
1566 return r;
1567
1568 return set_put(u->manager->startup_units, u);
1569 }
1570
1571 int unit_load(Unit *u) {
1572 int r;
1573
1574 assert(u);
1575
1576 if (u->in_load_queue) {
1577 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1578 u->in_load_queue = false;
1579 }
1580
1581 if (u->type == _UNIT_TYPE_INVALID)
1582 return -EINVAL;
1583
1584 if (u->load_state != UNIT_STUB)
1585 return 0;
1586
1587 if (u->transient_file) {
1588 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1589 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1590
1591 r = fflush_and_check(u->transient_file);
1592 if (r < 0)
1593 goto fail;
1594
1595 u->transient_file = safe_fclose(u->transient_file);
1596 u->fragment_mtime = now(CLOCK_REALTIME);
1597 }
1598
1599 r = UNIT_VTABLE(u)->load(u);
1600 if (r < 0)
1601 goto fail;
1602
1603 assert(u->load_state != UNIT_STUB);
1604
1605 if (u->load_state == UNIT_LOADED) {
1606 unit_add_to_target_deps_queue(u);
1607
1608 r = unit_add_slice_dependencies(u);
1609 if (r < 0)
1610 goto fail;
1611
1612 r = unit_add_mount_dependencies(u);
1613 if (r < 0)
1614 goto fail;
1615
1616 r = unit_add_startup_units(u);
1617 if (r < 0)
1618 goto fail;
1619
1620 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1621 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1622 r = -ENOEXEC;
1623 goto fail;
1624 }
1625
1626 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1627 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1628
1629 /* We finished loading, let's ensure our parents recalculate the members mask */
1630 unit_invalidate_cgroup_members_masks(u);
1631 }
1632
1633 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1634
1635 unit_add_to_dbus_queue(unit_follow_merge(u));
1636 unit_add_to_gc_queue(u);
1637
1638 return 0;
1639
1640 fail:
1641 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1642 * return ENOEXEC to ensure units are placed in this state after loading */
1643
1644 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1645 r == -ENOEXEC ? UNIT_BAD_SETTING :
1646 UNIT_ERROR;
1647 u->load_error = r;
1648
1649 unit_add_to_dbus_queue(u);
1650 unit_add_to_gc_queue(u);
1651
1652 return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1653 }
1654
1655 _printf_(7, 8)
1656 static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1657 Unit *u = userdata;
1658 va_list ap;
1659 int r;
1660
1661 va_start(ap, format);
1662 if (u)
1663 r = log_object_internalv(level, error, file, line, func,
1664 u->manager->unit_log_field,
1665 u->id,
1666 u->manager->invocation_log_field,
1667 u->invocation_id_string,
1668 format, ap);
1669 else
1670 r = log_internalv(level, error, file, line, func, format, ap);
1671 va_end(ap);
1672
1673 return r;
1674 }
1675
1676 static bool unit_test_condition(Unit *u) {
1677 assert(u);
1678
1679 dual_timestamp_get(&u->condition_timestamp);
1680 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1681
1682 unit_add_to_dbus_queue(u);
1683
1684 return u->condition_result;
1685 }
1686
1687 static bool unit_test_assert(Unit *u) {
1688 assert(u);
1689
1690 dual_timestamp_get(&u->assert_timestamp);
1691 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1692
1693 unit_add_to_dbus_queue(u);
1694
1695 return u->assert_result;
1696 }
1697
1698 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *unit_status_msg_format) {
1699 const char *d;
1700
1701 d = unit_status_string(u);
1702 if (log_get_show_color())
1703 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1704
1705 DISABLE_WARNING_FORMAT_NONLITERAL;
1706 manager_status_printf(u->manager, status_type, status, unit_status_msg_format, d);
1707 REENABLE_WARNING;
1708 }
1709
1710 int unit_test_start_limit(Unit *u) {
1711 const char *reason;
1712
1713 assert(u);
1714
1715 if (ratelimit_below(&u->start_ratelimit)) {
1716 u->start_limit_hit = false;
1717 return 0;
1718 }
1719
1720 log_unit_warning(u, "Start request repeated too quickly.");
1721 u->start_limit_hit = true;
1722
1723 reason = strjoina("unit ", u->id, " failed");
1724
1725 emergency_action(u->manager, u->start_limit_action,
1726 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1727 u->reboot_arg, -1, reason);
1728
1729 return -ECANCELED;
1730 }
1731
1732 bool unit_shall_confirm_spawn(Unit *u) {
1733 assert(u);
1734
1735 if (manager_is_confirm_spawn_disabled(u->manager))
1736 return false;
1737
1738 /* For some reasons units remaining in the same process group
1739 * as PID 1 fail to acquire the console even if it's not used
1740 * by any process. So skip the confirmation question for them. */
1741 return !unit_get_exec_context(u)->same_pgrp;
1742 }
1743
1744 static bool unit_verify_deps(Unit *u) {
1745 Unit *other;
1746 Iterator j;
1747 void *v;
1748
1749 assert(u);
1750
1751 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1752 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1753 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1754 * conjunction with After= as for them any such check would make things entirely racy. */
1755
1756 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1757
1758 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1759 continue;
1760
1761 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1762 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1763 return false;
1764 }
1765 }
1766
1767 return true;
1768 }
1769
1770 /* Errors that aren't really errors:
1771 * -EALREADY: Unit is already started.
1772 * -ECOMM: Condition failed
1773 * -EAGAIN: An operation is already in progress. Retry later.
1774 *
1775 * Errors that are real errors:
1776 * -EBADR: This unit type does not support starting.
1777 * -ECANCELED: Start limit hit, too many requests for now
1778 * -EPROTO: Assert failed
1779 * -EINVAL: Unit not loaded
1780 * -EOPNOTSUPP: Unit type not supported
1781 * -ENOLINK: The necessary dependencies are not fulfilled.
1782 * -ESTALE: This unit has been started before and can't be started a second time
1783 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1784 */
1785 int unit_start(Unit *u) {
1786 UnitActiveState state;
1787 Unit *following;
1788
1789 assert(u);
1790
1791 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1792 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1793 * waiting is finished. */
1794 state = unit_active_state(u);
1795 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1796 return -EALREADY;
1797 if (state == UNIT_MAINTENANCE)
1798 return -EAGAIN;
1799
1800 /* Units that aren't loaded cannot be started */
1801 if (u->load_state != UNIT_LOADED)
1802 return -EINVAL;
1803
1804 /* Refuse starting scope units more than once */
1805 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1806 return -ESTALE;
1807
1808 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1809 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1810 * recheck the condition in that case. */
1811 if (state != UNIT_ACTIVATING &&
1812 !unit_test_condition(u))
1813 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1814
1815 /* If the asserts failed, fail the entire job */
1816 if (state != UNIT_ACTIVATING &&
1817 !unit_test_assert(u))
1818 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1819
1820 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1821 * condition checks, so that we rather return condition check errors (which are usually not
1822 * considered a true failure) than "not supported" errors (which are considered a failure).
1823 */
1824 if (!unit_type_supported(u->type))
1825 return -EOPNOTSUPP;
1826
1827 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1828 * should have taken care of this already, but let's check this here again. After all, our
1829 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1830 if (!unit_verify_deps(u))
1831 return -ENOLINK;
1832
1833 /* Forward to the main object, if we aren't it. */
1834 following = unit_following(u);
1835 if (following) {
1836 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1837 return unit_start(following);
1838 }
1839
1840 /* If it is stopped, but we cannot start it, then fail */
1841 if (!UNIT_VTABLE(u)->start)
1842 return -EBADR;
1843
1844 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1845 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1846 * waits for a holdoff timer to elapse before it will start again. */
1847
1848 unit_add_to_dbus_queue(u);
1849
1850 return UNIT_VTABLE(u)->start(u);
1851 }
1852
1853 bool unit_can_start(Unit *u) {
1854 assert(u);
1855
1856 if (u->load_state != UNIT_LOADED)
1857 return false;
1858
1859 if (!unit_type_supported(u->type))
1860 return false;
1861
1862 /* Scope units may be started only once */
1863 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1864 return false;
1865
1866 return !!UNIT_VTABLE(u)->start;
1867 }
1868
1869 bool unit_can_isolate(Unit *u) {
1870 assert(u);
1871
1872 return unit_can_start(u) &&
1873 u->allow_isolate;
1874 }
1875
1876 /* Errors:
1877 * -EBADR: This unit type does not support stopping.
1878 * -EALREADY: Unit is already stopped.
1879 * -EAGAIN: An operation is already in progress. Retry later.
1880 */
1881 int unit_stop(Unit *u) {
1882 UnitActiveState state;
1883 Unit *following;
1884
1885 assert(u);
1886
1887 state = unit_active_state(u);
1888 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1889 return -EALREADY;
1890
1891 following = unit_following(u);
1892 if (following) {
1893 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1894 return unit_stop(following);
1895 }
1896
1897 if (!UNIT_VTABLE(u)->stop)
1898 return -EBADR;
1899
1900 unit_add_to_dbus_queue(u);
1901
1902 return UNIT_VTABLE(u)->stop(u);
1903 }
1904
1905 bool unit_can_stop(Unit *u) {
1906 assert(u);
1907
1908 if (!unit_type_supported(u->type))
1909 return false;
1910
1911 if (u->perpetual)
1912 return false;
1913
1914 return !!UNIT_VTABLE(u)->stop;
1915 }
1916
1917 /* Errors:
1918 * -EBADR: This unit type does not support reloading.
1919 * -ENOEXEC: Unit is not started.
1920 * -EAGAIN: An operation is already in progress. Retry later.
1921 */
1922 int unit_reload(Unit *u) {
1923 UnitActiveState state;
1924 Unit *following;
1925
1926 assert(u);
1927
1928 if (u->load_state != UNIT_LOADED)
1929 return -EINVAL;
1930
1931 if (!unit_can_reload(u))
1932 return -EBADR;
1933
1934 state = unit_active_state(u);
1935 if (state == UNIT_RELOADING)
1936 return -EAGAIN;
1937
1938 if (state != UNIT_ACTIVE) {
1939 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1940 return -ENOEXEC;
1941 }
1942
1943 following = unit_following(u);
1944 if (following) {
1945 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1946 return unit_reload(following);
1947 }
1948
1949 unit_add_to_dbus_queue(u);
1950
1951 if (!UNIT_VTABLE(u)->reload) {
1952 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1953 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1954 return 0;
1955 }
1956
1957 return UNIT_VTABLE(u)->reload(u);
1958 }
1959
1960 bool unit_can_reload(Unit *u) {
1961 assert(u);
1962
1963 if (UNIT_VTABLE(u)->can_reload)
1964 return UNIT_VTABLE(u)->can_reload(u);
1965
1966 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1967 return true;
1968
1969 return UNIT_VTABLE(u)->reload;
1970 }
1971
1972 bool unit_is_unneeded(Unit *u) {
1973 static const UnitDependency deps[] = {
1974 UNIT_REQUIRED_BY,
1975 UNIT_REQUISITE_OF,
1976 UNIT_WANTED_BY,
1977 UNIT_BOUND_BY,
1978 };
1979 size_t j;
1980
1981 assert(u);
1982
1983 if (!u->stop_when_unneeded)
1984 return false;
1985
1986 /* Don't clean up while the unit is transitioning or is even inactive. */
1987 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1988 return false;
1989 if (u->job)
1990 return false;
1991
1992 for (j = 0; j < ELEMENTSOF(deps); j++) {
1993 Unit *other;
1994 Iterator i;
1995 void *v;
1996
1997 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1998 * restart, then don't clean this one up. */
1999
2000 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
2001 if (other->job)
2002 return false;
2003
2004 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2005 return false;
2006
2007 if (unit_will_restart(other))
2008 return false;
2009 }
2010 }
2011
2012 return true;
2013 }
2014
2015 static void check_unneeded_dependencies(Unit *u) {
2016
2017 static const UnitDependency deps[] = {
2018 UNIT_REQUIRES,
2019 UNIT_REQUISITE,
2020 UNIT_WANTS,
2021 UNIT_BINDS_TO,
2022 };
2023 size_t j;
2024
2025 assert(u);
2026
2027 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2028
2029 for (j = 0; j < ELEMENTSOF(deps); j++) {
2030 Unit *other;
2031 Iterator i;
2032 void *v;
2033
2034 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2035 unit_submit_to_stop_when_unneeded_queue(other);
2036 }
2037 }
2038
2039 static void unit_check_binds_to(Unit *u) {
2040 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2041 bool stop = false;
2042 Unit *other;
2043 Iterator i;
2044 void *v;
2045 int r;
2046
2047 assert(u);
2048
2049 if (u->job)
2050 return;
2051
2052 if (unit_active_state(u) != UNIT_ACTIVE)
2053 return;
2054
2055 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2056 if (other->job)
2057 continue;
2058
2059 if (!other->coldplugged)
2060 /* We might yet create a job for the other unit… */
2061 continue;
2062
2063 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2064 continue;
2065
2066 stop = true;
2067 break;
2068 }
2069
2070 if (!stop)
2071 return;
2072
2073 /* If stopping a unit fails continuously we might enter a stop
2074 * loop here, hence stop acting on the service being
2075 * unnecessary after a while. */
2076 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2077 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2078 return;
2079 }
2080
2081 assert(other);
2082 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2083
2084 /* A unit we need to run is gone. Sniff. Let's stop this. */
2085 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2086 if (r < 0)
2087 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2088 }
2089
2090 static void retroactively_start_dependencies(Unit *u) {
2091 Iterator i;
2092 Unit *other;
2093 void *v;
2094
2095 assert(u);
2096 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2097
2098 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2099 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2100 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2101 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2102
2103 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2104 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2105 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2106 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2107
2108 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2109 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2110 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2111 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2112
2113 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2114 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2115 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2116
2117 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2118 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2119 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2120 }
2121
2122 static void retroactively_stop_dependencies(Unit *u) {
2123 Unit *other;
2124 Iterator i;
2125 void *v;
2126
2127 assert(u);
2128 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2129
2130 /* Pull down units which are bound to us recursively if enabled */
2131 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2132 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2133 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2134 }
2135
2136 void unit_start_on_failure(Unit *u) {
2137 Unit *other;
2138 Iterator i;
2139 void *v;
2140 int r;
2141
2142 assert(u);
2143
2144 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2145 return;
2146
2147 log_unit_info(u, "Triggering OnFailure= dependencies.");
2148
2149 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2150 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2151
2152 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2153 if (r < 0)
2154 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2155 }
2156 }
2157
2158 void unit_trigger_notify(Unit *u) {
2159 Unit *other;
2160 Iterator i;
2161 void *v;
2162
2163 assert(u);
2164
2165 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2166 if (UNIT_VTABLE(other)->trigger_notify)
2167 UNIT_VTABLE(other)->trigger_notify(other, u);
2168 }
2169
2170 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2171 if (condition_notice && log_level > LOG_NOTICE)
2172 return LOG_NOTICE;
2173 if (condition_info && log_level > LOG_INFO)
2174 return LOG_INFO;
2175 return log_level;
2176 }
2177
2178 static int unit_log_resources(Unit *u) {
2179 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2180 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2181 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2182 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2183 size_t n_message_parts = 0, n_iovec = 0;
2184 char* message_parts[1 + 2 + 2 + 1], *t;
2185 nsec_t nsec = NSEC_INFINITY;
2186 CGroupIPAccountingMetric m;
2187 size_t i;
2188 int r;
2189 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2190 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2191 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2192 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2193 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2194 };
2195 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2196 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2197 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2198 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2199 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2200 };
2201
2202 assert(u);
2203
2204 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2205 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2206 * information and the complete data in structured fields. */
2207
2208 (void) unit_get_cpu_usage(u, &nsec);
2209 if (nsec != NSEC_INFINITY) {
2210 char buf[FORMAT_TIMESPAN_MAX] = "";
2211
2212 /* Format the CPU time for inclusion in the structured log message */
2213 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2214 r = log_oom();
2215 goto finish;
2216 }
2217 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2218
2219 /* Format the CPU time for inclusion in the human language message string */
2220 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2221 t = strjoin("consumed ", buf, " CPU time");
2222 if (!t) {
2223 r = log_oom();
2224 goto finish;
2225 }
2226
2227 message_parts[n_message_parts++] = t;
2228
2229 log_level = raise_level(log_level,
2230 nsec > NOTICEWORTHY_CPU_NSEC,
2231 nsec > MENTIONWORTHY_CPU_NSEC);
2232 }
2233
2234 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2235 char buf[FORMAT_BYTES_MAX] = "";
2236 uint64_t value = UINT64_MAX;
2237
2238 assert(io_fields[k]);
2239
2240 (void) unit_get_io_accounting(u, k, k > 0, &value);
2241 if (value == UINT64_MAX)
2242 continue;
2243
2244 have_io_accounting = true;
2245 if (value > 0)
2246 any_io = true;
2247
2248 /* Format IO accounting data for inclusion in the structured log message */
2249 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2250 r = log_oom();
2251 goto finish;
2252 }
2253 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2254
2255 /* Format the IO accounting data for inclusion in the human language message string, but only
2256 * for the bytes counters (and not for the operations counters) */
2257 if (k == CGROUP_IO_READ_BYTES) {
2258 assert(!rr);
2259 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2260 if (!rr) {
2261 r = log_oom();
2262 goto finish;
2263 }
2264 } else if (k == CGROUP_IO_WRITE_BYTES) {
2265 assert(!wr);
2266 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2267 if (!wr) {
2268 r = log_oom();
2269 goto finish;
2270 }
2271 }
2272
2273 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2274 log_level = raise_level(log_level,
2275 value > MENTIONWORTHY_IO_BYTES,
2276 value > NOTICEWORTHY_IO_BYTES);
2277 }
2278
2279 if (have_io_accounting) {
2280 if (any_io) {
2281 if (rr)
2282 message_parts[n_message_parts++] = TAKE_PTR(rr);
2283 if (wr)
2284 message_parts[n_message_parts++] = TAKE_PTR(wr);
2285
2286 } else {
2287 char *k;
2288
2289 k = strdup("no IO");
2290 if (!k) {
2291 r = log_oom();
2292 goto finish;
2293 }
2294
2295 message_parts[n_message_parts++] = k;
2296 }
2297 }
2298
2299 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2300 char buf[FORMAT_BYTES_MAX] = "";
2301 uint64_t value = UINT64_MAX;
2302
2303 assert(ip_fields[m]);
2304
2305 (void) unit_get_ip_accounting(u, m, &value);
2306 if (value == UINT64_MAX)
2307 continue;
2308
2309 have_ip_accounting = true;
2310 if (value > 0)
2311 any_traffic = true;
2312
2313 /* Format IP accounting data for inclusion in the structured log message */
2314 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2315 r = log_oom();
2316 goto finish;
2317 }
2318 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2319
2320 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2321 * bytes counters (and not for the packets counters) */
2322 if (m == CGROUP_IP_INGRESS_BYTES) {
2323 assert(!igress);
2324 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2325 if (!igress) {
2326 r = log_oom();
2327 goto finish;
2328 }
2329 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2330 assert(!egress);
2331 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2332 if (!egress) {
2333 r = log_oom();
2334 goto finish;
2335 }
2336 }
2337
2338 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2339 log_level = raise_level(log_level,
2340 value > MENTIONWORTHY_IP_BYTES,
2341 value > NOTICEWORTHY_IP_BYTES);
2342 }
2343
2344 if (have_ip_accounting) {
2345 if (any_traffic) {
2346 if (igress)
2347 message_parts[n_message_parts++] = TAKE_PTR(igress);
2348 if (egress)
2349 message_parts[n_message_parts++] = TAKE_PTR(egress);
2350
2351 } else {
2352 char *k;
2353
2354 k = strdup("no IP traffic");
2355 if (!k) {
2356 r = log_oom();
2357 goto finish;
2358 }
2359
2360 message_parts[n_message_parts++] = k;
2361 }
2362 }
2363
2364 /* Is there any accounting data available at all? */
2365 if (n_iovec == 0) {
2366 r = 0;
2367 goto finish;
2368 }
2369
2370 if (n_message_parts == 0)
2371 t = strjoina("MESSAGE=", u->id, ": Completed.");
2372 else {
2373 _cleanup_free_ char *joined;
2374
2375 message_parts[n_message_parts] = NULL;
2376
2377 joined = strv_join(message_parts, ", ");
2378 if (!joined) {
2379 r = log_oom();
2380 goto finish;
2381 }
2382
2383 joined[0] = ascii_toupper(joined[0]);
2384 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2385 }
2386
2387 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2388 * and hence don't increase n_iovec for them */
2389 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2390 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2391
2392 t = strjoina(u->manager->unit_log_field, u->id);
2393 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2394
2395 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2396 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2397
2398 log_struct_iovec(log_level, iovec, n_iovec + 4);
2399 r = 0;
2400
2401 finish:
2402 for (i = 0; i < n_message_parts; i++)
2403 free(message_parts[i]);
2404
2405 for (i = 0; i < n_iovec; i++)
2406 free(iovec[i].iov_base);
2407
2408 return r;
2409
2410 }
2411
2412 static void unit_update_on_console(Unit *u) {
2413 bool b;
2414
2415 assert(u);
2416
2417 b = unit_needs_console(u);
2418 if (u->on_console == b)
2419 return;
2420
2421 u->on_console = b;
2422 if (b)
2423 manager_ref_console(u->manager);
2424 else
2425 manager_unref_console(u->manager);
2426 }
2427
2428 static void unit_emit_audit_start(Unit *u) {
2429 assert(u);
2430
2431 if (u->type != UNIT_SERVICE)
2432 return;
2433
2434 /* Write audit record if we have just finished starting up */
2435 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2436 u->in_audit = true;
2437 }
2438
2439 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2440 assert(u);
2441
2442 if (u->type != UNIT_SERVICE)
2443 return;
2444
2445 if (u->in_audit) {
2446 /* Write audit record if we have just finished shutting down */
2447 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2448 u->in_audit = false;
2449 } else {
2450 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2451 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2452
2453 if (state == UNIT_INACTIVE)
2454 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2455 }
2456 }
2457
2458 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2459 bool unexpected = false;
2460 JobResult result;
2461
2462 assert(j);
2463
2464 if (j->state == JOB_WAITING)
2465
2466 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2467 * due to EAGAIN. */
2468 job_add_to_run_queue(j);
2469
2470 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2471 * hence needs to invalidate jobs. */
2472
2473 switch (j->type) {
2474
2475 case JOB_START:
2476 case JOB_VERIFY_ACTIVE:
2477
2478 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2479 job_finish_and_invalidate(j, JOB_DONE, true, false);
2480 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2481 unexpected = true;
2482
2483 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2484 if (ns == UNIT_FAILED)
2485 result = JOB_FAILED;
2486 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2487 result = JOB_SKIPPED;
2488 else
2489 result = JOB_DONE;
2490
2491 job_finish_and_invalidate(j, result, true, false);
2492 }
2493 }
2494
2495 break;
2496
2497 case JOB_RELOAD:
2498 case JOB_RELOAD_OR_START:
2499 case JOB_TRY_RELOAD:
2500
2501 if (j->state == JOB_RUNNING) {
2502 if (ns == UNIT_ACTIVE)
2503 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2504 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2505 unexpected = true;
2506
2507 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2508 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2509 }
2510 }
2511
2512 break;
2513
2514 case JOB_STOP:
2515 case JOB_RESTART:
2516 case JOB_TRY_RESTART:
2517
2518 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2519 job_finish_and_invalidate(j, JOB_DONE, true, false);
2520 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2521 unexpected = true;
2522 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2523 }
2524
2525 break;
2526
2527 default:
2528 assert_not_reached("Job type unknown");
2529 }
2530
2531 return unexpected;
2532 }
2533
2534 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2535 const char *reason;
2536 Manager *m;
2537
2538 assert(u);
2539 assert(os < _UNIT_ACTIVE_STATE_MAX);
2540 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2541
2542 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2543 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2544 * remounted this function will be called too! */
2545
2546 m = u->manager;
2547
2548 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2549 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2550 unit_add_to_dbus_queue(u);
2551
2552 /* Update timestamps for state changes */
2553 if (!MANAGER_IS_RELOADING(m)) {
2554 dual_timestamp_get(&u->state_change_timestamp);
2555
2556 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2557 u->inactive_exit_timestamp = u->state_change_timestamp;
2558 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2559 u->inactive_enter_timestamp = u->state_change_timestamp;
2560
2561 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2562 u->active_enter_timestamp = u->state_change_timestamp;
2563 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2564 u->active_exit_timestamp = u->state_change_timestamp;
2565 }
2566
2567 /* Keep track of failed units */
2568 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2569
2570 /* Make sure the cgroup and state files are always removed when we become inactive */
2571 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2572 unit_prune_cgroup(u);
2573 unit_unlink_state_files(u);
2574 }
2575
2576 unit_update_on_console(u);
2577
2578 if (!MANAGER_IS_RELOADING(m)) {
2579 bool unexpected;
2580
2581 /* Let's propagate state changes to the job */
2582 if (u->job)
2583 unexpected = unit_process_job(u->job, ns, flags);
2584 else
2585 unexpected = true;
2586
2587 /* If this state change happened without being requested by a job, then let's retroactively start or
2588 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2589 * additional jobs just because something is already activated. */
2590
2591 if (unexpected) {
2592 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2593 retroactively_start_dependencies(u);
2594 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2595 retroactively_stop_dependencies(u);
2596 }
2597
2598 /* stop unneeded units regardless if going down was expected or not */
2599 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2600 check_unneeded_dependencies(u);
2601
2602 if (ns != os && ns == UNIT_FAILED) {
2603 log_unit_debug(u, "Unit entered failed state.");
2604
2605 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2606 unit_start_on_failure(u);
2607 }
2608
2609 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2610 /* This unit just finished starting up */
2611
2612 unit_emit_audit_start(u);
2613 manager_send_unit_plymouth(m, u);
2614 }
2615
2616 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2617 /* This unit just stopped/failed. */
2618
2619 unit_emit_audit_stop(u, ns);
2620 unit_log_resources(u);
2621 }
2622 }
2623
2624 manager_recheck_journal(m);
2625 manager_recheck_dbus(m);
2626
2627 unit_trigger_notify(u);
2628
2629 if (!MANAGER_IS_RELOADING(m)) {
2630 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2631 unit_submit_to_stop_when_unneeded_queue(u);
2632
2633 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2634 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2635 * without ever entering started.) */
2636 unit_check_binds_to(u);
2637
2638 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2639 reason = strjoina("unit ", u->id, " failed");
2640 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2641 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2642 reason = strjoina("unit ", u->id, " succeeded");
2643 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2644 }
2645 }
2646
2647 unit_add_to_gc_queue(u);
2648 }
2649
2650 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2651 int r;
2652
2653 assert(u);
2654 assert(pid_is_valid(pid));
2655
2656 /* Watch a specific PID */
2657
2658 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2659 * opportunity to remove any stalled references to this PID as they can be created
2660 * easily (when watching a process which is not our direct child). */
2661 if (exclusive)
2662 manager_unwatch_pid(u->manager, pid);
2663
2664 r = set_ensure_allocated(&u->pids, NULL);
2665 if (r < 0)
2666 return r;
2667
2668 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2669 if (r < 0)
2670 return r;
2671
2672 /* First try, let's add the unit keyed by "pid". */
2673 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2674 if (r == -EEXIST) {
2675 Unit **array;
2676 bool found = false;
2677 size_t n = 0;
2678
2679 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2680 * to an array of Units rather than just a Unit), lists us already. */
2681
2682 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2683 if (array)
2684 for (; array[n]; n++)
2685 if (array[n] == u)
2686 found = true;
2687
2688 if (found) /* Found it already? if so, do nothing */
2689 r = 0;
2690 else {
2691 Unit **new_array;
2692
2693 /* Allocate a new array */
2694 new_array = new(Unit*, n + 2);
2695 if (!new_array)
2696 return -ENOMEM;
2697
2698 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2699 new_array[n] = u;
2700 new_array[n+1] = NULL;
2701
2702 /* Add or replace the old array */
2703 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2704 if (r < 0) {
2705 free(new_array);
2706 return r;
2707 }
2708
2709 free(array);
2710 }
2711 } else if (r < 0)
2712 return r;
2713
2714 r = set_put(u->pids, PID_TO_PTR(pid));
2715 if (r < 0)
2716 return r;
2717
2718 return 0;
2719 }
2720
2721 void unit_unwatch_pid(Unit *u, pid_t pid) {
2722 Unit **array;
2723
2724 assert(u);
2725 assert(pid_is_valid(pid));
2726
2727 /* First let's drop the unit in case it's keyed as "pid". */
2728 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2729
2730 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2731 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2732 if (array) {
2733 size_t n, m = 0;
2734
2735 /* Let's iterate through the array, dropping our own entry */
2736 for (n = 0; array[n]; n++)
2737 if (array[n] != u)
2738 array[m++] = array[n];
2739 array[m] = NULL;
2740
2741 if (m == 0) {
2742 /* The array is now empty, remove the entire entry */
2743 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2744 free(array);
2745 }
2746 }
2747
2748 (void) set_remove(u->pids, PID_TO_PTR(pid));
2749 }
2750
2751 void unit_unwatch_all_pids(Unit *u) {
2752 assert(u);
2753
2754 while (!set_isempty(u->pids))
2755 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2756
2757 u->pids = set_free(u->pids);
2758 }
2759
2760 static void unit_tidy_watch_pids(Unit *u) {
2761 pid_t except1, except2;
2762 Iterator i;
2763 void *e;
2764
2765 assert(u);
2766
2767 /* Cleans dead PIDs from our list */
2768
2769 except1 = unit_main_pid(u);
2770 except2 = unit_control_pid(u);
2771
2772 SET_FOREACH(e, u->pids, i) {
2773 pid_t pid = PTR_TO_PID(e);
2774
2775 if (pid == except1 || pid == except2)
2776 continue;
2777
2778 if (!pid_is_unwaited(pid))
2779 unit_unwatch_pid(u, pid);
2780 }
2781 }
2782
2783 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2784 Unit *u = userdata;
2785
2786 assert(s);
2787 assert(u);
2788
2789 unit_tidy_watch_pids(u);
2790 unit_watch_all_pids(u);
2791
2792 /* If the PID set is empty now, then let's finish this off. */
2793 unit_synthesize_cgroup_empty_event(u);
2794
2795 return 0;
2796 }
2797
2798 int unit_enqueue_rewatch_pids(Unit *u) {
2799 int r;
2800
2801 assert(u);
2802
2803 if (!u->cgroup_path)
2804 return -ENOENT;
2805
2806 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2807 if (r < 0)
2808 return r;
2809 if (r > 0) /* On unified we can use proper notifications */
2810 return 0;
2811
2812 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2813 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2814 * involves issuing kill(pid, 0) on all processes we watch. */
2815
2816 if (!u->rewatch_pids_event_source) {
2817 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2818
2819 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2820 if (r < 0)
2821 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2822
2823 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2824 if (r < 0)
2825 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2826
2827 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2828
2829 u->rewatch_pids_event_source = TAKE_PTR(s);
2830 }
2831
2832 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2833 if (r < 0)
2834 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2835
2836 return 0;
2837 }
2838
2839 void unit_dequeue_rewatch_pids(Unit *u) {
2840 int r;
2841 assert(u);
2842
2843 if (!u->rewatch_pids_event_source)
2844 return;
2845
2846 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2847 if (r < 0)
2848 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2849
2850 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2851 }
2852
2853 bool unit_job_is_applicable(Unit *u, JobType j) {
2854 assert(u);
2855 assert(j >= 0 && j < _JOB_TYPE_MAX);
2856
2857 switch (j) {
2858
2859 case JOB_VERIFY_ACTIVE:
2860 case JOB_START:
2861 case JOB_NOP:
2862 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2863 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2864 * jobs for it. */
2865 return true;
2866
2867 case JOB_STOP:
2868 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2869 * external events), hence it makes no sense to permit enqueing such a request either. */
2870 return !u->perpetual;
2871
2872 case JOB_RESTART:
2873 case JOB_TRY_RESTART:
2874 return unit_can_stop(u) && unit_can_start(u);
2875
2876 case JOB_RELOAD:
2877 case JOB_TRY_RELOAD:
2878 return unit_can_reload(u);
2879
2880 case JOB_RELOAD_OR_START:
2881 return unit_can_reload(u) && unit_can_start(u);
2882
2883 default:
2884 assert_not_reached("Invalid job type");
2885 }
2886 }
2887
2888 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2889 assert(u);
2890
2891 /* Only warn about some unit types */
2892 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2893 return;
2894
2895 if (streq_ptr(u->id, other))
2896 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2897 else
2898 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2899 }
2900
2901 static int unit_add_dependency_hashmap(
2902 Hashmap **h,
2903 Unit *other,
2904 UnitDependencyMask origin_mask,
2905 UnitDependencyMask destination_mask) {
2906
2907 UnitDependencyInfo info;
2908 int r;
2909
2910 assert(h);
2911 assert(other);
2912 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2913 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2914 assert(origin_mask > 0 || destination_mask > 0);
2915
2916 r = hashmap_ensure_allocated(h, NULL);
2917 if (r < 0)
2918 return r;
2919
2920 assert_cc(sizeof(void*) == sizeof(info));
2921
2922 info.data = hashmap_get(*h, other);
2923 if (info.data) {
2924 /* Entry already exists. Add in our mask. */
2925
2926 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2927 FLAGS_SET(destination_mask, info.destination_mask))
2928 return 0; /* NOP */
2929
2930 info.origin_mask |= origin_mask;
2931 info.destination_mask |= destination_mask;
2932
2933 r = hashmap_update(*h, other, info.data);
2934 } else {
2935 info = (UnitDependencyInfo) {
2936 .origin_mask = origin_mask,
2937 .destination_mask = destination_mask,
2938 };
2939
2940 r = hashmap_put(*h, other, info.data);
2941 }
2942 if (r < 0)
2943 return r;
2944
2945 return 1;
2946 }
2947
2948 int unit_add_dependency(
2949 Unit *u,
2950 UnitDependency d,
2951 Unit *other,
2952 bool add_reference,
2953 UnitDependencyMask mask) {
2954
2955 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2956 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2957 [UNIT_WANTS] = UNIT_WANTED_BY,
2958 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2959 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2960 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2961 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2962 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2963 [UNIT_WANTED_BY] = UNIT_WANTS,
2964 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2965 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2966 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2967 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2968 [UNIT_BEFORE] = UNIT_AFTER,
2969 [UNIT_AFTER] = UNIT_BEFORE,
2970 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2971 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2972 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2973 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2974 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2975 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2976 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2977 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2978 };
2979 Unit *original_u = u, *original_other = other;
2980 int r;
2981
2982 assert(u);
2983 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2984 assert(other);
2985
2986 u = unit_follow_merge(u);
2987 other = unit_follow_merge(other);
2988
2989 /* We won't allow dependencies on ourselves. We will not
2990 * consider them an error however. */
2991 if (u == other) {
2992 maybe_warn_about_dependency(original_u, original_other->id, d);
2993 return 0;
2994 }
2995
2996 if (d == UNIT_AFTER && UNIT_VTABLE(u)->refuse_after) {
2997 log_unit_warning(u, "Requested dependency After=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(u->type));
2998 return 0;
2999 }
3000
3001 if (d == UNIT_BEFORE && UNIT_VTABLE(other)->refuse_after) {
3002 log_unit_warning(u, "Requested dependency Before=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(other->type));
3003 return 0;
3004 }
3005
3006 if (d == UNIT_ON_FAILURE && !UNIT_VTABLE(u)->can_fail) {
3007 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
3008 return 0;
3009 }
3010
3011 if (d == UNIT_TRIGGERS && !UNIT_VTABLE(u)->can_trigger)
3012 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3013 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
3014 if (d == UNIT_TRIGGERED_BY && !UNIT_VTABLE(other)->can_trigger)
3015 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3016 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
3017
3018 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
3019 if (r < 0)
3020 return r;
3021
3022 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
3023 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
3024 if (r < 0)
3025 return r;
3026 }
3027
3028 if (add_reference) {
3029 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
3030 if (r < 0)
3031 return r;
3032
3033 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
3034 if (r < 0)
3035 return r;
3036 }
3037
3038 unit_add_to_dbus_queue(u);
3039 return 0;
3040 }
3041
3042 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3043 int r;
3044
3045 assert(u);
3046
3047 r = unit_add_dependency(u, d, other, add_reference, mask);
3048 if (r < 0)
3049 return r;
3050
3051 return unit_add_dependency(u, e, other, add_reference, mask);
3052 }
3053
3054 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3055 int r;
3056
3057 assert(u);
3058 assert(name);
3059 assert(buf);
3060 assert(ret);
3061
3062 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3063 *buf = NULL;
3064 *ret = name;
3065 return 0;
3066 }
3067
3068 if (u->instance)
3069 r = unit_name_replace_instance(name, u->instance, buf);
3070 else {
3071 _cleanup_free_ char *i = NULL;
3072
3073 r = unit_name_to_prefix(u->id, &i);
3074 if (r < 0)
3075 return r;
3076
3077 r = unit_name_replace_instance(name, i, buf);
3078 }
3079 if (r < 0)
3080 return r;
3081
3082 *ret = *buf;
3083 return 0;
3084 }
3085
3086 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3087 _cleanup_free_ char *buf = NULL;
3088 Unit *other;
3089 int r;
3090
3091 assert(u);
3092 assert(name);
3093
3094 r = resolve_template(u, name, &buf, &name);
3095 if (r < 0)
3096 return r;
3097
3098 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3099 if (r < 0)
3100 return r;
3101
3102 return unit_add_dependency(u, d, other, add_reference, mask);
3103 }
3104
3105 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3106 _cleanup_free_ char *buf = NULL;
3107 Unit *other;
3108 int r;
3109
3110 assert(u);
3111 assert(name);
3112
3113 r = resolve_template(u, name, &buf, &name);
3114 if (r < 0)
3115 return r;
3116
3117 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3118 if (r < 0)
3119 return r;
3120
3121 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3122 }
3123
3124 int set_unit_path(const char *p) {
3125 /* This is mostly for debug purposes */
3126 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3127 return -errno;
3128
3129 return 0;
3130 }
3131
3132 char *unit_dbus_path(Unit *u) {
3133 assert(u);
3134
3135 if (!u->id)
3136 return NULL;
3137
3138 return unit_dbus_path_from_name(u->id);
3139 }
3140
3141 char *unit_dbus_path_invocation_id(Unit *u) {
3142 assert(u);
3143
3144 if (sd_id128_is_null(u->invocation_id))
3145 return NULL;
3146
3147 return unit_dbus_path_from_name(u->invocation_id_string);
3148 }
3149
3150 int unit_set_slice(Unit *u, Unit *slice) {
3151 assert(u);
3152 assert(slice);
3153
3154 /* Sets the unit slice if it has not been set before. Is extra
3155 * careful, to only allow this for units that actually have a
3156 * cgroup context. Also, we don't allow to set this for slices
3157 * (since the parent slice is derived from the name). Make
3158 * sure the unit we set is actually a slice. */
3159
3160 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3161 return -EOPNOTSUPP;
3162
3163 if (u->type == UNIT_SLICE)
3164 return -EINVAL;
3165
3166 if (unit_active_state(u) != UNIT_INACTIVE)
3167 return -EBUSY;
3168
3169 if (slice->type != UNIT_SLICE)
3170 return -EINVAL;
3171
3172 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3173 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3174 return -EPERM;
3175
3176 if (UNIT_DEREF(u->slice) == slice)
3177 return 0;
3178
3179 /* Disallow slice changes if @u is already bound to cgroups */
3180 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3181 return -EBUSY;
3182
3183 unit_ref_set(&u->slice, u, slice);
3184 return 1;
3185 }
3186
3187 int unit_set_default_slice(Unit *u) {
3188 const char *slice_name;
3189 Unit *slice;
3190 int r;
3191
3192 assert(u);
3193
3194 if (UNIT_ISSET(u->slice))
3195 return 0;
3196
3197 if (u->instance) {
3198 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3199
3200 /* Implicitly place all instantiated units in their
3201 * own per-template slice */
3202
3203 r = unit_name_to_prefix(u->id, &prefix);
3204 if (r < 0)
3205 return r;
3206
3207 /* The prefix is already escaped, but it might include
3208 * "-" which has a special meaning for slice units,
3209 * hence escape it here extra. */
3210 escaped = unit_name_escape(prefix);
3211 if (!escaped)
3212 return -ENOMEM;
3213
3214 if (MANAGER_IS_SYSTEM(u->manager))
3215 slice_name = strjoina("system-", escaped, ".slice");
3216 else
3217 slice_name = strjoina(escaped, ".slice");
3218 } else
3219 slice_name =
3220 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3221 ? SPECIAL_SYSTEM_SLICE
3222 : SPECIAL_ROOT_SLICE;
3223
3224 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3225 if (r < 0)
3226 return r;
3227
3228 return unit_set_slice(u, slice);
3229 }
3230
3231 const char *unit_slice_name(Unit *u) {
3232 assert(u);
3233
3234 if (!UNIT_ISSET(u->slice))
3235 return NULL;
3236
3237 return UNIT_DEREF(u->slice)->id;
3238 }
3239
3240 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3241 _cleanup_free_ char *t = NULL;
3242 int r;
3243
3244 assert(u);
3245 assert(type);
3246 assert(_found);
3247
3248 r = unit_name_change_suffix(u->id, type, &t);
3249 if (r < 0)
3250 return r;
3251 if (unit_has_name(u, t))
3252 return -EINVAL;
3253
3254 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3255 assert(r < 0 || *_found != u);
3256 return r;
3257 }
3258
3259 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3260 const char *new_owner;
3261 Unit *u = userdata;
3262 int r;
3263
3264 assert(message);
3265 assert(u);
3266
3267 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3268 if (r < 0) {
3269 bus_log_parse_error(r);
3270 return 0;
3271 }
3272
3273 if (UNIT_VTABLE(u)->bus_name_owner_change)
3274 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3275
3276 return 0;
3277 }
3278
3279 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3280 const sd_bus_error *e;
3281 const char *new_owner;
3282 Unit *u = userdata;
3283 int r;
3284
3285 assert(message);
3286 assert(u);
3287
3288 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3289
3290 e = sd_bus_message_get_error(message);
3291 if (e) {
3292 if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3293 log_unit_error(u, "Unexpected error response from GetNameOwner(): %s", e->message);
3294
3295 new_owner = NULL;
3296 } else {
3297 r = sd_bus_message_read(message, "s", &new_owner);
3298 if (r < 0)
3299 return bus_log_parse_error(r);
3300
3301 assert(!isempty(new_owner));
3302 }
3303
3304 if (UNIT_VTABLE(u)->bus_name_owner_change)
3305 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3306
3307 return 0;
3308 }
3309
3310 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3311 const char *match;
3312 int r;
3313
3314 assert(u);
3315 assert(bus);
3316 assert(name);
3317
3318 if (u->match_bus_slot || u->get_name_owner_slot)
3319 return -EBUSY;
3320
3321 match = strjoina("type='signal',"
3322 "sender='org.freedesktop.DBus',"
3323 "path='/org/freedesktop/DBus',"
3324 "interface='org.freedesktop.DBus',"
3325 "member='NameOwnerChanged',"
3326 "arg0='", name, "'");
3327
3328 r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3329 if (r < 0)
3330 return r;
3331
3332 r = sd_bus_call_method_async(
3333 bus,
3334 &u->get_name_owner_slot,
3335 "org.freedesktop.DBus",
3336 "/org/freedesktop/DBus",
3337 "org.freedesktop.DBus",
3338 "GetNameOwner",
3339 get_name_owner_handler,
3340 u,
3341 "s", name);
3342 if (r < 0) {
3343 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3344 return r;
3345 }
3346
3347 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3348 return 0;
3349 }
3350
3351 int unit_watch_bus_name(Unit *u, const char *name) {
3352 int r;
3353
3354 assert(u);
3355 assert(name);
3356
3357 /* Watch a specific name on the bus. We only support one unit
3358 * watching each name for now. */
3359
3360 if (u->manager->api_bus) {
3361 /* If the bus is already available, install the match directly.
3362 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3363 r = unit_install_bus_match(u, u->manager->api_bus, name);
3364 if (r < 0)
3365 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3366 }
3367
3368 r = hashmap_put(u->manager->watch_bus, name, u);
3369 if (r < 0) {
3370 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3371 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3372 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3373 }
3374
3375 return 0;
3376 }
3377
3378 void unit_unwatch_bus_name(Unit *u, const char *name) {
3379 assert(u);
3380 assert(name);
3381
3382 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3383 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3384 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3385 }
3386
3387 bool unit_can_serialize(Unit *u) {
3388 assert(u);
3389
3390 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3391 }
3392
3393 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3394 _cleanup_free_ char *s = NULL;
3395 int r;
3396
3397 assert(f);
3398 assert(key);
3399
3400 if (mask == 0)
3401 return 0;
3402
3403 r = cg_mask_to_string(mask, &s);
3404 if (r < 0)
3405 return log_error_errno(r, "Failed to format cgroup mask: %m");
3406
3407 return serialize_item(f, key, s);
3408 }
3409
3410 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3411 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3412 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3413 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3414 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3415 };
3416
3417 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3418 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3419 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3420 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3421 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3422 };
3423
3424 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3425 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3426 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3427 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3428 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3429 };
3430
3431 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3432 CGroupIPAccountingMetric m;
3433 int r;
3434
3435 assert(u);
3436 assert(f);
3437 assert(fds);
3438
3439 if (unit_can_serialize(u)) {
3440 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3441 if (r < 0)
3442 return r;
3443 }
3444
3445 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3446
3447 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3448 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3449 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3450 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3451
3452 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3453 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3454
3455 if (dual_timestamp_is_set(&u->condition_timestamp))
3456 (void) serialize_bool(f, "condition-result", u->condition_result);
3457
3458 if (dual_timestamp_is_set(&u->assert_timestamp))
3459 (void) serialize_bool(f, "assert-result", u->assert_result);
3460
3461 (void) serialize_bool(f, "transient", u->transient);
3462 (void) serialize_bool(f, "in-audit", u->in_audit);
3463
3464 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3465 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3466 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3467 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_ratelimit_interval);
3468 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_ratelimit_burst);
3469
3470 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3471 if (u->cpu_usage_last != NSEC_INFINITY)
3472 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3473
3474 if (u->oom_kill_last > 0)
3475 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3476
3477 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3478 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3479
3480 if (u->io_accounting_last[im] != UINT64_MAX)
3481 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3482 }
3483
3484 if (u->cgroup_path)
3485 (void) serialize_item(f, "cgroup", u->cgroup_path);
3486
3487 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3488 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3489 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3490 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3491
3492 if (uid_is_valid(u->ref_uid))
3493 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3494 if (gid_is_valid(u->ref_gid))
3495 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3496
3497 if (!sd_id128_is_null(u->invocation_id))
3498 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3499
3500 bus_track_serialize(u->bus_track, f, "ref");
3501
3502 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3503 uint64_t v;
3504
3505 r = unit_get_ip_accounting(u, m, &v);
3506 if (r >= 0)
3507 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3508 }
3509
3510 if (serialize_jobs) {
3511 if (u->job) {
3512 fputs("job\n", f);
3513 job_serialize(u->job, f);
3514 }
3515
3516 if (u->nop_job) {
3517 fputs("job\n", f);
3518 job_serialize(u->nop_job, f);
3519 }
3520 }
3521
3522 /* End marker */
3523 fputc('\n', f);
3524 return 0;
3525 }
3526
3527 static int unit_deserialize_job(Unit *u, FILE *f) {
3528 _cleanup_(job_freep) Job *j = NULL;
3529 int r;
3530
3531 assert(u);
3532 assert(f);
3533
3534 j = job_new_raw(u);
3535 if (!j)
3536 return log_oom();
3537
3538 r = job_deserialize(j, f);
3539 if (r < 0)
3540 return r;
3541
3542 r = job_install_deserialized(j);
3543 if (r < 0)
3544 return r;
3545
3546 TAKE_PTR(j);
3547 return 0;
3548 }
3549
3550 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3551 int r;
3552
3553 assert(u);
3554 assert(f);
3555 assert(fds);
3556
3557 for (;;) {
3558 _cleanup_free_ char *line = NULL;
3559 char *l, *v;
3560 ssize_t m;
3561 size_t k;
3562
3563 r = read_line(f, LONG_LINE_MAX, &line);
3564 if (r < 0)
3565 return log_error_errno(r, "Failed to read serialization line: %m");
3566 if (r == 0) /* eof */
3567 break;
3568
3569 l = strstrip(line);
3570 if (isempty(l)) /* End marker */
3571 break;
3572
3573 k = strcspn(l, "=");
3574
3575 if (l[k] == '=') {
3576 l[k] = 0;
3577 v = l+k+1;
3578 } else
3579 v = l+k;
3580
3581 if (streq(l, "job")) {
3582 if (v[0] == '\0') {
3583 /* New-style serialized job */
3584 r = unit_deserialize_job(u, f);
3585 if (r < 0)
3586 return r;
3587 } else /* Legacy for pre-44 */
3588 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3589 continue;
3590 } else if (streq(l, "state-change-timestamp")) {
3591 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3592 continue;
3593 } else if (streq(l, "inactive-exit-timestamp")) {
3594 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3595 continue;
3596 } else if (streq(l, "active-enter-timestamp")) {
3597 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3598 continue;
3599 } else if (streq(l, "active-exit-timestamp")) {
3600 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3601 continue;
3602 } else if (streq(l, "inactive-enter-timestamp")) {
3603 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3604 continue;
3605 } else if (streq(l, "condition-timestamp")) {
3606 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3607 continue;
3608 } else if (streq(l, "assert-timestamp")) {
3609 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3610 continue;
3611 } else if (streq(l, "condition-result")) {
3612
3613 r = parse_boolean(v);
3614 if (r < 0)
3615 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3616 else
3617 u->condition_result = r;
3618
3619 continue;
3620
3621 } else if (streq(l, "assert-result")) {
3622
3623 r = parse_boolean(v);
3624 if (r < 0)
3625 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3626 else
3627 u->assert_result = r;
3628
3629 continue;
3630
3631 } else if (streq(l, "transient")) {
3632
3633 r = parse_boolean(v);
3634 if (r < 0)
3635 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3636 else
3637 u->transient = r;
3638
3639 continue;
3640
3641 } else if (streq(l, "in-audit")) {
3642
3643 r = parse_boolean(v);
3644 if (r < 0)
3645 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3646 else
3647 u->in_audit = r;
3648
3649 continue;
3650
3651 } else if (streq(l, "exported-invocation-id")) {
3652
3653 r = parse_boolean(v);
3654 if (r < 0)
3655 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3656 else
3657 u->exported_invocation_id = r;
3658
3659 continue;
3660
3661 } else if (streq(l, "exported-log-level-max")) {
3662
3663 r = parse_boolean(v);
3664 if (r < 0)
3665 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3666 else
3667 u->exported_log_level_max = r;
3668
3669 continue;
3670
3671 } else if (streq(l, "exported-log-extra-fields")) {
3672
3673 r = parse_boolean(v);
3674 if (r < 0)
3675 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3676 else
3677 u->exported_log_extra_fields = r;
3678
3679 continue;
3680
3681 } else if (streq(l, "exported-log-rate-limit-interval")) {
3682
3683 r = parse_boolean(v);
3684 if (r < 0)
3685 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3686 else
3687 u->exported_log_ratelimit_interval = r;
3688
3689 continue;
3690
3691 } else if (streq(l, "exported-log-rate-limit-burst")) {
3692
3693 r = parse_boolean(v);
3694 if (r < 0)
3695 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3696 else
3697 u->exported_log_ratelimit_burst = r;
3698
3699 continue;
3700
3701 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3702
3703 r = safe_atou64(v, &u->cpu_usage_base);
3704 if (r < 0)
3705 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3706
3707 continue;
3708
3709 } else if (streq(l, "cpu-usage-last")) {
3710
3711 r = safe_atou64(v, &u->cpu_usage_last);
3712 if (r < 0)
3713 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3714
3715 continue;
3716
3717 } else if (streq(l, "oom-kill-last")) {
3718
3719 r = safe_atou64(v, &u->oom_kill_last);
3720 if (r < 0)
3721 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3722
3723 continue;
3724
3725 } else if (streq(l, "cgroup")) {
3726
3727 r = unit_set_cgroup_path(u, v);
3728 if (r < 0)
3729 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3730
3731 (void) unit_watch_cgroup(u);
3732 (void) unit_watch_cgroup_memory(u);
3733
3734 continue;
3735 } else if (streq(l, "cgroup-realized")) {
3736 int b;
3737
3738 b = parse_boolean(v);
3739 if (b < 0)
3740 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3741 else
3742 u->cgroup_realized = b;
3743
3744 continue;
3745
3746 } else if (streq(l, "cgroup-realized-mask")) {
3747
3748 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3749 if (r < 0)
3750 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3751 continue;
3752
3753 } else if (streq(l, "cgroup-enabled-mask")) {
3754
3755 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3756 if (r < 0)
3757 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3758 continue;
3759
3760 } else if (streq(l, "cgroup-invalidated-mask")) {
3761
3762 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3763 if (r < 0)
3764 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3765 continue;
3766
3767 } else if (streq(l, "ref-uid")) {
3768 uid_t uid;
3769
3770 r = parse_uid(v, &uid);
3771 if (r < 0)
3772 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3773 else
3774 unit_ref_uid_gid(u, uid, GID_INVALID);
3775
3776 continue;
3777
3778 } else if (streq(l, "ref-gid")) {
3779 gid_t gid;
3780
3781 r = parse_gid(v, &gid);
3782 if (r < 0)
3783 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3784 else
3785 unit_ref_uid_gid(u, UID_INVALID, gid);
3786
3787 continue;
3788
3789 } else if (streq(l, "ref")) {
3790
3791 r = strv_extend(&u->deserialized_refs, v);
3792 if (r < 0)
3793 return log_oom();
3794
3795 continue;
3796 } else if (streq(l, "invocation-id")) {
3797 sd_id128_t id;
3798
3799 r = sd_id128_from_string(v, &id);
3800 if (r < 0)
3801 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3802 else {
3803 r = unit_set_invocation_id(u, id);
3804 if (r < 0)
3805 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3806 }
3807
3808 continue;
3809 }
3810
3811 /* Check if this is an IP accounting metric serialization field */
3812 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3813 if (m >= 0) {
3814 uint64_t c;
3815
3816 r = safe_atou64(v, &c);
3817 if (r < 0)
3818 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3819 else
3820 u->ip_accounting_extra[m] = c;
3821 continue;
3822 }
3823
3824 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3825 if (m >= 0) {
3826 uint64_t c;
3827
3828 r = safe_atou64(v, &c);
3829 if (r < 0)
3830 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3831 else
3832 u->io_accounting_base[m] = c;
3833 continue;
3834 }
3835
3836 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3837 if (m >= 0) {
3838 uint64_t c;
3839
3840 r = safe_atou64(v, &c);
3841 if (r < 0)
3842 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3843 else
3844 u->io_accounting_last[m] = c;
3845 continue;
3846 }
3847
3848 if (unit_can_serialize(u)) {
3849 r = exec_runtime_deserialize_compat(u, l, v, fds);
3850 if (r < 0) {
3851 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3852 continue;
3853 }
3854
3855 /* Returns positive if key was handled by the call */
3856 if (r > 0)
3857 continue;
3858
3859 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3860 if (r < 0)
3861 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3862 }
3863 }
3864
3865 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3866 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3867 * before 228 where the base for timeouts was not persistent across reboots. */
3868
3869 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3870 dual_timestamp_get(&u->state_change_timestamp);
3871
3872 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3873 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3874 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3875 unit_invalidate_cgroup_bpf(u);
3876
3877 return 0;
3878 }
3879
3880 int unit_deserialize_skip(FILE *f) {
3881 int r;
3882 assert(f);
3883
3884 /* Skip serialized data for this unit. We don't know what it is. */
3885
3886 for (;;) {
3887 _cleanup_free_ char *line = NULL;
3888 char *l;
3889
3890 r = read_line(f, LONG_LINE_MAX, &line);
3891 if (r < 0)
3892 return log_error_errno(r, "Failed to read serialization line: %m");
3893 if (r == 0)
3894 return 0;
3895
3896 l = strstrip(line);
3897
3898 /* End marker */
3899 if (isempty(l))
3900 return 1;
3901 }
3902 }
3903
3904 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3905 _cleanup_free_ char *e = NULL;
3906 Unit *device;
3907 int r;
3908
3909 assert(u);
3910
3911 /* Adds in links to the device node that this unit is based on */
3912 if (isempty(what))
3913 return 0;
3914
3915 if (!is_device_path(what))
3916 return 0;
3917
3918 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3919 if (!unit_type_supported(UNIT_DEVICE))
3920 return 0;
3921
3922 r = unit_name_from_path(what, ".device", &e);
3923 if (r < 0)
3924 return r;
3925
3926 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3927 if (r < 0)
3928 return r;
3929
3930 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3931 dep = UNIT_BINDS_TO;
3932
3933 return unit_add_two_dependencies(u, UNIT_AFTER,
3934 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3935 device, true, mask);
3936 }
3937
3938 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3939 _cleanup_free_ char *escaped = NULL, *target = NULL;
3940 int r;
3941
3942 assert(u);
3943
3944 if (isempty(what))
3945 return 0;
3946
3947 if (!path_startswith(what, "/dev/"))
3948 return 0;
3949
3950 /* If we don't support devices, then also don't bother with blockdev@.target */
3951 if (!unit_type_supported(UNIT_DEVICE))
3952 return 0;
3953
3954 r = unit_name_path_escape(what, &escaped);
3955 if (r < 0)
3956 return r;
3957
3958 r = unit_name_build("blockdev", escaped, ".target", &target);
3959 if (r < 0)
3960 return r;
3961
3962 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3963 }
3964
3965 int unit_coldplug(Unit *u) {
3966 int r = 0, q;
3967 char **i;
3968 Job *uj;
3969
3970 assert(u);
3971
3972 /* Make sure we don't enter a loop, when coldplugging recursively. */
3973 if (u->coldplugged)
3974 return 0;
3975
3976 u->coldplugged = true;
3977
3978 STRV_FOREACH(i, u->deserialized_refs) {
3979 q = bus_unit_track_add_name(u, *i);
3980 if (q < 0 && r >= 0)
3981 r = q;
3982 }
3983 u->deserialized_refs = strv_free(u->deserialized_refs);
3984
3985 if (UNIT_VTABLE(u)->coldplug) {
3986 q = UNIT_VTABLE(u)->coldplug(u);
3987 if (q < 0 && r >= 0)
3988 r = q;
3989 }
3990
3991 uj = u->job ?: u->nop_job;
3992 if (uj) {
3993 q = job_coldplug(uj);
3994 if (q < 0 && r >= 0)
3995 r = q;
3996 }
3997
3998 return r;
3999 }
4000
4001 void unit_catchup(Unit *u) {
4002 assert(u);
4003
4004 if (UNIT_VTABLE(u)->catchup)
4005 UNIT_VTABLE(u)->catchup(u);
4006 }
4007
4008 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
4009 struct stat st;
4010
4011 if (!path)
4012 return false;
4013
4014 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
4015 * are never out-of-date. */
4016 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
4017 return false;
4018
4019 if (stat(path, &st) < 0)
4020 /* What, cannot access this anymore? */
4021 return true;
4022
4023 if (path_masked)
4024 /* For masked files check if they are still so */
4025 return !null_or_empty(&st);
4026 else
4027 /* For non-empty files check the mtime */
4028 return timespec_load(&st.st_mtim) > mtime;
4029
4030 return false;
4031 }
4032
4033 bool unit_need_daemon_reload(Unit *u) {
4034 _cleanup_strv_free_ char **t = NULL;
4035 char **path;
4036
4037 assert(u);
4038
4039 /* For unit files, we allow masking… */
4040 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
4041 u->load_state == UNIT_MASKED))
4042 return true;
4043
4044 /* Source paths should not be masked… */
4045 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
4046 return true;
4047
4048 if (u->load_state == UNIT_LOADED)
4049 (void) unit_find_dropin_paths(u, &t);
4050 if (!strv_equal(u->dropin_paths, t))
4051 return true;
4052
4053 /* … any drop-ins that are masked are simply omitted from the list. */
4054 STRV_FOREACH(path, u->dropin_paths)
4055 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4056 return true;
4057
4058 return false;
4059 }
4060
4061 void unit_reset_failed(Unit *u) {
4062 assert(u);
4063
4064 if (UNIT_VTABLE(u)->reset_failed)
4065 UNIT_VTABLE(u)->reset_failed(u);
4066
4067 ratelimit_reset(&u->start_ratelimit);
4068 u->start_limit_hit = false;
4069 }
4070
4071 Unit *unit_following(Unit *u) {
4072 assert(u);
4073
4074 if (UNIT_VTABLE(u)->following)
4075 return UNIT_VTABLE(u)->following(u);
4076
4077 return NULL;
4078 }
4079
4080 bool unit_stop_pending(Unit *u) {
4081 assert(u);
4082
4083 /* This call does check the current state of the unit. It's
4084 * hence useful to be called from state change calls of the
4085 * unit itself, where the state isn't updated yet. This is
4086 * different from unit_inactive_or_pending() which checks both
4087 * the current state and for a queued job. */
4088
4089 return unit_has_job_type(u, JOB_STOP);
4090 }
4091
4092 bool unit_inactive_or_pending(Unit *u) {
4093 assert(u);
4094
4095 /* Returns true if the unit is inactive or going down */
4096
4097 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4098 return true;
4099
4100 if (unit_stop_pending(u))
4101 return true;
4102
4103 return false;
4104 }
4105
4106 bool unit_active_or_pending(Unit *u) {
4107 assert(u);
4108
4109 /* Returns true if the unit is active or going up */
4110
4111 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4112 return true;
4113
4114 if (u->job &&
4115 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4116 return true;
4117
4118 return false;
4119 }
4120
4121 bool unit_will_restart_default(Unit *u) {
4122 assert(u);
4123
4124 return unit_has_job_type(u, JOB_START);
4125 }
4126
4127 bool unit_will_restart(Unit *u) {
4128 assert(u);
4129
4130 if (!UNIT_VTABLE(u)->will_restart)
4131 return false;
4132
4133 return UNIT_VTABLE(u)->will_restart(u);
4134 }
4135
4136 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4137 assert(u);
4138 assert(w >= 0 && w < _KILL_WHO_MAX);
4139 assert(SIGNAL_VALID(signo));
4140
4141 if (!UNIT_VTABLE(u)->kill)
4142 return -EOPNOTSUPP;
4143
4144 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4145 }
4146
4147 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4148 _cleanup_set_free_ Set *pid_set = NULL;
4149 int r;
4150
4151 pid_set = set_new(NULL);
4152 if (!pid_set)
4153 return NULL;
4154
4155 /* Exclude the main/control pids from being killed via the cgroup */
4156 if (main_pid > 0) {
4157 r = set_put(pid_set, PID_TO_PTR(main_pid));
4158 if (r < 0)
4159 return NULL;
4160 }
4161
4162 if (control_pid > 0) {
4163 r = set_put(pid_set, PID_TO_PTR(control_pid));
4164 if (r < 0)
4165 return NULL;
4166 }
4167
4168 return TAKE_PTR(pid_set);
4169 }
4170
4171 int unit_kill_common(
4172 Unit *u,
4173 KillWho who,
4174 int signo,
4175 pid_t main_pid,
4176 pid_t control_pid,
4177 sd_bus_error *error) {
4178
4179 int r = 0;
4180 bool killed = false;
4181
4182 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4183 if (main_pid < 0)
4184 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4185 else if (main_pid == 0)
4186 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4187 }
4188
4189 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4190 if (control_pid < 0)
4191 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4192 else if (control_pid == 0)
4193 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4194 }
4195
4196 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4197 if (control_pid > 0) {
4198 if (kill(control_pid, signo) < 0)
4199 r = -errno;
4200 else
4201 killed = true;
4202 }
4203
4204 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4205 if (main_pid > 0) {
4206 if (kill(main_pid, signo) < 0)
4207 r = -errno;
4208 else
4209 killed = true;
4210 }
4211
4212 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4213 _cleanup_set_free_ Set *pid_set = NULL;
4214 int q;
4215
4216 /* Exclude the main/control pids from being killed via the cgroup */
4217 pid_set = unit_pid_set(main_pid, control_pid);
4218 if (!pid_set)
4219 return -ENOMEM;
4220
4221 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4222 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4223 r = q;
4224 else
4225 killed = true;
4226 }
4227
4228 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4229 return -ESRCH;
4230
4231 return r;
4232 }
4233
4234 int unit_following_set(Unit *u, Set **s) {
4235 assert(u);
4236 assert(s);
4237
4238 if (UNIT_VTABLE(u)->following_set)
4239 return UNIT_VTABLE(u)->following_set(u, s);
4240
4241 *s = NULL;
4242 return 0;
4243 }
4244
4245 UnitFileState unit_get_unit_file_state(Unit *u) {
4246 int r;
4247
4248 assert(u);
4249
4250 if (u->unit_file_state < 0 && u->fragment_path) {
4251 r = unit_file_get_state(
4252 u->manager->unit_file_scope,
4253 NULL,
4254 u->id,
4255 &u->unit_file_state);
4256 if (r < 0)
4257 u->unit_file_state = UNIT_FILE_BAD;
4258 }
4259
4260 return u->unit_file_state;
4261 }
4262
4263 int unit_get_unit_file_preset(Unit *u) {
4264 assert(u);
4265
4266 if (u->unit_file_preset < 0 && u->fragment_path)
4267 u->unit_file_preset = unit_file_query_preset(
4268 u->manager->unit_file_scope,
4269 NULL,
4270 basename(u->fragment_path));
4271
4272 return u->unit_file_preset;
4273 }
4274
4275 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4276 assert(ref);
4277 assert(source);
4278 assert(target);
4279
4280 if (ref->target)
4281 unit_ref_unset(ref);
4282
4283 ref->source = source;
4284 ref->target = target;
4285 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4286 return target;
4287 }
4288
4289 void unit_ref_unset(UnitRef *ref) {
4290 assert(ref);
4291
4292 if (!ref->target)
4293 return;
4294
4295 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4296 * be unreferenced now. */
4297 unit_add_to_gc_queue(ref->target);
4298
4299 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4300 ref->source = ref->target = NULL;
4301 }
4302
4303 static int user_from_unit_name(Unit *u, char **ret) {
4304
4305 static const uint8_t hash_key[] = {
4306 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4307 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4308 };
4309
4310 _cleanup_free_ char *n = NULL;
4311 int r;
4312
4313 r = unit_name_to_prefix(u->id, &n);
4314 if (r < 0)
4315 return r;
4316
4317 if (valid_user_group_name(n, 0)) {
4318 *ret = TAKE_PTR(n);
4319 return 0;
4320 }
4321
4322 /* If we can't use the unit name as a user name, then let's hash it and use that */
4323 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4324 return -ENOMEM;
4325
4326 return 0;
4327 }
4328
4329 int unit_patch_contexts(Unit *u) {
4330 CGroupContext *cc;
4331 ExecContext *ec;
4332 unsigned i;
4333 int r;
4334
4335 assert(u);
4336
4337 /* Patch in the manager defaults into the exec and cgroup
4338 * contexts, _after_ the rest of the settings have been
4339 * initialized */
4340
4341 ec = unit_get_exec_context(u);
4342 if (ec) {
4343 /* This only copies in the ones that need memory */
4344 for (i = 0; i < _RLIMIT_MAX; i++)
4345 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4346 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4347 if (!ec->rlimit[i])
4348 return -ENOMEM;
4349 }
4350
4351 if (MANAGER_IS_USER(u->manager) &&
4352 !ec->working_directory) {
4353
4354 r = get_home_dir(&ec->working_directory);
4355 if (r < 0)
4356 return r;
4357
4358 /* Allow user services to run, even if the
4359 * home directory is missing */
4360 ec->working_directory_missing_ok = true;
4361 }
4362
4363 if (ec->private_devices)
4364 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4365
4366 if (ec->protect_kernel_modules)
4367 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4368
4369 if (ec->protect_kernel_logs)
4370 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4371
4372 if (ec->protect_clock)
4373 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4374
4375 if (ec->dynamic_user) {
4376 if (!ec->user) {
4377 r = user_from_unit_name(u, &ec->user);
4378 if (r < 0)
4379 return r;
4380 }
4381
4382 if (!ec->group) {
4383 ec->group = strdup(ec->user);
4384 if (!ec->group)
4385 return -ENOMEM;
4386 }
4387
4388 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4389 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4390 * sandbox. */
4391
4392 ec->private_tmp = true;
4393 ec->remove_ipc = true;
4394 ec->protect_system = PROTECT_SYSTEM_STRICT;
4395 if (ec->protect_home == PROTECT_HOME_NO)
4396 ec->protect_home = PROTECT_HOME_READ_ONLY;
4397
4398 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4399 * them. */
4400 ec->no_new_privileges = true;
4401 ec->restrict_suid_sgid = true;
4402 }
4403 }
4404
4405 cc = unit_get_cgroup_context(u);
4406 if (cc && ec) {
4407
4408 if (ec->private_devices &&
4409 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4410 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4411
4412 if (ec->root_image &&
4413 (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
4414
4415 /* When RootImage= is specified, the following devices are touched. */
4416 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4417 if (r < 0)
4418 return r;
4419
4420 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4421 if (r < 0)
4422 return r;
4423
4424 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4425 if (r < 0)
4426 return r;
4427
4428 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices */
4429 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "modprobe@loop.service", true, UNIT_DEPENDENCY_FILE);
4430 if (r < 0)
4431 return r;
4432 }
4433
4434 if (ec->protect_clock) {
4435 r = cgroup_add_device_allow(cc, "char-rtc", "r");
4436 if (r < 0)
4437 return r;
4438 }
4439 }
4440
4441 return 0;
4442 }
4443
4444 ExecContext *unit_get_exec_context(Unit *u) {
4445 size_t offset;
4446 assert(u);
4447
4448 if (u->type < 0)
4449 return NULL;
4450
4451 offset = UNIT_VTABLE(u)->exec_context_offset;
4452 if (offset <= 0)
4453 return NULL;
4454
4455 return (ExecContext*) ((uint8_t*) u + offset);
4456 }
4457
4458 KillContext *unit_get_kill_context(Unit *u) {
4459 size_t offset;
4460 assert(u);
4461
4462 if (u->type < 0)
4463 return NULL;
4464
4465 offset = UNIT_VTABLE(u)->kill_context_offset;
4466 if (offset <= 0)
4467 return NULL;
4468
4469 return (KillContext*) ((uint8_t*) u + offset);
4470 }
4471
4472 CGroupContext *unit_get_cgroup_context(Unit *u) {
4473 size_t offset;
4474
4475 if (u->type < 0)
4476 return NULL;
4477
4478 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4479 if (offset <= 0)
4480 return NULL;
4481
4482 return (CGroupContext*) ((uint8_t*) u + offset);
4483 }
4484
4485 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4486 size_t offset;
4487
4488 if (u->type < 0)
4489 return NULL;
4490
4491 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4492 if (offset <= 0)
4493 return NULL;
4494
4495 return *(ExecRuntime**) ((uint8_t*) u + offset);
4496 }
4497
4498 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4499 assert(u);
4500
4501 if (UNIT_WRITE_FLAGS_NOOP(flags))
4502 return NULL;
4503
4504 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4505 return u->manager->lookup_paths.transient;
4506
4507 if (flags & UNIT_PERSISTENT)
4508 return u->manager->lookup_paths.persistent_control;
4509
4510 if (flags & UNIT_RUNTIME)
4511 return u->manager->lookup_paths.runtime_control;
4512
4513 return NULL;
4514 }
4515
4516 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4517 char *ret = NULL;
4518
4519 if (!s)
4520 return NULL;
4521
4522 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4523 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4524 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4525 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4526 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4527 * allocations. */
4528
4529 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4530 ret = specifier_escape(s);
4531 if (!ret)
4532 return NULL;
4533
4534 s = ret;
4535 }
4536
4537 if (flags & UNIT_ESCAPE_C) {
4538 char *a;
4539
4540 a = cescape(s);
4541 free(ret);
4542 if (!a)
4543 return NULL;
4544
4545 ret = a;
4546 }
4547
4548 if (buf) {
4549 *buf = ret;
4550 return ret ?: (char*) s;
4551 }
4552
4553 return ret ?: strdup(s);
4554 }
4555
4556 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4557 _cleanup_free_ char *result = NULL;
4558 size_t n = 0, allocated = 0;
4559 char **i;
4560
4561 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4562 * way suitable for ExecStart= stanzas */
4563
4564 STRV_FOREACH(i, l) {
4565 _cleanup_free_ char *buf = NULL;
4566 const char *p;
4567 size_t a;
4568 char *q;
4569
4570 p = unit_escape_setting(*i, flags, &buf);
4571 if (!p)
4572 return NULL;
4573
4574 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4575 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4576 return NULL;
4577
4578 q = result + n;
4579 if (n > 0)
4580 *(q++) = ' ';
4581
4582 *(q++) = '"';
4583 q = stpcpy(q, p);
4584 *(q++) = '"';
4585
4586 n += a;
4587 }
4588
4589 if (!GREEDY_REALLOC(result, allocated, n + 1))
4590 return NULL;
4591
4592 result[n] = 0;
4593
4594 return TAKE_PTR(result);
4595 }
4596
4597 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4598 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4599 const char *dir, *wrapped;
4600 int r;
4601
4602 assert(u);
4603 assert(name);
4604 assert(data);
4605
4606 if (UNIT_WRITE_FLAGS_NOOP(flags))
4607 return 0;
4608
4609 data = unit_escape_setting(data, flags, &escaped);
4610 if (!data)
4611 return -ENOMEM;
4612
4613 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4614 * previous section header is the same */
4615
4616 if (flags & UNIT_PRIVATE) {
4617 if (!UNIT_VTABLE(u)->private_section)
4618 return -EINVAL;
4619
4620 if (!u->transient_file || u->last_section_private < 0)
4621 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4622 else if (u->last_section_private == 0)
4623 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4624 } else {
4625 if (!u->transient_file || u->last_section_private < 0)
4626 data = strjoina("[Unit]\n", data);
4627 else if (u->last_section_private > 0)
4628 data = strjoina("\n[Unit]\n", data);
4629 }
4630
4631 if (u->transient_file) {
4632 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4633 * write to the transient unit file. */
4634 fputs(data, u->transient_file);
4635
4636 if (!endswith(data, "\n"))
4637 fputc('\n', u->transient_file);
4638
4639 /* Remember which section we wrote this entry to */
4640 u->last_section_private = !!(flags & UNIT_PRIVATE);
4641 return 0;
4642 }
4643
4644 dir = unit_drop_in_dir(u, flags);
4645 if (!dir)
4646 return -EINVAL;
4647
4648 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4649 "# or an equivalent operation. Do not edit.\n",
4650 data,
4651 "\n");
4652
4653 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4654 if (r < 0)
4655 return r;
4656
4657 (void) mkdir_p_label(p, 0755);
4658
4659 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4660 * recreate the cache after every drop-in we write. */
4661 if (u->manager->unit_path_cache) {
4662 r = set_put_strdup(u->manager->unit_path_cache, p);
4663 if (r < 0)
4664 return r;
4665 }
4666
4667 r = write_string_file_atomic_label(q, wrapped);
4668 if (r < 0)
4669 return r;
4670
4671 r = strv_push(&u->dropin_paths, q);
4672 if (r < 0)
4673 return r;
4674 q = NULL;
4675
4676 strv_uniq(u->dropin_paths);
4677
4678 u->dropin_mtime = now(CLOCK_REALTIME);
4679
4680 return 0;
4681 }
4682
4683 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4684 _cleanup_free_ char *p = NULL;
4685 va_list ap;
4686 int r;
4687
4688 assert(u);
4689 assert(name);
4690 assert(format);
4691
4692 if (UNIT_WRITE_FLAGS_NOOP(flags))
4693 return 0;
4694
4695 va_start(ap, format);
4696 r = vasprintf(&p, format, ap);
4697 va_end(ap);
4698
4699 if (r < 0)
4700 return -ENOMEM;
4701
4702 return unit_write_setting(u, flags, name, p);
4703 }
4704
4705 int unit_make_transient(Unit *u) {
4706 _cleanup_free_ char *path = NULL;
4707 FILE *f;
4708
4709 assert(u);
4710
4711 if (!UNIT_VTABLE(u)->can_transient)
4712 return -EOPNOTSUPP;
4713
4714 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4715
4716 path = path_join(u->manager->lookup_paths.transient, u->id);
4717 if (!path)
4718 return -ENOMEM;
4719
4720 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4721 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4722
4723 RUN_WITH_UMASK(0022) {
4724 f = fopen(path, "we");
4725 if (!f)
4726 return -errno;
4727 }
4728
4729 safe_fclose(u->transient_file);
4730 u->transient_file = f;
4731
4732 free_and_replace(u->fragment_path, path);
4733
4734 u->source_path = mfree(u->source_path);
4735 u->dropin_paths = strv_free(u->dropin_paths);
4736 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4737
4738 u->load_state = UNIT_STUB;
4739 u->load_error = 0;
4740 u->transient = true;
4741
4742 unit_add_to_dbus_queue(u);
4743 unit_add_to_gc_queue(u);
4744
4745 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4746 u->transient_file);
4747
4748 return 0;
4749 }
4750
4751 static int log_kill(pid_t pid, int sig, void *userdata) {
4752 _cleanup_free_ char *comm = NULL;
4753
4754 (void) get_process_comm(pid, &comm);
4755
4756 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4757 only, like for example systemd's own PAM stub process. */
4758 if (comm && comm[0] == '(')
4759 return 0;
4760
4761 log_unit_notice(userdata,
4762 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4763 pid,
4764 strna(comm),
4765 signal_to_string(sig));
4766
4767 return 1;
4768 }
4769
4770 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4771 assert(c);
4772
4773 switch (k) {
4774
4775 case KILL_TERMINATE:
4776 case KILL_TERMINATE_AND_LOG:
4777 *noteworthy = false;
4778 return c->kill_signal;
4779
4780 case KILL_RESTART:
4781 *noteworthy = false;
4782 return restart_kill_signal(c);
4783
4784 case KILL_KILL:
4785 *noteworthy = true;
4786 return c->final_kill_signal;
4787
4788 case KILL_WATCHDOG:
4789 *noteworthy = true;
4790 return c->watchdog_signal;
4791
4792 default:
4793 assert_not_reached("KillOperation unknown");
4794 }
4795 }
4796
4797 int unit_kill_context(
4798 Unit *u,
4799 KillContext *c,
4800 KillOperation k,
4801 pid_t main_pid,
4802 pid_t control_pid,
4803 bool main_pid_alien) {
4804
4805 bool wait_for_exit = false, send_sighup;
4806 cg_kill_log_func_t log_func = NULL;
4807 int sig, r;
4808
4809 assert(u);
4810 assert(c);
4811
4812 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4813 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4814
4815 if (c->kill_mode == KILL_NONE)
4816 return 0;
4817
4818 bool noteworthy;
4819 sig = operation_to_signal(c, k, &noteworthy);
4820 if (noteworthy)
4821 log_func = log_kill;
4822
4823 send_sighup =
4824 c->send_sighup &&
4825 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4826 sig != SIGHUP;
4827
4828 if (main_pid > 0) {
4829 if (log_func)
4830 log_func(main_pid, sig, u);
4831
4832 r = kill_and_sigcont(main_pid, sig);
4833 if (r < 0 && r != -ESRCH) {
4834 _cleanup_free_ char *comm = NULL;
4835 (void) get_process_comm(main_pid, &comm);
4836
4837 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4838 } else {
4839 if (!main_pid_alien)
4840 wait_for_exit = true;
4841
4842 if (r != -ESRCH && send_sighup)
4843 (void) kill(main_pid, SIGHUP);
4844 }
4845 }
4846
4847 if (control_pid > 0) {
4848 if (log_func)
4849 log_func(control_pid, sig, u);
4850
4851 r = kill_and_sigcont(control_pid, sig);
4852 if (r < 0 && r != -ESRCH) {
4853 _cleanup_free_ char *comm = NULL;
4854 (void) get_process_comm(control_pid, &comm);
4855
4856 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4857 } else {
4858 wait_for_exit = true;
4859
4860 if (r != -ESRCH && send_sighup)
4861 (void) kill(control_pid, SIGHUP);
4862 }
4863 }
4864
4865 if (u->cgroup_path &&
4866 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4867 _cleanup_set_free_ Set *pid_set = NULL;
4868
4869 /* Exclude the main/control pids from being killed via the cgroup */
4870 pid_set = unit_pid_set(main_pid, control_pid);
4871 if (!pid_set)
4872 return -ENOMEM;
4873
4874 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4875 sig,
4876 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4877 pid_set,
4878 log_func, u);
4879 if (r < 0) {
4880 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4881 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4882
4883 } else if (r > 0) {
4884
4885 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4886 * we are running in a container or if this is a delegation unit, simply because cgroup
4887 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4888 * of containers it can be confused easily by left-over directories in the cgroup — which
4889 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4890 * there we get proper events. Hence rely on them. */
4891
4892 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4893 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4894 wait_for_exit = true;
4895
4896 if (send_sighup) {
4897 set_free(pid_set);
4898
4899 pid_set = unit_pid_set(main_pid, control_pid);
4900 if (!pid_set)
4901 return -ENOMEM;
4902
4903 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4904 SIGHUP,
4905 CGROUP_IGNORE_SELF,
4906 pid_set,
4907 NULL, NULL);
4908 }
4909 }
4910 }
4911
4912 return wait_for_exit;
4913 }
4914
4915 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4916 _cleanup_free_ char *p = NULL;
4917 UnitDependencyInfo di;
4918 int r;
4919
4920 assert(u);
4921 assert(path);
4922
4923 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4924 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4925 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4926 * determine which units to make themselves a dependency of. */
4927
4928 if (!path_is_absolute(path))
4929 return -EINVAL;
4930
4931 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4932 if (r < 0)
4933 return r;
4934
4935 p = strdup(path);
4936 if (!p)
4937 return -ENOMEM;
4938
4939 path = path_simplify(p, true);
4940
4941 if (!path_is_normalized(path))
4942 return -EPERM;
4943
4944 if (hashmap_contains(u->requires_mounts_for, path))
4945 return 0;
4946
4947 di = (UnitDependencyInfo) {
4948 .origin_mask = mask
4949 };
4950
4951 r = hashmap_put(u->requires_mounts_for, path, di.data);
4952 if (r < 0)
4953 return r;
4954 p = NULL;
4955
4956 char prefix[strlen(path) + 1];
4957 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4958 Set *x;
4959
4960 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4961 if (!x) {
4962 _cleanup_free_ char *q = NULL;
4963
4964 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4965 if (r < 0)
4966 return r;
4967
4968 q = strdup(prefix);
4969 if (!q)
4970 return -ENOMEM;
4971
4972 x = set_new(NULL);
4973 if (!x)
4974 return -ENOMEM;
4975
4976 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4977 if (r < 0) {
4978 set_free(x);
4979 return r;
4980 }
4981 q = NULL;
4982 }
4983
4984 r = set_put(x, u);
4985 if (r < 0)
4986 return r;
4987 }
4988
4989 return 0;
4990 }
4991
4992 int unit_setup_exec_runtime(Unit *u) {
4993 ExecRuntime **rt;
4994 size_t offset;
4995 Unit *other;
4996 Iterator i;
4997 void *v;
4998 int r;
4999
5000 offset = UNIT_VTABLE(u)->exec_runtime_offset;
5001 assert(offset > 0);
5002
5003 /* Check if there already is an ExecRuntime for this unit? */
5004 rt = (ExecRuntime**) ((uint8_t*) u + offset);
5005 if (*rt)
5006 return 0;
5007
5008 /* Try to get it from somebody else */
5009 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
5010 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
5011 if (r == 1)
5012 return 1;
5013 }
5014
5015 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
5016 }
5017
5018 int unit_setup_dynamic_creds(Unit *u) {
5019 ExecContext *ec;
5020 DynamicCreds *dcreds;
5021 size_t offset;
5022
5023 assert(u);
5024
5025 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
5026 assert(offset > 0);
5027 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
5028
5029 ec = unit_get_exec_context(u);
5030 assert(ec);
5031
5032 if (!ec->dynamic_user)
5033 return 0;
5034
5035 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
5036 }
5037
5038 bool unit_type_supported(UnitType t) {
5039 if (_unlikely_(t < 0))
5040 return false;
5041 if (_unlikely_(t >= _UNIT_TYPE_MAX))
5042 return false;
5043
5044 if (!unit_vtable[t]->supported)
5045 return true;
5046
5047 return unit_vtable[t]->supported();
5048 }
5049
5050 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5051 int r;
5052
5053 assert(u);
5054 assert(where);
5055
5056 r = dir_is_empty(where);
5057 if (r > 0 || r == -ENOTDIR)
5058 return;
5059 if (r < 0) {
5060 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5061 return;
5062 }
5063
5064 log_struct(LOG_NOTICE,
5065 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5066 LOG_UNIT_ID(u),
5067 LOG_UNIT_INVOCATION_ID(u),
5068 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5069 "WHERE=%s", where);
5070 }
5071
5072 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5073 _cleanup_free_ char *canonical_where = NULL;
5074 int r;
5075
5076 assert(u);
5077 assert(where);
5078
5079 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5080 if (r < 0) {
5081 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5082 return 0;
5083 }
5084
5085 /* We will happily ignore a trailing slash (or any redundant slashes) */
5086 if (path_equal(where, canonical_where))
5087 return 0;
5088
5089 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5090 log_struct(LOG_ERR,
5091 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5092 LOG_UNIT_ID(u),
5093 LOG_UNIT_INVOCATION_ID(u),
5094 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5095 "WHERE=%s", where);
5096
5097 return -ELOOP;
5098 }
5099
5100 bool unit_is_pristine(Unit *u) {
5101 assert(u);
5102
5103 /* Check if the unit already exists or is already around,
5104 * in a number of different ways. Note that to cater for unit
5105 * types such as slice, we are generally fine with units that
5106 * are marked UNIT_LOADED even though nothing was actually
5107 * loaded, as those unit types don't require a file on disk. */
5108
5109 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
5110 u->fragment_path ||
5111 u->source_path ||
5112 !strv_isempty(u->dropin_paths) ||
5113 u->job ||
5114 u->merged_into);
5115 }
5116
5117 pid_t unit_control_pid(Unit *u) {
5118 assert(u);
5119
5120 if (UNIT_VTABLE(u)->control_pid)
5121 return UNIT_VTABLE(u)->control_pid(u);
5122
5123 return 0;
5124 }
5125
5126 pid_t unit_main_pid(Unit *u) {
5127 assert(u);
5128
5129 if (UNIT_VTABLE(u)->main_pid)
5130 return UNIT_VTABLE(u)->main_pid(u);
5131
5132 return 0;
5133 }
5134
5135 static void unit_unref_uid_internal(
5136 Unit *u,
5137 uid_t *ref_uid,
5138 bool destroy_now,
5139 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5140
5141 assert(u);
5142 assert(ref_uid);
5143 assert(_manager_unref_uid);
5144
5145 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5146 * gid_t are actually the same time, with the same validity rules.
5147 *
5148 * Drops a reference to UID/GID from a unit. */
5149
5150 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5151 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5152
5153 if (!uid_is_valid(*ref_uid))
5154 return;
5155
5156 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5157 *ref_uid = UID_INVALID;
5158 }
5159
5160 static void unit_unref_uid(Unit *u, bool destroy_now) {
5161 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5162 }
5163
5164 static void unit_unref_gid(Unit *u, bool destroy_now) {
5165 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5166 }
5167
5168 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5169 assert(u);
5170
5171 unit_unref_uid(u, destroy_now);
5172 unit_unref_gid(u, destroy_now);
5173 }
5174
5175 static int unit_ref_uid_internal(
5176 Unit *u,
5177 uid_t *ref_uid,
5178 uid_t uid,
5179 bool clean_ipc,
5180 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5181
5182 int r;
5183
5184 assert(u);
5185 assert(ref_uid);
5186 assert(uid_is_valid(uid));
5187 assert(_manager_ref_uid);
5188
5189 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5190 * are actually the same type, and have the same validity rules.
5191 *
5192 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5193 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5194 * drops to zero. */
5195
5196 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5197 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5198
5199 if (*ref_uid == uid)
5200 return 0;
5201
5202 if (uid_is_valid(*ref_uid)) /* Already set? */
5203 return -EBUSY;
5204
5205 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5206 if (r < 0)
5207 return r;
5208
5209 *ref_uid = uid;
5210 return 1;
5211 }
5212
5213 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5214 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5215 }
5216
5217 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5218 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5219 }
5220
5221 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5222 int r = 0, q = 0;
5223
5224 assert(u);
5225
5226 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5227
5228 if (uid_is_valid(uid)) {
5229 r = unit_ref_uid(u, uid, clean_ipc);
5230 if (r < 0)
5231 return r;
5232 }
5233
5234 if (gid_is_valid(gid)) {
5235 q = unit_ref_gid(u, gid, clean_ipc);
5236 if (q < 0) {
5237 if (r > 0)
5238 unit_unref_uid(u, false);
5239
5240 return q;
5241 }
5242 }
5243
5244 return r > 0 || q > 0;
5245 }
5246
5247 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5248 ExecContext *c;
5249 int r;
5250
5251 assert(u);
5252
5253 c = unit_get_exec_context(u);
5254
5255 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5256 if (r < 0)
5257 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5258
5259 return r;
5260 }
5261
5262 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5263 int r;
5264
5265 assert(u);
5266
5267 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5268 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5269 * objects when no service references the UID/GID anymore. */
5270
5271 r = unit_ref_uid_gid(u, uid, gid);
5272 if (r > 0)
5273 unit_add_to_dbus_queue(u);
5274 }
5275
5276 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5277 int r;
5278
5279 assert(u);
5280
5281 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5282
5283 if (sd_id128_equal(u->invocation_id, id))
5284 return 0;
5285
5286 if (!sd_id128_is_null(u->invocation_id))
5287 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5288
5289 if (sd_id128_is_null(id)) {
5290 r = 0;
5291 goto reset;
5292 }
5293
5294 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5295 if (r < 0)
5296 goto reset;
5297
5298 u->invocation_id = id;
5299 sd_id128_to_string(id, u->invocation_id_string);
5300
5301 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5302 if (r < 0)
5303 goto reset;
5304
5305 return 0;
5306
5307 reset:
5308 u->invocation_id = SD_ID128_NULL;
5309 u->invocation_id_string[0] = 0;
5310 return r;
5311 }
5312
5313 int unit_acquire_invocation_id(Unit *u) {
5314 sd_id128_t id;
5315 int r;
5316
5317 assert(u);
5318
5319 r = sd_id128_randomize(&id);
5320 if (r < 0)
5321 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5322
5323 r = unit_set_invocation_id(u, id);
5324 if (r < 0)
5325 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5326
5327 unit_add_to_dbus_queue(u);
5328 return 0;
5329 }
5330
5331 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5332 int r;
5333
5334 assert(u);
5335 assert(p);
5336
5337 /* Copy parameters from manager */
5338 r = manager_get_effective_environment(u->manager, &p->environment);
5339 if (r < 0)
5340 return r;
5341
5342 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5343 p->cgroup_supported = u->manager->cgroup_supported;
5344 p->prefix = u->manager->prefix;
5345 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5346
5347 /* Copy parameters from unit */
5348 p->cgroup_path = u->cgroup_path;
5349 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5350
5351 return 0;
5352 }
5353
5354 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5355 int r;
5356
5357 assert(u);
5358 assert(ret);
5359
5360 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5361 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5362
5363 (void) unit_realize_cgroup(u);
5364
5365 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5366 if (r != 0)
5367 return r;
5368
5369 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5370 (void) ignore_signals(SIGPIPE, -1);
5371
5372 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5373
5374 if (u->cgroup_path) {
5375 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5376 if (r < 0) {
5377 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5378 _exit(EXIT_CGROUP);
5379 }
5380 }
5381
5382 return 0;
5383 }
5384
5385 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5386 pid_t pid;
5387 int r;
5388
5389 assert(u);
5390 assert(ret_pid);
5391
5392 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5393 if (r < 0)
5394 return r;
5395 if (r == 0) {
5396 int ret = EXIT_SUCCESS;
5397 char **i;
5398
5399 STRV_FOREACH(i, paths) {
5400 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5401 if (r < 0) {
5402 log_error_errno(r, "Failed to remove '%s': %m", *i);
5403 ret = EXIT_FAILURE;
5404 }
5405 }
5406
5407 _exit(ret);
5408 }
5409
5410 r = unit_watch_pid(u, pid, true);
5411 if (r < 0)
5412 return r;
5413
5414 *ret_pid = pid;
5415 return 0;
5416 }
5417
5418 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5419 assert(u);
5420 assert(d >= 0);
5421 assert(d < _UNIT_DEPENDENCY_MAX);
5422 assert(other);
5423
5424 if (di.origin_mask == 0 && di.destination_mask == 0) {
5425 /* No bit set anymore, let's drop the whole entry */
5426 assert_se(hashmap_remove(u->dependencies[d], other));
5427 log_unit_debug(u, "lost dependency %s=%s", unit_dependency_to_string(d), other->id);
5428 } else
5429 /* Mask was reduced, let's update the entry */
5430 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5431 }
5432
5433 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5434 UnitDependency d;
5435
5436 assert(u);
5437
5438 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5439
5440 if (mask == 0)
5441 return;
5442
5443 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5444 bool done;
5445
5446 do {
5447 UnitDependencyInfo di;
5448 Unit *other;
5449 Iterator i;
5450
5451 done = true;
5452
5453 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5454 UnitDependency q;
5455
5456 if ((di.origin_mask & ~mask) == di.origin_mask)
5457 continue;
5458 di.origin_mask &= ~mask;
5459 unit_update_dependency_mask(u, d, other, di);
5460
5461 /* We updated the dependency from our unit to the other unit now. But most dependencies
5462 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5463 * all dependency types on the other unit and delete all those which point to us and
5464 * have the right mask set. */
5465
5466 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5467 UnitDependencyInfo dj;
5468
5469 dj.data = hashmap_get(other->dependencies[q], u);
5470 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5471 continue;
5472 dj.destination_mask &= ~mask;
5473
5474 unit_update_dependency_mask(other, q, u, dj);
5475 }
5476
5477 unit_add_to_gc_queue(other);
5478
5479 done = false;
5480 break;
5481 }
5482
5483 } while (!done);
5484 }
5485 }
5486
5487 static int unit_get_invocation_path(Unit *u, char **ret) {
5488 char *p;
5489 int r;
5490
5491 assert(u);
5492 assert(ret);
5493
5494 if (MANAGER_IS_SYSTEM(u->manager))
5495 p = strjoin("/run/systemd/units/invocation:", u->id);
5496 else {
5497 _cleanup_free_ char *user_path = NULL;
5498 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5499 if (r < 0)
5500 return r;
5501 p = strjoin(user_path, u->id);
5502 }
5503
5504 if (!p)
5505 return -ENOMEM;
5506
5507 *ret = p;
5508 return 0;
5509 }
5510
5511 static int unit_export_invocation_id(Unit *u) {
5512 _cleanup_free_ char *p = NULL;
5513 int r;
5514
5515 assert(u);
5516
5517 if (u->exported_invocation_id)
5518 return 0;
5519
5520 if (sd_id128_is_null(u->invocation_id))
5521 return 0;
5522
5523 r = unit_get_invocation_path(u, &p);
5524 if (r < 0)
5525 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5526
5527 r = symlink_atomic(u->invocation_id_string, p);
5528 if (r < 0)
5529 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5530
5531 u->exported_invocation_id = true;
5532 return 0;
5533 }
5534
5535 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5536 const char *p;
5537 char buf[2];
5538 int r;
5539
5540 assert(u);
5541 assert(c);
5542
5543 if (u->exported_log_level_max)
5544 return 0;
5545
5546 if (c->log_level_max < 0)
5547 return 0;
5548
5549 assert(c->log_level_max <= 7);
5550
5551 buf[0] = '0' + c->log_level_max;
5552 buf[1] = 0;
5553
5554 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5555 r = symlink_atomic(buf, p);
5556 if (r < 0)
5557 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5558
5559 u->exported_log_level_max = true;
5560 return 0;
5561 }
5562
5563 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5564 _cleanup_close_ int fd = -1;
5565 struct iovec *iovec;
5566 const char *p;
5567 char *pattern;
5568 le64_t *sizes;
5569 ssize_t n;
5570 size_t i;
5571 int r;
5572
5573 if (u->exported_log_extra_fields)
5574 return 0;
5575
5576 if (c->n_log_extra_fields <= 0)
5577 return 0;
5578
5579 sizes = newa(le64_t, c->n_log_extra_fields);
5580 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5581
5582 for (i = 0; i < c->n_log_extra_fields; i++) {
5583 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5584
5585 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5586 iovec[i*2+1] = c->log_extra_fields[i];
5587 }
5588
5589 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5590 pattern = strjoina(p, ".XXXXXX");
5591
5592 fd = mkostemp_safe(pattern);
5593 if (fd < 0)
5594 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5595
5596 n = writev(fd, iovec, c->n_log_extra_fields*2);
5597 if (n < 0) {
5598 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5599 goto fail;
5600 }
5601
5602 (void) fchmod(fd, 0644);
5603
5604 if (rename(pattern, p) < 0) {
5605 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5606 goto fail;
5607 }
5608
5609 u->exported_log_extra_fields = true;
5610 return 0;
5611
5612 fail:
5613 (void) unlink(pattern);
5614 return r;
5615 }
5616
5617 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5618 _cleanup_free_ char *buf = NULL;
5619 const char *p;
5620 int r;
5621
5622 assert(u);
5623 assert(c);
5624
5625 if (u->exported_log_ratelimit_interval)
5626 return 0;
5627
5628 if (c->log_ratelimit_interval_usec == 0)
5629 return 0;
5630
5631 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5632
5633 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5634 return log_oom();
5635
5636 r = symlink_atomic(buf, p);
5637 if (r < 0)
5638 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5639
5640 u->exported_log_ratelimit_interval = true;
5641 return 0;
5642 }
5643
5644 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5645 _cleanup_free_ char *buf = NULL;
5646 const char *p;
5647 int r;
5648
5649 assert(u);
5650 assert(c);
5651
5652 if (u->exported_log_ratelimit_burst)
5653 return 0;
5654
5655 if (c->log_ratelimit_burst == 0)
5656 return 0;
5657
5658 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5659
5660 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5661 return log_oom();
5662
5663 r = symlink_atomic(buf, p);
5664 if (r < 0)
5665 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5666
5667 u->exported_log_ratelimit_burst = true;
5668 return 0;
5669 }
5670
5671 void unit_export_state_files(Unit *u) {
5672 const ExecContext *c;
5673
5674 assert(u);
5675
5676 if (!u->id)
5677 return;
5678
5679 if (MANAGER_IS_TEST_RUN(u->manager))
5680 return;
5681
5682 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5683 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5684 * the IPC system itself and PID 1 also log to the journal.
5685 *
5686 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5687 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5688 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5689 * namespace at least.
5690 *
5691 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5692 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5693 * them with one. */
5694
5695 (void) unit_export_invocation_id(u);
5696
5697 if (!MANAGER_IS_SYSTEM(u->manager))
5698 return;
5699
5700 c = unit_get_exec_context(u);
5701 if (c) {
5702 (void) unit_export_log_level_max(u, c);
5703 (void) unit_export_log_extra_fields(u, c);
5704 (void) unit_export_log_ratelimit_interval(u, c);
5705 (void) unit_export_log_ratelimit_burst(u, c);
5706 }
5707 }
5708
5709 void unit_unlink_state_files(Unit *u) {
5710 const char *p;
5711
5712 assert(u);
5713
5714 if (!u->id)
5715 return;
5716
5717 /* Undoes the effect of unit_export_state() */
5718
5719 if (u->exported_invocation_id) {
5720 _cleanup_free_ char *invocation_path = NULL;
5721 int r = unit_get_invocation_path(u, &invocation_path);
5722 if (r >= 0) {
5723 (void) unlink(invocation_path);
5724 u->exported_invocation_id = false;
5725 }
5726 }
5727
5728 if (!MANAGER_IS_SYSTEM(u->manager))
5729 return;
5730
5731 if (u->exported_log_level_max) {
5732 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5733 (void) unlink(p);
5734
5735 u->exported_log_level_max = false;
5736 }
5737
5738 if (u->exported_log_extra_fields) {
5739 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5740 (void) unlink(p);
5741
5742 u->exported_log_extra_fields = false;
5743 }
5744
5745 if (u->exported_log_ratelimit_interval) {
5746 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5747 (void) unlink(p);
5748
5749 u->exported_log_ratelimit_interval = false;
5750 }
5751
5752 if (u->exported_log_ratelimit_burst) {
5753 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5754 (void) unlink(p);
5755
5756 u->exported_log_ratelimit_burst = false;
5757 }
5758 }
5759
5760 int unit_prepare_exec(Unit *u) {
5761 int r;
5762
5763 assert(u);
5764
5765 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5766 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5767 r = bpf_firewall_load_custom(u);
5768 if (r < 0)
5769 return r;
5770
5771 /* Prepares everything so that we can fork of a process for this unit */
5772
5773 (void) unit_realize_cgroup(u);
5774
5775 if (u->reset_accounting) {
5776 (void) unit_reset_accounting(u);
5777 u->reset_accounting = false;
5778 }
5779
5780 unit_export_state_files(u);
5781
5782 r = unit_setup_exec_runtime(u);
5783 if (r < 0)
5784 return r;
5785
5786 r = unit_setup_dynamic_creds(u);
5787 if (r < 0)
5788 return r;
5789
5790 return 0;
5791 }
5792
5793 static int log_leftover(pid_t pid, int sig, void *userdata) {
5794 _cleanup_free_ char *comm = NULL;
5795
5796 (void) get_process_comm(pid, &comm);
5797
5798 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5799 return 0;
5800
5801 log_unit_warning(userdata,
5802 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5803 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5804 pid, strna(comm));
5805
5806 return 1;
5807 }
5808
5809 int unit_warn_leftover_processes(Unit *u) {
5810 assert(u);
5811
5812 (void) unit_pick_cgroup_path(u);
5813
5814 if (!u->cgroup_path)
5815 return 0;
5816
5817 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5818 }
5819
5820 bool unit_needs_console(Unit *u) {
5821 ExecContext *ec;
5822 UnitActiveState state;
5823
5824 assert(u);
5825
5826 state = unit_active_state(u);
5827
5828 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5829 return false;
5830
5831 if (UNIT_VTABLE(u)->needs_console)
5832 return UNIT_VTABLE(u)->needs_console(u);
5833
5834 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5835 ec = unit_get_exec_context(u);
5836 if (!ec)
5837 return false;
5838
5839 return exec_context_may_touch_console(ec);
5840 }
5841
5842 const char *unit_label_path(const Unit *u) {
5843 const char *p;
5844
5845 assert(u);
5846
5847 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5848 * when validating access checks. */
5849
5850 p = u->source_path ?: u->fragment_path;
5851 if (!p)
5852 return NULL;
5853
5854 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5855 if (path_equal(p, "/dev/null"))
5856 return NULL;
5857
5858 return p;
5859 }
5860
5861 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5862 int r;
5863
5864 assert(u);
5865
5866 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5867 * and not a kernel thread either */
5868
5869 /* First, a simple range check */
5870 if (!pid_is_valid(pid))
5871 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5872
5873 /* Some extra safety check */
5874 if (pid == 1 || pid == getpid_cached())
5875 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5876
5877 /* Don't even begin to bother with kernel threads */
5878 r = is_kernel_thread(pid);
5879 if (r == -ESRCH)
5880 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5881 if (r < 0)
5882 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5883 if (r > 0)
5884 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5885
5886 return 0;
5887 }
5888
5889 void unit_log_success(Unit *u) {
5890 assert(u);
5891
5892 log_struct(LOG_INFO,
5893 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5894 LOG_UNIT_ID(u),
5895 LOG_UNIT_INVOCATION_ID(u),
5896 LOG_UNIT_MESSAGE(u, "Succeeded."));
5897 }
5898
5899 void unit_log_failure(Unit *u, const char *result) {
5900 assert(u);
5901 assert(result);
5902
5903 log_struct(LOG_WARNING,
5904 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5905 LOG_UNIT_ID(u),
5906 LOG_UNIT_INVOCATION_ID(u),
5907 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5908 "UNIT_RESULT=%s", result);
5909 }
5910
5911 void unit_log_skip(Unit *u, const char *result) {
5912 assert(u);
5913 assert(result);
5914
5915 log_struct(LOG_INFO,
5916 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5917 LOG_UNIT_ID(u),
5918 LOG_UNIT_INVOCATION_ID(u),
5919 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5920 "UNIT_RESULT=%s", result);
5921 }
5922
5923 void unit_log_process_exit(
5924 Unit *u,
5925 const char *kind,
5926 const char *command,
5927 bool success,
5928 int code,
5929 int status) {
5930
5931 int level;
5932
5933 assert(u);
5934 assert(kind);
5935
5936 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5937 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5938 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5939 * WARNING. */
5940 if (success)
5941 level = LOG_DEBUG;
5942 else if (code == CLD_EXITED)
5943 level = LOG_NOTICE;
5944 else
5945 level = LOG_WARNING;
5946
5947 log_struct(level,
5948 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5949 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5950 kind,
5951 sigchld_code_to_string(code), status,
5952 strna(code == CLD_EXITED
5953 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5954 : signal_to_string(status))),
5955 "EXIT_CODE=%s", sigchld_code_to_string(code),
5956 "EXIT_STATUS=%i", status,
5957 "COMMAND=%s", strna(command),
5958 LOG_UNIT_ID(u),
5959 LOG_UNIT_INVOCATION_ID(u));
5960 }
5961
5962 int unit_exit_status(Unit *u) {
5963 assert(u);
5964
5965 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5966 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5967 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5968 * service process has exited abnormally (signal/coredump). */
5969
5970 if (!UNIT_VTABLE(u)->exit_status)
5971 return -EOPNOTSUPP;
5972
5973 return UNIT_VTABLE(u)->exit_status(u);
5974 }
5975
5976 int unit_failure_action_exit_status(Unit *u) {
5977 int r;
5978
5979 assert(u);
5980
5981 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5982
5983 if (u->failure_action_exit_status >= 0)
5984 return u->failure_action_exit_status;
5985
5986 r = unit_exit_status(u);
5987 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5988 return 255;
5989
5990 return r;
5991 }
5992
5993 int unit_success_action_exit_status(Unit *u) {
5994 int r;
5995
5996 assert(u);
5997
5998 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5999
6000 if (u->success_action_exit_status >= 0)
6001 return u->success_action_exit_status;
6002
6003 r = unit_exit_status(u);
6004 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6005 return 255;
6006
6007 return r;
6008 }
6009
6010 int unit_test_trigger_loaded(Unit *u) {
6011 Unit *trigger;
6012
6013 /* Tests whether the unit to trigger is loaded */
6014
6015 trigger = UNIT_TRIGGER(u);
6016 if (!trigger)
6017 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6018 "Refusing to start, no unit to trigger.");
6019 if (trigger->load_state != UNIT_LOADED)
6020 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6021 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6022
6023 return 0;
6024 }
6025
6026 void unit_destroy_runtime_directory(Unit *u, const ExecContext *context) {
6027 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
6028 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
6029 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6030 }
6031
6032 int unit_clean(Unit *u, ExecCleanMask mask) {
6033 UnitActiveState state;
6034
6035 assert(u);
6036
6037 /* Special return values:
6038 *
6039 * -EOPNOTSUPP → cleaning not supported for this unit type
6040 * -EUNATCH → cleaning not defined for this resource type
6041 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6042 * a job queued or similar
6043 */
6044
6045 if (!UNIT_VTABLE(u)->clean)
6046 return -EOPNOTSUPP;
6047
6048 if (mask == 0)
6049 return -EUNATCH;
6050
6051 if (u->load_state != UNIT_LOADED)
6052 return -EBUSY;
6053
6054 if (u->job)
6055 return -EBUSY;
6056
6057 state = unit_active_state(u);
6058 if (!IN_SET(state, UNIT_INACTIVE))
6059 return -EBUSY;
6060
6061 return UNIT_VTABLE(u)->clean(u, mask);
6062 }
6063
6064 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6065 assert(u);
6066
6067 if (!UNIT_VTABLE(u)->clean ||
6068 u->load_state != UNIT_LOADED) {
6069 *ret = 0;
6070 return 0;
6071 }
6072
6073 /* When the clean() method is set, can_clean() really should be set too */
6074 assert(UNIT_VTABLE(u)->can_clean);
6075
6076 return UNIT_VTABLE(u)->can_clean(u, ret);
6077 }
6078
6079 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6080 [COLLECT_INACTIVE] = "inactive",
6081 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6082 };
6083
6084 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);