]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
util: rename socket_protocol_{from,to}_name() to ip_protocol_{from,to}_name()
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "id128-util.h"
28 #include "io-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
31 #include "log.h"
32 #include "macro.h"
33 #include "missing.h"
34 #include "mkdir.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
39 #include "set.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
42 #include "special.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
48 #include "strv.h"
49 #include "terminal-util.h"
50 #include "umask-util.h"
51 #include "unit-name.h"
52 #include "unit.h"
53 #include "user-util.h"
54 #include "virt.h"
55
56 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
57 [UNIT_SERVICE] = &service_vtable,
58 [UNIT_SOCKET] = &socket_vtable,
59 [UNIT_TARGET] = &target_vtable,
60 [UNIT_DEVICE] = &device_vtable,
61 [UNIT_MOUNT] = &mount_vtable,
62 [UNIT_AUTOMOUNT] = &automount_vtable,
63 [UNIT_SWAP] = &swap_vtable,
64 [UNIT_TIMER] = &timer_vtable,
65 [UNIT_PATH] = &path_vtable,
66 [UNIT_SLICE] = &slice_vtable,
67 [UNIT_SCOPE] = &scope_vtable,
68 };
69
70 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
71
72 Unit *unit_new(Manager *m, size_t size) {
73 Unit *u;
74
75 assert(m);
76 assert(size >= sizeof(Unit));
77
78 u = malloc0(size);
79 if (!u)
80 return NULL;
81
82 u->names = set_new(&string_hash_ops);
83 if (!u->names)
84 return mfree(u);
85
86 u->manager = m;
87 u->type = _UNIT_TYPE_INVALID;
88 u->default_dependencies = true;
89 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
90 u->unit_file_preset = -1;
91 u->on_failure_job_mode = JOB_REPLACE;
92 u->cgroup_inotify_wd = -1;
93 u->job_timeout = USEC_INFINITY;
94 u->job_running_timeout = USEC_INFINITY;
95 u->ref_uid = UID_INVALID;
96 u->ref_gid = GID_INVALID;
97 u->cpu_usage_last = NSEC_INFINITY;
98 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
99 u->failure_action_exit_status = u->success_action_exit_status = -1;
100
101 u->ip_accounting_ingress_map_fd = -1;
102 u->ip_accounting_egress_map_fd = -1;
103 u->ipv4_allow_map_fd = -1;
104 u->ipv6_allow_map_fd = -1;
105 u->ipv4_deny_map_fd = -1;
106 u->ipv6_deny_map_fd = -1;
107
108 u->last_section_private = -1;
109
110 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
111 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
112
113 return u;
114 }
115
116 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
117 _cleanup_(unit_freep) Unit *u = NULL;
118 int r;
119
120 u = unit_new(m, size);
121 if (!u)
122 return -ENOMEM;
123
124 r = unit_add_name(u, name);
125 if (r < 0)
126 return r;
127
128 *ret = TAKE_PTR(u);
129
130 return r;
131 }
132
133 bool unit_has_name(Unit *u, const char *name) {
134 assert(u);
135 assert(name);
136
137 return set_contains(u->names, (char*) name);
138 }
139
140 static void unit_init(Unit *u) {
141 CGroupContext *cc;
142 ExecContext *ec;
143 KillContext *kc;
144
145 assert(u);
146 assert(u->manager);
147 assert(u->type >= 0);
148
149 cc = unit_get_cgroup_context(u);
150 if (cc) {
151 cgroup_context_init(cc);
152
153 /* Copy in the manager defaults into the cgroup
154 * context, _before_ the rest of the settings have
155 * been initialized */
156
157 cc->cpu_accounting = u->manager->default_cpu_accounting;
158 cc->io_accounting = u->manager->default_io_accounting;
159 cc->ip_accounting = u->manager->default_ip_accounting;
160 cc->blockio_accounting = u->manager->default_blockio_accounting;
161 cc->memory_accounting = u->manager->default_memory_accounting;
162 cc->tasks_accounting = u->manager->default_tasks_accounting;
163 cc->ip_accounting = u->manager->default_ip_accounting;
164
165 if (u->type != UNIT_SLICE)
166 cc->tasks_max = u->manager->default_tasks_max;
167 }
168
169 ec = unit_get_exec_context(u);
170 if (ec) {
171 exec_context_init(ec);
172
173 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
174 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
175 }
176
177 kc = unit_get_kill_context(u);
178 if (kc)
179 kill_context_init(kc);
180
181 if (UNIT_VTABLE(u)->init)
182 UNIT_VTABLE(u)->init(u);
183 }
184
185 int unit_add_name(Unit *u, const char *text) {
186 _cleanup_free_ char *s = NULL, *i = NULL;
187 UnitType t;
188 int r;
189
190 assert(u);
191 assert(text);
192
193 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
194
195 if (!u->instance)
196 return -EINVAL;
197
198 r = unit_name_replace_instance(text, u->instance, &s);
199 if (r < 0)
200 return r;
201 } else {
202 s = strdup(text);
203 if (!s)
204 return -ENOMEM;
205 }
206
207 if (set_contains(u->names, s))
208 return 0;
209 if (hashmap_contains(u->manager->units, s))
210 return -EEXIST;
211
212 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
213 return -EINVAL;
214
215 t = unit_name_to_type(s);
216 if (t < 0)
217 return -EINVAL;
218
219 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
220 return -EINVAL;
221
222 r = unit_name_to_instance(s, &i);
223 if (r < 0)
224 return r;
225
226 if (i && !unit_type_may_template(t))
227 return -EINVAL;
228
229 /* Ensure that this unit is either instanced or not instanced,
230 * but not both. Note that we do allow names with different
231 * instance names however! */
232 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
233 return -EINVAL;
234
235 if (!unit_type_may_alias(t) && !set_isempty(u->names))
236 return -EEXIST;
237
238 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
239 return -E2BIG;
240
241 r = set_put(u->names, s);
242 if (r < 0)
243 return r;
244 assert(r > 0);
245
246 r = hashmap_put(u->manager->units, s, u);
247 if (r < 0) {
248 (void) set_remove(u->names, s);
249 return r;
250 }
251
252 if (u->type == _UNIT_TYPE_INVALID) {
253 u->type = t;
254 u->id = s;
255 u->instance = TAKE_PTR(i);
256
257 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
258
259 unit_init(u);
260 }
261
262 s = NULL;
263
264 unit_add_to_dbus_queue(u);
265 return 0;
266 }
267
268 int unit_choose_id(Unit *u, const char *name) {
269 _cleanup_free_ char *t = NULL;
270 char *s, *i;
271 int r;
272
273 assert(u);
274 assert(name);
275
276 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
277
278 if (!u->instance)
279 return -EINVAL;
280
281 r = unit_name_replace_instance(name, u->instance, &t);
282 if (r < 0)
283 return r;
284
285 name = t;
286 }
287
288 /* Selects one of the names of this unit as the id */
289 s = set_get(u->names, (char*) name);
290 if (!s)
291 return -ENOENT;
292
293 /* Determine the new instance from the new id */
294 r = unit_name_to_instance(s, &i);
295 if (r < 0)
296 return r;
297
298 u->id = s;
299
300 free(u->instance);
301 u->instance = i;
302
303 unit_add_to_dbus_queue(u);
304
305 return 0;
306 }
307
308 int unit_set_description(Unit *u, const char *description) {
309 int r;
310
311 assert(u);
312
313 r = free_and_strdup(&u->description, empty_to_null(description));
314 if (r < 0)
315 return r;
316 if (r > 0)
317 unit_add_to_dbus_queue(u);
318
319 return 0;
320 }
321
322 bool unit_may_gc(Unit *u) {
323 UnitActiveState state;
324 int r;
325
326 assert(u);
327
328 /* Checks whether the unit is ready to be unloaded for garbage collection.
329 * Returns true when the unit may be collected, and false if there's some
330 * reason to keep it loaded.
331 *
332 * References from other units are *not* checked here. Instead, this is done
333 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
334 */
335
336 if (u->job)
337 return false;
338
339 if (u->nop_job)
340 return false;
341
342 state = unit_active_state(u);
343
344 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
345 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
346 UNIT_VTABLE(u)->release_resources)
347 UNIT_VTABLE(u)->release_resources(u);
348
349 if (u->perpetual)
350 return false;
351
352 if (sd_bus_track_count(u->bus_track) > 0)
353 return false;
354
355 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
356 switch (u->collect_mode) {
357
358 case COLLECT_INACTIVE:
359 if (state != UNIT_INACTIVE)
360 return false;
361
362 break;
363
364 case COLLECT_INACTIVE_OR_FAILED:
365 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
366 return false;
367
368 break;
369
370 default:
371 assert_not_reached("Unknown garbage collection mode");
372 }
373
374 if (u->cgroup_path) {
375 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
376 * around. Units with active processes should never be collected. */
377
378 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
379 if (r < 0)
380 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
381 if (r <= 0)
382 return false;
383 }
384
385 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
386 return false;
387
388 return true;
389 }
390
391 void unit_add_to_load_queue(Unit *u) {
392 assert(u);
393 assert(u->type != _UNIT_TYPE_INVALID);
394
395 if (u->load_state != UNIT_STUB || u->in_load_queue)
396 return;
397
398 LIST_PREPEND(load_queue, u->manager->load_queue, u);
399 u->in_load_queue = true;
400 }
401
402 void unit_add_to_cleanup_queue(Unit *u) {
403 assert(u);
404
405 if (u->in_cleanup_queue)
406 return;
407
408 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
409 u->in_cleanup_queue = true;
410 }
411
412 void unit_add_to_gc_queue(Unit *u) {
413 assert(u);
414
415 if (u->in_gc_queue || u->in_cleanup_queue)
416 return;
417
418 if (!unit_may_gc(u))
419 return;
420
421 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
422 u->in_gc_queue = true;
423 }
424
425 void unit_add_to_dbus_queue(Unit *u) {
426 assert(u);
427 assert(u->type != _UNIT_TYPE_INVALID);
428
429 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
430 return;
431
432 /* Shortcut things if nobody cares */
433 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
434 sd_bus_track_count(u->bus_track) <= 0 &&
435 set_isempty(u->manager->private_buses)) {
436 u->sent_dbus_new_signal = true;
437 return;
438 }
439
440 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
441 u->in_dbus_queue = true;
442 }
443
444 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
445 assert(u);
446
447 if (u->in_stop_when_unneeded_queue)
448 return;
449
450 if (!u->stop_when_unneeded)
451 return;
452
453 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
454 return;
455
456 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
457 u->in_stop_when_unneeded_queue = true;
458 }
459
460 static void bidi_set_free(Unit *u, Hashmap *h) {
461 Unit *other;
462 Iterator i;
463 void *v;
464
465 assert(u);
466
467 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
468
469 HASHMAP_FOREACH_KEY(v, other, h, i) {
470 UnitDependency d;
471
472 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
473 hashmap_remove(other->dependencies[d], u);
474
475 unit_add_to_gc_queue(other);
476 }
477
478 hashmap_free(h);
479 }
480
481 static void unit_remove_transient(Unit *u) {
482 char **i;
483
484 assert(u);
485
486 if (!u->transient)
487 return;
488
489 if (u->fragment_path)
490 (void) unlink(u->fragment_path);
491
492 STRV_FOREACH(i, u->dropin_paths) {
493 _cleanup_free_ char *p = NULL, *pp = NULL;
494
495 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
496 if (!p)
497 continue;
498
499 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
500 if (!pp)
501 continue;
502
503 /* Only drop transient drop-ins */
504 if (!path_equal(u->manager->lookup_paths.transient, pp))
505 continue;
506
507 (void) unlink(*i);
508 (void) rmdir(p);
509 }
510 }
511
512 static void unit_free_requires_mounts_for(Unit *u) {
513 assert(u);
514
515 for (;;) {
516 _cleanup_free_ char *path;
517
518 path = hashmap_steal_first_key(u->requires_mounts_for);
519 if (!path)
520 break;
521 else {
522 char s[strlen(path) + 1];
523
524 PATH_FOREACH_PREFIX_MORE(s, path) {
525 char *y;
526 Set *x;
527
528 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
529 if (!x)
530 continue;
531
532 (void) set_remove(x, u);
533
534 if (set_isempty(x)) {
535 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
536 free(y);
537 set_free(x);
538 }
539 }
540 }
541 }
542
543 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
544 }
545
546 static void unit_done(Unit *u) {
547 ExecContext *ec;
548 CGroupContext *cc;
549
550 assert(u);
551
552 if (u->type < 0)
553 return;
554
555 if (UNIT_VTABLE(u)->done)
556 UNIT_VTABLE(u)->done(u);
557
558 ec = unit_get_exec_context(u);
559 if (ec)
560 exec_context_done(ec);
561
562 cc = unit_get_cgroup_context(u);
563 if (cc)
564 cgroup_context_done(cc);
565 }
566
567 void unit_free(Unit *u) {
568 UnitDependency d;
569 Iterator i;
570 char *t;
571
572 if (!u)
573 return;
574
575 if (UNIT_ISSET(u->slice)) {
576 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
577 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
578
579 /* And make sure the parent is realized again, updating cgroup memberships */
580 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
581 }
582
583 u->transient_file = safe_fclose(u->transient_file);
584
585 if (!MANAGER_IS_RELOADING(u->manager))
586 unit_remove_transient(u);
587
588 bus_unit_send_removed_signal(u);
589
590 unit_done(u);
591
592 unit_dequeue_rewatch_pids(u);
593
594 sd_bus_slot_unref(u->match_bus_slot);
595 sd_bus_track_unref(u->bus_track);
596 u->deserialized_refs = strv_free(u->deserialized_refs);
597
598 unit_free_requires_mounts_for(u);
599
600 SET_FOREACH(t, u->names, i)
601 hashmap_remove_value(u->manager->units, t, u);
602
603 if (!sd_id128_is_null(u->invocation_id))
604 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
605
606 if (u->job) {
607 Job *j = u->job;
608 job_uninstall(j);
609 job_free(j);
610 }
611
612 if (u->nop_job) {
613 Job *j = u->nop_job;
614 job_uninstall(j);
615 job_free(j);
616 }
617
618 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
619 bidi_set_free(u, u->dependencies[d]);
620
621 if (u->on_console)
622 manager_unref_console(u->manager);
623
624 unit_release_cgroup(u);
625
626 if (!MANAGER_IS_RELOADING(u->manager))
627 unit_unlink_state_files(u);
628
629 unit_unref_uid_gid(u, false);
630
631 (void) manager_update_failed_units(u->manager, u, false);
632 set_remove(u->manager->startup_units, u);
633
634 unit_unwatch_all_pids(u);
635
636 unit_ref_unset(&u->slice);
637 while (u->refs_by_target)
638 unit_ref_unset(u->refs_by_target);
639
640 if (u->type != _UNIT_TYPE_INVALID)
641 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
642
643 if (u->in_load_queue)
644 LIST_REMOVE(load_queue, u->manager->load_queue, u);
645
646 if (u->in_dbus_queue)
647 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
648
649 if (u->in_gc_queue)
650 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
651
652 if (u->in_cgroup_realize_queue)
653 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
654
655 if (u->in_cgroup_empty_queue)
656 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
657
658 if (u->in_cleanup_queue)
659 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
660
661 if (u->in_target_deps_queue)
662 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
663
664 if (u->in_stop_when_unneeded_queue)
665 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
666
667 safe_close(u->ip_accounting_ingress_map_fd);
668 safe_close(u->ip_accounting_egress_map_fd);
669
670 safe_close(u->ipv4_allow_map_fd);
671 safe_close(u->ipv6_allow_map_fd);
672 safe_close(u->ipv4_deny_map_fd);
673 safe_close(u->ipv6_deny_map_fd);
674
675 bpf_program_unref(u->ip_bpf_ingress);
676 bpf_program_unref(u->ip_bpf_ingress_installed);
677 bpf_program_unref(u->ip_bpf_egress);
678 bpf_program_unref(u->ip_bpf_egress_installed);
679
680 bpf_program_unref(u->bpf_device_control_installed);
681
682 condition_free_list(u->conditions);
683 condition_free_list(u->asserts);
684
685 free(u->description);
686 strv_free(u->documentation);
687 free(u->fragment_path);
688 free(u->source_path);
689 strv_free(u->dropin_paths);
690 free(u->instance);
691
692 free(u->job_timeout_reboot_arg);
693
694 set_free_free(u->names);
695
696 free(u->reboot_arg);
697
698 free(u);
699 }
700
701 UnitActiveState unit_active_state(Unit *u) {
702 assert(u);
703
704 if (u->load_state == UNIT_MERGED)
705 return unit_active_state(unit_follow_merge(u));
706
707 /* After a reload it might happen that a unit is not correctly
708 * loaded but still has a process around. That's why we won't
709 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
710
711 return UNIT_VTABLE(u)->active_state(u);
712 }
713
714 const char* unit_sub_state_to_string(Unit *u) {
715 assert(u);
716
717 return UNIT_VTABLE(u)->sub_state_to_string(u);
718 }
719
720 static int set_complete_move(Set **s, Set **other) {
721 assert(s);
722 assert(other);
723
724 if (!other)
725 return 0;
726
727 if (*s)
728 return set_move(*s, *other);
729 else
730 *s = TAKE_PTR(*other);
731
732 return 0;
733 }
734
735 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
736 assert(s);
737 assert(other);
738
739 if (!*other)
740 return 0;
741
742 if (*s)
743 return hashmap_move(*s, *other);
744 else
745 *s = TAKE_PTR(*other);
746
747 return 0;
748 }
749
750 static int merge_names(Unit *u, Unit *other) {
751 char *t;
752 Iterator i;
753 int r;
754
755 assert(u);
756 assert(other);
757
758 r = set_complete_move(&u->names, &other->names);
759 if (r < 0)
760 return r;
761
762 set_free_free(other->names);
763 other->names = NULL;
764 other->id = NULL;
765
766 SET_FOREACH(t, u->names, i)
767 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
768
769 return 0;
770 }
771
772 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
773 unsigned n_reserve;
774
775 assert(u);
776 assert(other);
777 assert(d < _UNIT_DEPENDENCY_MAX);
778
779 /*
780 * If u does not have this dependency set allocated, there is no need
781 * to reserve anything. In that case other's set will be transferred
782 * as a whole to u by complete_move().
783 */
784 if (!u->dependencies[d])
785 return 0;
786
787 /* merge_dependencies() will skip a u-on-u dependency */
788 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
789
790 return hashmap_reserve(u->dependencies[d], n_reserve);
791 }
792
793 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
794 Iterator i;
795 Unit *back;
796 void *v;
797 int r;
798
799 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
800
801 assert(u);
802 assert(other);
803 assert(d < _UNIT_DEPENDENCY_MAX);
804
805 /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
806 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
807 UnitDependency k;
808
809 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
810 * pointers back, and let's fix them up, to instead point to 'u'. */
811
812 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
813 if (back == u) {
814 /* Do not add dependencies between u and itself. */
815 if (hashmap_remove(back->dependencies[k], other))
816 maybe_warn_about_dependency(u, other_id, k);
817 } else {
818 UnitDependencyInfo di_u, di_other, di_merged;
819
820 /* Let's drop this dependency between "back" and "other", and let's create it between
821 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
822 * and any such dependency which might already exist */
823
824 di_other.data = hashmap_get(back->dependencies[k], other);
825 if (!di_other.data)
826 continue; /* dependency isn't set, let's try the next one */
827
828 di_u.data = hashmap_get(back->dependencies[k], u);
829
830 di_merged = (UnitDependencyInfo) {
831 .origin_mask = di_u.origin_mask | di_other.origin_mask,
832 .destination_mask = di_u.destination_mask | di_other.destination_mask,
833 };
834
835 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
836 if (r < 0)
837 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
838 assert(r >= 0);
839
840 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
841 }
842 }
843
844 }
845
846 /* Also do not move dependencies on u to itself */
847 back = hashmap_remove(other->dependencies[d], u);
848 if (back)
849 maybe_warn_about_dependency(u, other_id, d);
850
851 /* The move cannot fail. The caller must have performed a reservation. */
852 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
853
854 other->dependencies[d] = hashmap_free(other->dependencies[d]);
855 }
856
857 int unit_merge(Unit *u, Unit *other) {
858 UnitDependency d;
859 const char *other_id = NULL;
860 int r;
861
862 assert(u);
863 assert(other);
864 assert(u->manager == other->manager);
865 assert(u->type != _UNIT_TYPE_INVALID);
866
867 other = unit_follow_merge(other);
868
869 if (other == u)
870 return 0;
871
872 if (u->type != other->type)
873 return -EINVAL;
874
875 if (!u->instance != !other->instance)
876 return -EINVAL;
877
878 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
879 return -EEXIST;
880
881 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
882 return -EEXIST;
883
884 if (other->job)
885 return -EEXIST;
886
887 if (other->nop_job)
888 return -EEXIST;
889
890 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
891 return -EEXIST;
892
893 if (other->id)
894 other_id = strdupa(other->id);
895
896 /* Make reservations to ensure merge_dependencies() won't fail */
897 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
898 r = reserve_dependencies(u, other, d);
899 /*
900 * We don't rollback reservations if we fail. We don't have
901 * a way to undo reservations. A reservation is not a leak.
902 */
903 if (r < 0)
904 return r;
905 }
906
907 /* Merge names */
908 r = merge_names(u, other);
909 if (r < 0)
910 return r;
911
912 /* Redirect all references */
913 while (other->refs_by_target)
914 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
915
916 /* Merge dependencies */
917 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
918 merge_dependencies(u, other, other_id, d);
919
920 other->load_state = UNIT_MERGED;
921 other->merged_into = u;
922
923 /* If there is still some data attached to the other node, we
924 * don't need it anymore, and can free it. */
925 if (other->load_state != UNIT_STUB)
926 if (UNIT_VTABLE(other)->done)
927 UNIT_VTABLE(other)->done(other);
928
929 unit_add_to_dbus_queue(u);
930 unit_add_to_cleanup_queue(other);
931
932 return 0;
933 }
934
935 int unit_merge_by_name(Unit *u, const char *name) {
936 _cleanup_free_ char *s = NULL;
937 Unit *other;
938 int r;
939
940 assert(u);
941 assert(name);
942
943 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
944 if (!u->instance)
945 return -EINVAL;
946
947 r = unit_name_replace_instance(name, u->instance, &s);
948 if (r < 0)
949 return r;
950
951 name = s;
952 }
953
954 other = manager_get_unit(u->manager, name);
955 if (other)
956 return unit_merge(u, other);
957
958 return unit_add_name(u, name);
959 }
960
961 Unit* unit_follow_merge(Unit *u) {
962 assert(u);
963
964 while (u->load_state == UNIT_MERGED)
965 assert_se(u = u->merged_into);
966
967 return u;
968 }
969
970 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
971 ExecDirectoryType dt;
972 char **dp;
973 int r;
974
975 assert(u);
976 assert(c);
977
978 if (c->working_directory && !c->working_directory_missing_ok) {
979 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
980 if (r < 0)
981 return r;
982 }
983
984 if (c->root_directory) {
985 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
986 if (r < 0)
987 return r;
988 }
989
990 if (c->root_image) {
991 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
992 if (r < 0)
993 return r;
994 }
995
996 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
997 if (!u->manager->prefix[dt])
998 continue;
999
1000 STRV_FOREACH(dp, c->directories[dt].paths) {
1001 _cleanup_free_ char *p;
1002
1003 p = strjoin(u->manager->prefix[dt], "/", *dp);
1004 if (!p)
1005 return -ENOMEM;
1006
1007 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1008 if (r < 0)
1009 return r;
1010 }
1011 }
1012
1013 if (!MANAGER_IS_SYSTEM(u->manager))
1014 return 0;
1015
1016 if (c->private_tmp) {
1017 const char *p;
1018
1019 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1020 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1021 if (r < 0)
1022 return r;
1023 }
1024
1025 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1026 if (r < 0)
1027 return r;
1028 }
1029
1030 if (!IN_SET(c->std_output,
1031 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1032 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1033 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1034 !IN_SET(c->std_error,
1035 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1036 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1037 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1038 return 0;
1039
1040 /* If syslog or kernel logging is requested, make sure our own
1041 * logging daemon is run first. */
1042
1043 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1044 if (r < 0)
1045 return r;
1046
1047 return 0;
1048 }
1049
1050 const char *unit_description(Unit *u) {
1051 assert(u);
1052
1053 if (u->description)
1054 return u->description;
1055
1056 return strna(u->id);
1057 }
1058
1059 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1060 const struct {
1061 UnitDependencyMask mask;
1062 const char *name;
1063 } table[] = {
1064 { UNIT_DEPENDENCY_FILE, "file" },
1065 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1066 { UNIT_DEPENDENCY_DEFAULT, "default" },
1067 { UNIT_DEPENDENCY_UDEV, "udev" },
1068 { UNIT_DEPENDENCY_PATH, "path" },
1069 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1070 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1071 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1072 };
1073 size_t i;
1074
1075 assert(f);
1076 assert(kind);
1077 assert(space);
1078
1079 for (i = 0; i < ELEMENTSOF(table); i++) {
1080
1081 if (mask == 0)
1082 break;
1083
1084 if (FLAGS_SET(mask, table[i].mask)) {
1085 if (*space)
1086 fputc(' ', f);
1087 else
1088 *space = true;
1089
1090 fputs(kind, f);
1091 fputs("-", f);
1092 fputs(table[i].name, f);
1093
1094 mask &= ~table[i].mask;
1095 }
1096 }
1097
1098 assert(mask == 0);
1099 }
1100
1101 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1102 char *t, **j;
1103 UnitDependency d;
1104 Iterator i;
1105 const char *prefix2;
1106 char
1107 timestamp0[FORMAT_TIMESTAMP_MAX],
1108 timestamp1[FORMAT_TIMESTAMP_MAX],
1109 timestamp2[FORMAT_TIMESTAMP_MAX],
1110 timestamp3[FORMAT_TIMESTAMP_MAX],
1111 timestamp4[FORMAT_TIMESTAMP_MAX],
1112 timespan[FORMAT_TIMESPAN_MAX];
1113 Unit *following;
1114 _cleanup_set_free_ Set *following_set = NULL;
1115 const char *n;
1116 CGroupMask m;
1117 int r;
1118
1119 assert(u);
1120 assert(u->type >= 0);
1121
1122 prefix = strempty(prefix);
1123 prefix2 = strjoina(prefix, "\t");
1124
1125 fprintf(f,
1126 "%s-> Unit %s:\n"
1127 "%s\tDescription: %s\n"
1128 "%s\tInstance: %s\n"
1129 "%s\tUnit Load State: %s\n"
1130 "%s\tUnit Active State: %s\n"
1131 "%s\tState Change Timestamp: %s\n"
1132 "%s\tInactive Exit Timestamp: %s\n"
1133 "%s\tActive Enter Timestamp: %s\n"
1134 "%s\tActive Exit Timestamp: %s\n"
1135 "%s\tInactive Enter Timestamp: %s\n"
1136 "%s\tMay GC: %s\n"
1137 "%s\tNeed Daemon Reload: %s\n"
1138 "%s\tTransient: %s\n"
1139 "%s\tPerpetual: %s\n"
1140 "%s\tGarbage Collection Mode: %s\n"
1141 "%s\tSlice: %s\n"
1142 "%s\tCGroup: %s\n"
1143 "%s\tCGroup realized: %s\n",
1144 prefix, u->id,
1145 prefix, unit_description(u),
1146 prefix, strna(u->instance),
1147 prefix, unit_load_state_to_string(u->load_state),
1148 prefix, unit_active_state_to_string(unit_active_state(u)),
1149 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1150 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1151 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1152 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1153 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1154 prefix, yes_no(unit_may_gc(u)),
1155 prefix, yes_no(unit_need_daemon_reload(u)),
1156 prefix, yes_no(u->transient),
1157 prefix, yes_no(u->perpetual),
1158 prefix, collect_mode_to_string(u->collect_mode),
1159 prefix, strna(unit_slice_name(u)),
1160 prefix, strna(u->cgroup_path),
1161 prefix, yes_no(u->cgroup_realized));
1162
1163 if (u->cgroup_realized_mask != 0) {
1164 _cleanup_free_ char *s = NULL;
1165 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1166 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1167 }
1168
1169 if (u->cgroup_enabled_mask != 0) {
1170 _cleanup_free_ char *s = NULL;
1171 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1172 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1173 }
1174
1175 m = unit_get_own_mask(u);
1176 if (m != 0) {
1177 _cleanup_free_ char *s = NULL;
1178 (void) cg_mask_to_string(m, &s);
1179 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1180 }
1181
1182 m = unit_get_members_mask(u);
1183 if (m != 0) {
1184 _cleanup_free_ char *s = NULL;
1185 (void) cg_mask_to_string(m, &s);
1186 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1187 }
1188
1189 m = unit_get_delegate_mask(u);
1190 if (m != 0) {
1191 _cleanup_free_ char *s = NULL;
1192 (void) cg_mask_to_string(m, &s);
1193 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1194 }
1195
1196 SET_FOREACH(t, u->names, i)
1197 fprintf(f, "%s\tName: %s\n", prefix, t);
1198
1199 if (!sd_id128_is_null(u->invocation_id))
1200 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1201 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1202
1203 STRV_FOREACH(j, u->documentation)
1204 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1205
1206 following = unit_following(u);
1207 if (following)
1208 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1209
1210 r = unit_following_set(u, &following_set);
1211 if (r >= 0) {
1212 Unit *other;
1213
1214 SET_FOREACH(other, following_set, i)
1215 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1216 }
1217
1218 if (u->fragment_path)
1219 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1220
1221 if (u->source_path)
1222 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1223
1224 STRV_FOREACH(j, u->dropin_paths)
1225 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1226
1227 if (u->failure_action != EMERGENCY_ACTION_NONE)
1228 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1229 if (u->failure_action_exit_status >= 0)
1230 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1231 if (u->success_action != EMERGENCY_ACTION_NONE)
1232 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1233 if (u->success_action_exit_status >= 0)
1234 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1235
1236 if (u->job_timeout != USEC_INFINITY)
1237 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1238
1239 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1240 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1241
1242 if (u->job_timeout_reboot_arg)
1243 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1244
1245 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1246 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1247
1248 if (dual_timestamp_is_set(&u->condition_timestamp))
1249 fprintf(f,
1250 "%s\tCondition Timestamp: %s\n"
1251 "%s\tCondition Result: %s\n",
1252 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1253 prefix, yes_no(u->condition_result));
1254
1255 if (dual_timestamp_is_set(&u->assert_timestamp))
1256 fprintf(f,
1257 "%s\tAssert Timestamp: %s\n"
1258 "%s\tAssert Result: %s\n",
1259 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1260 prefix, yes_no(u->assert_result));
1261
1262 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1263 UnitDependencyInfo di;
1264 Unit *other;
1265
1266 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1267 bool space = false;
1268
1269 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1270
1271 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1272 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1273
1274 fputs(")\n", f);
1275 }
1276 }
1277
1278 if (!hashmap_isempty(u->requires_mounts_for)) {
1279 UnitDependencyInfo di;
1280 const char *path;
1281
1282 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1283 bool space = false;
1284
1285 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1286
1287 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1288 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1289
1290 fputs(")\n", f);
1291 }
1292 }
1293
1294 if (u->load_state == UNIT_LOADED) {
1295
1296 fprintf(f,
1297 "%s\tStopWhenUnneeded: %s\n"
1298 "%s\tRefuseManualStart: %s\n"
1299 "%s\tRefuseManualStop: %s\n"
1300 "%s\tDefaultDependencies: %s\n"
1301 "%s\tOnFailureJobMode: %s\n"
1302 "%s\tIgnoreOnIsolate: %s\n",
1303 prefix, yes_no(u->stop_when_unneeded),
1304 prefix, yes_no(u->refuse_manual_start),
1305 prefix, yes_no(u->refuse_manual_stop),
1306 prefix, yes_no(u->default_dependencies),
1307 prefix, job_mode_to_string(u->on_failure_job_mode),
1308 prefix, yes_no(u->ignore_on_isolate));
1309
1310 if (UNIT_VTABLE(u)->dump)
1311 UNIT_VTABLE(u)->dump(u, f, prefix2);
1312
1313 } else if (u->load_state == UNIT_MERGED)
1314 fprintf(f,
1315 "%s\tMerged into: %s\n",
1316 prefix, u->merged_into->id);
1317 else if (u->load_state == UNIT_ERROR)
1318 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1319
1320 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1321 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1322
1323 if (u->job)
1324 job_dump(u->job, f, prefix2);
1325
1326 if (u->nop_job)
1327 job_dump(u->nop_job, f, prefix2);
1328 }
1329
1330 /* Common implementation for multiple backends */
1331 int unit_load_fragment_and_dropin(Unit *u) {
1332 int r;
1333
1334 assert(u);
1335
1336 /* Load a .{service,socket,...} file */
1337 r = unit_load_fragment(u);
1338 if (r < 0)
1339 return r;
1340
1341 if (u->load_state == UNIT_STUB)
1342 return -ENOENT;
1343
1344 /* Load drop-in directory data. If u is an alias, we might be reloading the
1345 * target unit needlessly. But we cannot be sure which drops-ins have already
1346 * been loaded and which not, at least without doing complicated book-keeping,
1347 * so let's always reread all drop-ins. */
1348 return unit_load_dropin(unit_follow_merge(u));
1349 }
1350
1351 /* Common implementation for multiple backends */
1352 int unit_load_fragment_and_dropin_optional(Unit *u) {
1353 int r;
1354
1355 assert(u);
1356
1357 /* Same as unit_load_fragment_and_dropin(), but whether
1358 * something can be loaded or not doesn't matter. */
1359
1360 /* Load a .service/.socket/.slice/… file */
1361 r = unit_load_fragment(u);
1362 if (r < 0)
1363 return r;
1364
1365 if (u->load_state == UNIT_STUB)
1366 u->load_state = UNIT_LOADED;
1367
1368 /* Load drop-in directory data */
1369 return unit_load_dropin(unit_follow_merge(u));
1370 }
1371
1372 void unit_add_to_target_deps_queue(Unit *u) {
1373 Manager *m = u->manager;
1374
1375 assert(u);
1376
1377 if (u->in_target_deps_queue)
1378 return;
1379
1380 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1381 u->in_target_deps_queue = true;
1382 }
1383
1384 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1385 assert(u);
1386 assert(target);
1387
1388 if (target->type != UNIT_TARGET)
1389 return 0;
1390
1391 /* Only add the dependency if both units are loaded, so that
1392 * that loop check below is reliable */
1393 if (u->load_state != UNIT_LOADED ||
1394 target->load_state != UNIT_LOADED)
1395 return 0;
1396
1397 /* If either side wants no automatic dependencies, then let's
1398 * skip this */
1399 if (!u->default_dependencies ||
1400 !target->default_dependencies)
1401 return 0;
1402
1403 /* Don't create loops */
1404 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1405 return 0;
1406
1407 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1408 }
1409
1410 static int unit_add_slice_dependencies(Unit *u) {
1411 UnitDependencyMask mask;
1412 assert(u);
1413
1414 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1415 return 0;
1416
1417 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1418 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1419 relationship). */
1420 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1421
1422 if (UNIT_ISSET(u->slice))
1423 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1424
1425 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1426 return 0;
1427
1428 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1429 }
1430
/* For every path in RequiresMountsFor=, add After= (and, for mounts backed by a
 * unit file, Requires=) dependencies on the .mount units covering that path and
 * all of its path prefixes. Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                /* Walk every prefix of the path (/a, /a/b, /a/b/c, …): a mount
                 * anywhere above the path is just as necessary as one at it. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Always order ourselves after the mount unit, using the
                         * origin mask recorded for this RequiresMountsFor= entry. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only add a hard Requires= when the mount is backed by a
                         * unit file (fragment_path is set), i.e. not merely
                         * synthesized from the runtime mount table. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1480
1481 static int unit_add_startup_units(Unit *u) {
1482 CGroupContext *c;
1483 int r;
1484
1485 c = unit_get_cgroup_context(u);
1486 if (!c)
1487 return 0;
1488
1489 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1490 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1491 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1492 return 0;
1493
1494 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1495 if (r < 0)
1496 return r;
1497
1498 return set_put(u->manager->startup_units, u);
1499 }
1500
/* Load the unit's configuration via the unit type's load hook and set up the
 * implicit dependencies that follow from it. On failure the unit is moved into
 * UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR as appropriate, and the
 * (negative) error is stored in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are handling this unit now — drop it from the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than STUB means loading already happened (or the unit
         * was merged/errored) — nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Delegate the actual loading to the unit type (service, socket, …). */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after the load hook ran means no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with exactly one OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set if and only if the state is UNIT_MERGED. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1589
/* Evaluate a list of conditions (or asserts — 'to_string' only selects the
 * setting-name mapping used in log output). Semantics: every non-trigger
 * condition must hold, and — if any trigger ("|"-prefixed) conditions exist —
 * at least one of them must hold too. A condition whose evaluation fails
 * (r < 0) is treated as not holding. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1; /* -1: no trigger condition seen yet; 0/1: whether any trigger held */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failed (or unevaluable) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Record whether any trigger condition has held so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* Passes unless trigger conditions existed and none of them held. */
        return triggered != 0;
}
1633
1634 static bool unit_condition_test(Unit *u) {
1635 assert(u);
1636
1637 dual_timestamp_get(&u->condition_timestamp);
1638 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1639
1640 return u->condition_result;
1641 }
1642
1643 static bool unit_assert_test(Unit *u) {
1644 assert(u);
1645
1646 dual_timestamp_get(&u->assert_timestamp);
1647 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1648
1649 return u->assert_result;
1650 }
1651
/* Print a console status line for the unit, substituting the unit's (possibly
 * colour-highlighted) description into the caller-supplied format string. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_description(u);
        if (log_get_show_color())
                /* strjoina() allocates on the stack, hence no cleanup needed. */
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        /* The format string is deliberately non-literal (it comes from the unit
         * type); suppress -Wformat-nonliteral around the call. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}
1663
1664 int unit_start_limit_test(Unit *u) {
1665 const char *reason;
1666
1667 assert(u);
1668
1669 if (ratelimit_below(&u->start_limit)) {
1670 u->start_limit_hit = false;
1671 return 0;
1672 }
1673
1674 log_unit_warning(u, "Start request repeated too quickly.");
1675 u->start_limit_hit = true;
1676
1677 reason = strjoina("unit ", u->id, " failed");
1678
1679 return emergency_action(u->manager, u->start_limit_action,
1680 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1681 u->reboot_arg, -1, reason);
1682 }
1683
1684 bool unit_shall_confirm_spawn(Unit *u) {
1685 assert(u);
1686
1687 if (manager_is_confirm_spawn_disabled(u->manager))
1688 return false;
1689
1690 /* For some reasons units remaining in the same process group
1691 * as PID 1 fail to acquire the console even if it's not used
1692 * by any process. So skip the confirmation question for them. */
1693 return !unit_get_exec_context(u)->same_pgrp;
1694 }
1695
1696 static bool unit_verify_deps(Unit *u) {
1697 Unit *other;
1698 Iterator j;
1699 void *v;
1700
1701 assert(u);
1702
1703 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1704 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1705 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1706 * conjunction with After= as for them any such check would make things entirely racy. */
1707
1708 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1709
1710 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1711 continue;
1712
1713 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1714 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1715 return false;
1716 }
1717 }
1718
1719 return true;
1720 }
1721
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 * -ECOMM: Condition failed (returned below but not part of the list above —
 *         NOTE(review): consider documenting or renumbering)
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -ECOMM;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1808
1809 bool unit_can_start(Unit *u) {
1810 assert(u);
1811
1812 if (u->load_state != UNIT_LOADED)
1813 return false;
1814
1815 if (!unit_supported(u))
1816 return false;
1817
1818 /* Scope units may be started only once */
1819 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1820 return false;
1821
1822 return !!UNIT_VTABLE(u)->start;
1823 }
1824
1825 bool unit_can_isolate(Unit *u) {
1826 assert(u);
1827
1828 return unit_can_start(u) &&
1829 u->allow_isolate;
1830 }
1831
1832 /* Errors:
1833 * -EBADR: This unit type does not support stopping.
1834 * -EALREADY: Unit is already stopped.
1835 * -EAGAIN: An operation is already in progress. Retry later.
1836 */
1837 int unit_stop(Unit *u) {
1838 UnitActiveState state;
1839 Unit *following;
1840
1841 assert(u);
1842
1843 state = unit_active_state(u);
1844 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1845 return -EALREADY;
1846
1847 following = unit_following(u);
1848 if (following) {
1849 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1850 return unit_stop(following);
1851 }
1852
1853 if (!UNIT_VTABLE(u)->stop)
1854 return -EBADR;
1855
1856 unit_add_to_dbus_queue(u);
1857
1858 return UNIT_VTABLE(u)->stop(u);
1859 }
1860
1861 bool unit_can_stop(Unit *u) {
1862 assert(u);
1863
1864 if (!unit_supported(u))
1865 return false;
1866
1867 if (u->perpetual)
1868 return false;
1869
1870 return !!UNIT_VTABLE(u)->stop;
1871 }
1872
1873 /* Errors:
1874 * -EBADR: This unit type does not support reloading.
1875 * -ENOEXEC: Unit is not started.
1876 * -EAGAIN: An operation is already in progress. Retry later.
1877 */
1878 int unit_reload(Unit *u) {
1879 UnitActiveState state;
1880 Unit *following;
1881
1882 assert(u);
1883
1884 if (u->load_state != UNIT_LOADED)
1885 return -EINVAL;
1886
1887 if (!unit_can_reload(u))
1888 return -EBADR;
1889
1890 state = unit_active_state(u);
1891 if (state == UNIT_RELOADING)
1892 return -EALREADY;
1893
1894 if (state != UNIT_ACTIVE) {
1895 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1896 return -ENOEXEC;
1897 }
1898
1899 following = unit_following(u);
1900 if (following) {
1901 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1902 return unit_reload(following);
1903 }
1904
1905 unit_add_to_dbus_queue(u);
1906
1907 if (!UNIT_VTABLE(u)->reload) {
1908 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1909 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1910 return 0;
1911 }
1912
1913 return UNIT_VTABLE(u)->reload(u);
1914 }
1915
1916 bool unit_can_reload(Unit *u) {
1917 assert(u);
1918
1919 if (UNIT_VTABLE(u)->can_reload)
1920 return UNIT_VTABLE(u)->can_reload(u);
1921
1922 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1923 return true;
1924
1925 return UNIT_VTABLE(u)->reload;
1926 }
1927
1928 bool unit_is_unneeded(Unit *u) {
1929 static const UnitDependency deps[] = {
1930 UNIT_REQUIRED_BY,
1931 UNIT_REQUISITE_OF,
1932 UNIT_WANTED_BY,
1933 UNIT_BOUND_BY,
1934 };
1935 size_t j;
1936
1937 assert(u);
1938
1939 if (!u->stop_when_unneeded)
1940 return false;
1941
1942 /* Don't clean up while the unit is transitioning or is even inactive. */
1943 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1944 return false;
1945 if (u->job)
1946 return false;
1947
1948 for (j = 0; j < ELEMENTSOF(deps); j++) {
1949 Unit *other;
1950 Iterator i;
1951 void *v;
1952
1953 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1954 * restart, then don't clean this one up. */
1955
1956 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1957 if (other->job)
1958 return false;
1959
1960 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1961 return false;
1962
1963 if (unit_will_restart(other))
1964 return false;
1965 }
1966 }
1967
1968 return true;
1969 }
1970
1971 static void check_unneeded_dependencies(Unit *u) {
1972
1973 static const UnitDependency deps[] = {
1974 UNIT_REQUIRES,
1975 UNIT_REQUISITE,
1976 UNIT_WANTS,
1977 UNIT_BINDS_TO,
1978 };
1979 size_t j;
1980
1981 assert(u);
1982
1983 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1984
1985 for (j = 0; j < ELEMENTSOF(deps); j++) {
1986 Unit *other;
1987 Iterator i;
1988 void *v;
1989
1990 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
1991 unit_submit_to_stop_when_unneeded_queue(other);
1992 }
1993 }
1994
/* If this unit is active but one of its BindsTo= dependencies has gone down
 * with nothing queued to bring it back, enqueue a stop job for this unit —
 * rate-limited, to avoid stop loops when stopping keeps failing. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A pending job on this unit will sort the state out by itself. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        /* Look for a BindsTo= dependency that is inactive/failed with no job queued. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* After the break above, 'other' still points at the dependency that
         * made us decide to stop; it is used in the log messages below. */

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2045
/* The unit became active outside of a start job's control: bring its pull-in
 * dependencies up and its conflict partners down, as a regular start job would
 * have. Dependencies that are ordered After= us are skipped for the pull-ins —
 * they were supposed to be started before us anyway. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Hard pull-ins (Requires=, BindsTo=) use JOB_REPLACE. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Soft pull-ins (Wants=) use JOB_FAIL instead — note the different job
         * mode from the two loops above. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Stop everything we conflict with (in either direction). */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2077
2078 static void retroactively_stop_dependencies(Unit *u) {
2079 Unit *other;
2080 Iterator i;
2081 void *v;
2082
2083 assert(u);
2084 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2085
2086 /* Pull down units which are bound to us recursively if enabled */
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2090 }
2091
2092 void unit_start_on_failure(Unit *u) {
2093 Unit *other;
2094 Iterator i;
2095 void *v;
2096 int r;
2097
2098 assert(u);
2099
2100 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2101 return;
2102
2103 log_unit_info(u, "Triggering OnFailure= dependencies.");
2104
2105 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2106 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2107
2108 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2109 if (r < 0)
2110 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2111 }
2112 }
2113
2114 void unit_trigger_notify(Unit *u) {
2115 Unit *other;
2116 Iterator i;
2117 void *v;
2118
2119 assert(u);
2120
2121 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2122 if (UNIT_VTABLE(other)->trigger_notify)
2123 UNIT_VTABLE(other)->trigger_notify(other, u);
2124 }
2125
/* Log the resources a unit consumed, both as structured journal fields and as
 * a condensed human-readable MESSAGE=. Returns 0 (also when no accounting data
 * is available at all), or negative on OOM. */
static int unit_log_resources(Unit *u) {
        /* Owned entries (freed in 'finish'): 1 CPU field + up to 4 IP fields.
         * The final +4 slots hold stack/static strings appended at the end and
         * are deliberately NOT counted in n_iovec, so they are never freed. */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL;
        size_t n_message_parts = 0, n_iovec = 0;
        char* message_parts[3 + 1], *t; /* at most CPU + ingress + egress, NULL-terminated */
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                t = strjoin("consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                /* UINT64_MAX means the metric was not collected for this unit. */
                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }
        }

        if (have_ip_accounting) {
                if (any_traffic) {
                        /* Ownership moves into message_parts[], freed in 'finish'. */
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                /* NOTE(review): not initialized to NULL; safe only because it is
                 * assigned immediately below — consider "= NULL" for robustness. */
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries: the message parts and the first
         * n_iovec iovec payloads. */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2278
2279 static void unit_update_on_console(Unit *u) {
2280 bool b;
2281
2282 assert(u);
2283
2284 b = unit_needs_console(u);
2285 if (u->on_console == b)
2286 return;
2287
2288 u->on_console = b;
2289 if (b)
2290 manager_ref_console(u->manager);
2291 else
2292 manager_unref_console(u->manager);
2293 }
2294
/* Sends an audit "service start" record for this unit and remembers (via u->in_audit) that a matching stop
 * record should follow later. Only service units are reported to the audit subsystem. */
static void unit_emit_audit_start(Unit *u) {
        assert(u);

        if (u->type != UNIT_SERVICE)
                return;

        /* Write audit record if we have just finished starting up */
        manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
        u->in_audit = true;
}
2305
2306 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2307 assert(u);
2308
2309 if (u->type != UNIT_SERVICE)
2310 return;
2311
2312 if (u->in_audit) {
2313 /* Write audit record if we have just finished shutting down */
2314 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2315 u->in_audit = false;
2316 } else {
2317 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2318 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2319
2320 if (state == UNIT_INACTIVE)
2321 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2322 }
2323 }
2324
/* Central state-change notification: called by the type-specific code whenever a unit changes its low-level
 * state. Updates timestamps, finishes or invalidates pending jobs, retroactively starts/stops dependencies,
 * emits audit/plymouth/log events, and triggers failure/success emergency actions. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                /* Record entry/exit of the "inactive or failed" superset of states */
                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                /* And likewise for the "active or reloading" superset */
                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* The unit moved somewhere other than "activating" while a start job
                                 * was running — this was not requested by the job */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true; /* no job at all → any state change is by definition unrequested */

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* Skip OnFailure= if the unit is about to be auto-restarted anyway */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        (void) emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        (void) emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2501
/* Registers @pid as belonging to this unit, both in the unit's own pid set and in the manager's global
 * watch_pids map. The map uses a two-level scheme: the positive key "pid" maps to a single Unit; if that key
 * is already claimed by another unit, the negative key "-pid" maps to a NULL-terminated array of additional
 * Units watching the same PID. Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++) /* after the loop n is the array length */
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array with room for us plus the trailing NULL */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* memcpy_safe() tolerates array == NULL when n == 0 */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Finally, record the PID in our own per-unit set */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2566
/* Removes @pid from this unit's watch bookkeeping: the manager's direct "pid" key, the shared "-pid" overflow
 * array (compacting it in place, freeing it when it becomes empty), and the unit's own pid set. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                size_t n, m = 0;

                /* Let's iterate through the array, dropping our own entry */
                for (n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL; /* re-terminate the compacted array */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2596
2597 void unit_unwatch_all_pids(Unit *u) {
2598 assert(u);
2599
2600 while (!set_isempty(u->pids))
2601 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2602
2603 u->pids = set_free(u->pids);
2604 }
2605
/* Drops watched PIDs that are no longer waitable (i.e. dead), except for the unit's main and control PIDs,
 * whose lifecycle is handled elsewhere. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current entry from u->pids while we are
                 * iterating with SET_FOREACH — presumably the set iterator tolerates removal of the
                 * current entry; confirm against the hashmap/set implementation. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2628
/* Deferred-event callback (queued by unit_enqueue_rewatch_pids()): prunes dead PIDs from the watch list,
 * re-subscribes to all processes currently in the unit's cgroup, and synthesizes a "cgroup empty" event if
 * nothing is left to watch. Always returns 0. */
static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
        Unit *u = userdata;

        assert(s);
        assert(u);

        unit_tidy_watch_pids(u);
        unit_watch_all_pids(u);

        /* If the PID set is empty now, then let's finish this off. */
        unit_synthesize_cgroup_empty_event(u);

        return 0;
}
2643
2644 int unit_enqueue_rewatch_pids(Unit *u) {
2645 int r;
2646
2647 assert(u);
2648
2649 if (!u->cgroup_path)
2650 return -ENOENT;
2651
2652 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2653 if (r < 0)
2654 return r;
2655 if (r > 0) /* On unified we can use proper notifications */
2656 return 0;
2657
2658 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2659 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2660 * involves issuing kill(pid, 0) on all processes we watch. */
2661
2662 if (!u->rewatch_pids_event_source) {
2663 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2664
2665 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2666 if (r < 0)
2667 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2668
2669 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2670 if (r < 0)
2671 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2672
2673 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2674
2675 u->rewatch_pids_event_source = TAKE_PTR(s);
2676 }
2677
2678 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2679 if (r < 0)
2680 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2681
2682 return 0;
2683 }
2684
/* Cancels and releases a pending rewatch-pids event source, if any. A failure to disable the source is only
 * logged, since we unref it right afterwards anyway. */
void unit_dequeue_rewatch_pids(Unit *u) {
        int r;
        assert(u);

        if (!u->rewatch_pids_event_source)
                return;

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");

        u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
}
2698
2699 bool unit_job_is_applicable(Unit *u, JobType j) {
2700 assert(u);
2701 assert(j >= 0 && j < _JOB_TYPE_MAX);
2702
2703 switch (j) {
2704
2705 case JOB_VERIFY_ACTIVE:
2706 case JOB_START:
2707 case JOB_NOP:
2708 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2709 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2710 * jobs for it. */
2711 return true;
2712
2713 case JOB_STOP:
2714 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2715 * external events), hence it makes no sense to permit enqueing such a request either. */
2716 return !u->perpetual;
2717
2718 case JOB_RESTART:
2719 case JOB_TRY_RESTART:
2720 return unit_can_stop(u) && unit_can_start(u);
2721
2722 case JOB_RELOAD:
2723 case JOB_TRY_RELOAD:
2724 return unit_can_reload(u);
2725
2726 case JOB_RELOAD_OR_START:
2727 return unit_can_reload(u) && unit_can_start(u);
2728
2729 default:
2730 assert_not_reached("Invalid job type");
2731 }
2732 }
2733
2734 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2735 assert(u);
2736
2737 /* Only warn about some unit types */
2738 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2739 return;
2740
2741 if (streq_ptr(u->id, other))
2742 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2743 else
2744 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2745 }
2746
2747 static int unit_add_dependency_hashmap(
2748 Hashmap **h,
2749 Unit *other,
2750 UnitDependencyMask origin_mask,
2751 UnitDependencyMask destination_mask) {
2752
2753 UnitDependencyInfo info;
2754 int r;
2755
2756 assert(h);
2757 assert(other);
2758 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2759 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2760 assert(origin_mask > 0 || destination_mask > 0);
2761
2762 r = hashmap_ensure_allocated(h, NULL);
2763 if (r < 0)
2764 return r;
2765
2766 assert_cc(sizeof(void*) == sizeof(info));
2767
2768 info.data = hashmap_get(*h, other);
2769 if (info.data) {
2770 /* Entry already exists. Add in our mask. */
2771
2772 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2773 FLAGS_SET(destination_mask, info.destination_mask))
2774 return 0; /* NOP */
2775
2776 info.origin_mask |= origin_mask;
2777 info.destination_mask |= destination_mask;
2778
2779 r = hashmap_update(*h, other, info.data);
2780 } else {
2781 info = (UnitDependencyInfo) {
2782 .origin_mask = origin_mask,
2783 .destination_mask = destination_mask,
2784 };
2785
2786 r = hashmap_put(*h, other, info.data);
2787 }
2788 if (r < 0)
2789 return r;
2790
2791 return 1;
2792 }
2793
2794 int unit_add_dependency(
2795 Unit *u,
2796 UnitDependency d,
2797 Unit *other,
2798 bool add_reference,
2799 UnitDependencyMask mask) {
2800
2801 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2802 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2803 [UNIT_WANTS] = UNIT_WANTED_BY,
2804 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2805 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2806 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2807 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2808 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2809 [UNIT_WANTED_BY] = UNIT_WANTS,
2810 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2811 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2812 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2813 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2814 [UNIT_BEFORE] = UNIT_AFTER,
2815 [UNIT_AFTER] = UNIT_BEFORE,
2816 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2817 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2818 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2819 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2820 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2821 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2822 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2823 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2824 };
2825 Unit *original_u = u, *original_other = other;
2826 int r;
2827
2828 assert(u);
2829 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2830 assert(other);
2831
2832 u = unit_follow_merge(u);
2833 other = unit_follow_merge(other);
2834
2835 /* We won't allow dependencies on ourselves. We will not
2836 * consider them an error however. */
2837 if (u == other) {
2838 maybe_warn_about_dependency(original_u, original_other->id, d);
2839 return 0;
2840 }
2841
2842 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2843 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2844 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2845 return 0;
2846 }
2847
2848 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2849 if (r < 0)
2850 return r;
2851
2852 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2853 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2854 if (r < 0)
2855 return r;
2856 }
2857
2858 if (add_reference) {
2859 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2860 if (r < 0)
2861 return r;
2862
2863 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2864 if (r < 0)
2865 return r;
2866 }
2867
2868 unit_add_to_dbus_queue(u);
2869 return 0;
2870 }
2871
/* Convenience wrapper: adds two dependency types @d and @e from @u on @other in one call. Returns the first
 * error encountered, otherwise the result of the second addition. */
int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
        int r;

        assert(u);

        r = unit_add_dependency(u, d, other, add_reference, mask);
        if (r < 0)
                return r;

        return unit_add_dependency(u, e, other, add_reference, mask);
}
2883
2884 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2885 int r;
2886
2887 assert(u);
2888 assert(name);
2889 assert(buf);
2890 assert(ret);
2891
2892 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2893 *buf = NULL;
2894 *ret = name;
2895 return 0;
2896 }
2897
2898 if (u->instance)
2899 r = unit_name_replace_instance(name, u->instance, buf);
2900 else {
2901 _cleanup_free_ char *i = NULL;
2902
2903 r = unit_name_to_prefix(u->id, &i);
2904 if (r < 0)
2905 return r;
2906
2907 r = unit_name_replace_instance(name, i, buf);
2908 }
2909 if (r < 0)
2910 return r;
2911
2912 *ret = *buf;
2913 return 0;
2914 }
2915
/* Like unit_add_dependency(), but takes the other unit by name: template names are resolved against this
 * unit's instance, and the target unit is loaded on demand. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}
2934
/* Like unit_add_two_dependencies(), but takes the other unit by name: template names are resolved against
 * this unit's instance, and the target unit is loaded on demand. */
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}
2953
/* Overrides the unit search path via the $SYSTEMD_UNIT_PATH environment variable. This is mostly for debug
 * purposes. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        int r;

        r = setenv("SYSTEMD_UNIT_PATH", p, 1);
        return r < 0 ? -errno : 0;
}
2961
2962 char *unit_dbus_path(Unit *u) {
2963 assert(u);
2964
2965 if (!u->id)
2966 return NULL;
2967
2968 return unit_dbus_path_from_name(u->id);
2969 }
2970
2971 char *unit_dbus_path_invocation_id(Unit *u) {
2972 assert(u);
2973
2974 if (sd_id128_is_null(u->invocation_id))
2975 return NULL;
2976
2977 return unit_dbus_path_from_name(u->invocation_id_string);
2978 }
2979
/* Places unit @u into slice @slice. Returns 1 when the slice was set, 0 when it was already this slice, and
 * a distinct negative errno per rejected precondition (see the guard clauses below — their order determines
 * which error a caller sees). */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special and must stay in the root slice */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
3016
3017 int unit_set_default_slice(Unit *u) {
3018 const char *slice_name;
3019 Unit *slice;
3020 int r;
3021
3022 assert(u);
3023
3024 if (UNIT_ISSET(u->slice))
3025 return 0;
3026
3027 if (u->instance) {
3028 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3029
3030 /* Implicitly place all instantiated units in their
3031 * own per-template slice */
3032
3033 r = unit_name_to_prefix(u->id, &prefix);
3034 if (r < 0)
3035 return r;
3036
3037 /* The prefix is already escaped, but it might include
3038 * "-" which has a special meaning for slice units,
3039 * hence escape it here extra. */
3040 escaped = unit_name_escape(prefix);
3041 if (!escaped)
3042 return -ENOMEM;
3043
3044 if (MANAGER_IS_SYSTEM(u->manager))
3045 slice_name = strjoina("system-", escaped, ".slice");
3046 else
3047 slice_name = strjoina(escaped, ".slice");
3048 } else
3049 slice_name =
3050 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3051 ? SPECIAL_SYSTEM_SLICE
3052 : SPECIAL_ROOT_SLICE;
3053
3054 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3055 if (r < 0)
3056 return r;
3057
3058 return unit_set_slice(u, slice);
3059 }
3060
3061 const char *unit_slice_name(Unit *u) {
3062 assert(u);
3063
3064 if (!UNIT_ISSET(u->slice))
3065 return NULL;
3066
3067 return UNIT_DEREF(u->slice)->id;
3068 }
3069
/* Loads the sibling unit obtained by swapping this unit's suffix for @type (e.g. "foo.mount" → "foo.device").
 * Fails with -EINVAL if the resulting name refers to this unit itself. On success *_found points at the
 * loaded unit. */
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        assert(r < 0 || *_found != u);
        return r;
}
3088
/* D-Bus match callback for NameOwnerChanged signals (installed by unit_install_bus_match()): forwards the
 * ownership change to the unit type's bus_name_owner_change() hook, if it defines one. Parse failures are
 * logged and otherwise ignored; always returns 0. */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *name, *old_owner, *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        /* The bus encodes "no owner" as the empty string; normalize that to NULL for the hook */
        old_owner = empty_to_null(old_owner);
        new_owner = empty_to_null(new_owner);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);

        return 0;
}
3111
/* Asynchronously installs a bus match so this unit is notified whenever ownership of bus name @name changes.
 * Only one such match per unit is supported; returns -EBUSY if one is already installed. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* strjoina() builds the match string on the stack; no cleanup needed */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
}
3131
/* Registers this unit as the watcher of bus name @name. Installs the NameOwnerChanged match immediately when
 * the API bus is up; either way the name is recorded in the manager's watch_bus map so bus_setup_api() can
 * (re)install matches later. Only one unit may watch a given name. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match we may have installed above */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3157
/* Reverses unit_watch_bus_name(): drops the watch_bus map entry (only if it points at this unit) and releases
 * the installed bus match, if any. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
}
3165
3166 bool unit_can_serialize(Unit *u) {
3167 assert(u);
3168
3169 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3170 }
3171
/* Serializes a cgroup controller mask under @key as a textual controller list. An empty mask is simply not
 * serialized (returns 0). */
static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
        _cleanup_free_ char *s = NULL;
        int r;

        assert(f);
        assert(key);

        if (mask == 0)
                return 0;

        r = cg_mask_to_string(mask, &s);
        if (r < 0)
                return log_error_errno(r, "Failed to format cgroup mask: %m");

        return serialize_item(f, key, s);
}
3188
/* Serialization keys for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric. Used by
 * unit_serialize() below; the deserializer must recognize the same strings. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3195
/* Writes the unit's runtime state to @f (with fds passed via @fds) so it survives daemon re-execution/reload.
 * The format is "key=value" lines terminated by an empty line, and must stay in sync with
 * unit_deserialize(). Type-specific state is emitted first via the vtable's serialize() hook. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);

        (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
        (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
        (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
        (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful if the corresponding check ever ran */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                (void) serialize_bool(f, "condition-result", u->condition_result);

        if (dual_timestamp_is_set(&u->assert_timestamp))
                (void) serialize_bool(f, "assert-result", u->assert_result);

        (void) serialize_bool(f, "transient", u->transient);
        (void) serialize_bool(f, "in-audit", u->in_audit);

        (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
        (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
        (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
        (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
        (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                (void) serialize_item(f, "cgroup", u->cgroup_path);

        (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);

        if (uid_is_valid(u->ref_uid))
                (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        /* Serialize the bus clients tracking us, one "ref=" line each */
        bus_track_serialize(u->bus_track, f, "ref");

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                /* Both the installed job and the nop job are written under the same "job" marker; the
                 * embedded job serialization records their actual type. */
                if (u->job) {
                        fputs("job\n", f);
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fputs("job\n", f);
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3281
/* Restores unit state previously written by unit_serialize(): reads "key=value" lines from 'f' until the
 * empty-line end marker (or EOF). Known keys are applied directly; unknown keys are forwarded to the unit
 * type's deserialize_item(). Most parse failures are logged at debug level and ignored, so that
 * deserialization stays robust across daemon re-execution between versions. Returns 0 on success, negative
 * errno on fatal errors (read failure, OOM, job deserialization failure). */
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                CGroupIPAccountingMetric m;
                char *l, *v;
                size_t k;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0) /* eof */
                        break;

                l = strstrip(line);
                if (isempty(l)) /* End marker */
                        break;

                /* Split the line at the first '=' into key 'l' and value 'v'. If there is no '=', 'v' ends
                 * up pointing at the trailing NUL, i.e. the empty string. */
                k = strcspn(l, "=");

                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;

                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* new-style serialized job */
                                Job *j;

                                j = job_new_raw(u);
                                if (!j)
                                        return log_oom();

                                r = job_deserialize(j, f);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = job_install_deserialized(j);
                                if (r < 0) {
                                        /* Undo the hashmap registration again if installing failed */
                                        hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
                                        job_free(j);
                                        return r;
                                }
                        } else /* legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "state-change-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;

                        continue;

                } else if (streq(l, "assert-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;

                        continue;

                } else if (streq(l, "transient")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;

                        continue;

                } else if (streq(l, "in-audit")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
                        else
                                u->in_audit = r;

                        continue;

                } else if (streq(l, "exported-invocation-id")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
                        else
                                u->exported_invocation_id = r;

                        continue;

                } else if (streq(l, "exported-log-level-max")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
                        else
                                u->exported_log_level_max = r;

                        continue;

                } else if (streq(l, "exported-log-extra-fields")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
                        else
                                u->exported_log_extra_fields = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-interval")) {

                        /* NOTE(review): parse_boolean() here suggests the "exported-*" keys are flags
                         * recording whether the setting was exported, not the setting's value itself —
                         * confirm against unit_serialize() and the Unit struct definition. */
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_interval = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-burst")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_burst = r;

                        continue;

                } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {

                        /* Both the current and the legacy serialization key are accepted here. */
                        r = safe_atou64(v, &u->cpu_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cpu-usage-last")) {

                        r = safe_atou64(v, &u->cpu_usage_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cgroup")) {

                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);

                        (void) unit_watch_cgroup(u);

                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;

                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;

                        continue;

                } else if (streq(l, "cgroup-realized-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_realized_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-enabled-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-invalidated-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "ref-uid")) {
                        uid_t uid;

                        r = parse_uid(v, &uid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, uid, GID_INVALID);

                        continue;

                } else if (streq(l, "ref-gid")) {
                        gid_t gid;

                        r = parse_gid(v, &gid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, UID_INVALID, gid);

                        continue;

                } else if (streq(l, "ref")) {

                        /* Bus names tracked for this unit; re-registered later in unit_coldplug(). */
                        r = strv_extend(&u->deserialized_refs, v);
                        if (r < 0)
                                return log_oom();

                        continue;
                } else if (streq(l, "invocation-id")) {
                        sd_id128_t id;

                        r = sd_id128_from_string(v, &id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
                        else {
                                r = unit_set_invocation_id(u, id);
                                if (r < 0)
                                        log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
                        }

                        continue;
                }

                /* Check if this is an IP accounting metric serialization field */
                for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
                        if (streq(l, ip_accounting_metric_field[m]))
                                break;
                if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
                        else
                                u->ip_accounting_extra[m] = c;
                        continue;
                }

                if (unit_can_serialize(u)) {
                        r = exec_runtime_deserialize_compat(u, l, v, fds);
                        if (r < 0) {
                                log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                continue;
                        }

                        /* Returns positive if key was handled by the call */
                        if (r > 0)
                                continue;

                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }

        /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
         * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
         * before 228 where the base for timeouts was not persistent across reboots. */

        if (!dual_timestamp_is_set(&u->state_change_timestamp))
                dual_timestamp_get(&u->state_change_timestamp);

        /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
         * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
        unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
        unit_invalidate_cgroup_bpf(u);

        return 0;
}
3601
3602 int unit_deserialize_skip(FILE *f) {
3603 int r;
3604 assert(f);
3605
3606 /* Skip serialized data for this unit. We don't know what it is. */
3607
3608 for (;;) {
3609 _cleanup_free_ char *line = NULL;
3610 char *l;
3611
3612 r = read_line(f, LONG_LINE_MAX, &line);
3613 if (r < 0)
3614 return log_error_errno(r, "Failed to read serialization line: %m");
3615 if (r == 0)
3616 return 0;
3617
3618 l = strstrip(line);
3619
3620 /* End marker */
3621 if (isempty(l))
3622 return 1;
3623 }
3624 }
3625
3626 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3627 Unit *device;
3628 _cleanup_free_ char *e = NULL;
3629 int r;
3630
3631 assert(u);
3632
3633 /* Adds in links to the device node that this unit is based on */
3634 if (isempty(what))
3635 return 0;
3636
3637 if (!is_device_path(what))
3638 return 0;
3639
3640 /* When device units aren't supported (such as in a
3641 * container), don't create dependencies on them. */
3642 if (!unit_type_supported(UNIT_DEVICE))
3643 return 0;
3644
3645 r = unit_name_from_path(what, ".device", &e);
3646 if (r < 0)
3647 return r;
3648
3649 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3650 if (r < 0)
3651 return r;
3652
3653 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3654 dep = UNIT_BINDS_TO;
3655
3656 r = unit_add_two_dependencies(u, UNIT_AFTER,
3657 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3658 device, true, mask);
3659 if (r < 0)
3660 return r;
3661
3662 if (wants) {
3663 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3664 if (r < 0)
3665 return r;
3666 }
3667
3668 return 0;
3669 }
3670
3671 int unit_coldplug(Unit *u) {
3672 int r = 0, q;
3673 char **i;
3674
3675 assert(u);
3676
3677 /* Make sure we don't enter a loop, when coldplugging recursively. */
3678 if (u->coldplugged)
3679 return 0;
3680
3681 u->coldplugged = true;
3682
3683 STRV_FOREACH(i, u->deserialized_refs) {
3684 q = bus_unit_track_add_name(u, *i);
3685 if (q < 0 && r >= 0)
3686 r = q;
3687 }
3688 u->deserialized_refs = strv_free(u->deserialized_refs);
3689
3690 if (UNIT_VTABLE(u)->coldplug) {
3691 q = UNIT_VTABLE(u)->coldplug(u);
3692 if (q < 0 && r >= 0)
3693 r = q;
3694 }
3695
3696 if (u->job) {
3697 q = job_coldplug(u->job);
3698 if (q < 0 && r >= 0)
3699 r = q;
3700 }
3701
3702 return r;
3703 }
3704
3705 void unit_catchup(Unit *u) {
3706 assert(u);
3707
3708 if (UNIT_VTABLE(u)->catchup)
3709 UNIT_VTABLE(u)->catchup(u);
3710 }
3711
3712 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3713 struct stat st;
3714
3715 if (!path)
3716 return false;
3717
3718 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3719 * are never out-of-date. */
3720 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3721 return false;
3722
3723 if (stat(path, &st) < 0)
3724 /* What, cannot access this anymore? */
3725 return true;
3726
3727 if (path_masked)
3728 /* For masked files check if they are still so */
3729 return !null_or_empty(&st);
3730 else
3731 /* For non-empty files check the mtime */
3732 return timespec_load(&st.st_mtim) > mtime;
3733
3734 return false;
3735 }
3736
3737 bool unit_need_daemon_reload(Unit *u) {
3738 _cleanup_strv_free_ char **t = NULL;
3739 char **path;
3740
3741 assert(u);
3742
3743 /* For unit files, we allow masking… */
3744 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3745 u->load_state == UNIT_MASKED))
3746 return true;
3747
3748 /* Source paths should not be masked… */
3749 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3750 return true;
3751
3752 if (u->load_state == UNIT_LOADED)
3753 (void) unit_find_dropin_paths(u, &t);
3754 if (!strv_equal(u->dropin_paths, t))
3755 return true;
3756
3757 /* … any drop-ins that are masked are simply omitted from the list. */
3758 STRV_FOREACH(path, u->dropin_paths)
3759 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3760 return true;
3761
3762 return false;
3763 }
3764
3765 void unit_reset_failed(Unit *u) {
3766 assert(u);
3767
3768 if (UNIT_VTABLE(u)->reset_failed)
3769 UNIT_VTABLE(u)->reset_failed(u);
3770
3771 RATELIMIT_RESET(u->start_limit);
3772 u->start_limit_hit = false;
3773 }
3774
3775 Unit *unit_following(Unit *u) {
3776 assert(u);
3777
3778 if (UNIT_VTABLE(u)->following)
3779 return UNIT_VTABLE(u)->following(u);
3780
3781 return NULL;
3782 }
3783
3784 bool unit_stop_pending(Unit *u) {
3785 assert(u);
3786
3787 /* This call does check the current state of the unit. It's
3788 * hence useful to be called from state change calls of the
3789 * unit itself, where the state isn't updated yet. This is
3790 * different from unit_inactive_or_pending() which checks both
3791 * the current state and for a queued job. */
3792
3793 return u->job && u->job->type == JOB_STOP;
3794 }
3795
3796 bool unit_inactive_or_pending(Unit *u) {
3797 assert(u);
3798
3799 /* Returns true if the unit is inactive or going down */
3800
3801 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3802 return true;
3803
3804 if (unit_stop_pending(u))
3805 return true;
3806
3807 return false;
3808 }
3809
3810 bool unit_active_or_pending(Unit *u) {
3811 assert(u);
3812
3813 /* Returns true if the unit is active or going up */
3814
3815 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3816 return true;
3817
3818 if (u->job &&
3819 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3820 return true;
3821
3822 return false;
3823 }
3824
3825 bool unit_will_restart(Unit *u) {
3826 assert(u);
3827
3828 if (!UNIT_VTABLE(u)->will_restart)
3829 return false;
3830
3831 return UNIT_VTABLE(u)->will_restart(u);
3832 }
3833
3834 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3835 assert(u);
3836 assert(w >= 0 && w < _KILL_WHO_MAX);
3837 assert(SIGNAL_VALID(signo));
3838
3839 if (!UNIT_VTABLE(u)->kill)
3840 return -EOPNOTSUPP;
3841
3842 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3843 }
3844
3845 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3846 _cleanup_set_free_ Set *pid_set = NULL;
3847 int r;
3848
3849 pid_set = set_new(NULL);
3850 if (!pid_set)
3851 return NULL;
3852
3853 /* Exclude the main/control pids from being killed via the cgroup */
3854 if (main_pid > 0) {
3855 r = set_put(pid_set, PID_TO_PTR(main_pid));
3856 if (r < 0)
3857 return NULL;
3858 }
3859
3860 if (control_pid > 0) {
3861 r = set_put(pid_set, PID_TO_PTR(control_pid));
3862 if (r < 0)
3863 return NULL;
3864 }
3865
3866 return TAKE_PTR(pid_set);
3867 }
3868
3869 int unit_kill_common(
3870 Unit *u,
3871 KillWho who,
3872 int signo,
3873 pid_t main_pid,
3874 pid_t control_pid,
3875 sd_bus_error *error) {
3876
3877 int r = 0;
3878 bool killed = false;
3879
3880 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3881 if (main_pid < 0)
3882 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3883 else if (main_pid == 0)
3884 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3885 }
3886
3887 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3888 if (control_pid < 0)
3889 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3890 else if (control_pid == 0)
3891 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3892 }
3893
3894 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3895 if (control_pid > 0) {
3896 if (kill(control_pid, signo) < 0)
3897 r = -errno;
3898 else
3899 killed = true;
3900 }
3901
3902 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3903 if (main_pid > 0) {
3904 if (kill(main_pid, signo) < 0)
3905 r = -errno;
3906 else
3907 killed = true;
3908 }
3909
3910 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3911 _cleanup_set_free_ Set *pid_set = NULL;
3912 int q;
3913
3914 /* Exclude the main/control pids from being killed via the cgroup */
3915 pid_set = unit_pid_set(main_pid, control_pid);
3916 if (!pid_set)
3917 return -ENOMEM;
3918
3919 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3920 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3921 r = q;
3922 else
3923 killed = true;
3924 }
3925
3926 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3927 return -ESRCH;
3928
3929 return r;
3930 }
3931
3932 int unit_following_set(Unit *u, Set **s) {
3933 assert(u);
3934 assert(s);
3935
3936 if (UNIT_VTABLE(u)->following_set)
3937 return UNIT_VTABLE(u)->following_set(u, s);
3938
3939 *s = NULL;
3940 return 0;
3941 }
3942
3943 UnitFileState unit_get_unit_file_state(Unit *u) {
3944 int r;
3945
3946 assert(u);
3947
3948 if (u->unit_file_state < 0 && u->fragment_path) {
3949 r = unit_file_get_state(
3950 u->manager->unit_file_scope,
3951 NULL,
3952 u->id,
3953 &u->unit_file_state);
3954 if (r < 0)
3955 u->unit_file_state = UNIT_FILE_BAD;
3956 }
3957
3958 return u->unit_file_state;
3959 }
3960
3961 int unit_get_unit_file_preset(Unit *u) {
3962 assert(u);
3963
3964 if (u->unit_file_preset < 0 && u->fragment_path)
3965 u->unit_file_preset = unit_file_query_preset(
3966 u->manager->unit_file_scope,
3967 NULL,
3968 basename(u->fragment_path));
3969
3970 return u->unit_file_preset;
3971 }
3972
3973 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3974 assert(ref);
3975 assert(source);
3976 assert(target);
3977
3978 if (ref->target)
3979 unit_ref_unset(ref);
3980
3981 ref->source = source;
3982 ref->target = target;
3983 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3984 return target;
3985 }
3986
3987 void unit_ref_unset(UnitRef *ref) {
3988 assert(ref);
3989
3990 if (!ref->target)
3991 return;
3992
3993 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3994 * be unreferenced now. */
3995 unit_add_to_gc_queue(ref->target);
3996
3997 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3998 ref->source = ref->target = NULL;
3999 }
4000
4001 static int user_from_unit_name(Unit *u, char **ret) {
4002
4003 static const uint8_t hash_key[] = {
4004 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4005 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4006 };
4007
4008 _cleanup_free_ char *n = NULL;
4009 int r;
4010
4011 r = unit_name_to_prefix(u->id, &n);
4012 if (r < 0)
4013 return r;
4014
4015 if (valid_user_group_name(n)) {
4016 *ret = TAKE_PTR(n);
4017 return 0;
4018 }
4019
4020 /* If we can't use the unit name as a user name, then let's hash it and use that */
4021 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4022 return -ENOMEM;
4023
4024 return 0;
4025 }
4026
/* Patches manager-level defaults and implied settings into this unit's exec and cgroup contexts, after
 * all configuration sources have been applied. Returns 0 on success, negative errno on allocation or
 * lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User-manager services default to the user's home directory as working directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping CAP_MKNOD and CAP_SYS_RAWIO from the bounding set. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                /* ProtectKernelModules= implies dropping CAP_SYS_MODULE. */
                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Synthesize user/group names from the unit name if unset. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens the default device policy. */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4118
4119 ExecContext *unit_get_exec_context(Unit *u) {
4120 size_t offset;
4121 assert(u);
4122
4123 if (u->type < 0)
4124 return NULL;
4125
4126 offset = UNIT_VTABLE(u)->exec_context_offset;
4127 if (offset <= 0)
4128 return NULL;
4129
4130 return (ExecContext*) ((uint8_t*) u + offset);
4131 }
4132
4133 KillContext *unit_get_kill_context(Unit *u) {
4134 size_t offset;
4135 assert(u);
4136
4137 if (u->type < 0)
4138 return NULL;
4139
4140 offset = UNIT_VTABLE(u)->kill_context_offset;
4141 if (offset <= 0)
4142 return NULL;
4143
4144 return (KillContext*) ((uint8_t*) u + offset);
4145 }
4146
4147 CGroupContext *unit_get_cgroup_context(Unit *u) {
4148 size_t offset;
4149
4150 if (u->type < 0)
4151 return NULL;
4152
4153 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4154 if (offset <= 0)
4155 return NULL;
4156
4157 return (CGroupContext*) ((uint8_t*) u + offset);
4158 }
4159
4160 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4161 size_t offset;
4162
4163 if (u->type < 0)
4164 return NULL;
4165
4166 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4167 if (offset <= 0)
4168 return NULL;
4169
4170 return *(ExecRuntime**) ((uint8_t*) u + offset);
4171 }
4172
4173 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4174 assert(u);
4175
4176 if (UNIT_WRITE_FLAGS_NOOP(flags))
4177 return NULL;
4178
4179 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4180 return u->manager->lookup_paths.transient;
4181
4182 if (flags & UNIT_PERSISTENT)
4183 return u->manager->lookup_paths.persistent_control;
4184
4185 if (flags & UNIT_RUNTIME)
4186 return u->manager->lookup_paths.runtime_control;
4187
4188 return NULL;
4189 }
4190
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* C-escape the (possibly already specifier-escaped) string, freeing the intermediate
                 * allocation from the previous step, if any. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No 'buf': hand out an allocation in all cases, duplicating the input if untouched. */
        return ret ?: strdup(s);
}
4230
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is double-quoted and entries are separated by a
         * single space. Returns NULL on allocation (or escaping) failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append [space]"entry" at the current end of the buffer. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure the result is NUL-terminated, also for an empty input list. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4271
/* Writes 'data' either into the transient unit file currently being generated for this unit, or into a
 * drop-in file (at priority 50) named after 'name', depending on 'flags'. The data is escaped as requested
 * by the flags and prefixed with the appropriate section header. Returns 0 on success, negative errno on
 * failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in path on the unit; ownership of 'q' transfers to the strv on success. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4348
4349 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4350 _cleanup_free_ char *p = NULL;
4351 va_list ap;
4352 int r;
4353
4354 assert(u);
4355 assert(name);
4356 assert(format);
4357
4358 if (UNIT_WRITE_FLAGS_NOOP(flags))
4359 return 0;
4360
4361 va_start(ap, format);
4362 r = vasprintf(&p, format, ap);
4363 va_end(ap);
4364
4365 if (r < 0)
4366 return -ENOMEM;
4367
4368 return unit_write_setting(u, flags, name, p);
4369 }
4370
/* Turns this unit into a transient unit: opens a fresh unit file for it in the transient lookup path and
 * resets all previously loaded configuration state, so that subsequent unit_write_setting() calls are
 * directed into that file. Returns 0 on success, -EOPNOTSUPP if the unit type can't be transient, or a
 * negative errno. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget all previously loaded configuration sources. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4416
/* Logging callback invoked for each process about to be killed (passed as cg_kill_log_func_t to
 * cg_kill_recursive() in unit_kill_context(); 'userdata' is the Unit). */
static void log_kill(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
           only, like for example systemd's own PAM stub process. */
        if (comm && comm[0] == '(')
                return;

        log_unit_notice(userdata,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid,
                        strna(comm),
                        signal_to_string(sig));
}
4433
4434 static int operation_to_signal(KillContext *c, KillOperation k) {
4435 assert(c);
4436
4437 switch (k) {
4438
4439 case KILL_TERMINATE:
4440 case KILL_TERMINATE_AND_LOG:
4441 return c->kill_signal;
4442
4443 case KILL_KILL:
4444 return c->final_kill_signal;
4445
4446 case KILL_WATCHDOG:
4447 return c->watchdog_signal;
4448
4449 default:
4450 assert_not_reached("KillOperation unknown");
4451 }
4452 }
4453
/* Kills the processes belonging to this unit according to the unit's KillContext 'c' and the operation 'k':
 * first the main and control processes individually, then — depending on KillMode= — everything else left
 * in the unit's cgroup. 'main_pid_alien' indicates the main process was not forked off by us, in which case
 * we don't wait for its exit. Returns > 0 if something worth waiting for was killed, 0 otherwise, negative
 * errno on hard failure (OOM). */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= only applies to the terminate operations, and is pointless if the chosen signal
         * already is SIGHUP. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process for the "loud" operations, and always for SIGKILL/SIGABRT. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked off ourselves. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the rest of the cgroup, for KillMode=control-group always and for KillMode=mixed on the
         * final SIGKILL pass. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Follow up with SIGHUP across the cgroup as well, again excluding
                                 * main/control PIDs, with a freshly built exclusion set. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4571
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private, simplified copy of the path. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        /* Ownership of the string (which 'path' points into) was transferred to the hashmap above;
         * disarm the cleanup handler. */
        p = NULL;

        /* Register this unit in the manager-wide prefix table, under every prefix of the path. */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key now owned by the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4649
4650 int unit_setup_exec_runtime(Unit *u) {
4651 ExecRuntime **rt;
4652 size_t offset;
4653 Unit *other;
4654 Iterator i;
4655 void *v;
4656 int r;
4657
4658 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4659 assert(offset > 0);
4660
4661 /* Check if there already is an ExecRuntime for this unit? */
4662 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4663 if (*rt)
4664 return 0;
4665
4666 /* Try to get it from somebody else */
4667 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4668 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4669 if (r == 1)
4670 return 1;
4671 }
4672
4673 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4674 }
4675
4676 int unit_setup_dynamic_creds(Unit *u) {
4677 ExecContext *ec;
4678 DynamicCreds *dcreds;
4679 size_t offset;
4680
4681 assert(u);
4682
4683 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4684 assert(offset > 0);
4685 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4686
4687 ec = unit_get_exec_context(u);
4688 assert(ec);
4689
4690 if (!ec->dynamic_user)
4691 return 0;
4692
4693 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4694 }
4695
4696 bool unit_type_supported(UnitType t) {
4697 if (_unlikely_(t < 0))
4698 return false;
4699 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4700 return false;
4701
4702 if (!unit_vtable[t]->supported)
4703 return true;
4704
4705 return unit_vtable[t]->supported();
4706 }
4707
4708 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4709 int r;
4710
4711 assert(u);
4712 assert(where);
4713
4714 r = dir_is_empty(where);
4715 if (r > 0 || r == -ENOTDIR)
4716 return;
4717 if (r < 0) {
4718 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4719 return;
4720 }
4721
4722 log_struct(LOG_NOTICE,
4723 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4724 LOG_UNIT_ID(u),
4725 LOG_UNIT_INVOCATION_ID(u),
4726 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4727 "WHERE=%s", where);
4728 }
4729
4730 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4731 _cleanup_free_ char *canonical_where;
4732 int r;
4733
4734 assert(u);
4735 assert(where);
4736
4737 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4738 if (r < 0) {
4739 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4740 return 0;
4741 }
4742
4743 /* We will happily ignore a trailing slash (or any redundant slashes) */
4744 if (path_equal(where, canonical_where))
4745 return 0;
4746
4747 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4748 log_struct(LOG_ERR,
4749 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4750 LOG_UNIT_ID(u),
4751 LOG_UNIT_INVOCATION_ID(u),
4752 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4753 "WHERE=%s", where);
4754
4755 return -ELOOP;
4756 }
4757
4758 bool unit_is_pristine(Unit *u) {
4759 assert(u);
4760
4761 /* Check if the unit already exists or is already around,
4762 * in a number of different ways. Note that to cater for unit
4763 * types such as slice, we are generally fine with units that
4764 * are marked UNIT_LOADED even though nothing was actually
4765 * loaded, as those unit types don't require a file on disk. */
4766
4767 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4768 u->fragment_path ||
4769 u->source_path ||
4770 !strv_isempty(u->dropin_paths) ||
4771 u->job ||
4772 u->merged_into);
4773 }
4774
4775 pid_t unit_control_pid(Unit *u) {
4776 assert(u);
4777
4778 if (UNIT_VTABLE(u)->control_pid)
4779 return UNIT_VTABLE(u)->control_pid(u);
4780
4781 return 0;
4782 }
4783
4784 pid_t unit_main_pid(Unit *u) {
4785 assert(u);
4786
4787 if (UNIT_VTABLE(u)->main_pid)
4788 return UNIT_VTABLE(u)->main_pid(u);
4789
4790 return 0;
4791 }
4792
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference currently held — nothing to drop. */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* mark the slot as unreferenced again */
}
4817
/* Drops this unit's reference on its referenced UID, if any. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4821
/* Drops this unit's reference on its referenced GID, if any. The cast is safe because uid_t and
 * gid_t have the same size and validity rules (statically asserted in unit_unref_uid_internal()). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4825
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero.
         *
         * Returns 0 if the reference was already held, 1 if a new reference was taken, -EBUSY if a
         * different UID/GID is already referenced, or another negative errno on failure. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4863
/* Takes a reference on the given UID on behalf of this unit. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4867
/* Takes a reference on the given GID on behalf of this unit; the casts are safe since uid_t and
 * gid_t share size and validity rules (statically asserted in unit_ref_uid_internal()). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4871
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference, but only if we took it just now (r > 0);
                         * a pre-existing reference (r == 0) must stay in place. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        /* Returns > 0 if at least one new reference was taken. */
        return r > 0 || q > 0;
}
4897
4898 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4899 ExecContext *c;
4900 int r;
4901
4902 assert(u);
4903
4904 c = unit_get_exec_context(u);
4905
4906 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4907 if (r < 0)
4908 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4909
4910 return r;
4911 }
4912
/* Drops both the UID and the GID reference of this unit, if held. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4919
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u); /* a new reference was taken — announce the property change */
}
4933
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Deregister the previous ID from the manager's lookup table, if any. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear the invocation ID". */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On any failure (and for the null-ID case) leave the unit with no invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4970
4971 int unit_acquire_invocation_id(Unit *u) {
4972 sd_id128_t id;
4973 int r;
4974
4975 assert(u);
4976
4977 r = sd_id128_randomize(&id);
4978 if (r < 0)
4979 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4980
4981 r = unit_set_invocation_id(u, id);
4982 if (r < 0)
4983 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4984
4985 return 0;
4986 }
4987
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Fills in the ExecParameters structure with settings derived from the manager and this unit,
         * in preparation for spawning processes. */

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        return 0;
}
5010
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r; /* parent (> 0) or fork error (< 0) */

        /* Child: reset signal dispositions that PID 1 changed for itself. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Make sure the helper dies when the manager goes away. */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5041
/* Updates or removes the dependency entry for 'other' of type 'd' on unit 'u', after its
 * origin/destination masks were modified. An entry with no mask bits left is dropped entirely. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
5056
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* We may modify the hashmap while iterating, hence restart the iteration from
                 * scratch after every modification until a full pass makes no changes. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask has no bits in 'mask'. */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become eligible for garbage collection. */
                                unit_add_to_gc_queue(other);

                                /* The hashmap was modified — abort this pass and start over. */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5110
5111 static int unit_export_invocation_id(Unit *u) {
5112 const char *p;
5113 int r;
5114
5115 assert(u);
5116
5117 if (u->exported_invocation_id)
5118 return 0;
5119
5120 if (sd_id128_is_null(u->invocation_id))
5121 return 0;
5122
5123 p = strjoina("/run/systemd/units/invocation:", u->id);
5124 r = symlink_atomic(u->invocation_id_string, p);
5125 if (r < 0)
5126 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5127
5128 u->exported_invocation_id = true;
5129 return 0;
5130 }
5131
5132 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5133 const char *p;
5134 char buf[2];
5135 int r;
5136
5137 assert(u);
5138 assert(c);
5139
5140 if (u->exported_log_level_max)
5141 return 0;
5142
5143 if (c->log_level_max < 0)
5144 return 0;
5145
5146 assert(c->log_level_max <= 7);
5147
5148 buf[0] = '0' + c->log_level_max;
5149 buf[1] = 0;
5150
5151 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5152 r = symlink_atomic(buf, p);
5153 if (r < 0)
5154 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5155
5156 u->exported_log_level_max = true;
5157 return 0;
5158 }
5159
5160 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5161 _cleanup_close_ int fd = -1;
5162 struct iovec *iovec;
5163 const char *p;
5164 char *pattern;
5165 le64_t *sizes;
5166 ssize_t n;
5167 size_t i;
5168 int r;
5169
5170 if (u->exported_log_extra_fields)
5171 return 0;
5172
5173 if (c->n_log_extra_fields <= 0)
5174 return 0;
5175
5176 sizes = newa(le64_t, c->n_log_extra_fields);
5177 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5178
5179 for (i = 0; i < c->n_log_extra_fields; i++) {
5180 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5181
5182 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5183 iovec[i*2+1] = c->log_extra_fields[i];
5184 }
5185
5186 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5187 pattern = strjoina(p, ".XXXXXX");
5188
5189 fd = mkostemp_safe(pattern);
5190 if (fd < 0)
5191 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5192
5193 n = writev(fd, iovec, c->n_log_extra_fields*2);
5194 if (n < 0) {
5195 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5196 goto fail;
5197 }
5198
5199 (void) fchmod(fd, 0644);
5200
5201 if (rename(pattern, p) < 0) {
5202 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5203 goto fail;
5204 }
5205
5206 u->exported_log_extra_fields = true;
5207 return 0;
5208
5209 fail:
5210 (void) unlink(pattern);
5211 return r;
5212 }
5213
5214 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5215 _cleanup_free_ char *buf = NULL;
5216 const char *p;
5217 int r;
5218
5219 assert(u);
5220 assert(c);
5221
5222 if (u->exported_log_rate_limit_interval)
5223 return 0;
5224
5225 if (c->log_rate_limit_interval_usec == 0)
5226 return 0;
5227
5228 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5229
5230 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5231 return log_oom();
5232
5233 r = symlink_atomic(buf, p);
5234 if (r < 0)
5235 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5236
5237 u->exported_log_rate_limit_interval = true;
5238 return 0;
5239 }
5240
5241 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5242 _cleanup_free_ char *buf = NULL;
5243 const char *p;
5244 int r;
5245
5246 assert(u);
5247 assert(c);
5248
5249 if (u->exported_log_rate_limit_burst)
5250 return 0;
5251
5252 if (c->log_rate_limit_burst == 0)
5253 return 0;
5254
5255 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5256
5257 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5258 return log_oom();
5259
5260 r = symlink_atomic(buf, p);
5261 if (r < 0)
5262 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5263
5264 u->exported_log_rate_limit_burst = true;
5265 return 0;
5266 }
5267
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        /* Only the system instance exports state for journald. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The remaining properties only exist for units with an exec context. */
        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_rate_limit_interval(u, c);
                (void) unit_export_log_rate_limit_burst(u, c);
        }
}
5305
5306 void unit_unlink_state_files(Unit *u) {
5307 const char *p;
5308
5309 assert(u);
5310
5311 if (!u->id)
5312 return;
5313
5314 if (!MANAGER_IS_SYSTEM(u->manager))
5315 return;
5316
5317 /* Undoes the effect of unit_export_state() */
5318
5319 if (u->exported_invocation_id) {
5320 p = strjoina("/run/systemd/units/invocation:", u->id);
5321 (void) unlink(p);
5322
5323 u->exported_invocation_id = false;
5324 }
5325
5326 if (u->exported_log_level_max) {
5327 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5328 (void) unlink(p);
5329
5330 u->exported_log_level_max = false;
5331 }
5332
5333 if (u->exported_log_extra_fields) {
5334 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5335 (void) unlink(p);
5336
5337 u->exported_log_extra_fields = false;
5338 }
5339
5340 if (u->exported_log_rate_limit_interval) {
5341 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5342 (void) unlink(p);
5343
5344 u->exported_log_rate_limit_interval = false;
5345 }
5346
5347 if (u->exported_log_rate_limit_burst) {
5348 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5349 (void) unlink(p);
5350
5351 u->exported_log_rate_limit_burst = false;
5352 }
5353 }
5354
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset accounting once per activation cycle, if requested. */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        /* Make unit state visible to journald before any process starts logging. */
        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5382
5383 static void log_leftover(pid_t pid, int sig, void *userdata) {
5384 _cleanup_free_ char *comm = NULL;
5385
5386 (void) get_process_comm(pid, &comm);
5387
5388 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5389 return;
5390
5391 log_unit_warning(userdata,
5392 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5393 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5394 pid, strna(comm));
5395 }
5396
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        /* Walks the unit's cgroup and logs a warning for each process still found in it,
         * without actually killing anything (signal 0, log_leftover as the logger). */

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5407
5408 bool unit_needs_console(Unit *u) {
5409 ExecContext *ec;
5410 UnitActiveState state;
5411
5412 assert(u);
5413
5414 state = unit_active_state(u);
5415
5416 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5417 return false;
5418
5419 if (UNIT_VTABLE(u)->needs_console)
5420 return UNIT_VTABLE(u)->needs_console(u);
5421
5422 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5423 ec = unit_get_exec_context(u);
5424 if (!ec)
5425 return false;
5426
5427 return exec_context_may_touch_console(ec);
5428 }
5429
5430 const char *unit_label_path(Unit *u) {
5431 const char *p;
5432
5433 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5434 * when validating access checks. */
5435
5436 p = u->source_path ?: u->fragment_path;
5437 if (!p)
5438 return NULL;
5439
5440 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5441 if (path_equal(p, "/dev/null"))
5442 return NULL;
5443
5444 return p;
5445 }
5446
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if attachable, or a bus error otherwise. */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5474
/* Emits the standard structured "Succeeded." journal message for the unit. */
void unit_log_success(Unit *u) {
        assert(u);

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Succeeded."));
}
5484
/* Emits the standard structured failure journal message, including the unit result string
 * (e.g. "timeout", "exit-code") as the UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_struct(LOG_WARNING,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                   "UNIT_RESULT=%s", result);
}
5496
/* Logs a structured message about a process of the unit exiting, with 'kind' naming the process
 * role (e.g. "Main process") and code/status as delivered by SIGCHLD. */
void unit_log_process_exit(
                Unit *u,
                int level,
                const char *kind,
                const char *command,
                int code,
                int status) {

        assert(u);
        assert(kind);

        /* Abnormal termination (signal, coredump, …) is always logged at warning level at least,
         * overriding the caller-supplied level. */
        if (code != CLD_EXITED)
                level = LOG_WARNING;

        log_struct(level,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                   LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
                                    kind,
                                    sigchld_code_to_string(code), status,
                                    strna(code == CLD_EXITED
                                          ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                          : signal_to_string(status))),
                   "EXIT_CODE=%s", sigchld_code_to_string(code),
                   "EXIT_STATUS=%i", status,
                   "COMMAND=%s", strna(command),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u));
}
5525
5526 int unit_exit_status(Unit *u) {
5527 assert(u);
5528
5529 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5530 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5531 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5532 * service process has exited abnormally (signal/coredump). */
5533
5534 if (!UNIT_VTABLE(u)->exit_status)
5535 return -EOPNOTSUPP;
5536
5537 return UNIT_VTABLE(u)->exit_status(u);
5538 }
5539
5540 int unit_failure_action_exit_status(Unit *u) {
5541 int r;
5542
5543 assert(u);
5544
5545 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5546
5547 if (u->failure_action_exit_status >= 0)
5548 return u->failure_action_exit_status;
5549
5550 r = unit_exit_status(u);
5551 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5552 return 255;
5553
5554 return r;
5555 }
5556
5557 int unit_success_action_exit_status(Unit *u) {
5558 int r;
5559
5560 assert(u);
5561
5562 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5563
5564 if (u->success_action_exit_status >= 0)
5565 return u->success_action_exit_status;
5566
5567 r = unit_exit_status(u);
5568 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5569 return 255;
5570
5571 return r;
5572 }
5573
/* Maps CollectMode enum values to their configuration string names (CollectMode= in unit files);
 * the macro below generates the from/to-string lookup helpers. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);