]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
util-lib: don't include fileio.h from fileio-label.h
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "fileio.h"
26 #include "format-util.h"
27 #include "fs-util.h"
28 #include "id128-util.h"
29 #include "io-util.h"
30 #include "load-dropin.h"
31 #include "load-fragment.h"
32 #include "log.h"
33 #include "macro.h"
34 #include "missing.h"
35 #include "mkdir.h"
36 #include "parse-util.h"
37 #include "path-util.h"
38 #include "process-util.h"
39 #include "serialize.h"
40 #include "set.h"
41 #include "signal-util.h"
42 #include "sparse-endian.h"
43 #include "special.h"
44 #include "specifier.h"
45 #include "stat-util.h"
46 #include "stdio-util.h"
47 #include "string-table.h"
48 #include "string-util.h"
49 #include "strv.h"
50 #include "terminal-util.h"
51 #include "umask-util.h"
52 #include "unit-name.h"
53 #include "unit.h"
54 #include "user-util.h"
55 #include "virt.h"
56
/* Dispatch table mapping each concrete unit type to its implementation
 * vtable. Indexed by UnitType; every type below _UNIT_TYPE_MAX must have
 * an entry here. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
70
71 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
72
/* Allocates and zero-initializes a new Unit object of 'size' bytes (type
 * specific unit structs embed a Unit as their first member, hence
 * size >= sizeof(Unit) is asserted). Returns NULL on allocation failure.
 * The unit starts out as an unnamed stub of invalid type; names are added
 * later via unit_add_name(). */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        /* Fields whose zero value is not the right initial value are set
         * explicitly below; everything else stays zeroed from malloc0(). */
        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All BPF map fds start out as -1, i.e. not open. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limit follows the manager-wide defaults; the auto-stop
         * rate limit is fixed at 16 events per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
116
117 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
118 _cleanup_(unit_freep) Unit *u = NULL;
119 int r;
120
121 u = unit_new(m, size);
122 if (!u)
123 return -ENOMEM;
124
125 r = unit_add_name(u, name);
126 if (r < 0)
127 return r;
128
129 *ret = TAKE_PTR(u);
130
131 return r;
132 }
133
134 bool unit_has_name(Unit *u, const char *name) {
135 assert(u);
136 assert(name);
137
138 return set_contains(u->names, (char*) name);
139 }
140
141 static void unit_init(Unit *u) {
142 CGroupContext *cc;
143 ExecContext *ec;
144 KillContext *kc;
145
146 assert(u);
147 assert(u->manager);
148 assert(u->type >= 0);
149
150 cc = unit_get_cgroup_context(u);
151 if (cc) {
152 cgroup_context_init(cc);
153
154 /* Copy in the manager defaults into the cgroup
155 * context, _before_ the rest of the settings have
156 * been initialized */
157
158 cc->cpu_accounting = u->manager->default_cpu_accounting;
159 cc->io_accounting = u->manager->default_io_accounting;
160 cc->ip_accounting = u->manager->default_ip_accounting;
161 cc->blockio_accounting = u->manager->default_blockio_accounting;
162 cc->memory_accounting = u->manager->default_memory_accounting;
163 cc->tasks_accounting = u->manager->default_tasks_accounting;
164 cc->ip_accounting = u->manager->default_ip_accounting;
165
166 if (u->type != UNIT_SLICE)
167 cc->tasks_max = u->manager->default_tasks_max;
168 }
169
170 ec = unit_get_exec_context(u);
171 if (ec) {
172 exec_context_init(ec);
173
174 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
175 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
176 }
177
178 kc = unit_get_kill_context(u);
179 if (kc)
180 kill_context_init(kc);
181
182 if (UNIT_VTABLE(u)->init)
183 UNIT_VTABLE(u)->init(u);
184 }
185
/* Registers an additional name (alias) for the unit, validating it and
 * entering it both in the unit's own name set and in the manager's global
 * name index. The first name registered on a stub unit also fixes the
 * unit's type, id and instance and triggers unit_init(). Returns 0 on
 * success (or if the name was already registered), negative errno on
 * failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        /* A template name ("foo@.service") is instantiated with this
         * unit's instance string first. */
        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Then there's nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Name already taken by a different unit? */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* An instance name is only acceptable for template-capable types. */
        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        /* Enter the name into the manager-wide index; roll back the set
         * insertion if that fails, keeping both structures consistent. */
        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return r;
        }

        /* First name on a stub unit: it determines type, id and instance,
         * and triggers type-specific initialization. */
        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' has passed to u->names (and possibly u->id). */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
268
/* Makes 'name' (which must already be one of the unit's registered names,
 * possibly given as a template to be instantiated) the unit's primary id,
 * and refreshes u->instance from it. Returns -ENOENT if the name is not
 * registered on this unit. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        /* A template name is first instantiated with our own instance string. */
        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id. Note that we
         * look up the copy stored in the name set rather than using the
         * caller's string: u->id must point at memory that lives as long
         * as the name set does. */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        /* Swap in the freshly extracted instance string. */
        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
308
309 int unit_set_description(Unit *u, const char *description) {
310 int r;
311
312 assert(u);
313
314 r = free_and_strdup(&u->description, empty_to_null(description));
315 if (r < 0)
316 return r;
317 if (r > 0)
318 unit_add_to_dbus_queue(u);
319
320 return 0;
321 }
322
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* A pending or installed job keeps the unit alive. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected. */
        if (u->perpetual)
                return false;

        /* Bus clients still track this unit. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* On error (r < 0) err on the side of keeping the unit. */
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type a veto. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
391
392 void unit_add_to_load_queue(Unit *u) {
393 assert(u);
394 assert(u->type != _UNIT_TYPE_INVALID);
395
396 if (u->load_state != UNIT_STUB || u->in_load_queue)
397 return;
398
399 LIST_PREPEND(load_queue, u->manager->load_queue, u);
400 u->in_load_queue = true;
401 }
402
403 void unit_add_to_cleanup_queue(Unit *u) {
404 assert(u);
405
406 if (u->in_cleanup_queue)
407 return;
408
409 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
410 u->in_cleanup_queue = true;
411 }
412
413 void unit_add_to_gc_queue(Unit *u) {
414 assert(u);
415
416 if (u->in_gc_queue || u->in_cleanup_queue)
417 return;
418
419 if (!unit_may_gc(u))
420 return;
421
422 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
423 u->in_gc_queue = true;
424 }
425
426 void unit_add_to_dbus_queue(Unit *u) {
427 assert(u);
428 assert(u->type != _UNIT_TYPE_INVALID);
429
430 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
431 return;
432
433 /* Shortcut things if nobody cares */
434 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
435 sd_bus_track_count(u->bus_track) <= 0 &&
436 set_isempty(u->manager->private_buses)) {
437 u->sent_dbus_new_signal = true;
438 return;
439 }
440
441 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
442 u->in_dbus_queue = true;
443 }
444
445 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
446 assert(u);
447
448 if (u->in_stop_when_unneeded_queue)
449 return;
450
451 if (!u->stop_when_unneeded)
452 return;
453
454 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
455 return;
456
457 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
458 u->in_stop_when_unneeded_queue = true;
459 }
460
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Drop 'u' from every dependency set of the peer unit,
                 * across all dependency types. */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                /* The peer may have become unreferenced now. */
                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
481
/* Removes a transient unit's on-disk configuration: its fragment file and
 * any drop-ins that live below the transient lookup path (plus their
 * directories, where rmdir succeeds, i.e. when they became empty). No-op
 * for non-transient units; all fs errors are deliberately ignored. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* fails harmlessly unless the dir is now empty */
        }
}
512
/* Drops the unit from the manager's path -> units-requiring-mounts reverse
 * index for every recorded path (checking each path prefix, under which the
 * unit is presumably also indexed — see the registration side), then frees
 * the unit's own requires_mounts_for hashmap. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk "/", "/foo", "/foo/bar", ... for the stolen path. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit registered under this prefix? Drop the
                                 * whole entry, including the key string 'y' that
                                 * the hashmap owns. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
546
547 static void unit_done(Unit *u) {
548 ExecContext *ec;
549 CGroupContext *cc;
550
551 assert(u);
552
553 if (u->type < 0)
554 return;
555
556 if (UNIT_VTABLE(u)->done)
557 UNIT_VTABLE(u)->done(u);
558
559 ec = unit_get_exec_context(u);
560 if (ec)
561 exec_context_done(ec);
562
563 cc = unit_get_cgroup_context(u);
564 if (cc)
565 cgroup_context_done(cc);
566 }
567
/* Releases a unit and everything hanging off it: names, jobs, dependency
 * sets, cgroup state, BPF objects, queue memberships, and finally the
 * object itself. NULL is a no-op. The teardown order below matters; be
 * careful when rearranging. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        /* Delete on-disk transient config — but not during a reload (note the guard). */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's name index. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any pending jobs. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Free all dependency sets, fixing up back pointers in peer units. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        /* State files, too, survive a reload only. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove the unit from every manager list/queue it might sit on. */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close BPF map fds and drop program references. */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id points into this set, so this releases it too. */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
701
702 UnitActiveState unit_active_state(Unit *u) {
703 assert(u);
704
705 if (u->load_state == UNIT_MERGED)
706 return unit_active_state(unit_follow_merge(u));
707
708 /* After a reload it might happen that a unit is not correctly
709 * loaded but still has a process around. That's why we won't
710 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
711
712 return UNIT_VTABLE(u)->active_state(u);
713 }
714
715 const char* unit_sub_state_to_string(Unit *u) {
716 assert(u);
717
718 return UNIT_VTABLE(u)->sub_state_to_string(u);
719 }
720
721 static int set_complete_move(Set **s, Set **other) {
722 assert(s);
723 assert(other);
724
725 if (!other)
726 return 0;
727
728 if (*s)
729 return set_move(*s, *other);
730 else
731 *s = TAKE_PTR(*other);
732
733 return 0;
734 }
735
736 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
737 assert(s);
738 assert(other);
739
740 if (!*other)
741 return 0;
742
743 if (*s)
744 return hashmap_move(*s, *other);
745 else
746 *s = TAKE_PTR(*other);
747
748 return 0;
749 }
750
/* Transfers all of other's names into u's name set and repoints the
 * manager's global name index at u for every name. Clears other's name
 * set and id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Release whatever remains of other's set (it is NULL if it was
         * taken over wholesale above). */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Every one of u's names (old and newly acquired) must now resolve
         * to u in the manager's index; the replace must succeed. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
772
773 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
774 unsigned n_reserve;
775
776 assert(u);
777 assert(other);
778 assert(d < _UNIT_DEPENDENCY_MAX);
779
780 /*
781 * If u does not have this dependency set allocated, there is no need
782 * to reserve anything. In that case other's set will be transferred
783 * as a whole to u by complete_move().
784 */
785 if (!u->dependencies[d])
786 return 0;
787
788 /* merge_dependencies() will skip a u-on-u dependency */
789 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
790
791 return hashmap_reserve(u->dependencies[d], n_reserve);
792 }
793
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                /* OR together the origin/destination masks of the old edge and any existing edge to u. */
                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
857
/* Merges 'other' into 'u': names, incoming references and dependencies of
 * 'other' are transferred to 'u'; 'other' is then marked UNIT_MERGED and
 * queued for cleanup. Fails with -EINVAL/-EEXIST if the units are
 * incompatible or 'other' is too "alive" to be merged away. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        /* Only units of the same type and same instancedness can merge. */
        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        /* A unit with jobs or runtime activity cannot be merged away. */
        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of the id for messages: merge_names() clears it. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
935
936 int unit_merge_by_name(Unit *u, const char *name) {
937 _cleanup_free_ char *s = NULL;
938 Unit *other;
939 int r;
940
941 assert(u);
942 assert(name);
943
944 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
945 if (!u->instance)
946 return -EINVAL;
947
948 r = unit_name_replace_instance(name, u->instance, &s);
949 if (r < 0)
950 return r;
951
952 name = s;
953 }
954
955 other = manager_get_unit(u->manager, name);
956 if (other)
957 return unit_merge(u, other);
958
959 return unit_add_name(u, name);
960 }
961
962 Unit* unit_follow_merge(Unit *u) {
963 assert(u);
964
965 while (u->load_state == UNIT_MERGED)
966 assert_se(u = u->merged_into);
967
968 return u;
969 }
970
/* Adds the implicit dependencies a unit with an ExecContext needs: mount
 * dependencies for its configured paths, and — on the system manager
 * only — ordering against tmpfiles-setup (for PrivateTmp=) and journald
 * (for journal/kmsg/syslog output). */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Per-type exec directories, resolved against the manager's prefix
         * for each directory type. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below only applies to the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Neither stdout nor stderr goes to the journal/kmsg/syslog? Then
         * no ordering against journald is needed. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1050
1051 const char *unit_description(Unit *u) {
1052 assert(u);
1053
1054 if (u->description)
1055 return u->description;
1056
1057 return strna(u->id);
1058 }
1059
1060 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1061 const struct {
1062 UnitDependencyMask mask;
1063 const char *name;
1064 } table[] = {
1065 { UNIT_DEPENDENCY_FILE, "file" },
1066 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1067 { UNIT_DEPENDENCY_DEFAULT, "default" },
1068 { UNIT_DEPENDENCY_UDEV, "udev" },
1069 { UNIT_DEPENDENCY_PATH, "path" },
1070 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1071 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1072 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1073 };
1074 size_t i;
1075
1076 assert(f);
1077 assert(kind);
1078 assert(space);
1079
1080 for (i = 0; i < ELEMENTSOF(table); i++) {
1081
1082 if (mask == 0)
1083 break;
1084
1085 if (FLAGS_SET(mask, table[i].mask)) {
1086 if (*space)
1087 fputc(' ', f);
1088 else
1089 *space = true;
1090
1091 fputs(kind, f);
1092 fputs("-", f);
1093 fputs(table[i].name, f);
1094
1095 mask &= ~table[i].mask;
1096 }
1097 }
1098
1099 assert(mask == 0);
1100 }
1101
1102 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1103 char *t, **j;
1104 UnitDependency d;
1105 Iterator i;
1106 const char *prefix2;
1107 char
1108 timestamp0[FORMAT_TIMESTAMP_MAX],
1109 timestamp1[FORMAT_TIMESTAMP_MAX],
1110 timestamp2[FORMAT_TIMESTAMP_MAX],
1111 timestamp3[FORMAT_TIMESTAMP_MAX],
1112 timestamp4[FORMAT_TIMESTAMP_MAX],
1113 timespan[FORMAT_TIMESPAN_MAX];
1114 Unit *following;
1115 _cleanup_set_free_ Set *following_set = NULL;
1116 const char *n;
1117 CGroupMask m;
1118 int r;
1119
1120 assert(u);
1121 assert(u->type >= 0);
1122
1123 prefix = strempty(prefix);
1124 prefix2 = strjoina(prefix, "\t");
1125
1126 fprintf(f,
1127 "%s-> Unit %s:\n"
1128 "%s\tDescription: %s\n"
1129 "%s\tInstance: %s\n"
1130 "%s\tUnit Load State: %s\n"
1131 "%s\tUnit Active State: %s\n"
1132 "%s\tState Change Timestamp: %s\n"
1133 "%s\tInactive Exit Timestamp: %s\n"
1134 "%s\tActive Enter Timestamp: %s\n"
1135 "%s\tActive Exit Timestamp: %s\n"
1136 "%s\tInactive Enter Timestamp: %s\n"
1137 "%s\tMay GC: %s\n"
1138 "%s\tNeed Daemon Reload: %s\n"
1139 "%s\tTransient: %s\n"
1140 "%s\tPerpetual: %s\n"
1141 "%s\tGarbage Collection Mode: %s\n"
1142 "%s\tSlice: %s\n"
1143 "%s\tCGroup: %s\n"
1144 "%s\tCGroup realized: %s\n",
1145 prefix, u->id,
1146 prefix, unit_description(u),
1147 prefix, strna(u->instance),
1148 prefix, unit_load_state_to_string(u->load_state),
1149 prefix, unit_active_state_to_string(unit_active_state(u)),
1150 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1151 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1152 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1153 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1154 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1155 prefix, yes_no(unit_may_gc(u)),
1156 prefix, yes_no(unit_need_daemon_reload(u)),
1157 prefix, yes_no(u->transient),
1158 prefix, yes_no(u->perpetual),
1159 prefix, collect_mode_to_string(u->collect_mode),
1160 prefix, strna(unit_slice_name(u)),
1161 prefix, strna(u->cgroup_path),
1162 prefix, yes_no(u->cgroup_realized));
1163
1164 if (u->cgroup_realized_mask != 0) {
1165 _cleanup_free_ char *s = NULL;
1166 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1167 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1168 }
1169
1170 if (u->cgroup_enabled_mask != 0) {
1171 _cleanup_free_ char *s = NULL;
1172 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1173 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1174 }
1175
1176 m = unit_get_own_mask(u);
1177 if (m != 0) {
1178 _cleanup_free_ char *s = NULL;
1179 (void) cg_mask_to_string(m, &s);
1180 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1181 }
1182
1183 m = unit_get_members_mask(u);
1184 if (m != 0) {
1185 _cleanup_free_ char *s = NULL;
1186 (void) cg_mask_to_string(m, &s);
1187 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1188 }
1189
1190 m = unit_get_delegate_mask(u);
1191 if (m != 0) {
1192 _cleanup_free_ char *s = NULL;
1193 (void) cg_mask_to_string(m, &s);
1194 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1195 }
1196
1197 SET_FOREACH(t, u->names, i)
1198 fprintf(f, "%s\tName: %s\n", prefix, t);
1199
1200 if (!sd_id128_is_null(u->invocation_id))
1201 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1202 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1203
1204 STRV_FOREACH(j, u->documentation)
1205 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1206
1207 following = unit_following(u);
1208 if (following)
1209 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1210
1211 r = unit_following_set(u, &following_set);
1212 if (r >= 0) {
1213 Unit *other;
1214
1215 SET_FOREACH(other, following_set, i)
1216 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1217 }
1218
1219 if (u->fragment_path)
1220 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1221
1222 if (u->source_path)
1223 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1224
1225 STRV_FOREACH(j, u->dropin_paths)
1226 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1227
1228 if (u->failure_action != EMERGENCY_ACTION_NONE)
1229 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1230 if (u->failure_action_exit_status >= 0)
1231 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1232 if (u->success_action != EMERGENCY_ACTION_NONE)
1233 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1234 if (u->success_action_exit_status >= 0)
1235 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1236
1237 if (u->job_timeout != USEC_INFINITY)
1238 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1239
1240 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1241 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1242
1243 if (u->job_timeout_reboot_arg)
1244 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1245
1246 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1247 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1248
1249 if (dual_timestamp_is_set(&u->condition_timestamp))
1250 fprintf(f,
1251 "%s\tCondition Timestamp: %s\n"
1252 "%s\tCondition Result: %s\n",
1253 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1254 prefix, yes_no(u->condition_result));
1255
1256 if (dual_timestamp_is_set(&u->assert_timestamp))
1257 fprintf(f,
1258 "%s\tAssert Timestamp: %s\n"
1259 "%s\tAssert Result: %s\n",
1260 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1261 prefix, yes_no(u->assert_result));
1262
1263 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1264 UnitDependencyInfo di;
1265 Unit *other;
1266
1267 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1268 bool space = false;
1269
1270 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1271
1272 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1273 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1274
1275 fputs(")\n", f);
1276 }
1277 }
1278
1279 if (!hashmap_isempty(u->requires_mounts_for)) {
1280 UnitDependencyInfo di;
1281 const char *path;
1282
1283 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1284 bool space = false;
1285
1286 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1287
1288 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1289 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1290
1291 fputs(")\n", f);
1292 }
1293 }
1294
1295 if (u->load_state == UNIT_LOADED) {
1296
1297 fprintf(f,
1298 "%s\tStopWhenUnneeded: %s\n"
1299 "%s\tRefuseManualStart: %s\n"
1300 "%s\tRefuseManualStop: %s\n"
1301 "%s\tDefaultDependencies: %s\n"
1302 "%s\tOnFailureJobMode: %s\n"
1303 "%s\tIgnoreOnIsolate: %s\n",
1304 prefix, yes_no(u->stop_when_unneeded),
1305 prefix, yes_no(u->refuse_manual_start),
1306 prefix, yes_no(u->refuse_manual_stop),
1307 prefix, yes_no(u->default_dependencies),
1308 prefix, job_mode_to_string(u->on_failure_job_mode),
1309 prefix, yes_no(u->ignore_on_isolate));
1310
1311 if (UNIT_VTABLE(u)->dump)
1312 UNIT_VTABLE(u)->dump(u, f, prefix2);
1313
1314 } else if (u->load_state == UNIT_MERGED)
1315 fprintf(f,
1316 "%s\tMerged into: %s\n",
1317 prefix, u->merged_into->id);
1318 else if (u->load_state == UNIT_ERROR)
1319 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1320
1321 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1322 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1323
1324 if (u->job)
1325 job_dump(u->job, f, prefix2);
1326
1327 if (u->nop_job)
1328 job_dump(u->nop_job, f, prefix2);
1329 }
1330
1331 /* Common implementation for multiple backends */
1332 int unit_load_fragment_and_dropin(Unit *u) {
1333 int r;
1334
1335 assert(u);
1336
1337 /* Load a .{service,socket,...} file */
1338 r = unit_load_fragment(u);
1339 if (r < 0)
1340 return r;
1341
1342 if (u->load_state == UNIT_STUB)
1343 return -ENOENT;
1344
1345 /* Load drop-in directory data. If u is an alias, we might be reloading the
1346 * target unit needlessly. But we cannot be sure which drops-ins have already
1347 * been loaded and which not, at least without doing complicated book-keeping,
1348 * so let's always reread all drop-ins. */
1349 return unit_load_dropin(unit_follow_merge(u));
1350 }
1351
1352 /* Common implementation for multiple backends */
1353 int unit_load_fragment_and_dropin_optional(Unit *u) {
1354 int r;
1355
1356 assert(u);
1357
1358 /* Same as unit_load_fragment_and_dropin(), but whether
1359 * something can be loaded or not doesn't matter. */
1360
1361 /* Load a .service/.socket/.slice/… file */
1362 r = unit_load_fragment(u);
1363 if (r < 0)
1364 return r;
1365
1366 if (u->load_state == UNIT_STUB)
1367 u->load_state = UNIT_LOADED;
1368
1369 /* Load drop-in directory data */
1370 return unit_load_dropin(unit_follow_merge(u));
1371 }
1372
1373 void unit_add_to_target_deps_queue(Unit *u) {
1374 Manager *m = u->manager;
1375
1376 assert(u);
1377
1378 if (u->in_target_deps_queue)
1379 return;
1380
1381 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1382 u->in_target_deps_queue = true;
1383 }
1384
1385 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1386 assert(u);
1387 assert(target);
1388
1389 if (target->type != UNIT_TARGET)
1390 return 0;
1391
1392 /* Only add the dependency if both units are loaded, so that
1393 * that loop check below is reliable */
1394 if (u->load_state != UNIT_LOADED ||
1395 target->load_state != UNIT_LOADED)
1396 return 0;
1397
1398 /* If either side wants no automatic dependencies, then let's
1399 * skip this */
1400 if (!u->default_dependencies ||
1401 !target->default_dependencies)
1402 return 0;
1403
1404 /* Don't create loops */
1405 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1406 return 0;
1407
1408 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1409 }
1410
1411 static int unit_add_slice_dependencies(Unit *u) {
1412 UnitDependencyMask mask;
1413 assert(u);
1414
1415 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1416 return 0;
1417
1418 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1419 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1420 relationship). */
1421 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1422
1423 if (UNIT_ISSET(u->slice))
1424 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1425
1426 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1427 return 0;
1428
1429 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1430 }
1431
/* For each path in this unit's RequiresMountsFor= set, add After= (and, for
 * fragment-backed mounts, Requires=) dependencies on the .mount units covering
 * the path and each of its parent directories. Returns 0 on success, negative
 * errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer, reused for every prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk all prefixes of the path (e.g. /a, /a/b, /a/b/c). */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* A mount unit never depends on itself. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Ordering: start the mount before this unit. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only make the mount a hard requirement when it is
                         * backed by a fragment, i.e. explicitly configured. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1481
1482 static int unit_add_startup_units(Unit *u) {
1483 CGroupContext *c;
1484 int r;
1485
1486 c = unit_get_cgroup_context(u);
1487 if (!c)
1488 return 0;
1489
1490 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1491 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1492 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1493 return 0;
1494
1495 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1496 if (r < 0)
1497 return r;
1498
1499 return set_put(u->manager->startup_units, u);
1500 }
1501
/* Drive a unit through loading: finalize any transient file, invoke the
 * type-specific ->load() hook, and on success wire up slice/mount/startup
 * dependencies. On failure the unit's load_state is moved to UNIT_NOT_FOUND,
 * UNIT_BAD_SETTING or UNIT_ERROR and the error is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* If we are still on the load queue, dequeue ourselves first. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged, or previously failed)? Nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Hand over to the unit-type-specific loader. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after ->load()? Then no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1590
/* Evaluate a list of Condition/Assert entries. Semantics: all non-trigger
 * entries must pass, and — if any trigger ("|") entries exist — at least one
 * of them must pass. An empty list is true. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        /* -1 = no trigger conditions seen yet; 0 = seen, none passed; 1 = one passed. */
        int triggered = -1;

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        /* The test itself errored (r < 0); we log and treat it as failed below. */
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failed (or errored) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* For trigger conditions, latch whether at least one has passed. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and none of them passed. */
        return triggered != 0;
}
1634
1635 static bool unit_condition_test(Unit *u) {
1636 assert(u);
1637
1638 dual_timestamp_get(&u->condition_timestamp);
1639 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1640
1641 return u->condition_result;
1642 }
1643
1644 static bool unit_assert_test(Unit *u) {
1645 assert(u);
1646
1647 dual_timestamp_get(&u->assert_timestamp);
1648 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1649
1650 return u->assert_result;
1651 }
1652
1653 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1654 const char *d;
1655
1656 d = unit_description(u);
1657 if (log_get_show_color())
1658 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1659
1660 DISABLE_WARNING_FORMAT_NONLITERAL;
1661 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1662 REENABLE_WARNING;
1663 }
1664
1665 int unit_start_limit_test(Unit *u) {
1666 const char *reason;
1667
1668 assert(u);
1669
1670 if (ratelimit_below(&u->start_limit)) {
1671 u->start_limit_hit = false;
1672 return 0;
1673 }
1674
1675 log_unit_warning(u, "Start request repeated too quickly.");
1676 u->start_limit_hit = true;
1677
1678 reason = strjoina("unit ", u->id, " failed");
1679
1680 return emergency_action(u->manager, u->start_limit_action,
1681 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1682 u->reboot_arg, -1, reason);
1683 }
1684
1685 bool unit_shall_confirm_spawn(Unit *u) {
1686 assert(u);
1687
1688 if (manager_is_confirm_spawn_disabled(u->manager))
1689 return false;
1690
1691 /* For some reasons units remaining in the same process group
1692 * as PID 1 fail to acquire the console even if it's not used
1693 * by any process. So skip the confirmation question for them. */
1694 return !unit_get_exec_context(u)->same_pgrp;
1695 }
1696
1697 static bool unit_verify_deps(Unit *u) {
1698 Unit *other;
1699 Iterator j;
1700 void *v;
1701
1702 assert(u);
1703
1704 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1705 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1706 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1707 * conjunction with After= as for them any such check would make things entirely racy. */
1708
1709 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1710
1711 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1712 continue;
1713
1714 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1715 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1716 return false;
1717 }
1718 }
1719
1720 return true;
1721 }
1722
/* Errors:
 *         -EBADR:      This unit type does not support starting.
 *         -EALREADY:   Unit is already started.
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *         -ECANCELED:  Too many requests for now.
 *         -EPROTO:     Assert failed
 *         -EINVAL:     Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 *         -ESTALE:     This unit has been started before and can't be started a second time
 *
 * NOTE(review): a failed condition check below returns -ECOMM, which is not
 * listed here — confirm callers treat it as "condition failed, not an error". */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -ECOMM;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1809
1810 bool unit_can_start(Unit *u) {
1811 assert(u);
1812
1813 if (u->load_state != UNIT_LOADED)
1814 return false;
1815
1816 if (!unit_supported(u))
1817 return false;
1818
1819 /* Scope units may be started only once */
1820 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1821 return false;
1822
1823 return !!UNIT_VTABLE(u)->start;
1824 }
1825
1826 bool unit_can_isolate(Unit *u) {
1827 assert(u);
1828
1829 return unit_can_start(u) &&
1830 u->allow_isolate;
1831 }
1832
1833 /* Errors:
1834 * -EBADR: This unit type does not support stopping.
1835 * -EALREADY: Unit is already stopped.
1836 * -EAGAIN: An operation is already in progress. Retry later.
1837 */
1838 int unit_stop(Unit *u) {
1839 UnitActiveState state;
1840 Unit *following;
1841
1842 assert(u);
1843
1844 state = unit_active_state(u);
1845 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1846 return -EALREADY;
1847
1848 following = unit_following(u);
1849 if (following) {
1850 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1851 return unit_stop(following);
1852 }
1853
1854 if (!UNIT_VTABLE(u)->stop)
1855 return -EBADR;
1856
1857 unit_add_to_dbus_queue(u);
1858
1859 return UNIT_VTABLE(u)->stop(u);
1860 }
1861
1862 bool unit_can_stop(Unit *u) {
1863 assert(u);
1864
1865 if (!unit_supported(u))
1866 return false;
1867
1868 if (u->perpetual)
1869 return false;
1870
1871 return !!UNIT_VTABLE(u)->stop;
1872 }
1873
1874 /* Errors:
1875 * -EBADR: This unit type does not support reloading.
1876 * -ENOEXEC: Unit is not started.
1877 * -EAGAIN: An operation is already in progress. Retry later.
1878 */
1879 int unit_reload(Unit *u) {
1880 UnitActiveState state;
1881 Unit *following;
1882
1883 assert(u);
1884
1885 if (u->load_state != UNIT_LOADED)
1886 return -EINVAL;
1887
1888 if (!unit_can_reload(u))
1889 return -EBADR;
1890
1891 state = unit_active_state(u);
1892 if (state == UNIT_RELOADING)
1893 return -EALREADY;
1894
1895 if (state != UNIT_ACTIVE) {
1896 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1897 return -ENOEXEC;
1898 }
1899
1900 following = unit_following(u);
1901 if (following) {
1902 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1903 return unit_reload(following);
1904 }
1905
1906 unit_add_to_dbus_queue(u);
1907
1908 if (!UNIT_VTABLE(u)->reload) {
1909 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1910 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1911 return 0;
1912 }
1913
1914 return UNIT_VTABLE(u)->reload(u);
1915 }
1916
1917 bool unit_can_reload(Unit *u) {
1918 assert(u);
1919
1920 if (UNIT_VTABLE(u)->can_reload)
1921 return UNIT_VTABLE(u)->can_reload(u);
1922
1923 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1924 return true;
1925
1926 return UNIT_VTABLE(u)->reload;
1927 }
1928
1929 bool unit_is_unneeded(Unit *u) {
1930 static const UnitDependency deps[] = {
1931 UNIT_REQUIRED_BY,
1932 UNIT_REQUISITE_OF,
1933 UNIT_WANTED_BY,
1934 UNIT_BOUND_BY,
1935 };
1936 size_t j;
1937
1938 assert(u);
1939
1940 if (!u->stop_when_unneeded)
1941 return false;
1942
1943 /* Don't clean up while the unit is transitioning or is even inactive. */
1944 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1945 return false;
1946 if (u->job)
1947 return false;
1948
1949 for (j = 0; j < ELEMENTSOF(deps); j++) {
1950 Unit *other;
1951 Iterator i;
1952 void *v;
1953
1954 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1955 * restart, then don't clean this one up. */
1956
1957 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1958 if (other->job)
1959 return false;
1960
1961 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1962 return false;
1963
1964 if (unit_will_restart(other))
1965 return false;
1966 }
1967 }
1968
1969 return true;
1970 }
1971
1972 static void check_unneeded_dependencies(Unit *u) {
1973
1974 static const UnitDependency deps[] = {
1975 UNIT_REQUIRES,
1976 UNIT_REQUISITE,
1977 UNIT_WANTS,
1978 UNIT_BINDS_TO,
1979 };
1980 size_t j;
1981
1982 assert(u);
1983
1984 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1985
1986 for (j = 0; j < ELEMENTSOF(deps); j++) {
1987 Unit *other;
1988 Iterator i;
1989 void *v;
1990
1991 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
1992 unit_submit_to_stop_when_unneeded_queue(other);
1993 }
1994 }
1995
/* If this active unit is bound (BindsTo=) to a unit that has gone down, queue
 * a stop job for it, subject to a rate limit to avoid stop loops. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* Nothing to do while a job for us is already pending. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        /* Look for a BindsTo= dependency that is inactive/failed. On a hit we
         * break immediately, so 'other' still names the culprit after the loop
         * (it is used in the log messages below). */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2046
/* Invoked when u became active outside the job engine: queue catch-up jobs for
 * its configured dependencies. Requires=/BindsTo= are pulled in with
 * JOB_REPLACE, Wants= only with JOB_FAIL, and Conflicts=/ConflictedBy= peers
 * are stopped. Units that u is ordered After= are skipped — presumably they
 * were already handled by ordering; TODO confirm. NOTE(review): the
 * manager_add_job() return values are deliberately ignored — best effort. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= is a weak pull-in: use JOB_FAIL so an unstartable unit doesn't replace existing jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2078
2079 static void retroactively_stop_dependencies(Unit *u) {
2080 Unit *other;
2081 Iterator i;
2082 void *v;
2083
2084 assert(u);
2085 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2086
2087 /* Pull down units which are bound to us recursively if enabled */
2088 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2089 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2090 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2091 }
2092
2093 void unit_start_on_failure(Unit *u) {
2094 Unit *other;
2095 Iterator i;
2096 void *v;
2097 int r;
2098
2099 assert(u);
2100
2101 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2102 return;
2103
2104 log_unit_info(u, "Triggering OnFailure= dependencies.");
2105
2106 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2107 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2108
2109 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2110 if (r < 0)
2111 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2112 }
2113 }
2114
2115 void unit_trigger_notify(Unit *u) {
2116 Unit *other;
2117 Iterator i;
2118 void *v;
2119
2120 assert(u);
2121
2122 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2123 if (UNIT_VTABLE(other)->trigger_notify)
2124 UNIT_VTABLE(other)->trigger_notify(other, u);
2125 }
2126
/* Invoked whenever a unit enters failed or dead state. Logs information about
 * consumed resources if resource accounting was enabled for the unit, in two
 * forms at once: a friendly human-readable MESSAGE= and the complete data in
 * structured journal fields. Returns 0 (also when nothing was logged) or a
 * negative errno on OOM. */
static int unit_log_resources(Unit *u) {
        /* Capacity: 1 CPU field + the IP metric fields + 4 trailing fields
         * (MESSAGE=, MESSAGE_ID=, unit field, invocation field; see below). */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL;
        size_t n_message_parts = 0, n_iovec = 0;
        /* At most 3 human-readable parts (CPU, ingress, egress) + NULL terminator. */
        char* message_parts[3 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                t = strjoin("consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }
        }

        if (have_ip_accounting) {
                if (any_traffic) {
                        /* TAKE_PTR transfers ownership out of the _cleanup_free_
                         * variables; the strings are freed via message_parts below. */
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                /* NOTE(review): 'joined' is declared without an initializer but
                 * assigned before any scope exit, so the cleanup handler never
                 * sees it uninitialized; initializing to NULL would be safer. */
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries (counted in n_message_parts/n_iovec). */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2279
2280 static void unit_update_on_console(Unit *u) {
2281 bool b;
2282
2283 assert(u);
2284
2285 b = unit_needs_console(u);
2286 if (u->on_console == b)
2287 return;
2288
2289 u->on_console = b;
2290 if (b)
2291 manager_ref_console(u->manager);
2292 else
2293 manager_unref_console(u->manager);
2294 }
2295
2296 static void unit_emit_audit_start(Unit *u) {
2297 assert(u);
2298
2299 if (u->type != UNIT_SERVICE)
2300 return;
2301
2302 /* Write audit record if we have just finished starting up */
2303 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2304 u->in_audit = true;
2305 }
2306
2307 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2308 assert(u);
2309
2310 if (u->type != UNIT_SERVICE)
2311 return;
2312
2313 if (u->in_audit) {
2314 /* Write audit record if we have just finished shutting down */
2315 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2316 u->in_audit = false;
2317 } else {
2318 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2319 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2320
2321 if (state == UNIT_INACTIVE)
2322 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2323 }
2324 }
2325
/* Central low-level state-change notification, invoked by the per-type unit implementations whenever
 * their internal state machine moves. Responsibilities, in order: update state-change timestamps,
 * maintain the manager's failed-unit set, prune cgroup/state files on deactivation, reconcile the
 * pending job (if any) with the new state, retroactively start/stop dependencies for unexpected
 * changes, emit audit/plymouth/log records, and finally queue follow-up work (stop-when-unneeded,
 * BindsTo= checks, emergency actions, D-Bus signalling, GC).
 *
 * Note that this is called for all low-level state changes, even if they might map to the same
 * high-level UnitActiveState! That means that ns == os is an expected behavior here. For example:
 * if a mount point is remounted this function will be called too! */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        m = u->manager;

        /* Update timestamps for state changes. Skipped while reloading/deserializing, since the
         * "change" is then merely us re-learning previously known state. */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* Any state other than "activating" while a start job runs means the
                                 * start did not go as the job requested. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job pending → every state change is by definition unexpected. */
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= is suppressed when the unit is about to be auto-restarted anyway. */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                /* Apply FailureAction=/SuccessAction= (reboot, poweroff, exit, ...) if configured. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        (void) emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        (void) emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2502
/* Watch a specific PID: record it in the unit's own pid set and register it in the manager's global
 * watch_pids table so process events for it can be routed back to this unit.
 *
 * The manager table uses a dual-key scheme: key PID_TO_PTR(pid) maps to a single Unit (common case),
 * while key PID_TO_PTR(-pid) maps to a NULL-terminated Unit* array for the rare case that several
 * units watch the same PID.
 *
 * Returns 0 on success (including when already watched), negative errno-style error on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        /* Scan the whole array: leaves n at the element count, needed for the copy below. */
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array with room for us plus the NULL terminator. */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* memcpy_safe tolerates array == NULL when n == 0 (no prior entry). */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        /* Old array (if any) is no longer referenced by the hashmap. */
                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Finally remember the PID in the unit's own set. */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2567
/* Stop watching a specific PID for this unit: drop it from both manager-side keys (the single-unit
 * "pid" key and the multi-unit "-pid" array key) and from the unit's own pid set. Counterpart of
 * unit_watch_pid(). */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                size_t n, m = 0;

                /* Let's iterate through the array, dropping our own entry */
                for (n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                /* Re-terminate after the in-place compaction. */
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2597
2598 void unit_unwatch_all_pids(Unit *u) {
2599 assert(u);
2600
2601 while (!set_isempty(u->pids))
2602 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2603
2604 u->pids = set_free(u->pids);
2605 }
2606
/* Cleans dead PIDs from our list: any watched PID that has already been waited for is unwatched.
 * The unit's main and control PIDs are always kept, even if dead, since other code still refers to
 * them. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                /* Never drop the main/control PID here. */
                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current entry from u->pids while
                 * SET_FOREACH is still iterating — assumes the set implementation tolerates removal
                 * of the current element; verify against the hashmap/set implementation. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2629
2630 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2631 Unit *u = userdata;
2632
2633 assert(s);
2634 assert(u);
2635
2636 unit_tidy_watch_pids(u);
2637 unit_watch_all_pids(u);
2638
2639 /* If the PID set is empty now, then let's finish this off. */
2640 unit_synthesize_cgroup_empty_event(u);
2641
2642 return 0;
2643 }
2644
2645 int unit_enqueue_rewatch_pids(Unit *u) {
2646 int r;
2647
2648 assert(u);
2649
2650 if (!u->cgroup_path)
2651 return -ENOENT;
2652
2653 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2654 if (r < 0)
2655 return r;
2656 if (r > 0) /* On unified we can use proper notifications */
2657 return 0;
2658
2659 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2660 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2661 * involves issuing kill(pid, 0) on all processes we watch. */
2662
2663 if (!u->rewatch_pids_event_source) {
2664 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2665
2666 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2667 if (r < 0)
2668 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2669
2670 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2671 if (r < 0)
2672 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2673
2674 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2675
2676 u->rewatch_pids_event_source = TAKE_PTR(s);
2677 }
2678
2679 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2680 if (r < 0)
2681 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2682
2683 return 0;
2684 }
2685
2686 void unit_dequeue_rewatch_pids(Unit *u) {
2687 int r;
2688 assert(u);
2689
2690 if (!u->rewatch_pids_event_source)
2691 return;
2692
2693 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2694 if (r < 0)
2695 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2696
2697 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2698 }
2699
2700 bool unit_job_is_applicable(Unit *u, JobType j) {
2701 assert(u);
2702 assert(j >= 0 && j < _JOB_TYPE_MAX);
2703
2704 switch (j) {
2705
2706 case JOB_VERIFY_ACTIVE:
2707 case JOB_START:
2708 case JOB_NOP:
2709 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2710 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2711 * jobs for it. */
2712 return true;
2713
2714 case JOB_STOP:
2715 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2716 * external events), hence it makes no sense to permit enqueing such a request either. */
2717 return !u->perpetual;
2718
2719 case JOB_RESTART:
2720 case JOB_TRY_RESTART:
2721 return unit_can_stop(u) && unit_can_start(u);
2722
2723 case JOB_RELOAD:
2724 case JOB_TRY_RELOAD:
2725 return unit_can_reload(u);
2726
2727 case JOB_RELOAD_OR_START:
2728 return unit_can_reload(u) && unit_can_start(u);
2729
2730 default:
2731 assert_not_reached("Invalid job type");
2732 }
2733 }
2734
2735 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2736 assert(u);
2737
2738 /* Only warn about some unit types */
2739 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2740 return;
2741
2742 if (streq_ptr(u->id, other))
2743 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2744 else
2745 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2746 }
2747
2748 static int unit_add_dependency_hashmap(
2749 Hashmap **h,
2750 Unit *other,
2751 UnitDependencyMask origin_mask,
2752 UnitDependencyMask destination_mask) {
2753
2754 UnitDependencyInfo info;
2755 int r;
2756
2757 assert(h);
2758 assert(other);
2759 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2760 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2761 assert(origin_mask > 0 || destination_mask > 0);
2762
2763 r = hashmap_ensure_allocated(h, NULL);
2764 if (r < 0)
2765 return r;
2766
2767 assert_cc(sizeof(void*) == sizeof(info));
2768
2769 info.data = hashmap_get(*h, other);
2770 if (info.data) {
2771 /* Entry already exists. Add in our mask. */
2772
2773 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2774 FLAGS_SET(destination_mask, info.destination_mask))
2775 return 0; /* NOP */
2776
2777 info.origin_mask |= origin_mask;
2778 info.destination_mask |= destination_mask;
2779
2780 r = hashmap_update(*h, other, info.data);
2781 } else {
2782 info = (UnitDependencyInfo) {
2783 .origin_mask = origin_mask,
2784 .destination_mask = destination_mask,
2785 };
2786
2787 r = hashmap_put(*h, other, info.data);
2788 }
2789 if (r < 0)
2790 return r;
2791
2792 return 1;
2793 }
2794
2795 int unit_add_dependency(
2796 Unit *u,
2797 UnitDependency d,
2798 Unit *other,
2799 bool add_reference,
2800 UnitDependencyMask mask) {
2801
2802 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2803 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2804 [UNIT_WANTS] = UNIT_WANTED_BY,
2805 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2806 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2807 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2808 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2809 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2810 [UNIT_WANTED_BY] = UNIT_WANTS,
2811 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2812 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2813 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2814 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2815 [UNIT_BEFORE] = UNIT_AFTER,
2816 [UNIT_AFTER] = UNIT_BEFORE,
2817 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2818 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2819 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2820 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2821 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2822 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2823 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2824 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2825 };
2826 Unit *original_u = u, *original_other = other;
2827 int r;
2828
2829 assert(u);
2830 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2831 assert(other);
2832
2833 u = unit_follow_merge(u);
2834 other = unit_follow_merge(other);
2835
2836 /* We won't allow dependencies on ourselves. We will not
2837 * consider them an error however. */
2838 if (u == other) {
2839 maybe_warn_about_dependency(original_u, original_other->id, d);
2840 return 0;
2841 }
2842
2843 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2844 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2845 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2846 return 0;
2847 }
2848
2849 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2850 if (r < 0)
2851 return r;
2852
2853 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2854 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2855 if (r < 0)
2856 return r;
2857 }
2858
2859 if (add_reference) {
2860 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2861 if (r < 0)
2862 return r;
2863
2864 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2865 if (r < 0)
2866 return r;
2867 }
2868
2869 unit_add_to_dbus_queue(u);
2870 return 0;
2871 }
2872
2873 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2874 int r;
2875
2876 assert(u);
2877
2878 r = unit_add_dependency(u, d, other, add_reference, mask);
2879 if (r < 0)
2880 return r;
2881
2882 return unit_add_dependency(u, e, other, add_reference, mask);
2883 }
2884
2885 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2886 int r;
2887
2888 assert(u);
2889 assert(name);
2890 assert(buf);
2891 assert(ret);
2892
2893 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2894 *buf = NULL;
2895 *ret = name;
2896 return 0;
2897 }
2898
2899 if (u->instance)
2900 r = unit_name_replace_instance(name, u->instance, buf);
2901 else {
2902 _cleanup_free_ char *i = NULL;
2903
2904 r = unit_name_to_prefix(u->id, &i);
2905 if (r < 0)
2906 return r;
2907
2908 r = unit_name_replace_instance(name, i, buf);
2909 }
2910 if (r < 0)
2911 return r;
2912
2913 *ret = *buf;
2914 return 0;
2915 }
2916
2917 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2918 _cleanup_free_ char *buf = NULL;
2919 Unit *other;
2920 int r;
2921
2922 assert(u);
2923 assert(name);
2924
2925 r = resolve_template(u, name, &buf, &name);
2926 if (r < 0)
2927 return r;
2928
2929 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2930 if (r < 0)
2931 return r;
2932
2933 return unit_add_dependency(u, d, other, add_reference, mask);
2934 }
2935
2936 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2937 _cleanup_free_ char *buf = NULL;
2938 Unit *other;
2939 int r;
2940
2941 assert(u);
2942 assert(name);
2943
2944 r = resolve_template(u, name, &buf, &name);
2945 if (r < 0)
2946 return r;
2947
2948 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2949 if (r < 0)
2950 return r;
2951
2952 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2953 }
2954
/* Point the manager at an alternate unit search path via the environment.
 * This is mostly for debug purposes. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2962
2963 char *unit_dbus_path(Unit *u) {
2964 assert(u);
2965
2966 if (!u->id)
2967 return NULL;
2968
2969 return unit_dbus_path_from_name(u->id);
2970 }
2971
2972 char *unit_dbus_path_invocation_id(Unit *u) {
2973 assert(u);
2974
2975 if (sd_id128_is_null(u->invocation_id))
2976 return NULL;
2977
2978 return unit_dbus_path_from_name(u->invocation_id_string);
2979 }
2980
/* Sets the unit slice if it has not been set before. Is extra
 * careful, to only allow this for units that actually have a
 * cgroup context. Also, we don't allow to set this for slices
 * (since the parent slice is derived from the name). Make
 * sure the unit we set is actually a slice.
 *
 * Returns 1 if the slice was (re)assigned, 0 if it was already set to exactly this slice,
 * negative errno-style error otherwise. Note that the guard order below determines which error
 * code callers observe — do not reorder. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Only unit types with a cgroup context can sit in a slice at all. */
        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        /* Slices derive their parent from their name, never from an explicit assignment. */
        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Refuse re-parenting units that are already running (or otherwise not inactive). */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        /* The target of the assignment must itself be a slice. */
        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it may live only directly under the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
3017
3018 int unit_set_default_slice(Unit *u) {
3019 const char *slice_name;
3020 Unit *slice;
3021 int r;
3022
3023 assert(u);
3024
3025 if (UNIT_ISSET(u->slice))
3026 return 0;
3027
3028 if (u->instance) {
3029 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3030
3031 /* Implicitly place all instantiated units in their
3032 * own per-template slice */
3033
3034 r = unit_name_to_prefix(u->id, &prefix);
3035 if (r < 0)
3036 return r;
3037
3038 /* The prefix is already escaped, but it might include
3039 * "-" which has a special meaning for slice units,
3040 * hence escape it here extra. */
3041 escaped = unit_name_escape(prefix);
3042 if (!escaped)
3043 return -ENOMEM;
3044
3045 if (MANAGER_IS_SYSTEM(u->manager))
3046 slice_name = strjoina("system-", escaped, ".slice");
3047 else
3048 slice_name = strjoina(escaped, ".slice");
3049 } else
3050 slice_name =
3051 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3052 ? SPECIAL_SYSTEM_SLICE
3053 : SPECIAL_ROOT_SLICE;
3054
3055 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3056 if (r < 0)
3057 return r;
3058
3059 return unit_set_slice(u, slice);
3060 }
3061
3062 const char *unit_slice_name(Unit *u) {
3063 assert(u);
3064
3065 if (!UNIT_ISSET(u->slice))
3066 return NULL;
3067
3068 return UNIT_DEREF(u->slice)->id;
3069 }
3070
3071 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3072 _cleanup_free_ char *t = NULL;
3073 int r;
3074
3075 assert(u);
3076 assert(type);
3077 assert(_found);
3078
3079 r = unit_name_change_suffix(u->id, type, &t);
3080 if (r < 0)
3081 return r;
3082 if (unit_has_name(u, t))
3083 return -EINVAL;
3084
3085 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3086 assert(r < 0 || *_found != u);
3087 return r;
3088 }
3089
3090 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3091 const char *name, *old_owner, *new_owner;
3092 Unit *u = userdata;
3093 int r;
3094
3095 assert(message);
3096 assert(u);
3097
3098 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3099 if (r < 0) {
3100 bus_log_parse_error(r);
3101 return 0;
3102 }
3103
3104 old_owner = empty_to_null(old_owner);
3105 new_owner = empty_to_null(new_owner);
3106
3107 if (UNIT_VTABLE(u)->bus_name_owner_change)
3108 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3109
3110 return 0;
3111 }
3112
3113 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3114 const char *match;
3115
3116 assert(u);
3117 assert(bus);
3118 assert(name);
3119
3120 if (u->match_bus_slot)
3121 return -EBUSY;
3122
3123 match = strjoina("type='signal',"
3124 "sender='org.freedesktop.DBus',"
3125 "path='/org/freedesktop/DBus',"
3126 "interface='org.freedesktop.DBus',"
3127 "member='NameOwnerChanged',"
3128 "arg0='", name, "'");
3129
3130 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3131 }
3132
3133 int unit_watch_bus_name(Unit *u, const char *name) {
3134 int r;
3135
3136 assert(u);
3137 assert(name);
3138
3139 /* Watch a specific name on the bus. We only support one unit
3140 * watching each name for now. */
3141
3142 if (u->manager->api_bus) {
3143 /* If the bus is already available, install the match directly.
3144 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3145 r = unit_install_bus_match(u, u->manager->api_bus, name);
3146 if (r < 0)
3147 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3148 }
3149
3150 r = hashmap_put(u->manager->watch_bus, name, u);
3151 if (r < 0) {
3152 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3153 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3154 }
3155
3156 return 0;
3157 }
3158
3159 void unit_unwatch_bus_name(Unit *u, const char *name) {
3160 assert(u);
3161 assert(name);
3162
3163 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3164 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3165 }
3166
3167 bool unit_can_serialize(Unit *u) {
3168 assert(u);
3169
3170 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3171 }
3172
3173 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3174 _cleanup_free_ char *s = NULL;
3175 int r;
3176
3177 assert(f);
3178 assert(key);
3179
3180 if (mask == 0)
3181 return 0;
3182
3183 r = cg_mask_to_string(mask, &s);
3184 if (r < 0)
3185 return log_error_errno(r, "Failed to format cgroup mask: %m");
3186
3187 return serialize_item(f, key, s);
3188 }
3189
/* Serialization keys for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric.
 * These keys are written by unit_serialize() and read back by unit_deserialize(), so they are part
 * of the reload/reexec state format and must stay stable. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3196
/* Serializes the generic per-unit runtime state to "f" (file descriptors go into "fds"), so it
 * survives a daemon reload/reexec; counterpart of unit_deserialize(). Type-specific state is written
 * first via the vtable's serialize() callback, jobs are appended last and only when "serialize_jobs"
 * is set. The record ends with an empty line as end marker. Returns 0 on success, negative
 * errno-style error on failure. Most individual writes are best-effort, hence the (void) casts. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Let the unit type dump its own state first, if it supports serialization at all. */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);

        (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
        (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
        (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
        (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful if they were actually evaluated
         * (i.e. their timestamp is set). */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                (void) serialize_bool(f, "condition-result", u->condition_result);

        if (dual_timestamp_is_set(&u->assert_timestamp))
                (void) serialize_bool(f, "assert-result", u->assert_result);

        (void) serialize_bool(f, "transient", u->transient);
        (void) serialize_bool(f, "in-audit", u->in_audit);

        (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
        (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
        (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
        (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
        (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        /* NSEC_INFINITY marks "no sample taken yet", so nothing to save in that case. */
        if (u->cpu_usage_last != NSEC_INFINITY)
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                (void) serialize_item(f, "cgroup", u->cgroup_path);

        (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);

        if (uid_is_valid(u->ref_uid))
                (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        /* One "ref" line per tracked bus peer. */
        bus_track_serialize(u->bus_track, f, "ref");

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                /* Only metrics that could actually be read are written out. */
                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fputs("job\n", f);
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fputs("job\n", f);
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3282
/* Parses the serialization stream written by unit_serialize(): one "key=value" line at a time, until the empty
 * end-marker line or EOF. Known keys are applied to 'u' directly (parse failures are logged and ignored, so a
 * corrupt field never aborts deserialization); unknown keys are forwarded to the unit type's deserialize_item().
 * Returns 0 on success, negative errno on read errors, OOM, or job-deserialization failure. */
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                CGroupIPAccountingMetric m;
                char *l, *v;
                size_t k;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0) /* eof */
                        break;

                l = strstrip(line);
                if (isempty(l)) /* End marker */
                        break;

                /* Split "key=value"; if there is no '=', v points at the trailing NUL (empty value). */
                k = strcspn(l, "=");

                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;

                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* new-style serialized job */
                                Job *j;

                                j = job_new_raw(u);
                                if (!j)
                                        return log_oom();

                                r = job_deserialize(j, f);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = job_install_deserialized(j);
                                if (r < 0) {
                                        /* Undo the hashmap registration before freeing the job */
                                        hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
                                        job_free(j);
                                        return r;
                                }
                        } else  /* legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "state-change-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;

                        continue;

                } else if (streq(l, "assert-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;

                        continue;

                } else if (streq(l, "transient")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;

                        continue;

                } else if (streq(l, "in-audit")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
                        else
                                u->in_audit = r;

                        continue;

                } else if (streq(l, "exported-invocation-id")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
                        else
                                u->exported_invocation_id = r;

                        continue;

                } else if (streq(l, "exported-log-level-max")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
                        else
                                u->exported_log_level_max = r;

                        continue;

                } else if (streq(l, "exported-log-extra-fields")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
                        else
                                u->exported_log_extra_fields = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-interval")) {

                        /* These "exported-*" fields are booleans recording whether the value was already
                         * exported to /run, not the value itself. */
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_interval = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-burst")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_burst = r;

                        continue;

                } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {

                        /* "cpuacct-usage-base" is the legacy spelling of the same field. */
                        r = safe_atou64(v, &u->cpu_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cpu-usage-last")) {

                        r = safe_atou64(v, &u->cpu_usage_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cgroup")) {

                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);

                        /* Re-establish inotify watching of the cgroup after reload/reexec */
                        (void) unit_watch_cgroup(u);

                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;

                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;

                        continue;

                } else if (streq(l, "cgroup-realized-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_realized_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-enabled-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-invalidated-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "ref-uid")) {
                        uid_t uid;

                        r = parse_uid(v, &uid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, uid, GID_INVALID);

                        continue;

                } else if (streq(l, "ref-gid")) {
                        gid_t gid;

                        r = parse_gid(v, &gid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, UID_INVALID, gid);

                        continue;

                } else if (streq(l, "ref")) {

                        /* Bus name refs are collected here and re-registered later in unit_coldplug() */
                        r = strv_extend(&u->deserialized_refs, v);
                        if (r < 0)
                                return log_oom();

                        continue;
                } else if (streq(l, "invocation-id")) {
                        sd_id128_t id;

                        r = sd_id128_from_string(v, &id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
                        else {
                                r = unit_set_invocation_id(u, id);
                                if (r < 0)
                                        log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
                        }

                        continue;
                }

                /* Check if this is an IP accounting metric serialization field */
                for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
                        if (streq(l, ip_accounting_metric_field[m]))
                                break;
                if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
                        else
                                u->ip_accounting_extra[m] = c;
                        continue;
                }

                if (unit_can_serialize(u)) {
                        /* Compat path: older serializations stored exec runtime fields per-unit */
                        r = exec_runtime_deserialize_compat(u, l, v, fds);
                        if (r < 0) {
                                log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                continue;
                        }

                        /* Returns positive if key was handled by the call */
                        if (r > 0)
                                continue;

                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }

        /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
         * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
         * before 228 where the base for timeouts was not persistent across reboots. */

        if (!dual_timestamp_is_set(&u->state_change_timestamp))
                dual_timestamp_get(&u->state_change_timestamp);

        /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
         * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
        unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
        unit_invalidate_cgroup_bpf(u);

        return 0;
}
3602
3603 int unit_deserialize_skip(FILE *f) {
3604 int r;
3605 assert(f);
3606
3607 /* Skip serialized data for this unit. We don't know what it is. */
3608
3609 for (;;) {
3610 _cleanup_free_ char *line = NULL;
3611 char *l;
3612
3613 r = read_line(f, LONG_LINE_MAX, &line);
3614 if (r < 0)
3615 return log_error_errno(r, "Failed to read serialization line: %m");
3616 if (r == 0)
3617 return 0;
3618
3619 l = strstrip(line);
3620
3621 /* End marker */
3622 if (isempty(l))
3623 return 1;
3624 }
3625 }
3626
3627 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3628 Unit *device;
3629 _cleanup_free_ char *e = NULL;
3630 int r;
3631
3632 assert(u);
3633
3634 /* Adds in links to the device node that this unit is based on */
3635 if (isempty(what))
3636 return 0;
3637
3638 if (!is_device_path(what))
3639 return 0;
3640
3641 /* When device units aren't supported (such as in a
3642 * container), don't create dependencies on them. */
3643 if (!unit_type_supported(UNIT_DEVICE))
3644 return 0;
3645
3646 r = unit_name_from_path(what, ".device", &e);
3647 if (r < 0)
3648 return r;
3649
3650 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3651 if (r < 0)
3652 return r;
3653
3654 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3655 dep = UNIT_BINDS_TO;
3656
3657 r = unit_add_two_dependencies(u, UNIT_AFTER,
3658 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3659 device, true, mask);
3660 if (r < 0)
3661 return r;
3662
3663 if (wants) {
3664 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3665 if (r < 0)
3666 return r;
3667 }
3668
3669 return 0;
3670 }
3671
3672 int unit_coldplug(Unit *u) {
3673 int r = 0, q;
3674 char **i;
3675
3676 assert(u);
3677
3678 /* Make sure we don't enter a loop, when coldplugging recursively. */
3679 if (u->coldplugged)
3680 return 0;
3681
3682 u->coldplugged = true;
3683
3684 STRV_FOREACH(i, u->deserialized_refs) {
3685 q = bus_unit_track_add_name(u, *i);
3686 if (q < 0 && r >= 0)
3687 r = q;
3688 }
3689 u->deserialized_refs = strv_free(u->deserialized_refs);
3690
3691 if (UNIT_VTABLE(u)->coldplug) {
3692 q = UNIT_VTABLE(u)->coldplug(u);
3693 if (q < 0 && r >= 0)
3694 r = q;
3695 }
3696
3697 if (u->job) {
3698 q = job_coldplug(u->job);
3699 if (q < 0 && r >= 0)
3700 r = q;
3701 }
3702
3703 return r;
3704 }
3705
3706 void unit_catchup(Unit *u) {
3707 assert(u);
3708
3709 if (UNIT_VTABLE(u)->catchup)
3710 UNIT_VTABLE(u)->catchup(u);
3711 }
3712
3713 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3714 struct stat st;
3715
3716 if (!path)
3717 return false;
3718
3719 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3720 * are never out-of-date. */
3721 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3722 return false;
3723
3724 if (stat(path, &st) < 0)
3725 /* What, cannot access this anymore? */
3726 return true;
3727
3728 if (path_masked)
3729 /* For masked files check if they are still so */
3730 return !null_or_empty(&st);
3731 else
3732 /* For non-empty files check the mtime */
3733 return timespec_load(&st.st_mtim) > mtime;
3734
3735 return false;
3736 }
3737
3738 bool unit_need_daemon_reload(Unit *u) {
3739 _cleanup_strv_free_ char **t = NULL;
3740 char **path;
3741
3742 assert(u);
3743
3744 /* For unit files, we allow masking… */
3745 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3746 u->load_state == UNIT_MASKED))
3747 return true;
3748
3749 /* Source paths should not be masked… */
3750 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3751 return true;
3752
3753 if (u->load_state == UNIT_LOADED)
3754 (void) unit_find_dropin_paths(u, &t);
3755 if (!strv_equal(u->dropin_paths, t))
3756 return true;
3757
3758 /* … any drop-ins that are masked are simply omitted from the list. */
3759 STRV_FOREACH(path, u->dropin_paths)
3760 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3761 return true;
3762
3763 return false;
3764 }
3765
3766 void unit_reset_failed(Unit *u) {
3767 assert(u);
3768
3769 if (UNIT_VTABLE(u)->reset_failed)
3770 UNIT_VTABLE(u)->reset_failed(u);
3771
3772 RATELIMIT_RESET(u->start_limit);
3773 u->start_limit_hit = false;
3774 }
3775
3776 Unit *unit_following(Unit *u) {
3777 assert(u);
3778
3779 if (UNIT_VTABLE(u)->following)
3780 return UNIT_VTABLE(u)->following(u);
3781
3782 return NULL;
3783 }
3784
3785 bool unit_stop_pending(Unit *u) {
3786 assert(u);
3787
3788 /* This call does check the current state of the unit. It's
3789 * hence useful to be called from state change calls of the
3790 * unit itself, where the state isn't updated yet. This is
3791 * different from unit_inactive_or_pending() which checks both
3792 * the current state and for a queued job. */
3793
3794 return u->job && u->job->type == JOB_STOP;
3795 }
3796
3797 bool unit_inactive_or_pending(Unit *u) {
3798 assert(u);
3799
3800 /* Returns true if the unit is inactive or going down */
3801
3802 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3803 return true;
3804
3805 if (unit_stop_pending(u))
3806 return true;
3807
3808 return false;
3809 }
3810
3811 bool unit_active_or_pending(Unit *u) {
3812 assert(u);
3813
3814 /* Returns true if the unit is active or going up */
3815
3816 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3817 return true;
3818
3819 if (u->job &&
3820 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3821 return true;
3822
3823 return false;
3824 }
3825
3826 bool unit_will_restart(Unit *u) {
3827 assert(u);
3828
3829 if (!UNIT_VTABLE(u)->will_restart)
3830 return false;
3831
3832 return UNIT_VTABLE(u)->will_restart(u);
3833 }
3834
3835 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3836 assert(u);
3837 assert(w >= 0 && w < _KILL_WHO_MAX);
3838 assert(SIGNAL_VALID(signo));
3839
3840 if (!UNIT_VTABLE(u)->kill)
3841 return -EOPNOTSUPP;
3842
3843 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3844 }
3845
3846 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3847 _cleanup_set_free_ Set *pid_set = NULL;
3848 int r;
3849
3850 pid_set = set_new(NULL);
3851 if (!pid_set)
3852 return NULL;
3853
3854 /* Exclude the main/control pids from being killed via the cgroup */
3855 if (main_pid > 0) {
3856 r = set_put(pid_set, PID_TO_PTR(main_pid));
3857 if (r < 0)
3858 return NULL;
3859 }
3860
3861 if (control_pid > 0) {
3862 r = set_put(pid_set, PID_TO_PTR(control_pid));
3863 if (r < 0)
3864 return NULL;
3865 }
3866
3867 return TAKE_PTR(pid_set);
3868 }
3869
3870 int unit_kill_common(
3871 Unit *u,
3872 KillWho who,
3873 int signo,
3874 pid_t main_pid,
3875 pid_t control_pid,
3876 sd_bus_error *error) {
3877
3878 int r = 0;
3879 bool killed = false;
3880
3881 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3882 if (main_pid < 0)
3883 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3884 else if (main_pid == 0)
3885 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3886 }
3887
3888 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3889 if (control_pid < 0)
3890 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3891 else if (control_pid == 0)
3892 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3893 }
3894
3895 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3896 if (control_pid > 0) {
3897 if (kill(control_pid, signo) < 0)
3898 r = -errno;
3899 else
3900 killed = true;
3901 }
3902
3903 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3904 if (main_pid > 0) {
3905 if (kill(main_pid, signo) < 0)
3906 r = -errno;
3907 else
3908 killed = true;
3909 }
3910
3911 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3912 _cleanup_set_free_ Set *pid_set = NULL;
3913 int q;
3914
3915 /* Exclude the main/control pids from being killed via the cgroup */
3916 pid_set = unit_pid_set(main_pid, control_pid);
3917 if (!pid_set)
3918 return -ENOMEM;
3919
3920 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3921 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3922 r = q;
3923 else
3924 killed = true;
3925 }
3926
3927 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3928 return -ESRCH;
3929
3930 return r;
3931 }
3932
3933 int unit_following_set(Unit *u, Set **s) {
3934 assert(u);
3935 assert(s);
3936
3937 if (UNIT_VTABLE(u)->following_set)
3938 return UNIT_VTABLE(u)->following_set(u, s);
3939
3940 *s = NULL;
3941 return 0;
3942 }
3943
3944 UnitFileState unit_get_unit_file_state(Unit *u) {
3945 int r;
3946
3947 assert(u);
3948
3949 if (u->unit_file_state < 0 && u->fragment_path) {
3950 r = unit_file_get_state(
3951 u->manager->unit_file_scope,
3952 NULL,
3953 u->id,
3954 &u->unit_file_state);
3955 if (r < 0)
3956 u->unit_file_state = UNIT_FILE_BAD;
3957 }
3958
3959 return u->unit_file_state;
3960 }
3961
3962 int unit_get_unit_file_preset(Unit *u) {
3963 assert(u);
3964
3965 if (u->unit_file_preset < 0 && u->fragment_path)
3966 u->unit_file_preset = unit_file_query_preset(
3967 u->manager->unit_file_scope,
3968 NULL,
3969 basename(u->fragment_path));
3970
3971 return u->unit_file_preset;
3972 }
3973
3974 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3975 assert(ref);
3976 assert(source);
3977 assert(target);
3978
3979 if (ref->target)
3980 unit_ref_unset(ref);
3981
3982 ref->source = source;
3983 ref->target = target;
3984 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3985 return target;
3986 }
3987
3988 void unit_ref_unset(UnitRef *ref) {
3989 assert(ref);
3990
3991 if (!ref->target)
3992 return;
3993
3994 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3995 * be unreferenced now. */
3996 unit_add_to_gc_queue(ref->target);
3997
3998 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3999 ref->source = ref->target = NULL;
4000 }
4001
4002 static int user_from_unit_name(Unit *u, char **ret) {
4003
4004 static const uint8_t hash_key[] = {
4005 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4006 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4007 };
4008
4009 _cleanup_free_ char *n = NULL;
4010 int r;
4011
4012 r = unit_name_to_prefix(u->id, &n);
4013 if (r < 0)
4014 return r;
4015
4016 if (valid_user_group_name(n)) {
4017 *ret = TAKE_PTR(n);
4018 return 0;
4019 }
4020
4021 /* If we can't use the unit name as a user name, then let's hash it and use that */
4022 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4023 return -ENOMEM;
4024
4025 return 0;
4026 }
4027
/* Patches manager-level defaults and implied settings into the unit's exec and cgroup contexts, after the unit's
 * own configuration has been loaded. Returns 0 on success, negative errno on allocation failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to the user's home */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping device-creation capabilities */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Synthesize user/group names from the unit name if unset */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens the default device policy */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4119
4120 ExecContext *unit_get_exec_context(Unit *u) {
4121 size_t offset;
4122 assert(u);
4123
4124 if (u->type < 0)
4125 return NULL;
4126
4127 offset = UNIT_VTABLE(u)->exec_context_offset;
4128 if (offset <= 0)
4129 return NULL;
4130
4131 return (ExecContext*) ((uint8_t*) u + offset);
4132 }
4133
4134 KillContext *unit_get_kill_context(Unit *u) {
4135 size_t offset;
4136 assert(u);
4137
4138 if (u->type < 0)
4139 return NULL;
4140
4141 offset = UNIT_VTABLE(u)->kill_context_offset;
4142 if (offset <= 0)
4143 return NULL;
4144
4145 return (KillContext*) ((uint8_t*) u + offset);
4146 }
4147
4148 CGroupContext *unit_get_cgroup_context(Unit *u) {
4149 size_t offset;
4150
4151 if (u->type < 0)
4152 return NULL;
4153
4154 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4155 if (offset <= 0)
4156 return NULL;
4157
4158 return (CGroupContext*) ((uint8_t*) u + offset);
4159 }
4160
4161 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4162 size_t offset;
4163
4164 if (u->type < 0)
4165 return NULL;
4166
4167 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4168 if (offset <= 0)
4169 return NULL;
4170
4171 return *(ExecRuntime**) ((uint8_t*) u + offset);
4172 }
4173
4174 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4175 assert(u);
4176
4177 if (UNIT_WRITE_FLAGS_NOOP(flags))
4178 return NULL;
4179
4180 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4181 return u->manager->lookup_paths.transient;
4182
4183 if (flags & UNIT_PERSISTENT)
4184 return u->manager->lookup_paths.persistent_control;
4185
4186 if (flags & UNIT_RUNTIME)
4187 return u->manager->lookup_paths.runtime_control;
4188
4189 return NULL;
4190 }
4191
4192 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4193 char *ret = NULL;
4194
4195 if (!s)
4196 return NULL;
4197
4198 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4199 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4200 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4201 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4202 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4203 * allocations. */
4204
4205 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4206 ret = specifier_escape(s);
4207 if (!ret)
4208 return NULL;
4209
4210 s = ret;
4211 }
4212
4213 if (flags & UNIT_ESCAPE_C) {
4214 char *a;
4215
4216 a = cescape(s);
4217 free(ret);
4218 if (!a)
4219 return NULL;
4220
4221 ret = a;
4222 }
4223
4224 if (buf) {
4225 *buf = ret;
4226 return ret ?: (char*) s;
4227 }
4228
4229 return ret ?: strdup(s);
4230 }
4231
/* Takes a list of strings, escapes each via unit_escape_setting(), wraps each in double quotes and joins them
 * with spaces. Returns a newly allocated string, or NULL on allocation/escape failure. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append: [space]"entry" at offset n */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* NUL-terminate (also yields "" for an empty/NULL list) */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4272
/* Persists a unit setting: while a transient unit is being created the setting is appended to its transient
 * unit file; otherwise a 50-<name>.conf drop-in is written to the directory selected by 'flags'. 'data' is
 * escaped per 'flags' and prefixed with the appropriate section header. Returns 0 on success, negative errno
 * on failure (-EINVAL when no writable destination applies). */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private: < 0 = nothing written yet, 0 = [Unit] was last, > 0 = private section was last */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Register the new drop-in path with the unit; ownership of q moves to the strv */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        /* Mark the drop-ins as up-to-date so no daemon-reload is prompted for this change */
        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4349
4350 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4351 _cleanup_free_ char *p = NULL;
4352 va_list ap;
4353 int r;
4354
4355 assert(u);
4356 assert(name);
4357 assert(format);
4358
4359 if (UNIT_WRITE_FLAGS_NOOP(flags))
4360 return 0;
4361
4362 va_start(ap, format);
4363 r = vasprintf(&p, format, ap);
4364 va_end(ap);
4365
4366 if (r < 0)
4367 return -ENOMEM;
4368
4369 return unit_write_setting(u, flags, name, p);
4370 }
4371
/* Converts the unit into a transient one: opens a fresh transient unit file for writing (kept open in
 * u->transient_file until unit_load()), resets all fragment/source/drop-in state and re-queues the unit.
 * Returns 0 on success, -EOPNOTSUPP if the unit type does not support transient units, negative errno on
 * file-system errors. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        /* Best-effort: creation of the file below will fail anyway if this didn't work */
        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* The transient file becomes the unit's fragment */
        free_and_replace(u->fragment_path, path);

        /* Drop any stale configuration sources; the transient file is now the only source of truth */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4417
/* cg_kill_log_func_t callback: logs (at notice level, against the unit passed as userdata) which process is
 * about to receive which signal. */
static void log_kill(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
           only, like for example systemd's own PAM stub process. */
        if (comm && comm[0] == '(')
                return;

        log_unit_notice(userdata,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid,
                        strna(comm),
                        signal_to_string(sig));
}
4434
4435 static int operation_to_signal(KillContext *c, KillOperation k) {
4436 assert(c);
4437
4438 switch (k) {
4439
4440 case KILL_TERMINATE:
4441 case KILL_TERMINATE_AND_LOG:
4442 return c->kill_signal;
4443
4444 case KILL_KILL:
4445 return c->final_kill_signal;
4446
4447 case KILL_WATCHDOG:
4448 return c->watchdog_signal;
4449
4450 default:
4451 assert_not_reached("KillOperation unknown");
4452 }
4453 }
4454
/* Kills the processes belonging to this unit according to the kill context 'c' and operation 'k': signals the
 * main and control processes (with SIGCONT chaser), optionally follows up with SIGHUP, and — for
 * KillMode=control-group (or =mixed on final kill) — sweeps the whole cgroup. Returns > 0 if something worth
 * waiting for was killed, 0 otherwise, negative errno on hard errors (OOM). */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= only applies to the terminate operations, and is pointless if the chosen signal already
         * is SIGHUP */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process except for plain, expected SIGTERM terminations */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Alien (reattached) main processes are not ours to wait for */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set, as the first sweep consumed it */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4572
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private copy, since we simplify it in place below. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of the key string (which is 'p'), hence
         * disarm the cleanup handler below. */
        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL;

        /* Now register this unit in the global prefix table, once per path prefix. */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        /* Hashmap takes ownership of 'q' on success, hence q = NULL below. */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4650
4651 int unit_setup_exec_runtime(Unit *u) {
4652 ExecRuntime **rt;
4653 size_t offset;
4654 Unit *other;
4655 Iterator i;
4656 void *v;
4657 int r;
4658
4659 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4660 assert(offset > 0);
4661
4662 /* Check if there already is an ExecRuntime for this unit? */
4663 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4664 if (*rt)
4665 return 0;
4666
4667 /* Try to get it from somebody else */
4668 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4669 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4670 if (r == 1)
4671 return 1;
4672 }
4673
4674 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4675 }
4676
4677 int unit_setup_dynamic_creds(Unit *u) {
4678 ExecContext *ec;
4679 DynamicCreds *dcreds;
4680 size_t offset;
4681
4682 assert(u);
4683
4684 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4685 assert(offset > 0);
4686 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4687
4688 ec = unit_get_exec_context(u);
4689 assert(ec);
4690
4691 if (!ec->dynamic_user)
4692 return 0;
4693
4694 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4695 }
4696
4697 bool unit_type_supported(UnitType t) {
4698 if (_unlikely_(t < 0))
4699 return false;
4700 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4701 return false;
4702
4703 if (!unit_vtable[t]->supported)
4704 return true;
4705
4706 return unit_vtable[t]->supported();
4707 }
4708
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        /* Logs a structured notice if the directory we are about to mount over is not empty.
         * Best-effort: a non-directory or an empty directory is silently accepted. */
        r = dir_is_empty(where);
        if (r > 0 || r == -ENOTDIR)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where);
}
4730
4731 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4732 _cleanup_free_ char *canonical_where;
4733 int r;
4734
4735 assert(u);
4736 assert(where);
4737
4738 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4739 if (r < 0) {
4740 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4741 return 0;
4742 }
4743
4744 /* We will happily ignore a trailing slash (or any redundant slashes) */
4745 if (path_equal(where, canonical_where))
4746 return 0;
4747
4748 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4749 log_struct(LOG_ERR,
4750 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4751 LOG_UNIT_ID(u),
4752 LOG_UNIT_INVOCATION_ID(u),
4753 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4754 "WHERE=%s", where);
4755
4756 return -ELOOP;
4757 }
4758
4759 bool unit_is_pristine(Unit *u) {
4760 assert(u);
4761
4762 /* Check if the unit already exists or is already around,
4763 * in a number of different ways. Note that to cater for unit
4764 * types such as slice, we are generally fine with units that
4765 * are marked UNIT_LOADED even though nothing was actually
4766 * loaded, as those unit types don't require a file on disk. */
4767
4768 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4769 u->fragment_path ||
4770 u->source_path ||
4771 !strv_isempty(u->dropin_paths) ||
4772 u->job ||
4773 u->merged_into);
4774 }
4775
4776 pid_t unit_control_pid(Unit *u) {
4777 assert(u);
4778
4779 if (UNIT_VTABLE(u)->control_pid)
4780 return UNIT_VTABLE(u)->control_pid(u);
4781
4782 return 0;
4783 }
4784
4785 pid_t unit_main_pid(Unit *u) {
4786 assert(u);
4787
4788 if (UNIT_VTABLE(u)->main_pid)
4789 return UNIT_VTABLE(u)->main_pid(u);
4790
4791 return 0;
4792 }
4793
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same time, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time check of the uid_t/gid_t interchangeability assumption above. */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference held, nothing to drop. */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4818
/* Drops this unit's UID reference, optionally destroying the UID's IPC objects right away. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4822
/* Drops this unit's GID reference; reuses the UID code path, relying on uid_t == gid_t layout. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4826
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero.
         *
         * Returns 1 if a new reference was taken, 0 if the same UID/GID was already referenced,
         * -EBUSY if a different one is already referenced, or the manager callback's error. */

        /* Compile-time check of the uid_t/gid_t interchangeability assumption above. */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4864
/* Takes a reference on the given UID for this unit. See unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4868
/* Takes a reference on the given GID for this unit; reuses the UID code path (uid_t == gid_t layout). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4872
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither.
         * Invalid UID/GID arguments are skipped, not treated as errors.
         * Returns > 0 if at least one new reference was taken. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference taken above so that we leave no
                         * half-applied state behind. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
4898
4899 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4900 ExecContext *c;
4901 int r;
4902
4903 assert(u);
4904
4905 c = unit_get_exec_context(u);
4906
4907 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4908 if (r < 0)
4909 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4910
4911 return r;
4912 }
4913
/* Drops both the UID and GID references of this unit in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4920
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        /* Only send a change signal when a new reference was actually taken (> 0). */
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4934
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        /* Already set to this very ID? Then there's nothing to do. */
        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Deregister the old ID from the manager's lookup table first. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear the invocation ID". */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Shared exit path for both the deliberate clear and the failure cases: leave the unit
         * with no invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4971
/* Generates a fresh random invocation ID and installs it on the unit. Errors are logged. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
4988
/* Fills in the ExecParameters structure with settings derived from the manager and the unit. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        return 0;
}
5011
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* Everything below runs in the child only. */

        /* Reset signal dispositions inherited from the manager, and make sure we die when
         * the manager does. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        /* Move the child into the unit's cgroup; failing to do so is fatal for the child. */
        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5042
/* Writes back a (possibly reduced) dependency mask for the u→other edge of type d, removing the
 * hashmap entry entirely when both masks dropped to zero. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
5057
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are
                 * iterating, which invalidates the iterator. Hence, after each modification we
                 * break out and restart the iteration from scratch until a full pass makes no
                 * changes. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask has no bits in 'mask'. */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become unreferenced, let GC decide. */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5111
5112 static int unit_export_invocation_id(Unit *u) {
5113 const char *p;
5114 int r;
5115
5116 assert(u);
5117
5118 if (u->exported_invocation_id)
5119 return 0;
5120
5121 if (sd_id128_is_null(u->invocation_id))
5122 return 0;
5123
5124 p = strjoina("/run/systemd/units/invocation:", u->id);
5125 r = symlink_atomic(u->invocation_id_string, p);
5126 if (r < 0)
5127 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5128
5129 u->exported_invocation_id = true;
5130 return 0;
5131 }
5132
5133 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5134 const char *p;
5135 char buf[2];
5136 int r;
5137
5138 assert(u);
5139 assert(c);
5140
5141 if (u->exported_log_level_max)
5142 return 0;
5143
5144 if (c->log_level_max < 0)
5145 return 0;
5146
5147 assert(c->log_level_max <= 7);
5148
5149 buf[0] = '0' + c->log_level_max;
5150 buf[1] = 0;
5151
5152 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5153 r = symlink_atomic(buf, p);
5154 if (r < 0)
5155 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5156
5157 u->exported_log_level_max = true;
5158 return 0;
5159 }
5160
5161 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5162 _cleanup_close_ int fd = -1;
5163 struct iovec *iovec;
5164 const char *p;
5165 char *pattern;
5166 le64_t *sizes;
5167 ssize_t n;
5168 size_t i;
5169 int r;
5170
5171 if (u->exported_log_extra_fields)
5172 return 0;
5173
5174 if (c->n_log_extra_fields <= 0)
5175 return 0;
5176
5177 sizes = newa(le64_t, c->n_log_extra_fields);
5178 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5179
5180 for (i = 0; i < c->n_log_extra_fields; i++) {
5181 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5182
5183 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5184 iovec[i*2+1] = c->log_extra_fields[i];
5185 }
5186
5187 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5188 pattern = strjoina(p, ".XXXXXX");
5189
5190 fd = mkostemp_safe(pattern);
5191 if (fd < 0)
5192 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5193
5194 n = writev(fd, iovec, c->n_log_extra_fields*2);
5195 if (n < 0) {
5196 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5197 goto fail;
5198 }
5199
5200 (void) fchmod(fd, 0644);
5201
5202 if (rename(pattern, p) < 0) {
5203 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5204 goto fail;
5205 }
5206
5207 u->exported_log_extra_fields = true;
5208 return 0;
5209
5210 fail:
5211 (void) unlink(pattern);
5212 return r;
5213 }
5214
5215 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5216 _cleanup_free_ char *buf = NULL;
5217 const char *p;
5218 int r;
5219
5220 assert(u);
5221 assert(c);
5222
5223 if (u->exported_log_rate_limit_interval)
5224 return 0;
5225
5226 if (c->log_rate_limit_interval_usec == 0)
5227 return 0;
5228
5229 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5230
5231 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5232 return log_oom();
5233
5234 r = symlink_atomic(buf, p);
5235 if (r < 0)
5236 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5237
5238 u->exported_log_rate_limit_interval = true;
5239 return 0;
5240 }
5241
5242 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5243 _cleanup_free_ char *buf = NULL;
5244 const char *p;
5245 int r;
5246
5247 assert(u);
5248 assert(c);
5249
5250 if (u->exported_log_rate_limit_burst)
5251 return 0;
5252
5253 if (c->log_rate_limit_burst == 0)
5254 return 0;
5255
5256 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5257
5258 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5259 return log_oom();
5260
5261 r = symlink_atomic(buf, p);
5262 if (r < 0)
5263 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5264
5265 u->exported_log_rate_limit_burst = true;
5266 return 0;
5267 }
5268
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        /* Without an ID there is no file name to export under. */
        if (!u->id)
                return;

        /* Only the system instance exports state; user managers don't. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        /* All exports are best-effort; failures are logged by the helpers themselves. */
        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_rate_limit_interval(u, c);
                (void) unit_export_log_rate_limit_burst(u, c);
        }
}
5306
5307 void unit_unlink_state_files(Unit *u) {
5308 const char *p;
5309
5310 assert(u);
5311
5312 if (!u->id)
5313 return;
5314
5315 if (!MANAGER_IS_SYSTEM(u->manager))
5316 return;
5317
5318 /* Undoes the effect of unit_export_state() */
5319
5320 if (u->exported_invocation_id) {
5321 p = strjoina("/run/systemd/units/invocation:", u->id);
5322 (void) unlink(p);
5323
5324 u->exported_invocation_id = false;
5325 }
5326
5327 if (u->exported_log_level_max) {
5328 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5329 (void) unlink(p);
5330
5331 u->exported_log_level_max = false;
5332 }
5333
5334 if (u->exported_log_extra_fields) {
5335 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5336 (void) unlink(p);
5337
5338 u->exported_log_extra_fields = false;
5339 }
5340
5341 if (u->exported_log_rate_limit_interval) {
5342 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5343 (void) unlink(p);
5344
5345 u->exported_log_rate_limit_interval = false;
5346 }
5347
5348 if (u->exported_log_rate_limit_burst) {
5349 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5350 (void) unlink(p);
5351
5352 u->exported_log_rate_limit_burst = false;
5353 }
5354 }
5355
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset accounting counters once, if a reset was requested since the last start. */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5383
/* cg_kill_log_func_t callback: warns about a process found lingering in the unit's cgroup.
 * 'sig' is unused here; 'userdata' is the Unit. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5397
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        /* Signal 0 means nothing is actually killed; each PID found in the cgroup is merely
         * reported through the log_leftover() callback. */
        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5408
5409 bool unit_needs_console(Unit *u) {
5410 ExecContext *ec;
5411 UnitActiveState state;
5412
5413 assert(u);
5414
5415 state = unit_active_state(u);
5416
5417 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5418 return false;
5419
5420 if (UNIT_VTABLE(u)->needs_console)
5421 return UNIT_VTABLE(u)->needs_console(u);
5422
5423 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5424 ec = unit_get_exec_context(u);
5425 if (!ec)
5426 return false;
5427
5428 return exec_context_may_touch_console(ec);
5429 }
5430
5431 const char *unit_label_path(Unit *u) {
5432 const char *p;
5433
5434 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5435 * when validating access checks. */
5436
5437 p = u->source_path ?: u->fragment_path;
5438 if (!p)
5439 return NULL;
5440
5441 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5442 if (path_equal(p, "/dev/null"))
5443 return NULL;
5444
5445 return p;
5446 }
5447
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if attachable, or sets 'error' and returns a
         * D-Bus error otherwise. */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5475
/* Emits the structured "Succeeded." journal entry for this unit. */
void unit_log_success(Unit *u) {
        assert(u);

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Succeeded."));
}
5485
/* Emits the structured failure journal entry for this unit, recording the result string
 * (e.g. "timeout", "exit-code") in the UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_struct(LOG_WARNING,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                   "UNIT_RESULT=%s", result);
}
5497
/* Emits a structured journal entry about a process of this unit exiting.
 * 'kind' names the process role (e.g. "Main process"), 'code' is the SIGCHLD code
 * (CLD_EXITED, CLD_KILLED, ...) and 'status' is the exit status or signal number. */
void unit_log_process_exit(
                Unit *u,
                int level,
                const char *kind,
                const char *command,
                int code,
                int status) {

        assert(u);
        assert(kind);

        /* Abnormal deaths (signal, coredump, ...) are always logged at warning level,
         * regardless of the level the caller requested. */
        if (code != CLD_EXITED)
                level = LOG_WARNING;

        log_struct(level,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                   LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
                                    kind,
                                    sigchld_code_to_string(code), status,
                                    strna(code == CLD_EXITED
                                          ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                          : signal_to_string(status))),
                   "EXIT_CODE=%s", sigchld_code_to_string(code),
                   "EXIT_STATUS=%i", status,
                   "COMMAND=%s", strna(command),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u));
}
5526
5527 int unit_exit_status(Unit *u) {
5528 assert(u);
5529
5530 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5531 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5532 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5533 * service process has exited abnormally (signal/coredump). */
5534
5535 if (!UNIT_VTABLE(u)->exit_status)
5536 return -EOPNOTSUPP;
5537
5538 return UNIT_VTABLE(u)->exit_status(u);
5539 }
5540
5541 int unit_failure_action_exit_status(Unit *u) {
5542 int r;
5543
5544 assert(u);
5545
5546 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5547
5548 if (u->failure_action_exit_status >= 0)
5549 return u->failure_action_exit_status;
5550
5551 r = unit_exit_status(u);
5552 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5553 return 255;
5554
5555 return r;
5556 }
5557
5558 int unit_success_action_exit_status(Unit *u) {
5559 int r;
5560
5561 assert(u);
5562
5563 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5564
5565 if (u->success_action_exit_status >= 0)
5566 return u->success_action_exit_status;
5567
5568 r = unit_exit_status(u);
5569 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5570 return 255;
5571
5572 return r;
5573 }
5574
/* Human-readable names for CollectMode values, used by the generated
 * collect_mode_to_string()/collect_mode_from_string() lookup functions below. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);