]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #10094 from keszybz/wants-loading
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "alloc-util.h"
14 #include "all-units.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "id128-util.h"
28 #include "io-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
31 #include "log.h"
32 #include "macro.h"
33 #include "missing.h"
34 #include "mkdir.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "set.h"
39 #include "signal-util.h"
40 #include "sparse-endian.h"
41 #include "special.h"
42 #include "specifier.h"
43 #include "stat-util.h"
44 #include "stdio-util.h"
45 #include "string-table.h"
46 #include "string-util.h"
47 #include "strv.h"
48 #include "umask-util.h"
49 #include "unit-name.h"
50 #include "unit.h"
51 #include "user-util.h"
52 #include "virt.h"
53
/* Dispatch table mapping each unit type to its implementation vtable, indexed
 * by the UnitType enum. Each vtable is defined in the corresponding
 * type-specific source file (service.c, socket.c, …). */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
67
68 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
69
/* Allocates and zero-initializes a new Unit object of 'size' bytes ('size' is
 * the size of the type-specific subtype structure, which embeds Unit at the
 * start, hence the >= sizeof(Unit) assertion). Fields whose "unset" value is
 * not zero are initialized explicitly below. Returns NULL on OOM. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        /* Non-zero "invalid"/"unset" defaults: type and file state start out
         * invalid, and the various fds/watch descriptors start out as -1. */
        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limit uses the manager-wide defaults; the auto-stop ratelimit
         * is hard-coded to 16 events per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
112
113 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
114 _cleanup_(unit_freep) Unit *u = NULL;
115 int r;
116
117 u = unit_new(m, size);
118 if (!u)
119 return -ENOMEM;
120
121 r = unit_add_name(u, name);
122 if (r < 0)
123 return r;
124
125 *ret = TAKE_PTR(u);
126
127 return r;
128 }
129
130 bool unit_has_name(Unit *u, const char *name) {
131 assert(u);
132 assert(name);
133
134 return set_contains(u->names, (char*) name);
135 }
136
137 static void unit_init(Unit *u) {
138 CGroupContext *cc;
139 ExecContext *ec;
140 KillContext *kc;
141
142 assert(u);
143 assert(u->manager);
144 assert(u->type >= 0);
145
146 cc = unit_get_cgroup_context(u);
147 if (cc) {
148 cgroup_context_init(cc);
149
150 /* Copy in the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
153
154 cc->cpu_accounting = u->manager->default_cpu_accounting;
155 cc->io_accounting = u->manager->default_io_accounting;
156 cc->ip_accounting = u->manager->default_ip_accounting;
157 cc->blockio_accounting = u->manager->default_blockio_accounting;
158 cc->memory_accounting = u->manager->default_memory_accounting;
159 cc->tasks_accounting = u->manager->default_tasks_accounting;
160 cc->ip_accounting = u->manager->default_ip_accounting;
161
162 if (u->type != UNIT_SLICE)
163 cc->tasks_max = u->manager->default_tasks_max;
164 }
165
166 ec = unit_get_exec_context(u);
167 if (ec) {
168 exec_context_init(ec);
169
170 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
171 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
172 }
173
174 kc = unit_get_kill_context(u);
175 if (kc)
176 kill_context_init(kc);
177
178 if (UNIT_VTABLE(u)->init)
179 UNIT_VTABLE(u)->init(u);
180 }
181
/* Registers an additional name on the unit. Template names are instantiated
 * with the unit's own instance string first. Validates that the name is
 * well-formed, of the same type as the unit, and consistent with the unit's
 * instanced-ness, then records it both in u->names and in the manager's
 * global unit hashmap. If this is the very first name, it also fixes the
 * unit's type/id/instance and runs type-specific initialization.
 *
 * Returns 0 on success (or if the name was already registered on this unit),
 * -EEXIST if the name belongs to another unit or aliasing is not allowed,
 * -EINVAL for malformed/mismatched names, -E2BIG when the manager's name
 * limit is reached, or another negative errno-style code. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be resolved if we know our instance. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Then there is nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Taken by some other unit? Then refuse. */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of a unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        /* From here on 's' is installed into the data structures; on the
         * success path its ownership passes to u->names (see s = NULL below). */
        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so 's' is freed exactly once
                 * by the cleanup handler. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name: this determines the unit's identity. */
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' now rests with u->names; disarm the cleanup. */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
264
/* Makes 'name' (which must already be one of the unit's registered names) the
 * unit's primary id, and re-derives u->instance from it. Template names are
 * instantiated with the unit's instance string first. Returns -ENOENT if the
 * name is not registered on this unit. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* 's' is owned by the names set, so u->id simply borrows it. */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
304
305 int unit_set_description(Unit *u, const char *description) {
306 int r;
307
308 assert(u);
309
310 r = free_and_strdup(&u->description, empty_to_null(description));
311 if (r < 0)
312 return r;
313 if (r > 0)
314 unit_add_to_dbus_queue(u);
315
316 return 0;
317 }
318
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending or queued jobs always pin the unit. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units (e.g. -.slice) are never collected. */
        if (u->perpetual)
                return false;

        /* Bus clients still tracking this unit keep it pinned. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* On error (r < 0) we conservatively treat the cgroup as busy. */
                if (r <= 0)
                        return false;
        }

        /* Finally give the type-specific implementation a veto. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
387
388 void unit_add_to_load_queue(Unit *u) {
389 assert(u);
390 assert(u->type != _UNIT_TYPE_INVALID);
391
392 if (u->load_state != UNIT_STUB || u->in_load_queue)
393 return;
394
395 LIST_PREPEND(load_queue, u->manager->load_queue, u);
396 u->in_load_queue = true;
397 }
398
399 void unit_add_to_cleanup_queue(Unit *u) {
400 assert(u);
401
402 if (u->in_cleanup_queue)
403 return;
404
405 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
406 u->in_cleanup_queue = true;
407 }
408
409 void unit_add_to_gc_queue(Unit *u) {
410 assert(u);
411
412 if (u->in_gc_queue || u->in_cleanup_queue)
413 return;
414
415 if (!unit_may_gc(u))
416 return;
417
418 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
419 u->in_gc_queue = true;
420 }
421
422 void unit_add_to_dbus_queue(Unit *u) {
423 assert(u);
424 assert(u->type != _UNIT_TYPE_INVALID);
425
426 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
427 return;
428
429 /* Shortcut things if nobody cares */
430 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
431 sd_bus_track_count(u->bus_track) <= 0 &&
432 set_isempty(u->manager->private_buses)) {
433 u->sent_dbus_new_signal = true;
434 return;
435 }
436
437 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
438 u->in_dbus_queue = true;
439 }
440
441 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
442 assert(u);
443
444 if (u->in_stop_when_unneeded_queue)
445 return;
446
447 if (!u->stop_when_unneeded)
448 return;
449
450 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
451 return;
452
453 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
454 u->in_stop_when_unneeded_queue = true;
455 }
456
/* Frees one of u's dependency hashmaps ('h'), after first removing u from the
 * reverse dependency maps of every unit listed in it, so no dangling back
 * pointers remain. Each formerly-dependent unit may have become collectable,
 * so it is re-queued for GC. */
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* We do not know which reverse dependency types point back at
                 * us, so scrub u from all of them. */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
477
/* Deletes the on-disk configuration of a transient unit: its fragment file and
 * any drop-in files that live under the manager's transient lookup path. No-op
 * for non-transient units. All unlink/rmdir failures are deliberately ignored
 * (best effort). */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                /* Remove the file, then try to remove its (possibly now empty)
                 * directory; rmdir fails harmlessly if other drop-ins remain. */
                (void) unlink(*i);
                (void) rmdir(p);
        }
}
508
/* Tears down the unit's RequiresMountsFor= bookkeeping: for every recorded
 * path, removes the unit from the manager's per-path-prefix reverse index
 * (units_requiring_mounts_for), freeing index entries that become empty, and
 * finally frees the unit's own hashmap. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Steal keys one by one; we own each stolen key and free it
                 * via the cleanup attribute at the end of each iteration. */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        /* VLA scratch buffer for iterating the path's prefixes
                         * ("/", "/foo", "/foo/bar", ...). */
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y receives the index's own copy of the key,
                                 * which we must free if we drop the entry. */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
542
543 static void unit_done(Unit *u) {
544 ExecContext *ec;
545 CGroupContext *cc;
546
547 assert(u);
548
549 if (u->type < 0)
550 return;
551
552 if (UNIT_VTABLE(u)->done)
553 UNIT_VTABLE(u)->done(u);
554
555 ec = unit_get_exec_context(u);
556 if (ec)
557 exec_context_done(ec);
558
559 cc = unit_get_cgroup_context(u);
560 if (cc)
561 cgroup_context_done(cc);
562 }
563
/* Destroys a unit: detaches it from every manager data structure (name maps,
 * job slots, dependency maps, run queues), releases all owned resources
 * (cgroup, fds, BPF programs, strings) and finally frees the object itself.
 * Safe to call with NULL. The statement order below is significant: e.g.
 * names must still be registered while removing them from the manager's
 * hashmap, and queue-membership flags are checked before each LIST_REMOVE. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* During a daemon reload transient config files must survive. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop every one of our names from the manager's global unit map. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any jobs still attached to us. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Free all dependency maps, scrubbing reverse pointers as we go. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Drop our reference on the slice, and break all references others
         * hold on us (each unit_ref_unset() unlinks one entry from the list). */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Unlink from every manager work queue we might still sit on. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close the IP accounting / firewall BPF map fds (safe on -1). */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* This frees u->id too, since the id string is owned by the set. */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
687
688 UnitActiveState unit_active_state(Unit *u) {
689 assert(u);
690
691 if (u->load_state == UNIT_MERGED)
692 return unit_active_state(unit_follow_merge(u));
693
694 /* After a reload it might happen that a unit is not correctly
695 * loaded but still has a process around. That's why we won't
696 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
697
698 return UNIT_VTABLE(u)->active_state(u);
699 }
700
/* Returns the human-readable name of the unit's type-specific substate
 * (e.g. "running", "listening"); purely a vtable delegation. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
706
707 static int set_complete_move(Set **s, Set **other) {
708 assert(s);
709 assert(other);
710
711 if (!other)
712 return 0;
713
714 if (*s)
715 return set_move(*s, *other);
716 else
717 *s = TAKE_PTR(*other);
718
719 return 0;
720 }
721
722 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
723 assert(s);
724 assert(other);
725
726 if (!*other)
727 return 0;
728
729 if (*s)
730 return hashmap_move(*s, *other);
731 else
732 *s = TAKE_PTR(*other);
733
734 return 0;
735 }
736
/* Transfers all of other's names into u's name set, and repoints each name in
 * the manager's global unit map at u. After this, other owns no names at all
 * (its id is cleared along with the set). */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* If set_complete_move() fell back to set_move(), other->names may
         * still exist (empty, or holding duplicates); drop it either way. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* hashmap_replace() == 0 here because every name is already present
         * as a key; only the value (the owning unit) changes. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
758
/* Pre-allocates space in u's dependency hashmap of type 'd' so the subsequent
 * merge_dependencies() cannot fail on OOM mid-way. */
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}
779
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                /* di_u.data may be NULL (all-zero masks) if back had no
                                 * pre-existing dependency of kind 'k' on u; the OR below
                                 * handles that transparently. */
                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
843
/* Merges unit 'other' into unit 'u': other's names, references and
 * dependencies are transferred to u, other is marked UNIT_MERGED (pointing at
 * u via merged_into) and queued for cleanup. Only permitted when both units
 * are of the same aliasable type, other is still unloaded (STUB/NOT_FOUND),
 * and other has no jobs and is not active. Returns 0 on success or a negative
 * errno-style code (-EEXIST/-EINVAL/...) without having modified anything on
 * the early validation failures. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both units must agree on being instanced or not. */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Copy the id onto the stack: merge_names() below clears other->id,
         * but we still want it for warning messages afterwards. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): this condition is always true — load_state was just set
         * to UNIT_MERGED two lines above. Presumably it was meant to test the
         * state from *before* the merge; confirm against upstream intent. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
921
922 int unit_merge_by_name(Unit *u, const char *name) {
923 _cleanup_free_ char *s = NULL;
924 Unit *other;
925 int r;
926
927 assert(u);
928 assert(name);
929
930 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
931 if (!u->instance)
932 return -EINVAL;
933
934 r = unit_name_replace_instance(name, u->instance, &s);
935 if (r < 0)
936 return r;
937
938 name = s;
939 }
940
941 other = manager_get_unit(u->manager, name);
942 if (other)
943 return unit_merge(u, other);
944
945 return unit_add_name(u, name);
946 }
947
948 Unit* unit_follow_merge(Unit *u) {
949 assert(u);
950
951 while (u->load_state == UNIT_MERGED)
952 assert_se(u = u->merged_into);
953
954 return u;
955 }
956
/* Adds the implicit dependencies that follow from an ExecContext: mount
 * dependencies for the working/root directories, root image and the
 * configured runtime/state/... directories; and, for system instances,
 * ordering against tmpfiles-setup (for PrivateTmp=) and journald's socket
 * (when stdout/stderr go to the journal/syslog/kmsg). Returns 0 on success
 * or a negative errno-style code. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require the parent mounts of each configured directory
         * (RuntimeDirectory=, StateDirectory=, ...), resolved against the
         * per-type prefix (/run, /var/lib, ...). */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system instance. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1036
1037 const char *unit_description(Unit *u) {
1038 assert(u);
1039
1040 if (u->description)
1041 return u->description;
1042
1043 return strna(u->id);
1044 }
1045
1046 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1047 const struct {
1048 UnitDependencyMask mask;
1049 const char *name;
1050 } table[] = {
1051 { UNIT_DEPENDENCY_FILE, "file" },
1052 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1053 { UNIT_DEPENDENCY_DEFAULT, "default" },
1054 { UNIT_DEPENDENCY_UDEV, "udev" },
1055 { UNIT_DEPENDENCY_PATH, "path" },
1056 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1057 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1058 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1059 };
1060 size_t i;
1061
1062 assert(f);
1063 assert(kind);
1064 assert(space);
1065
1066 for (i = 0; i < ELEMENTSOF(table); i++) {
1067
1068 if (mask == 0)
1069 break;
1070
1071 if (FLAGS_SET(mask, table[i].mask)) {
1072 if (*space)
1073 fputc(' ', f);
1074 else
1075 *space = true;
1076
1077 fputs(kind, f);
1078 fputs("-", f);
1079 fputs(table[i].name, f);
1080
1081 mask &= ~table[i].mask;
1082 }
1083 }
1084
1085 assert(mask == 0);
1086 }
1087
1088 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1089 char *t, **j;
1090 UnitDependency d;
1091 Iterator i;
1092 const char *prefix2;
1093 char
1094 timestamp0[FORMAT_TIMESTAMP_MAX],
1095 timestamp1[FORMAT_TIMESTAMP_MAX],
1096 timestamp2[FORMAT_TIMESTAMP_MAX],
1097 timestamp3[FORMAT_TIMESTAMP_MAX],
1098 timestamp4[FORMAT_TIMESTAMP_MAX],
1099 timespan[FORMAT_TIMESPAN_MAX];
1100 Unit *following;
1101 _cleanup_set_free_ Set *following_set = NULL;
1102 const char *n;
1103 CGroupMask m;
1104 int r;
1105
1106 assert(u);
1107 assert(u->type >= 0);
1108
1109 prefix = strempty(prefix);
1110 prefix2 = strjoina(prefix, "\t");
1111
1112 fprintf(f,
1113 "%s-> Unit %s:\n"
1114 "%s\tDescription: %s\n"
1115 "%s\tInstance: %s\n"
1116 "%s\tUnit Load State: %s\n"
1117 "%s\tUnit Active State: %s\n"
1118 "%s\tState Change Timestamp: %s\n"
1119 "%s\tInactive Exit Timestamp: %s\n"
1120 "%s\tActive Enter Timestamp: %s\n"
1121 "%s\tActive Exit Timestamp: %s\n"
1122 "%s\tInactive Enter Timestamp: %s\n"
1123 "%s\tMay GC: %s\n"
1124 "%s\tNeed Daemon Reload: %s\n"
1125 "%s\tTransient: %s\n"
1126 "%s\tPerpetual: %s\n"
1127 "%s\tGarbage Collection Mode: %s\n"
1128 "%s\tSlice: %s\n"
1129 "%s\tCGroup: %s\n"
1130 "%s\tCGroup realized: %s\n",
1131 prefix, u->id,
1132 prefix, unit_description(u),
1133 prefix, strna(u->instance),
1134 prefix, unit_load_state_to_string(u->load_state),
1135 prefix, unit_active_state_to_string(unit_active_state(u)),
1136 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1137 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1138 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1139 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1140 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1141 prefix, yes_no(unit_may_gc(u)),
1142 prefix, yes_no(unit_need_daemon_reload(u)),
1143 prefix, yes_no(u->transient),
1144 prefix, yes_no(u->perpetual),
1145 prefix, collect_mode_to_string(u->collect_mode),
1146 prefix, strna(unit_slice_name(u)),
1147 prefix, strna(u->cgroup_path),
1148 prefix, yes_no(u->cgroup_realized));
1149
1150 if (u->cgroup_realized_mask != 0) {
1151 _cleanup_free_ char *s = NULL;
1152 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1153 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1154 }
1155 if (u->cgroup_enabled_mask != 0) {
1156 _cleanup_free_ char *s = NULL;
1157 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1158 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1159 }
1160 m = unit_get_own_mask(u);
1161 if (m != 0) {
1162 _cleanup_free_ char *s = NULL;
1163 (void) cg_mask_to_string(m, &s);
1164 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1165 }
1166 m = unit_get_members_mask(u);
1167 if (m != 0) {
1168 _cleanup_free_ char *s = NULL;
1169 (void) cg_mask_to_string(m, &s);
1170 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1171 }
1172
1173 SET_FOREACH(t, u->names, i)
1174 fprintf(f, "%s\tName: %s\n", prefix, t);
1175
1176 if (!sd_id128_is_null(u->invocation_id))
1177 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1178 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1179
1180 STRV_FOREACH(j, u->documentation)
1181 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1182
1183 following = unit_following(u);
1184 if (following)
1185 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1186
1187 r = unit_following_set(u, &following_set);
1188 if (r >= 0) {
1189 Unit *other;
1190
1191 SET_FOREACH(other, following_set, i)
1192 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1193 }
1194
1195 if (u->fragment_path)
1196 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1197
1198 if (u->source_path)
1199 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1200
1201 STRV_FOREACH(j, u->dropin_paths)
1202 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1203
1204 if (u->failure_action != EMERGENCY_ACTION_NONE)
1205 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1206 if (u->success_action != EMERGENCY_ACTION_NONE)
1207 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1208
1209 if (u->job_timeout != USEC_INFINITY)
1210 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1211
1212 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1213 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1214
1215 if (u->job_timeout_reboot_arg)
1216 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1217
1218 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1219 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1220
1221 if (dual_timestamp_is_set(&u->condition_timestamp))
1222 fprintf(f,
1223 "%s\tCondition Timestamp: %s\n"
1224 "%s\tCondition Result: %s\n",
1225 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1226 prefix, yes_no(u->condition_result));
1227
1228 if (dual_timestamp_is_set(&u->assert_timestamp))
1229 fprintf(f,
1230 "%s\tAssert Timestamp: %s\n"
1231 "%s\tAssert Result: %s\n",
1232 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1233 prefix, yes_no(u->assert_result));
1234
1235 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1236 UnitDependencyInfo di;
1237 Unit *other;
1238
1239 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1240 bool space = false;
1241
1242 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1243
1244 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1245 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1246
1247 fputs(")\n", f);
1248 }
1249 }
1250
1251 if (!hashmap_isempty(u->requires_mounts_for)) {
1252 UnitDependencyInfo di;
1253 const char *path;
1254
1255 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1256 bool space = false;
1257
1258 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1259
1260 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1261 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1262
1263 fputs(")\n", f);
1264 }
1265 }
1266
1267 if (u->load_state == UNIT_LOADED) {
1268
1269 fprintf(f,
1270 "%s\tStopWhenUnneeded: %s\n"
1271 "%s\tRefuseManualStart: %s\n"
1272 "%s\tRefuseManualStop: %s\n"
1273 "%s\tDefaultDependencies: %s\n"
1274 "%s\tOnFailureJobMode: %s\n"
1275 "%s\tIgnoreOnIsolate: %s\n",
1276 prefix, yes_no(u->stop_when_unneeded),
1277 prefix, yes_no(u->refuse_manual_start),
1278 prefix, yes_no(u->refuse_manual_stop),
1279 prefix, yes_no(u->default_dependencies),
1280 prefix, job_mode_to_string(u->on_failure_job_mode),
1281 prefix, yes_no(u->ignore_on_isolate));
1282
1283 if (UNIT_VTABLE(u)->dump)
1284 UNIT_VTABLE(u)->dump(u, f, prefix2);
1285
1286 } else if (u->load_state == UNIT_MERGED)
1287 fprintf(f,
1288 "%s\tMerged into: %s\n",
1289 prefix, u->merged_into->id);
1290 else if (u->load_state == UNIT_ERROR)
1291 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1292
1293 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1294 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1295
1296 if (u->job)
1297 job_dump(u->job, f, prefix2);
1298
1299 if (u->nop_job)
1300 job_dump(u->nop_job, f, prefix2);
1301 }
1302
1303 /* Common implementation for multiple backends */
1304 int unit_load_fragment_and_dropin(Unit *u) {
1305 int r;
1306
1307 assert(u);
1308
1309 /* Load a .{service,socket,...} file */
1310 r = unit_load_fragment(u);
1311 if (r < 0)
1312 return r;
1313
1314 if (u->load_state == UNIT_STUB)
1315 return -ENOENT;
1316
1317 /* Load drop-in directory data. If u is an alias, we might be reloading the
1318 * target unit needlessly. But we cannot be sure which drops-ins have already
1319 * been loaded and which not, at least without doing complicated book-keeping,
1320 * so let's always reread all drop-ins. */
1321 return unit_load_dropin(unit_follow_merge(u));
1322 }
1323
1324 /* Common implementation for multiple backends */
1325 int unit_load_fragment_and_dropin_optional(Unit *u) {
1326 int r;
1327
1328 assert(u);
1329
1330 /* Same as unit_load_fragment_and_dropin(), but whether
1331 * something can be loaded or not doesn't matter. */
1332
1333 /* Load a .service/.socket/.slice/… file */
1334 r = unit_load_fragment(u);
1335 if (r < 0)
1336 return r;
1337
1338 if (u->load_state == UNIT_STUB)
1339 u->load_state = UNIT_LOADED;
1340
1341 /* Load drop-in directory data */
1342 return unit_load_dropin(unit_follow_merge(u));
1343 }
1344
1345 void unit_add_to_target_deps_queue(Unit *u) {
1346 Manager *m = u->manager;
1347
1348 assert(u);
1349
1350 if (u->in_target_deps_queue)
1351 return;
1352
1353 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1354 u->in_target_deps_queue = true;
1355 }
1356
1357 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1358 assert(u);
1359 assert(target);
1360
1361 if (target->type != UNIT_TARGET)
1362 return 0;
1363
1364 /* Only add the dependency if both units are loaded, so that
1365 * that loop check below is reliable */
1366 if (u->load_state != UNIT_LOADED ||
1367 target->load_state != UNIT_LOADED)
1368 return 0;
1369
1370 /* If either side wants no automatic dependencies, then let's
1371 * skip this */
1372 if (!u->default_dependencies ||
1373 !target->default_dependencies)
1374 return 0;
1375
1376 /* Don't create loops */
1377 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1378 return 0;
1379
1380 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1381 }
1382
1383 static int unit_add_slice_dependencies(Unit *u) {
1384 UnitDependencyMask mask;
1385 assert(u);
1386
1387 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1388 return 0;
1389
1390 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1391 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1392 relationship). */
1393 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1394
1395 if (UNIT_ISSET(u->slice))
1396 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1397
1398 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1399 return 0;
1400
1401 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1402 }
1403
/* For every path in RequiresMountsFor=, walk all path prefixes and add After= (and,
 * for units backed by a fragment, Requires=) dependencies on the corresponding .mount
 * units. Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];  /* VLA sized to hold the longest prefix, i.e. path itself */

                /* Iterate over "/", "/foo", "/foo/bar", … up to and including path. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Always order after the mount unit… */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* …but require it only when the mount is configured via a unit file. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1453
1454 static int unit_add_startup_units(Unit *u) {
1455 CGroupContext *c;
1456 int r;
1457
1458 c = unit_get_cgroup_context(u);
1459 if (!c)
1460 return 0;
1461
1462 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1463 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1464 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1465 return 0;
1466
1467 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1468 if (r < 0)
1469 return r;
1470
1471 return set_put(u->manager->startup_units, u);
1472 }
1473
/* Load the unit's configuration (fragment, drop-ins, implicit dependencies) and move it
 * out of the STUB state. On failure the load state is set to NOT_FOUND, BAD_SETTING or
 * ERROR, and a negative errno is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading it right now, so take it off the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than STUB has been loaded (or merged, or failed) already. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient units are configured through an in-memory file; flush and close it
         * before the type-specific load routine parses it. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Delegate the actual parsing to the unit type's load routine. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after loading means no configuration was found anywhere. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* "isolate" only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set iff the load state is MERGED. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1558
/* Evaluate a list of Condition*=/Assert*= entries for unit u.
 * All non-trigger ("mandatory") conditions must pass; of the trigger ("|") conditions,
 * at least one must pass — unless there are no trigger conditions at all. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1;  /* tri-state: -1 = no trigger condition seen yet, 0 = all failed so far, 1 = one passed */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failing (or errored, r < 0) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Latch 1 as soon as any trigger condition passes; stays 0 only while all failed. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and every one of them failed. */
        return triggered != 0;
}
1602
1603 static bool unit_condition_test(Unit *u) {
1604 assert(u);
1605
1606 dual_timestamp_get(&u->condition_timestamp);
1607 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1608
1609 return u->condition_result;
1610 }
1611
1612 static bool unit_assert_test(Unit *u) {
1613 assert(u);
1614
1615 dual_timestamp_get(&u->assert_timestamp);
1616 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1617
1618 return u->assert_result;
1619 }
1620
1621 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1622 DISABLE_WARNING_FORMAT_NONLITERAL;
1623 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1624 REENABLE_WARNING;
1625 }
1626
1627 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1628 const char *format;
1629 const UnitStatusMessageFormats *format_table;
1630
1631 assert(u);
1632 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1633
1634 if (t != JOB_RELOAD) {
1635 format_table = &UNIT_VTABLE(u)->status_message_formats;
1636 if (format_table) {
1637 format = format_table->starting_stopping[t == JOB_STOP];
1638 if (format)
1639 return format;
1640 }
1641 }
1642
1643 /* Return generic strings */
1644 if (t == JOB_START)
1645 return "Starting %s.";
1646 else if (t == JOB_STOP)
1647 return "Stopping %s.";
1648 else
1649 return "Reloading %s.";
1650 }
1651
1652 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1653 const char *format;
1654
1655 assert(u);
1656
1657 /* Reload status messages have traditionally not been printed to console. */
1658 if (!IN_SET(t, JOB_START, JOB_STOP))
1659 return;
1660
1661 format = unit_get_status_message_format(u, t);
1662
1663 DISABLE_WARNING_FORMAT_NONLITERAL;
1664 unit_status_printf(u, "", format);
1665 REENABLE_WARNING;
1666 }
1667
/* Write a structured journal entry mirroring the console status message for a
 * start/stop/reload transition, tagged with the matching MESSAGE_ID. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* If logging already goes to the console, the status output would show up twice there. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));  /* truncation at LINE_MAX is acceptable */
        REENABLE_WARNING;

        /* Pick the journal message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid);
}
1704
1705 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1706 assert(u);
1707 assert(t >= 0);
1708 assert(t < _JOB_TYPE_MAX);
1709
1710 unit_status_log_starting_stopping_reloading(u, t);
1711 unit_status_print_starting_stopping(u, t);
1712 }
1713
1714 int unit_start_limit_test(Unit *u) {
1715 assert(u);
1716
1717 if (ratelimit_below(&u->start_limit)) {
1718 u->start_limit_hit = false;
1719 return 0;
1720 }
1721
1722 log_unit_warning(u, "Start request repeated too quickly.");
1723 u->start_limit_hit = true;
1724
1725 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1726 }
1727
1728 bool unit_shall_confirm_spawn(Unit *u) {
1729 assert(u);
1730
1731 if (manager_is_confirm_spawn_disabled(u->manager))
1732 return false;
1733
1734 /* For some reasons units remaining in the same process group
1735 * as PID 1 fail to acquire the console even if it's not used
1736 * by any process. So skip the confirmation question for them. */
1737 return !unit_get_exec_context(u)->same_pgrp;
1738 }
1739
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Only consider BindsTo= partners we are also ordered After= (see above). */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1765
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case.
         * (Returning -EALREADY here makes a failed condition look like
         * "nothing to do" rather than a hard start failure.) */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1852
1853 bool unit_can_start(Unit *u) {
1854 assert(u);
1855
1856 if (u->load_state != UNIT_LOADED)
1857 return false;
1858
1859 if (!unit_supported(u))
1860 return false;
1861
1862 /* Scope units may be started only once */
1863 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1864 return false;
1865
1866 return !!UNIT_VTABLE(u)->start;
1867 }
1868
1869 bool unit_can_isolate(Unit *u) {
1870 assert(u);
1871
1872 return unit_can_start(u) &&
1873 u->allow_isolate;
1874 }
1875
1876 /* Errors:
1877 * -EBADR: This unit type does not support stopping.
1878 * -EALREADY: Unit is already stopped.
1879 * -EAGAIN: An operation is already in progress. Retry later.
1880 */
1881 int unit_stop(Unit *u) {
1882 UnitActiveState state;
1883 Unit *following;
1884
1885 assert(u);
1886
1887 state = unit_active_state(u);
1888 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1889 return -EALREADY;
1890
1891 following = unit_following(u);
1892 if (following) {
1893 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1894 return unit_stop(following);
1895 }
1896
1897 if (!UNIT_VTABLE(u)->stop)
1898 return -EBADR;
1899
1900 unit_add_to_dbus_queue(u);
1901
1902 return UNIT_VTABLE(u)->stop(u);
1903 }
1904
1905 bool unit_can_stop(Unit *u) {
1906 assert(u);
1907
1908 if (!unit_supported(u))
1909 return false;
1910
1911 if (u->perpetual)
1912 return false;
1913
1914 return !!UNIT_VTABLE(u)->stop;
1915 }
1916
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only an active unit can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward the request to the unit we are following, if any. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway
                 * (unit_notify() with unchanged state still fans out to dependent units). */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1959
1960 bool unit_can_reload(Unit *u) {
1961 assert(u);
1962
1963 if (UNIT_VTABLE(u)->can_reload)
1964 return UNIT_VTABLE(u)->can_reload(u);
1965
1966 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1967 return true;
1968
1969 return UNIT_VTABLE(u)->reload;
1970 }
1971
1972 bool unit_is_unneeded(Unit *u) {
1973 static const UnitDependency deps[] = {
1974 UNIT_REQUIRED_BY,
1975 UNIT_REQUISITE_OF,
1976 UNIT_WANTED_BY,
1977 UNIT_BOUND_BY,
1978 };
1979 size_t j;
1980
1981 assert(u);
1982
1983 if (!u->stop_when_unneeded)
1984 return false;
1985
1986 /* Don't clean up while the unit is transitioning or is even inactive. */
1987 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1988 return false;
1989 if (u->job)
1990 return false;
1991
1992 for (j = 0; j < ELEMENTSOF(deps); j++) {
1993 Unit *other;
1994 Iterator i;
1995 void *v;
1996
1997 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1998 * restart, then don't clean this one up. */
1999
2000 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
2001 if (u->job)
2002 return false;
2003
2004 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2005 return false;
2006
2007 if (unit_will_restart(other))
2008 return false;
2009 }
2010 }
2011
2012 return true;
2013 }
2014
2015 static void check_unneeded_dependencies(Unit *u) {
2016
2017 static const UnitDependency deps[] = {
2018 UNIT_REQUIRES,
2019 UNIT_REQUISITE,
2020 UNIT_WANTS,
2021 UNIT_BINDS_TO,
2022 };
2023 size_t j;
2024
2025 assert(u);
2026
2027 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2028
2029 for (j = 0; j < ELEMENTSOF(deps); j++) {
2030 Unit *other;
2031 Iterator i;
2032 void *v;
2033
2034 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2035 unit_submit_to_stop_when_unneeded_queue(other);
2036 }
2037 }
2038
/* If this active unit is BoundTo= a unit that has gone inactive/failed, enqueue a
 * stop job for it — subject to a rate limit to avoid stop-loops. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* An already queued job will sort things out by itself. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* Leave 'other' pointing at the offending unit for the log messages below. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2089
/* After a unit became active outside of a job (e.g. externally started), bring its
 * dependencies into line: start what it Requires=/BindsTo=/Wants= (unless ordered
 * After= it, in which case the ordering machinery handles it), and stop Conflicts=. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= and BindsTo= use JOB_REPLACE: these are hard dependencies. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= uses JOB_FAIL: a soft dependency must not displace existing jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units are taken down in both directions. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2121
2122 static void retroactively_stop_dependencies(Unit *u) {
2123 Unit *other;
2124 Iterator i;
2125 void *v;
2126
2127 assert(u);
2128 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2129
2130 /* Pull down units which are bound to us recursively if enabled */
2131 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2132 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2133 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2134 }
2135
2136 void unit_start_on_failure(Unit *u) {
2137 Unit *other;
2138 Iterator i;
2139 void *v;
2140 int r;
2141
2142 assert(u);
2143
2144 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2145 return;
2146
2147 log_unit_info(u, "Triggering OnFailure= dependencies.");
2148
2149 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2150 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2151
2152 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2153 if (r < 0)
2154 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2155 }
2156 }
2157
2158 void unit_trigger_notify(Unit *u) {
2159 Unit *other;
2160 Iterator i;
2161 void *v;
2162
2163 assert(u);
2164
2165 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2166 if (UNIT_VTABLE(other)->trigger_notify)
2167 UNIT_VTABLE(other)->trigger_notify(other, u);
2168 }
2169
2170 static int unit_log_resources(Unit *u) {
2171
2172 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2173 size_t n_message_parts = 0, n_iovec = 0;
2174 char* message_parts[3 + 1], *t;
2175 nsec_t nsec = NSEC_INFINITY;
2176 CGroupIPAccountingMetric m;
2177 size_t i;
2178 int r;
2179 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2180 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2181 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2182 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2183 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2184 };
2185
2186 assert(u);
2187
2188 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2189 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2190 * information and the complete data in structured fields. */
2191
2192 (void) unit_get_cpu_usage(u, &nsec);
2193 if (nsec != NSEC_INFINITY) {
2194 char buf[FORMAT_TIMESPAN_MAX] = "";
2195
2196 /* Format the CPU time for inclusion in the structured log message */
2197 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2198 r = log_oom();
2199 goto finish;
2200 }
2201 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2202
2203 /* Format the CPU time for inclusion in the human language message string */
2204 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2205 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2206 if (!t) {
2207 r = log_oom();
2208 goto finish;
2209 }
2210
2211 message_parts[n_message_parts++] = t;
2212 }
2213
2214 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2215 char buf[FORMAT_BYTES_MAX] = "";
2216 uint64_t value = UINT64_MAX;
2217
2218 assert(ip_fields[m]);
2219
2220 (void) unit_get_ip_accounting(u, m, &value);
2221 if (value == UINT64_MAX)
2222 continue;
2223
2224 /* Format IP accounting data for inclusion in the structured log message */
2225 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2226 r = log_oom();
2227 goto finish;
2228 }
2229 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2230
2231 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2232 * bytes counters (and not for the packets counters) */
2233 if (m == CGROUP_IP_INGRESS_BYTES)
2234 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2235 format_bytes(buf, sizeof(buf), value),
2236 " IP traffic");
2237 else if (m == CGROUP_IP_EGRESS_BYTES)
2238 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2239 format_bytes(buf, sizeof(buf), value),
2240 " IP traffic");
2241 else
2242 continue;
2243 if (!t) {
2244 r = log_oom();
2245 goto finish;
2246 }
2247
2248 message_parts[n_message_parts++] = t;
2249 }
2250
2251 /* Is there any accounting data available at all? */
2252 if (n_iovec == 0) {
2253 r = 0;
2254 goto finish;
2255 }
2256
2257 if (n_message_parts == 0)
2258 t = strjoina("MESSAGE=", u->id, ": Completed");
2259 else {
2260 _cleanup_free_ char *joined;
2261
2262 message_parts[n_message_parts] = NULL;
2263
2264 joined = strv_join(message_parts, ", ");
2265 if (!joined) {
2266 r = log_oom();
2267 goto finish;
2268 }
2269
2270 t = strjoina("MESSAGE=", u->id, ": ", joined);
2271 }
2272
2273 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2274 * and hence don't increase n_iovec for them */
2275 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2276 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2277
2278 t = strjoina(u->manager->unit_log_field, u->id);
2279 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2280
2281 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2282 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2283
2284 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2285 r = 0;
2286
2287 finish:
2288 for (i = 0; i < n_message_parts; i++)
2289 free(message_parts[i]);
2290
2291 for (i = 0; i < n_iovec; i++)
2292 free(iovec[i].iov_base);
2293
2294 return r;
2295
2296 }
2297
2298 static void unit_update_on_console(Unit *u) {
2299 bool b;
2300
2301 assert(u);
2302
2303 b = unit_needs_console(u);
2304 if (u->on_console == b)
2305 return;
2306
2307 u->on_console = b;
2308 if (b)
2309 manager_ref_console(u->manager);
2310 else
2311 manager_unref_console(u->manager);
2312 }
2313
/* Central state-change notification: called by the per-type unit code whenever a unit moves
 * between (possibly identical) active states. Updates timestamps, finishes or invalidates
 * jobs, retroactively starts/stops dependencies, emits audit/plymouth/log records, and
 * triggers GC/D-Bus updates. The statement order below is significant. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* The unit left the activating path without our job asking for it. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job at all: every state change is "unexpected" by definition. */
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= is suppressed while an automatic restart is pending. */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2514
/* Start watching a specific PID on behalf of this unit.
 *
 * The manager's watch_pids hashmap holds two kinds of entries: the positive "pid" key maps
 * to a single Unit (fast path), and the negative "-pid" key maps to a NULL-terminated array
 * of Units, used when several units watch the same PID. The PID is also recorded in the
 * unit's own pids set so it can be enumerated/unwatched later.
 *
 * Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* Copy the n existing entries over, append ourselves, terminate with NULL. */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2579
/* Stop watching a PID for this unit: drop it from both the manager's watch_pids hashmap
 * (the direct "pid" key and the multi-watcher "-pid" array key) and the unit's own pids set.
 * Counterpart to unit_watch_pid(). */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                size_t n, m = 0;

                /* Let's iterate through the array, dropping our own entry */
                for (n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2609
2610 void unit_unwatch_all_pids(Unit *u) {
2611 assert(u);
2612
2613 while (!set_isempty(u->pids))
2614 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2615
2616 u->pids = set_free(u->pids);
2617 }
2618
2619 static void unit_tidy_watch_pids(Unit *u) {
2620 pid_t except1, except2;
2621 Iterator i;
2622 void *e;
2623
2624 assert(u);
2625
2626 /* Cleans dead PIDs from our list */
2627
2628 except1 = unit_main_pid(u);
2629 except2 = unit_control_pid(u);
2630
2631 SET_FOREACH(e, u->pids, i) {
2632 pid_t pid = PTR_TO_PID(e);
2633
2634 if (pid == except1 || pid == except2)
2635 continue;
2636
2637 if (!pid_is_unwaited(pid))
2638 unit_unwatch_pid(u, pid);
2639 }
2640 }
2641
/* Deferred-event callback scheduled by unit_enqueue_rewatch_pids(): prunes dead PIDs,
 * re-subscribes to all remaining ones, and synthesizes a cgroup-empty event in case the
 * PID set drained completely. Always returns 0. */
static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
        Unit *u = userdata;

        assert(s);
        assert(u);

        unit_tidy_watch_pids(u);
        unit_watch_all_pids(u);

        /* If the PID set is empty now, then let's finish this off. */
        unit_synthesize_cgroup_empty_event(u);

        return 0;
}
2656
2657 int unit_enqueue_rewatch_pids(Unit *u) {
2658 int r;
2659
2660 assert(u);
2661
2662 if (!u->cgroup_path)
2663 return -ENOENT;
2664
2665 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2666 if (r < 0)
2667 return r;
2668 if (r > 0) /* On unified we can use proper notifications */
2669 return 0;
2670
2671 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2672 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2673 * involves issuing kill(pid, 0) on all processes we watch. */
2674
2675 if (!u->rewatch_pids_event_source) {
2676 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2677
2678 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2679 if (r < 0)
2680 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2681
2682 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2683 if (r < 0)
2684 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2685
2686 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2687
2688 u->rewatch_pids_event_source = TAKE_PTR(s);
2689 }
2690
2691 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2692 if (r < 0)
2693 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2694
2695 return 0;
2696 }
2697
/* Cancel a pending PID-rewatch pass: disable and drop the deferred event source, if any.
 * A failure to disable is only logged, since the source is unreferenced right after anyway. */
void unit_dequeue_rewatch_pids(Unit *u) {
        int r;
        assert(u);

        if (!u->rewatch_pids_event_source)
                return;

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");

        u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
}
2711
/* Decide whether a job of type "j" may be enqueued for unit "u" at all, independent of the
 * unit's current state. Used as a gate before job installation. */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
        case JOB_NOP:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                /* A restart needs both directions to be supported. */
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached("Invalid job type");
        }
}
2746
2747 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2748 assert(u);
2749
2750 /* Only warn about some unit types */
2751 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2752 return;
2753
2754 if (streq_ptr(u->id, other))
2755 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2756 else
2757 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2758 }
2759
/* Record a dependency edge towards "other" in one of a unit's dependency hashmaps,
 * merging the given origin/destination masks into any existing entry.
 *
 * The two masks are packed into the hashmap's value pointer via the UnitDependencyInfo
 * union (the assert_cc below guarantees the union fits a void*), so no per-edge
 * allocation is needed.
 *
 * Returns 1 if the entry was created or its masks grew, 0 if nothing changed, negative
 * errno on failure. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2806
2807 int unit_add_dependency(
2808 Unit *u,
2809 UnitDependency d,
2810 Unit *other,
2811 bool add_reference,
2812 UnitDependencyMask mask) {
2813
2814 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2815 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2816 [UNIT_WANTS] = UNIT_WANTED_BY,
2817 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2818 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2819 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2820 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2821 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2822 [UNIT_WANTED_BY] = UNIT_WANTS,
2823 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2824 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2825 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2826 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2827 [UNIT_BEFORE] = UNIT_AFTER,
2828 [UNIT_AFTER] = UNIT_BEFORE,
2829 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2830 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2831 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2832 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2833 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2834 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2835 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2836 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2837 };
2838 Unit *original_u = u, *original_other = other;
2839 int r;
2840
2841 assert(u);
2842 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2843 assert(other);
2844
2845 u = unit_follow_merge(u);
2846 other = unit_follow_merge(other);
2847
2848 /* We won't allow dependencies on ourselves. We will not
2849 * consider them an error however. */
2850 if (u == other) {
2851 maybe_warn_about_dependency(original_u, original_other->id, d);
2852 return 0;
2853 }
2854
2855 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2856 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2857 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2858 return 0;
2859 }
2860
2861 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2862 if (r < 0)
2863 return r;
2864
2865 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2866 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2867 if (r < 0)
2868 return r;
2869 }
2870
2871 if (add_reference) {
2872 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2873 if (r < 0)
2874 return r;
2875
2876 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2877 if (r < 0)
2878 return r;
2879 }
2880
2881 unit_add_to_dbus_queue(u);
2882 return 0;
2883 }
2884
2885 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2886 int r;
2887
2888 assert(u);
2889
2890 r = unit_add_dependency(u, d, other, add_reference, mask);
2891 if (r < 0)
2892 return r;
2893
2894 return unit_add_dependency(u, e, other, add_reference, mask);
2895 }
2896
/* Resolve a possibly-templated unit name relative to unit "u".
 *
 * If "name" is not a template it is returned as-is (*buf stays NULL, *ret aliases "name").
 * Otherwise the instance is taken from u->instance, or derived from u's own prefix, and
 * substituted into the template; the allocation is handed to the caller via *buf with
 * *ret pointing at it. Returns 0 on success, negative errno on failure. */
static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name);
        assert(buf);
        assert(ret);

        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                /* No instance of our own: use our prefix as the instance string. */
                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}
2928
2929 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2930 _cleanup_free_ char *buf = NULL;
2931 Unit *other;
2932 int r;
2933
2934 assert(u);
2935 assert(name);
2936
2937 r = resolve_template(u, name, &buf, &name);
2938 if (r < 0)
2939 return r;
2940
2941 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2942 if (r < 0)
2943 return r;
2944
2945 return unit_add_dependency(u, d, other, add_reference, mask);
2946 }
2947
2948 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2949 _cleanup_free_ char *buf = NULL;
2950 Unit *other;
2951 int r;
2952
2953 assert(u);
2954 assert(name);
2955
2956 r = resolve_template(u, name, &buf, &name);
2957 if (r < 0)
2958 return r;
2959
2960 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2961 if (r < 0)
2962 return r;
2963
2964 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2965 }
2966
/* Debug helper: point the manager at an alternate unit search path via the
 * SYSTEMD_UNIT_PATH environment variable. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2974
2975 char *unit_dbus_path(Unit *u) {
2976 assert(u);
2977
2978 if (!u->id)
2979 return NULL;
2980
2981 return unit_dbus_path_from_name(u->id);
2982 }
2983
2984 char *unit_dbus_path_invocation_id(Unit *u) {
2985 assert(u);
2986
2987 if (sd_id128_is_null(u->invocation_id))
2988 return NULL;
2989
2990 return unit_dbus_path_from_name(u->invocation_id_string);
2991 }
2992
/* Assign "slice" as the slice of unit "u", subject to a series of sanity checks.
 * Returns 1 if the slice was (re)set, 0 if it was already set to "slice", and a negative
 * errno when the assignment is not permitted. The order of the checks determines which
 * error callers see first. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* -.scope must stay directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
3029
/* Pick and assign a default slice for a unit that has none yet: instantiated units get a
 * per-template "system-<prefix>.slice" (or "<prefix>.slice" for user managers), everything
 * else lands in system.slice resp. -.slice. Returns unit_set_slice()'s result, 0 if a
 * slice was already set, or a negative errno. */
int unit_set_default_slice(Unit *u) {
        _cleanup_free_ char *b = NULL;
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        b = strjoin("system-", escaped, ".slice");
                else
                        b = strappend(escaped, ".slice");
                if (!b)
                        return -ENOMEM;

                slice_name = b;
        } else
                /* Non-instantiated units: system.slice, except for init.scope which lives in -.slice. */
                slice_name =
                        MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3078
3079 const char *unit_slice_name(Unit *u) {
3080 assert(u);
3081
3082 if (!UNIT_ISSET(u->slice))
3083 return NULL;
3084
3085 return UNIT_DEREF(u->slice)->id;
3086 }
3087
3088 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3089 _cleanup_free_ char *t = NULL;
3090 int r;
3091
3092 assert(u);
3093 assert(type);
3094 assert(_found);
3095
3096 r = unit_name_change_suffix(u->id, type, &t);
3097 if (r < 0)
3098 return r;
3099 if (unit_has_name(u, t))
3100 return -EINVAL;
3101
3102 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3103 assert(r < 0 || *_found != u);
3104 return r;
3105 }
3106
/* D-Bus signal handler for org.freedesktop.DBus.NameOwnerChanged, installed by
 * unit_install_bus_match(). Forwards the (name, old_owner, new_owner) triple to the
 * unit type's bus_name_owner_change hook, mapping empty owner strings to NULL.
 * Always returns 0 so the match stays installed even on parse errors. */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *name, *old_owner, *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        /* The bus encodes "no owner" as the empty string; normalize to NULL. */
        old_owner = empty_to_null(old_owner);
        new_owner = empty_to_null(new_owner);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);

        return 0;
}
3129
/* Asynchronously install a bus match for NameOwnerChanged signals concerning "name", so the
 * unit learns when the bus name it watches changes hands. Only one match slot per unit is
 * supported; returns -EBUSY if one is already installed. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* arg0 filters the signal down to the one bus name we care about. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
}
3149
/* Register this unit as the watcher of a bus name: install the NameOwnerChanged match (if
 * the API bus is already up) and record the name in the manager's watch_bus hashmap.
 * On hashmap failure the just-installed match slot is rolled back. Returns 0 on success,
 * negative errno otherwise. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Undo the match installation so we don't keep a half-registered watch. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3175
/* Stop watching a bus name: drop the manager's watch_bus entry (only if it maps to this
 * unit) and release the match slot. Counterpart to unit_watch_bus_name(). */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
}
3183
/* A unit type supports serialization only if its vtable provides both the serialize and
 * deserialize_item hooks. */
bool unit_can_serialize(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
}
3189
3190 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3191 _cleanup_free_ char *s = NULL;
3192 int r = 0;
3193
3194 assert(f);
3195 assert(key);
3196
3197 if (mask != 0) {
3198 r = cg_mask_to_string(mask, &s);
3199 if (r >= 0) {
3200 fputs(key, f);
3201 fputc('=', f);
3202 fputs(s, f);
3203 fputc('\n', f);
3204 }
3205 }
3206 return r;
3207 }
3208
3209 static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3210 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3211 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3212 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3213 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3214 };
3215
/* Serialize the generic unit state (plus the type-specific state via the vtable hook) to
 * "f" as "key=value" lines, terminated by an empty line. File descriptors are stashed in
 * "fds". The key set and ordering form the on-disk format consumed by unit_deserialize(),
 * so do not reorder casually. Returns 0 on success, negative errno on failure. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Type-specific state first, if the type supports serialization at all. */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful once their timestamps are set. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* IP accounting counters, one line per metric that is currently available. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                /* Both the installed job and the nop job are written with the same "job" marker;
                 * the deserializer reconstructs them from the embedded job state. */
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3297
3298 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3299 assert(u);
3300 assert(f);
3301 assert(key);
3302
3303 if (!value)
3304 return 0;
3305
3306 fputs(key, f);
3307 fputc('=', f);
3308 fputs(value, f);
3309 fputc('\n', f);
3310
3311 return 1;
3312 }
3313
3314 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3315 _cleanup_free_ char *c = NULL;
3316
3317 assert(u);
3318 assert(f);
3319 assert(key);
3320
3321 if (!value)
3322 return 0;
3323
3324 c = cescape(value);
3325 if (!c)
3326 return -ENOMEM;
3327
3328 fputs(key, f);
3329 fputc('=', f);
3330 fputs(c, f);
3331 fputc('\n', f);
3332
3333 return 1;
3334 }
3335
3336 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3337 int copy;
3338
3339 assert(u);
3340 assert(f);
3341 assert(key);
3342
3343 if (fd < 0)
3344 return 0;
3345
3346 copy = fdset_put_dup(fds, fd);
3347 if (copy < 0)
3348 return copy;
3349
3350 fprintf(f, "%s=%i\n", key, copy);
3351 return 1;
3352 }
3353
3354 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3355 va_list ap;
3356
3357 assert(u);
3358 assert(f);
3359 assert(key);
3360 assert(format);
3361
3362 fputs(key, f);
3363 fputc('=', f);
3364
3365 va_start(ap, format);
3366 vfprintf(f, format, ap);
3367 va_end(ap);
3368
3369 fputc('\n', f);
3370 }
3371
3372 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3373 int r;
3374
3375 assert(u);
3376 assert(f);
3377 assert(fds);
3378
3379 for (;;) {
3380 char line[LINE_MAX], *l, *v;
3381 CGroupIPAccountingMetric m;
3382 size_t k;
3383
3384 if (!fgets(line, sizeof(line), f)) {
3385 if (feof(f))
3386 return 0;
3387 return -errno;
3388 }
3389
3390 char_array_0(line);
3391 l = strstrip(line);
3392
3393 /* End marker */
3394 if (isempty(l))
3395 break;
3396
3397 k = strcspn(l, "=");
3398
3399 if (l[k] == '=') {
3400 l[k] = 0;
3401 v = l+k+1;
3402 } else
3403 v = l+k;
3404
3405 if (streq(l, "job")) {
3406 if (v[0] == '\0') {
3407 /* new-style serialized job */
3408 Job *j;
3409
3410 j = job_new_raw(u);
3411 if (!j)
3412 return log_oom();
3413
3414 r = job_deserialize(j, f);
3415 if (r < 0) {
3416 job_free(j);
3417 return r;
3418 }
3419
3420 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3421 if (r < 0) {
3422 job_free(j);
3423 return r;
3424 }
3425
3426 r = job_install_deserialized(j);
3427 if (r < 0) {
3428 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3429 job_free(j);
3430 return r;
3431 }
3432 } else /* legacy for pre-44 */
3433 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3434 continue;
3435 } else if (streq(l, "state-change-timestamp")) {
3436 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3437 continue;
3438 } else if (streq(l, "inactive-exit-timestamp")) {
3439 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3440 continue;
3441 } else if (streq(l, "active-enter-timestamp")) {
3442 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3443 continue;
3444 } else if (streq(l, "active-exit-timestamp")) {
3445 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3446 continue;
3447 } else if (streq(l, "inactive-enter-timestamp")) {
3448 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3449 continue;
3450 } else if (streq(l, "condition-timestamp")) {
3451 dual_timestamp_deserialize(v, &u->condition_timestamp);
3452 continue;
3453 } else if (streq(l, "assert-timestamp")) {
3454 dual_timestamp_deserialize(v, &u->assert_timestamp);
3455 continue;
3456 } else if (streq(l, "condition-result")) {
3457
3458 r = parse_boolean(v);
3459 if (r < 0)
3460 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3461 else
3462 u->condition_result = r;
3463
3464 continue;
3465
3466 } else if (streq(l, "assert-result")) {
3467
3468 r = parse_boolean(v);
3469 if (r < 0)
3470 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3471 else
3472 u->assert_result = r;
3473
3474 continue;
3475
3476 } else if (streq(l, "transient")) {
3477
3478 r = parse_boolean(v);
3479 if (r < 0)
3480 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3481 else
3482 u->transient = r;
3483
3484 continue;
3485
3486 } else if (streq(l, "exported-invocation-id")) {
3487
3488 r = parse_boolean(v);
3489 if (r < 0)
3490 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3491 else
3492 u->exported_invocation_id = r;
3493
3494 continue;
3495
3496 } else if (streq(l, "exported-log-level-max")) {
3497
3498 r = parse_boolean(v);
3499 if (r < 0)
3500 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3501 else
3502 u->exported_log_level_max = r;
3503
3504 continue;
3505
3506 } else if (streq(l, "exported-log-extra-fields")) {
3507
3508 r = parse_boolean(v);
3509 if (r < 0)
3510 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3511 else
3512 u->exported_log_extra_fields = r;
3513
3514 continue;
3515
3516 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3517
3518 r = safe_atou64(v, &u->cpu_usage_base);
3519 if (r < 0)
3520 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3521
3522 continue;
3523
3524 } else if (streq(l, "cpu-usage-last")) {
3525
3526 r = safe_atou64(v, &u->cpu_usage_last);
3527 if (r < 0)
3528 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3529
3530 continue;
3531
3532 } else if (streq(l, "cgroup")) {
3533
3534 r = unit_set_cgroup_path(u, v);
3535 if (r < 0)
3536 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3537
3538 (void) unit_watch_cgroup(u);
3539
3540 continue;
3541 } else if (streq(l, "cgroup-realized")) {
3542 int b;
3543
3544 b = parse_boolean(v);
3545 if (b < 0)
3546 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3547 else
3548 u->cgroup_realized = b;
3549
3550 continue;
3551
3552 } else if (streq(l, "cgroup-realized-mask")) {
3553
3554 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3555 if (r < 0)
3556 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3557 continue;
3558
3559 } else if (streq(l, "cgroup-enabled-mask")) {
3560
3561 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3562 if (r < 0)
3563 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3564 continue;
3565
3566 } else if (streq(l, "cgroup-bpf-realized")) {
3567 int i;
3568
3569 r = safe_atoi(v, &i);
3570 if (r < 0)
3571 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3572 else
3573 u->cgroup_bpf_state =
3574 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3575 i > 0 ? UNIT_CGROUP_BPF_ON :
3576 UNIT_CGROUP_BPF_OFF;
3577
3578 continue;
3579
3580 } else if (streq(l, "ref-uid")) {
3581 uid_t uid;
3582
3583 r = parse_uid(v, &uid);
3584 if (r < 0)
3585 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3586 else
3587 unit_ref_uid_gid(u, uid, GID_INVALID);
3588
3589 continue;
3590
3591 } else if (streq(l, "ref-gid")) {
3592 gid_t gid;
3593
3594 r = parse_gid(v, &gid);
3595 if (r < 0)
3596 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3597 else
3598 unit_ref_uid_gid(u, UID_INVALID, gid);
3599
3600 } else if (streq(l, "ref")) {
3601
3602 r = strv_extend(&u->deserialized_refs, v);
3603 if (r < 0)
3604 log_oom();
3605
3606 continue;
3607 } else if (streq(l, "invocation-id")) {
3608 sd_id128_t id;
3609
3610 r = sd_id128_from_string(v, &id);
3611 if (r < 0)
3612 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3613 else {
3614 r = unit_set_invocation_id(u, id);
3615 if (r < 0)
3616 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3617 }
3618
3619 continue;
3620 }
3621
3622 /* Check if this is an IP accounting metric serialization field */
3623 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3624 if (streq(l, ip_accounting_metric_field[m]))
3625 break;
3626 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3627 uint64_t c;
3628
3629 r = safe_atou64(v, &c);
3630 if (r < 0)
3631 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3632 else
3633 u->ip_accounting_extra[m] = c;
3634 continue;
3635 }
3636
3637 if (unit_can_serialize(u)) {
3638 r = exec_runtime_deserialize_compat(u, l, v, fds);
3639 if (r < 0) {
3640 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3641 continue;
3642 }
3643
3644 /* Returns positive if key was handled by the call */
3645 if (r > 0)
3646 continue;
3647
3648 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3649 if (r < 0)
3650 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3651 }
3652 }
3653
3654 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3655 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3656 * before 228 where the base for timeouts was not persistent across reboots. */
3657
3658 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3659 dual_timestamp_get(&u->state_change_timestamp);
3660
3661 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3662 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3663 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3664 unit_invalidate_cgroup_bpf(u);
3665
3666 return 0;
3667 }
3668
3669 void unit_deserialize_skip(FILE *f) {
3670 assert(f);
3671
3672 /* Skip serialized data for this unit. We don't know what it is. */
3673
3674 for (;;) {
3675 char line[LINE_MAX], *l;
3676
3677 if (!fgets(line, sizeof line, f))
3678 return;
3679
3680 char_array_0(line);
3681 l = strstrip(line);
3682
3683 /* End marker */
3684 if (isempty(l))
3685 return;
3686 }
3687 }
3688
3689 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3690 Unit *device;
3691 _cleanup_free_ char *e = NULL;
3692 int r;
3693
3694 assert(u);
3695
3696 /* Adds in links to the device node that this unit is based on */
3697 if (isempty(what))
3698 return 0;
3699
3700 if (!is_device_path(what))
3701 return 0;
3702
3703 /* When device units aren't supported (such as in a
3704 * container), don't create dependencies on them. */
3705 if (!unit_type_supported(UNIT_DEVICE))
3706 return 0;
3707
3708 r = unit_name_from_path(what, ".device", &e);
3709 if (r < 0)
3710 return r;
3711
3712 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3713 if (r < 0)
3714 return r;
3715
3716 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3717 dep = UNIT_BINDS_TO;
3718
3719 r = unit_add_two_dependencies(u, UNIT_AFTER,
3720 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3721 device, true, mask);
3722 if (r < 0)
3723 return r;
3724
3725 if (wants) {
3726 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3727 if (r < 0)
3728 return r;
3729 }
3730
3731 return 0;
3732 }
3733
3734 int unit_coldplug(Unit *u) {
3735 int r = 0, q;
3736 char **i;
3737
3738 assert(u);
3739
3740 /* Make sure we don't enter a loop, when coldplugging recursively. */
3741 if (u->coldplugged)
3742 return 0;
3743
3744 u->coldplugged = true;
3745
3746 STRV_FOREACH(i, u->deserialized_refs) {
3747 q = bus_unit_track_add_name(u, *i);
3748 if (q < 0 && r >= 0)
3749 r = q;
3750 }
3751 u->deserialized_refs = strv_free(u->deserialized_refs);
3752
3753 if (UNIT_VTABLE(u)->coldplug) {
3754 q = UNIT_VTABLE(u)->coldplug(u);
3755 if (q < 0 && r >= 0)
3756 r = q;
3757 }
3758
3759 if (u->job) {
3760 q = job_coldplug(u->job);
3761 if (q < 0 && r >= 0)
3762 r = q;
3763 }
3764
3765 return r;
3766 }
3767
3768 void unit_catchup(Unit *u) {
3769 assert(u);
3770
3771 if (UNIT_VTABLE(u)->catchup)
3772 UNIT_VTABLE(u)->catchup(u);
3773 }
3774
3775 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3776 struct stat st;
3777
3778 if (!path)
3779 return false;
3780
3781 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3782 * are never out-of-date. */
3783 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3784 return false;
3785
3786 if (stat(path, &st) < 0)
3787 /* What, cannot access this anymore? */
3788 return true;
3789
3790 if (path_masked)
3791 /* For masked files check if they are still so */
3792 return !null_or_empty(&st);
3793 else
3794 /* For non-empty files check the mtime */
3795 return timespec_load(&st.st_mtim) > mtime;
3796
3797 return false;
3798 }
3799
3800 bool unit_need_daemon_reload(Unit *u) {
3801 _cleanup_strv_free_ char **t = NULL;
3802 char **path;
3803
3804 assert(u);
3805
3806 /* For unit files, we allow masking… */
3807 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3808 u->load_state == UNIT_MASKED))
3809 return true;
3810
3811 /* Source paths should not be masked… */
3812 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3813 return true;
3814
3815 if (u->load_state == UNIT_LOADED)
3816 (void) unit_find_dropin_paths(u, &t);
3817 if (!strv_equal(u->dropin_paths, t))
3818 return true;
3819
3820 /* … any drop-ins that are masked are simply omitted from the list. */
3821 STRV_FOREACH(path, u->dropin_paths)
3822 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3823 return true;
3824
3825 return false;
3826 }
3827
3828 void unit_reset_failed(Unit *u) {
3829 assert(u);
3830
3831 if (UNIT_VTABLE(u)->reset_failed)
3832 UNIT_VTABLE(u)->reset_failed(u);
3833
3834 RATELIMIT_RESET(u->start_limit);
3835 u->start_limit_hit = false;
3836 }
3837
3838 Unit *unit_following(Unit *u) {
3839 assert(u);
3840
3841 if (UNIT_VTABLE(u)->following)
3842 return UNIT_VTABLE(u)->following(u);
3843
3844 return NULL;
3845 }
3846
3847 bool unit_stop_pending(Unit *u) {
3848 assert(u);
3849
3850 /* This call does check the current state of the unit. It's
3851 * hence useful to be called from state change calls of the
3852 * unit itself, where the state isn't updated yet. This is
3853 * different from unit_inactive_or_pending() which checks both
3854 * the current state and for a queued job. */
3855
3856 return u->job && u->job->type == JOB_STOP;
3857 }
3858
3859 bool unit_inactive_or_pending(Unit *u) {
3860 assert(u);
3861
3862 /* Returns true if the unit is inactive or going down */
3863
3864 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3865 return true;
3866
3867 if (unit_stop_pending(u))
3868 return true;
3869
3870 return false;
3871 }
3872
3873 bool unit_active_or_pending(Unit *u) {
3874 assert(u);
3875
3876 /* Returns true if the unit is active or going up */
3877
3878 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3879 return true;
3880
3881 if (u->job &&
3882 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3883 return true;
3884
3885 return false;
3886 }
3887
3888 bool unit_will_restart(Unit *u) {
3889 assert(u);
3890
3891 if (!UNIT_VTABLE(u)->will_restart)
3892 return false;
3893
3894 return UNIT_VTABLE(u)->will_restart(u);
3895 }
3896
3897 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3898 assert(u);
3899 assert(w >= 0 && w < _KILL_WHO_MAX);
3900 assert(SIGNAL_VALID(signo));
3901
3902 if (!UNIT_VTABLE(u)->kill)
3903 return -EOPNOTSUPP;
3904
3905 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3906 }
3907
3908 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3909 _cleanup_set_free_ Set *pid_set = NULL;
3910 int r;
3911
3912 pid_set = set_new(NULL);
3913 if (!pid_set)
3914 return NULL;
3915
3916 /* Exclude the main/control pids from being killed via the cgroup */
3917 if (main_pid > 0) {
3918 r = set_put(pid_set, PID_TO_PTR(main_pid));
3919 if (r < 0)
3920 return NULL;
3921 }
3922
3923 if (control_pid > 0) {
3924 r = set_put(pid_set, PID_TO_PTR(control_pid));
3925 if (r < 0)
3926 return NULL;
3927 }
3928
3929 return TAKE_PTR(pid_set);
3930 }
3931
3932 int unit_kill_common(
3933 Unit *u,
3934 KillWho who,
3935 int signo,
3936 pid_t main_pid,
3937 pid_t control_pid,
3938 sd_bus_error *error) {
3939
3940 int r = 0;
3941 bool killed = false;
3942
3943 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3944 if (main_pid < 0)
3945 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3946 else if (main_pid == 0)
3947 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3948 }
3949
3950 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3951 if (control_pid < 0)
3952 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3953 else if (control_pid == 0)
3954 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3955 }
3956
3957 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3958 if (control_pid > 0) {
3959 if (kill(control_pid, signo) < 0)
3960 r = -errno;
3961 else
3962 killed = true;
3963 }
3964
3965 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3966 if (main_pid > 0) {
3967 if (kill(main_pid, signo) < 0)
3968 r = -errno;
3969 else
3970 killed = true;
3971 }
3972
3973 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3974 _cleanup_set_free_ Set *pid_set = NULL;
3975 int q;
3976
3977 /* Exclude the main/control pids from being killed via the cgroup */
3978 pid_set = unit_pid_set(main_pid, control_pid);
3979 if (!pid_set)
3980 return -ENOMEM;
3981
3982 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3983 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3984 r = q;
3985 else
3986 killed = true;
3987 }
3988
3989 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3990 return -ESRCH;
3991
3992 return r;
3993 }
3994
3995 int unit_following_set(Unit *u, Set **s) {
3996 assert(u);
3997 assert(s);
3998
3999 if (UNIT_VTABLE(u)->following_set)
4000 return UNIT_VTABLE(u)->following_set(u, s);
4001
4002 *s = NULL;
4003 return 0;
4004 }
4005
4006 UnitFileState unit_get_unit_file_state(Unit *u) {
4007 int r;
4008
4009 assert(u);
4010
4011 if (u->unit_file_state < 0 && u->fragment_path) {
4012 r = unit_file_get_state(
4013 u->manager->unit_file_scope,
4014 NULL,
4015 u->id,
4016 &u->unit_file_state);
4017 if (r < 0)
4018 u->unit_file_state = UNIT_FILE_BAD;
4019 }
4020
4021 return u->unit_file_state;
4022 }
4023
4024 int unit_get_unit_file_preset(Unit *u) {
4025 assert(u);
4026
4027 if (u->unit_file_preset < 0 && u->fragment_path)
4028 u->unit_file_preset = unit_file_query_preset(
4029 u->manager->unit_file_scope,
4030 NULL,
4031 basename(u->fragment_path));
4032
4033 return u->unit_file_preset;
4034 }
4035
4036 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4037 assert(ref);
4038 assert(source);
4039 assert(target);
4040
4041 if (ref->target)
4042 unit_ref_unset(ref);
4043
4044 ref->source = source;
4045 ref->target = target;
4046 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4047 return target;
4048 }
4049
4050 void unit_ref_unset(UnitRef *ref) {
4051 assert(ref);
4052
4053 if (!ref->target)
4054 return;
4055
4056 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4057 * be unreferenced now. */
4058 unit_add_to_gc_queue(ref->target);
4059
4060 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4061 ref->source = ref->target = NULL;
4062 }
4063
4064 static int user_from_unit_name(Unit *u, char **ret) {
4065
4066 static const uint8_t hash_key[] = {
4067 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4068 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4069 };
4070
4071 _cleanup_free_ char *n = NULL;
4072 int r;
4073
4074 r = unit_name_to_prefix(u->id, &n);
4075 if (r < 0)
4076 return r;
4077
4078 if (valid_user_group_name(n)) {
4079 *ret = TAKE_PTR(n);
4080 return 0;
4081 }
4082
4083 /* If we can't use the unit name as a user name, then let's hash it and use that */
4084 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4085 return -ENOMEM;
4086
4087 return 0;
4088 }
4089
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User-manager services default to running in the user's home directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities to create device nodes and do raw I/O. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Synthesize User=/Group= from the unit name if unset. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens the default device policy (only when still at the default). */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                /* Only add the loop-device whitelist entries if a device filter is in effect at all. */
                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4181
4182 ExecContext *unit_get_exec_context(Unit *u) {
4183 size_t offset;
4184 assert(u);
4185
4186 if (u->type < 0)
4187 return NULL;
4188
4189 offset = UNIT_VTABLE(u)->exec_context_offset;
4190 if (offset <= 0)
4191 return NULL;
4192
4193 return (ExecContext*) ((uint8_t*) u + offset);
4194 }
4195
4196 KillContext *unit_get_kill_context(Unit *u) {
4197 size_t offset;
4198 assert(u);
4199
4200 if (u->type < 0)
4201 return NULL;
4202
4203 offset = UNIT_VTABLE(u)->kill_context_offset;
4204 if (offset <= 0)
4205 return NULL;
4206
4207 return (KillContext*) ((uint8_t*) u + offset);
4208 }
4209
4210 CGroupContext *unit_get_cgroup_context(Unit *u) {
4211 size_t offset;
4212
4213 if (u->type < 0)
4214 return NULL;
4215
4216 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4217 if (offset <= 0)
4218 return NULL;
4219
4220 return (CGroupContext*) ((uint8_t*) u + offset);
4221 }
4222
4223 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4224 size_t offset;
4225
4226 if (u->type < 0)
4227 return NULL;
4228
4229 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4230 if (offset <= 0)
4231 return NULL;
4232
4233 return *(ExecRuntime**) ((uint8_t*) u + offset);
4234 }
4235
4236 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4237 assert(u);
4238
4239 if (UNIT_WRITE_FLAGS_NOOP(flags))
4240 return NULL;
4241
4242 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4243 return u->manager->lookup_paths.transient;
4244
4245 if (flags & UNIT_PERSISTENT)
4246 return u->manager->lookup_paths.persistent_control;
4247
4248 if (flags & UNIT_RUNTIME)
4249 return u->manager->lookup_paths.runtime_control;
4250
4251 return NULL;
4252 }
4253
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                /* Drop the specifier-escaped intermediate (if any); 'a' supersedes it. */
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* Hand ownership of the allocation (if any) to the caller via *buf; return the
                 * original pointer unchanged when no escaping was needed. */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No *buf given: the caller always frees the return value, so duplicate if unescaped. */
        return ret ?: strdup(s);
}
4293
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is wrapped in double quotes and entries are separated by a
         * single space. Returns NULL on OOM (or if escaping fails). */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1)) /* extra byte for the trailing NUL */
                        return NULL;

                /* Append directly at the current end of the buffer. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure an empty input list yields "" rather than NULL. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4334
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists a single setting for this unit: either appended to the transient unit file currently being
         * created, or written out as a 50-<name>.conf drop-in file. Returns 0 on success, negative errno on
         * failure. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* NB: strjoina() allocates on the stack, hence 'data' stays valid only within this function. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* 'p' becomes the drop-in directory, 'q' the full path of the drop-in file. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        /* Ownership of 'q' has been transferred to u->dropin_paths; prevent the cleanup handler from freeing it. */
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4411
4412 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4413 _cleanup_free_ char *p = NULL;
4414 va_list ap;
4415 int r;
4416
4417 assert(u);
4418 assert(name);
4419 assert(format);
4420
4421 if (UNIT_WRITE_FLAGS_NOOP(flags))
4422 return 0;
4423
4424 va_start(ap, format);
4425 r = vasprintf(&p, format, ap);
4426 va_end(ap);
4427
4428 if (r < 0)
4429 return -ENOMEM;
4430
4431 return unit_write_setting(u, flags, name, p);
4432 }
4433
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        /* Converts this unit into a transient one: opens a fresh transient unit file for it and resets all
         * file-backed load state, so that the unit is subsequently (re)loaded from that file. */

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previously opened transient file. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Drop all other file-backed configuration sources and their timestamps. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Reset load state so the next unit_load() parses the transient file from scratch. */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4479
4480 static void log_kill(pid_t pid, int sig, void *userdata) {
4481 _cleanup_free_ char *comm = NULL;
4482
4483 (void) get_process_comm(pid, &comm);
4484
4485 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4486 only, like for example systemd's own PAM stub process. */
4487 if (comm && comm[0] == '(')
4488 return;
4489
4490 log_unit_notice(userdata,
4491 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4492 pid,
4493 strna(comm),
4494 signal_to_string(sig));
4495 }
4496
4497 static int operation_to_signal(KillContext *c, KillOperation k) {
4498 assert(c);
4499
4500 switch (k) {
4501
4502 case KILL_TERMINATE:
4503 case KILL_TERMINATE_AND_LOG:
4504 return c->kill_signal;
4505
4506 case KILL_KILL:
4507 return c->final_kill_signal;
4508
4509 case KILL_WATCHDOG:
4510 return c->watchdog_signal;
4511
4512 default:
4513 assert_not_reached("KillOperation unknown");
4514 }
4515 }
4516
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= only applies to the terminate operations, and is pointless if the main signal
         * already is SIGHUP. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process for the "loud" operations, and always for SIGKILL/SIGABRT. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves; alien main processes may not
                         * report their exit to us. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the rest of the cgroup, but only for KillMode=control-group, or for
         * KillMode=mixed during the final SIGKILL phase. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set, as the previous one was consumed above. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4634
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private, simplified copy of the path */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        /* Already registered? Then there's nothing to do */
        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL; /* ownership of the string was passed to the hashmap, don't free it here anymore */

        /* Enter this unit into the manager's prefix table, under every prefix of the path */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* ownership of the prefix string was passed to the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4712
4713 int unit_setup_exec_runtime(Unit *u) {
4714 ExecRuntime **rt;
4715 size_t offset;
4716 Unit *other;
4717 Iterator i;
4718 void *v;
4719 int r;
4720
4721 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4722 assert(offset > 0);
4723
4724 /* Check if there already is an ExecRuntime for this unit? */
4725 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4726 if (*rt)
4727 return 0;
4728
4729 /* Try to get it from somebody else */
4730 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4731 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4732 if (r == 1)
4733 return 1;
4734 }
4735
4736 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4737 }
4738
4739 int unit_setup_dynamic_creds(Unit *u) {
4740 ExecContext *ec;
4741 DynamicCreds *dcreds;
4742 size_t offset;
4743
4744 assert(u);
4745
4746 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4747 assert(offset > 0);
4748 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4749
4750 ec = unit_get_exec_context(u);
4751 assert(ec);
4752
4753 if (!ec->dynamic_user)
4754 return 0;
4755
4756 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4757 }
4758
4759 bool unit_type_supported(UnitType t) {
4760 if (_unlikely_(t < 0))
4761 return false;
4762 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4763 return false;
4764
4765 if (!unit_vtable[t]->supported)
4766 return true;
4767
4768 return unit_vtable[t]->supported();
4769 }
4770
4771 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4772 int r;
4773
4774 assert(u);
4775 assert(where);
4776
4777 r = dir_is_empty(where);
4778 if (r > 0 || r == -ENOTDIR)
4779 return;
4780 if (r < 0) {
4781 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4782 return;
4783 }
4784
4785 log_struct(LOG_NOTICE,
4786 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4787 LOG_UNIT_ID(u),
4788 LOG_UNIT_INVOCATION_ID(u),
4789 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4790 "WHERE=%s", where);
4791 }
4792
4793 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4794 _cleanup_free_ char *canonical_where;
4795 int r;
4796
4797 assert(u);
4798 assert(where);
4799
4800 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4801 if (r < 0) {
4802 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4803 return 0;
4804 }
4805
4806 /* We will happily ignore a trailing slash (or any redundant slashes) */
4807 if (path_equal(where, canonical_where))
4808 return 0;
4809
4810 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4811 log_struct(LOG_ERR,
4812 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4813 LOG_UNIT_ID(u),
4814 LOG_UNIT_INVOCATION_ID(u),
4815 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4816 "WHERE=%s", where);
4817
4818 return -ELOOP;
4819 }
4820
4821 bool unit_is_pristine(Unit *u) {
4822 assert(u);
4823
4824 /* Check if the unit already exists or is already around,
4825 * in a number of different ways. Note that to cater for unit
4826 * types such as slice, we are generally fine with units that
4827 * are marked UNIT_LOADED even though nothing was actually
4828 * loaded, as those unit types don't require a file on disk. */
4829
4830 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4831 u->fragment_path ||
4832 u->source_path ||
4833 !strv_isempty(u->dropin_paths) ||
4834 u->job ||
4835 u->merged_into);
4836 }
4837
4838 pid_t unit_control_pid(Unit *u) {
4839 assert(u);
4840
4841 if (UNIT_VTABLE(u)->control_pid)
4842 return UNIT_VTABLE(u)->control_pid(u);
4843
4844 return 0;
4845 }
4846
4847 pid_t unit_main_pid(Unit *u) {
4848 assert(u);
4849
4850 if (UNIT_VTABLE(u)->main_pid)
4851 return UNIT_VTABLE(u)->main_pid(u);
4852
4853 return 0;
4854 }
4855
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* These static checks are what makes the uid_t/gid_t aliasing above safe */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Nothing referenced? Then there's nothing to drop */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* mark the reference as dropped */
}
4880
/* Drops this unit's reference on its currently referenced UID, if any. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4884
/* Drops this unit's reference on its currently referenced GID, if any. The uid_t cast is safe per the
 * assert_cc() checks in unit_unref_uid_internal(). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4888
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        /* These static checks are what makes the uid_t/gid_t aliasing above safe */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Already referencing this very UID/GID? Then it's a no-op */
        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1; /* > 0 signals a new reference was actually taken */
}
4926
/* Takes a reference on the specified UID for this unit; see unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4930
/* Takes a reference on the specified GID for this unit; the uid_t casts are safe per the assert_cc()
 * checks in unit_ref_uid_internal(). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4934
4935 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4936 int r = 0, q = 0;
4937
4938 assert(u);
4939
4940 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4941
4942 if (uid_is_valid(uid)) {
4943 r = unit_ref_uid(u, uid, clean_ipc);
4944 if (r < 0)
4945 return r;
4946 }
4947
4948 if (gid_is_valid(gid)) {
4949 q = unit_ref_gid(u, gid, clean_ipc);
4950 if (q < 0) {
4951 if (r > 0)
4952 unit_unref_uid(u, false);
4953
4954 return q;
4955 }
4956 }
4957
4958 return r > 0 || q > 0;
4959 }
4960
4961 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4962 ExecContext *c;
4963 int r;
4964
4965 assert(u);
4966
4967 c = unit_get_exec_context(u);
4968
4969 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4970 if (r < 0)
4971 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4972
4973 return r;
4974 }
4975
/* Drops both the UID and the GID reference of this unit in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4982
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0) /* only send a change signal if a new reference was actually taken */
                bus_unit_send_change_signal(u);
}
4996
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old ID from the manager's lookup table first */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID simply unsets the invocation ID */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit unset) clear both the ID and its cached string form */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
5033
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        /* Generates a fresh random invocation ID for this unit and installs it via unit_set_invocation_id() */

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
5050
/* Fills in an ExecParameters structure with the settings inherited from the manager and from this unit. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copy parameters from manager */
        p->environment = u->manager->environment;
        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
}
5066
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* From here on we are in the child: reset the signal dispositions inherited from the manager */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Make sure the helper is terminated when the manager dies */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5097
/* Writes back a (possibly reduced) dependency mask for 'other' into u's dependency table 'd', dropping the
 * entry entirely once no mask bits remain. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
5112
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries that don't carry any of the bits to remove */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* unit_update_dependency_mask() may remove entries from the hashmap we are
                                 * iterating, hence restart the iteration from scratch */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5166
5167 static int unit_export_invocation_id(Unit *u) {
5168 const char *p;
5169 int r;
5170
5171 assert(u);
5172
5173 if (u->exported_invocation_id)
5174 return 0;
5175
5176 if (sd_id128_is_null(u->invocation_id))
5177 return 0;
5178
5179 p = strjoina("/run/systemd/units/invocation:", u->id);
5180 r = symlink_atomic(u->invocation_id_string, p);
5181 if (r < 0)
5182 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5183
5184 u->exported_invocation_id = true;
5185 return 0;
5186 }
5187
5188 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5189 const char *p;
5190 char buf[2];
5191 int r;
5192
5193 assert(u);
5194 assert(c);
5195
5196 if (u->exported_log_level_max)
5197 return 0;
5198
5199 if (c->log_level_max < 0)
5200 return 0;
5201
5202 assert(c->log_level_max <= 7);
5203
5204 buf[0] = '0' + c->log_level_max;
5205 buf[1] = 0;
5206
5207 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5208 r = symlink_atomic(buf, p);
5209 if (r < 0)
5210 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5211
5212 u->exported_log_level_max = true;
5213 return 0;
5214 }
5215
5216 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5217 _cleanup_close_ int fd = -1;
5218 struct iovec *iovec;
5219 const char *p;
5220 char *pattern;
5221 le64_t *sizes;
5222 ssize_t n;
5223 size_t i;
5224 int r;
5225
5226 if (u->exported_log_extra_fields)
5227 return 0;
5228
5229 if (c->n_log_extra_fields <= 0)
5230 return 0;
5231
5232 sizes = newa(le64_t, c->n_log_extra_fields);
5233 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5234
5235 for (i = 0; i < c->n_log_extra_fields; i++) {
5236 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5237
5238 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5239 iovec[i*2+1] = c->log_extra_fields[i];
5240 }
5241
5242 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5243 pattern = strjoina(p, ".XXXXXX");
5244
5245 fd = mkostemp_safe(pattern);
5246 if (fd < 0)
5247 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5248
5249 n = writev(fd, iovec, c->n_log_extra_fields*2);
5250 if (n < 0) {
5251 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5252 goto fail;
5253 }
5254
5255 (void) fchmod(fd, 0644);
5256
5257 if (rename(pattern, p) < 0) {
5258 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5259 goto fail;
5260 }
5261
5262 u->exported_log_extra_fields = true;
5263 return 0;
5264
5265 fail:
5266 (void) unlink(pattern);
5267 return r;
5268 }
5269
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        /* Only named units can be exported */
        if (!u->id)
                return;

        /* Only the system instance exports state to /run this way */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        /* Don't touch /run in test mode */
        if (u->manager->test_run_flags != 0)
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5305
5306 void unit_unlink_state_files(Unit *u) {
5307 const char *p;
5308
5309 assert(u);
5310
5311 if (!u->id)
5312 return;
5313
5314 if (!MANAGER_IS_SYSTEM(u->manager))
5315 return;
5316
5317 /* Undoes the effect of unit_export_state() */
5318
5319 if (u->exported_invocation_id) {
5320 p = strjoina("/run/systemd/units/invocation:", u->id);
5321 (void) unlink(p);
5322
5323 u->exported_invocation_id = false;
5324 }
5325
5326 if (u->exported_log_level_max) {
5327 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5328 (void) unlink(p);
5329
5330 u->exported_log_level_max = false;
5331 }
5332
5333 if (u->exported_log_extra_fields) {
5334 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5335 (void) unlink(p);
5336
5337 u->exported_log_extra_fields = false;
5338 }
5339 }
5340
5341 int unit_prepare_exec(Unit *u) {
5342 int r;
5343
5344 assert(u);
5345
5346 /* Prepares everything so that we can fork of a process for this unit */
5347
5348 (void) unit_realize_cgroup(u);
5349
5350 if (u->reset_accounting) {
5351 (void) unit_reset_cpu_accounting(u);
5352 (void) unit_reset_ip_accounting(u);
5353 u->reset_accounting = false;
5354 }
5355
5356 unit_export_state_files(u);
5357
5358 r = unit_setup_exec_runtime(u);
5359 if (r < 0)
5360 return r;
5361
5362 r = unit_setup_dynamic_creds(u);
5363 if (r < 0)
5364 return r;
5365
5366 return 0;
5367 }
5368
/* Callback for cg_kill_recursive(): warns about a left-over process found in the unit's cgroup. The sig
 * parameter is part of the callback signature but unused here (unit_warn_leftover_processes() passes 0,
 * i.e. nothing is actually killed). */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5382
5383 void unit_warn_leftover_processes(Unit *u) {
5384 assert(u);
5385
5386 (void) unit_pick_cgroup_path(u);
5387
5388 if (!u->cgroup_path)
5389 return;
5390
5391 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5392 }
5393
5394 bool unit_needs_console(Unit *u) {
5395 ExecContext *ec;
5396 UnitActiveState state;
5397
5398 assert(u);
5399
5400 state = unit_active_state(u);
5401
5402 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5403 return false;
5404
5405 if (UNIT_VTABLE(u)->needs_console)
5406 return UNIT_VTABLE(u)->needs_console(u);
5407
5408 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5409 ec = unit_get_exec_context(u);
5410 if (!ec)
5411 return false;
5412
5413 return exec_context_may_touch_console(ec);
5414 }
5415
5416 const char *unit_label_path(Unit *u) {
5417 const char *p;
5418
5419 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5420 * when validating access checks. */
5421
5422 p = u->source_path ?: u->fragment_path;
5423 if (!p)
5424 return NULL;
5425
5426 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5427 if (path_equal(p, "/dev/null"))
5428 return NULL;
5429
5430 return p;
5431 }
5432
/* Returns 0 if the PID may be attached to a unit, or a negative error (with 'error' filled in for the bus
 * caller) if not. */
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5460
/* Human-readable names for CollectMode values, consumed by the string table lookup generated below */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);