]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #9406 from yuwata/rfe-9228
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "alloc-util.h"
14 #include "all-units.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "id128-util.h"
28 #include "io-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
31 #include "log.h"
32 #include "macro.h"
33 #include "missing.h"
34 #include "mkdir.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "set.h"
39 #include "signal-util.h"
40 #include "sparse-endian.h"
41 #include "special.h"
42 #include "specifier.h"
43 #include "stat-util.h"
44 #include "stdio-util.h"
45 #include "string-table.h"
46 #include "string-util.h"
47 #include "strv.h"
48 #include "umask-util.h"
49 #include "unit-name.h"
50 #include "unit.h"
51 #include "user-util.h"
52 #include "virt.h"
53
/* Table mapping each concrete unit type to its method table. Indexed by
 * UnitType; dispatched via the UNIT_VTABLE() macro throughout this file. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
67
68 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
69
/* Allocates and zero-initializes a new Unit object. 'size' is the size of the
 * type-specific struct embedding Unit (must be >= sizeof(Unit)). All fields
 * with "invalid"/"infinity" sentinels are initialized here; the unit type is
 * left unset until the first name is added. Returns NULL on OOM. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* All BPF map fds start out closed */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limit from the manager defaults; auto-stop ratelimit is fixed */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
112
113 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
114 _cleanup_(unit_freep) Unit *u = NULL;
115 int r;
116
117 u = unit_new(m, size);
118 if (!u)
119 return -ENOMEM;
120
121 r = unit_add_name(u, name);
122 if (r < 0)
123 return r;
124
125 *ret = TAKE_PTR(u);
126
127 return r;
128 }
129
130 bool unit_has_name(Unit *u, const char *name) {
131 assert(u);
132 assert(name);
133
134 return set_contains(u->names, (char*) name);
135 }
136
137 static void unit_init(Unit *u) {
138 CGroupContext *cc;
139 ExecContext *ec;
140 KillContext *kc;
141
142 assert(u);
143 assert(u->manager);
144 assert(u->type >= 0);
145
146 cc = unit_get_cgroup_context(u);
147 if (cc) {
148 cgroup_context_init(cc);
149
150 /* Copy in the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
153
154 cc->cpu_accounting = u->manager->default_cpu_accounting;
155 cc->io_accounting = u->manager->default_io_accounting;
156 cc->ip_accounting = u->manager->default_ip_accounting;
157 cc->blockio_accounting = u->manager->default_blockio_accounting;
158 cc->memory_accounting = u->manager->default_memory_accounting;
159 cc->tasks_accounting = u->manager->default_tasks_accounting;
160 cc->ip_accounting = u->manager->default_ip_accounting;
161
162 if (u->type != UNIT_SLICE)
163 cc->tasks_max = u->manager->default_tasks_max;
164 }
165
166 ec = unit_get_exec_context(u);
167 if (ec) {
168 exec_context_init(ec);
169
170 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
171 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
172 }
173
174 kc = unit_get_kill_context(u);
175 if (kc)
176 kill_context_init(kc);
177
178 if (UNIT_VTABLE(u)->init)
179 UNIT_VTABLE(u)->init(u);
180 }
181
/* Registers an additional name (alias) for the unit. 'text' may be a template
 * name, in which case it is instantiated with the unit's own instance string.
 * Returns 0 on success (also if the name was already registered for this
 * unit), -EEXIST if the name is taken by another unit or the type does not
 * allow aliases, -EINVAL for invalid or mismatching names. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template can only be instantiated if we know our own instance */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Name taken by some other unit? Refuse. */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion to keep both containers in sync */
                (void) set_remove(u->names, s);
                return r;
        }

        /* First name ever registered? Then it becomes the unit id, and we can
         * finish the type-specific initialization now. */
        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' was transferred to u->names above; prevent the
         * cleanup handler from freeing it */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
264
/* Makes 'name' — which must already be one of the unit's registered names,
 * and may be given as a template to be instantiated first — the unit's
 * primary id, updating the cached instance string to match. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* 's' remains owned by u->names; u->id is only a borrowed pointer */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
304
305 int unit_set_description(Unit *u, const char *description) {
306 int r;
307
308 assert(u);
309
310 r = free_and_strdup(&u->description, empty_to_null(description));
311 if (r < 0)
312 return r;
313 if (r > 0)
314 unit_add_to_dbus_queue(u);
315
316 return 0;
317 }
318
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* A pending or no-op job always pins the unit */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units (e.g. -.slice) are never collected */
        if (u->perpetual)
                return false;

        /* Still referenced by a bus client */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* On error (r < 0) we stay conservative and refuse collection */
                if (r <= 0)
                        return false;
        }

        /* Finally give the type-specific code a veto */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
387
388 void unit_add_to_load_queue(Unit *u) {
389 assert(u);
390 assert(u->type != _UNIT_TYPE_INVALID);
391
392 if (u->load_state != UNIT_STUB || u->in_load_queue)
393 return;
394
395 LIST_PREPEND(load_queue, u->manager->load_queue, u);
396 u->in_load_queue = true;
397 }
398
399 void unit_add_to_cleanup_queue(Unit *u) {
400 assert(u);
401
402 if (u->in_cleanup_queue)
403 return;
404
405 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
406 u->in_cleanup_queue = true;
407 }
408
409 void unit_add_to_gc_queue(Unit *u) {
410 assert(u);
411
412 if (u->in_gc_queue || u->in_cleanup_queue)
413 return;
414
415 if (!unit_may_gc(u))
416 return;
417
418 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
419 u->in_gc_queue = true;
420 }
421
422 void unit_add_to_dbus_queue(Unit *u) {
423 assert(u);
424 assert(u->type != _UNIT_TYPE_INVALID);
425
426 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
427 return;
428
429 /* Shortcut things if nobody cares */
430 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
431 sd_bus_track_count(u->bus_track) <= 0 &&
432 set_isempty(u->manager->private_buses)) {
433 u->sent_dbus_new_signal = true;
434 return;
435 }
436
437 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
438 u->in_dbus_queue = true;
439 }
440
441 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
442 assert(u);
443
444 if (u->in_stop_when_unneeded_queue)
445 return;
446
447 if (!u->stop_when_unneeded)
448 return;
449
450 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
451 return;
452
453 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
454 u->in_stop_when_unneeded_queue = true;
455 }
456
/* Frees one dependency hashmap of 'u' (keys: peer units, values: packed
 * UnitDependencyInfo), after first removing every back-pointer from the peer
 * units to 'u' so no dangling references remain. Peers become gc candidates. */
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Drop 'u' from every dependency set of the peer */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
477
/* Deletes the on-disk configuration (fragment file and transient drop-ins) of
 * a transient unit. Best-effort: unlink()/rmdir() failures are ignored. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* Succeeds only once the directory is empty, which is intended */
        }
}
508
/* Tears down the unit's RequiresMountsFor= bookkeeping: for every registered
 * path, drops the unit from the manager's inverse map (keyed by each path
 * prefix), freeing manager-side entries that become empty, then frees the
 * unit's own hashmap. Note the hashmap keys are owned strings, stolen and
 * freed one by one. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        /* VLA holding each successive prefix of 'path' */
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* 'y' receives the manager-owned key string for this prefix */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit for this prefix gone? Drop the whole entry. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
542
543 static void unit_done(Unit *u) {
544 ExecContext *ec;
545 CGroupContext *cc;
546
547 assert(u);
548
549 if (u->type < 0)
550 return;
551
552 if (UNIT_VTABLE(u)->done)
553 UNIT_VTABLE(u)->done(u);
554
555 ec = unit_get_exec_context(u);
556 if (ec)
557 exec_context_done(ec);
558
559 cc = unit_get_cgroup_context(u);
560 if (cc)
561 cgroup_context_done(cc);
562 }
563
/* Destroys a unit object: detaches it from every manager-side data structure
 * (name hashmap, per-type lists, all work queues), cancels its jobs, drops
 * dependency back-pointers, releases cgroup/BPF resources and frees all owned
 * memory. The teardown order matters — e.g. names are removed before jobs are
 * uninstalled, and queue membership flags are consulted before list removal. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* During reload we keep transient unit files around, since the unit
         * will be recreated from the deserialized state */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's unit table */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Drop all dependency sets, fixing up the peers' back-pointers */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Unhook ourselves from the per-type list and every work queue we
         * might still be sitting on */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close all BPF map fds and drop the program references */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id and u->instance strings are owned by the names set */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
687
688 UnitActiveState unit_active_state(Unit *u) {
689 assert(u);
690
691 if (u->load_state == UNIT_MERGED)
692 return unit_active_state(unit_follow_merge(u));
693
694 /* After a reload it might happen that a unit is not correctly
695 * loaded but still has a process around. That's why we won't
696 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
697
698 return UNIT_VTABLE(u)->active_state(u);
699 }
700
/* Returns the human-readable, type-specific sub-state string (e.g. a service's
 * "running"/"start-pre"), as provided by the unit type's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
706
707 static int set_complete_move(Set **s, Set **other) {
708 assert(s);
709 assert(other);
710
711 if (!other)
712 return 0;
713
714 if (*s)
715 return set_move(*s, *other);
716 else
717 *s = TAKE_PTR(*other);
718
719 return 0;
720 }
721
722 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
723 assert(s);
724 assert(other);
725
726 if (!*other)
727 return 0;
728
729 if (*s)
730 return hashmap_move(*s, *other);
731 else
732 *s = TAKE_PTR(*other);
733
734 return 0;
735 }
736
/* Transfers all names of 'other' to 'u' and repoints the manager's unit table
 * entries for those names at 'u'. 'other' is left nameless (id cleared). */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* After a successful move other->names is empty (or was consumed
         * wholesale), so this frees at most an empty set */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Every name now resolves to 'u' in the manager's unit table;
         * hashmap_replace() cannot fail here since the keys already exist */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
758
759 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
760 unsigned n_reserve;
761
762 assert(u);
763 assert(other);
764 assert(d < _UNIT_DEPENDENCY_MAX);
765
766 /*
767 * If u does not have this dependency set allocated, there is no need
768 * to reserve anything. In that case other's set will be transferred
769 * as a whole to u by complete_move().
770 */
771 if (!u->dependencies[d])
772 return 0;
773
774 /* merge_dependencies() will skip a u-on-u dependency */
775 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
776
777 return hashmap_reserve(u->dependencies[d], n_reserve);
778 }
779
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                /* di_u.data may be NULL here, which yields zero masks — intended */
                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Cannot fail with OOM: replacing an existing key does not allocate */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
843
/* Merges unit 'other' into 'u': 'other' gives up all its names, references
 * and dependencies to 'u' and is left as a UNIT_MERGED stub redirecting to
 * 'u'. Only allowed while 'other' is still a stub (or not-found) with no jobs
 * and no activity, and only for alias-capable unit types. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of the id for logging, since merge_names() clears it */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was just set to UNIT_MERGED above, so this
         * "!= UNIT_STUB" condition is always true here — presumably the intent
         * was to test the state from before the merge; confirm upstream. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
921
922 int unit_merge_by_name(Unit *u, const char *name) {
923 _cleanup_free_ char *s = NULL;
924 Unit *other;
925 int r;
926
927 assert(u);
928 assert(name);
929
930 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
931 if (!u->instance)
932 return -EINVAL;
933
934 r = unit_name_replace_instance(name, u->instance, &s);
935 if (r < 0)
936 return r;
937
938 name = s;
939 }
940
941 other = manager_get_unit(u->manager, name);
942 if (other)
943 return unit_merge(u, other);
944
945 return unit_add_name(u, name);
946 }
947
948 Unit* unit_follow_merge(Unit *u) {
949 assert(u);
950
951 while (u->load_state == UNIT_MERGED)
952 assert_se(u = u->merged_into);
953
954 return u;
955 }
956
/* Adds the implicit dependencies that follow from an ExecContext: mount
 * requirements for all referenced paths/directories, plus ordering against
 * tmpfiles-setup (for PrivateTmp=) and journald (for log output), the latter
 * two only for the system manager. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* RuntimeDirectory=, StateDirectory=, … — require mounts for each
         * configured directory below its per-type prefix */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system instance */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1036
1037 const char *unit_description(Unit *u) {
1038 assert(u);
1039
1040 if (u->description)
1041 return u->description;
1042
1043 return strna(u->id);
1044 }
1045
1046 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1047 const struct {
1048 UnitDependencyMask mask;
1049 const char *name;
1050 } table[] = {
1051 { UNIT_DEPENDENCY_FILE, "file" },
1052 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1053 { UNIT_DEPENDENCY_DEFAULT, "default" },
1054 { UNIT_DEPENDENCY_UDEV, "udev" },
1055 { UNIT_DEPENDENCY_PATH, "path" },
1056 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1057 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1058 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1059 };
1060 size_t i;
1061
1062 assert(f);
1063 assert(kind);
1064 assert(space);
1065
1066 for (i = 0; i < ELEMENTSOF(table); i++) {
1067
1068 if (mask == 0)
1069 break;
1070
1071 if (FLAGS_SET(mask, table[i].mask)) {
1072 if (*space)
1073 fputc(' ', f);
1074 else
1075 *space = true;
1076
1077 fputs(kind, f);
1078 fputs("-", f);
1079 fputs(table[i].name, f);
1080
1081 mask &= ~table[i].mask;
1082 }
1083 }
1084
1085 assert(mask == 0);
1086 }
1087
1088 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1089 char *t, **j;
1090 UnitDependency d;
1091 Iterator i;
1092 const char *prefix2;
1093 char
1094 timestamp0[FORMAT_TIMESTAMP_MAX],
1095 timestamp1[FORMAT_TIMESTAMP_MAX],
1096 timestamp2[FORMAT_TIMESTAMP_MAX],
1097 timestamp3[FORMAT_TIMESTAMP_MAX],
1098 timestamp4[FORMAT_TIMESTAMP_MAX],
1099 timespan[FORMAT_TIMESPAN_MAX];
1100 Unit *following;
1101 _cleanup_set_free_ Set *following_set = NULL;
1102 const char *n;
1103 CGroupMask m;
1104 int r;
1105
1106 assert(u);
1107 assert(u->type >= 0);
1108
1109 prefix = strempty(prefix);
1110 prefix2 = strjoina(prefix, "\t");
1111
1112 fprintf(f,
1113 "%s-> Unit %s:\n"
1114 "%s\tDescription: %s\n"
1115 "%s\tInstance: %s\n"
1116 "%s\tUnit Load State: %s\n"
1117 "%s\tUnit Active State: %s\n"
1118 "%s\tState Change Timestamp: %s\n"
1119 "%s\tInactive Exit Timestamp: %s\n"
1120 "%s\tActive Enter Timestamp: %s\n"
1121 "%s\tActive Exit Timestamp: %s\n"
1122 "%s\tInactive Enter Timestamp: %s\n"
1123 "%s\tMay GC: %s\n"
1124 "%s\tNeed Daemon Reload: %s\n"
1125 "%s\tTransient: %s\n"
1126 "%s\tPerpetual: %s\n"
1127 "%s\tGarbage Collection Mode: %s\n"
1128 "%s\tSlice: %s\n"
1129 "%s\tCGroup: %s\n"
1130 "%s\tCGroup realized: %s\n",
1131 prefix, u->id,
1132 prefix, unit_description(u),
1133 prefix, strna(u->instance),
1134 prefix, unit_load_state_to_string(u->load_state),
1135 prefix, unit_active_state_to_string(unit_active_state(u)),
1136 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1137 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1138 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1139 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1140 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1141 prefix, yes_no(unit_may_gc(u)),
1142 prefix, yes_no(unit_need_daemon_reload(u)),
1143 prefix, yes_no(u->transient),
1144 prefix, yes_no(u->perpetual),
1145 prefix, collect_mode_to_string(u->collect_mode),
1146 prefix, strna(unit_slice_name(u)),
1147 prefix, strna(u->cgroup_path),
1148 prefix, yes_no(u->cgroup_realized));
1149
1150 if (u->cgroup_realized_mask != 0) {
1151 _cleanup_free_ char *s = NULL;
1152 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1153 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1154 }
1155 if (u->cgroup_enabled_mask != 0) {
1156 _cleanup_free_ char *s = NULL;
1157 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1158 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1159 }
1160 m = unit_get_own_mask(u);
1161 if (m != 0) {
1162 _cleanup_free_ char *s = NULL;
1163 (void) cg_mask_to_string(m, &s);
1164 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1165 }
1166 m = unit_get_members_mask(u);
1167 if (m != 0) {
1168 _cleanup_free_ char *s = NULL;
1169 (void) cg_mask_to_string(m, &s);
1170 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1171 }
1172
1173 SET_FOREACH(t, u->names, i)
1174 fprintf(f, "%s\tName: %s\n", prefix, t);
1175
1176 if (!sd_id128_is_null(u->invocation_id))
1177 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1178 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1179
1180 STRV_FOREACH(j, u->documentation)
1181 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1182
1183 following = unit_following(u);
1184 if (following)
1185 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1186
1187 r = unit_following_set(u, &following_set);
1188 if (r >= 0) {
1189 Unit *other;
1190
1191 SET_FOREACH(other, following_set, i)
1192 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1193 }
1194
1195 if (u->fragment_path)
1196 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1197
1198 if (u->source_path)
1199 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1200
1201 STRV_FOREACH(j, u->dropin_paths)
1202 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1203
1204 if (u->failure_action != EMERGENCY_ACTION_NONE)
1205 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1206 if (u->success_action != EMERGENCY_ACTION_NONE)
1207 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1208
1209 if (u->job_timeout != USEC_INFINITY)
1210 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1211
1212 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1213 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1214
1215 if (u->job_timeout_reboot_arg)
1216 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1217
1218 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1219 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1220
1221 if (dual_timestamp_is_set(&u->condition_timestamp))
1222 fprintf(f,
1223 "%s\tCondition Timestamp: %s\n"
1224 "%s\tCondition Result: %s\n",
1225 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1226 prefix, yes_no(u->condition_result));
1227
1228 if (dual_timestamp_is_set(&u->assert_timestamp))
1229 fprintf(f,
1230 "%s\tAssert Timestamp: %s\n"
1231 "%s\tAssert Result: %s\n",
1232 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1233 prefix, yes_no(u->assert_result));
1234
1235 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1236 UnitDependencyInfo di;
1237 Unit *other;
1238
1239 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1240 bool space = false;
1241
1242 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1243
1244 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1245 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1246
1247 fputs(")\n", f);
1248 }
1249 }
1250
1251 if (!hashmap_isempty(u->requires_mounts_for)) {
1252 UnitDependencyInfo di;
1253 const char *path;
1254
1255 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1256 bool space = false;
1257
1258 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1259
1260 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1261 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1262
1263 fputs(")\n", f);
1264 }
1265 }
1266
1267 if (u->load_state == UNIT_LOADED) {
1268
1269 fprintf(f,
1270 "%s\tStopWhenUnneeded: %s\n"
1271 "%s\tRefuseManualStart: %s\n"
1272 "%s\tRefuseManualStop: %s\n"
1273 "%s\tDefaultDependencies: %s\n"
1274 "%s\tOnFailureJobMode: %s\n"
1275 "%s\tIgnoreOnIsolate: %s\n",
1276 prefix, yes_no(u->stop_when_unneeded),
1277 prefix, yes_no(u->refuse_manual_start),
1278 prefix, yes_no(u->refuse_manual_stop),
1279 prefix, yes_no(u->default_dependencies),
1280 prefix, job_mode_to_string(u->on_failure_job_mode),
1281 prefix, yes_no(u->ignore_on_isolate));
1282
1283 if (UNIT_VTABLE(u)->dump)
1284 UNIT_VTABLE(u)->dump(u, f, prefix2);
1285
1286 } else if (u->load_state == UNIT_MERGED)
1287 fprintf(f,
1288 "%s\tMerged into: %s\n",
1289 prefix, u->merged_into->id);
1290 else if (u->load_state == UNIT_ERROR)
1291 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1292
1293 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1294 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1295
1296 if (u->job)
1297 job_dump(u->job, f, prefix2);
1298
1299 if (u->nop_job)
1300 job_dump(u->nop_job, f, prefix2);
1301 }
1302
1303 /* Common implementation for multiple backends */
1304 int unit_load_fragment_and_dropin(Unit *u) {
1305 int r;
1306
1307 assert(u);
1308
1309 /* Load a .{service,socket,...} file */
1310 r = unit_load_fragment(u);
1311 if (r < 0)
1312 return r;
1313
1314 if (u->load_state == UNIT_STUB)
1315 return -ENOENT;
1316
1317 /* Load drop-in directory data. If u is an alias, we might be reloading the
1318 * target unit needlessly. But we cannot be sure which drops-ins have already
1319 * been loaded and which not, at least without doing complicated book-keeping,
1320 * so let's always reread all drop-ins. */
1321 return unit_load_dropin(unit_follow_merge(u));
1322 }
1323
1324 /* Common implementation for multiple backends */
1325 int unit_load_fragment_and_dropin_optional(Unit *u) {
1326 int r;
1327
1328 assert(u);
1329
1330 /* Same as unit_load_fragment_and_dropin(), but whether
1331 * something can be loaded or not doesn't matter. */
1332
1333 /* Load a .service/.socket/.slice/… file */
1334 r = unit_load_fragment(u);
1335 if (r < 0)
1336 return r;
1337
1338 if (u->load_state == UNIT_STUB)
1339 u->load_state = UNIT_LOADED;
1340
1341 /* Load drop-in directory data */
1342 return unit_load_dropin(unit_follow_merge(u));
1343 }
1344
1345 void unit_add_to_target_deps_queue(Unit *u) {
1346 Manager *m = u->manager;
1347
1348 assert(u);
1349
1350 if (u->in_target_deps_queue)
1351 return;
1352
1353 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1354 u->in_target_deps_queue = true;
1355 }
1356
1357 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1358 assert(u);
1359 assert(target);
1360
1361 if (target->type != UNIT_TARGET)
1362 return 0;
1363
1364 /* Only add the dependency if both units are loaded, so that
1365 * that loop check below is reliable */
1366 if (u->load_state != UNIT_LOADED ||
1367 target->load_state != UNIT_LOADED)
1368 return 0;
1369
1370 /* If either side wants no automatic dependencies, then let's
1371 * skip this */
1372 if (!u->default_dependencies ||
1373 !target->default_dependencies)
1374 return 0;
1375
1376 /* Don't create loops */
1377 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1378 return 0;
1379
1380 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1381 }
1382
1383 static int unit_add_slice_dependencies(Unit *u) {
1384 UnitDependencyMask mask;
1385 assert(u);
1386
1387 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1388 return 0;
1389
1390 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1391 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1392 relationship). */
1393 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1394
1395 if (UNIT_ISSET(u->slice))
1396 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1397
1398 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1399 return 0;
1400
1401 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1402 }
1403
/* Adds After= (and, where a fragment exists, Requires=) dependencies on the
 * .mount units backing every path listed in this unit's RequiresMountsFor=.
 * Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA sized to hold any prefix of 'path', incl. the NUL */
                char prefix[strlen(path) + 1];

                /* Walk every path prefix, e.g. /, /a, /a/b for /a/b. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* Never depend on ourselves. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Ordering is added for every mount on the path… */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* …but a hard Requires= only when the mount unit is
                         * backed by an actual fragment on disk. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1453
1454 static int unit_add_startup_units(Unit *u) {
1455 CGroupContext *c;
1456 int r;
1457
1458 c = unit_get_cgroup_context(u);
1459 if (!c)
1460 return 0;
1461
1462 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1463 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1464 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1465 return 0;
1466
1467 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1468 if (r < 0)
1469 return r;
1470
1471 return set_put(u->manager->startup_units, u);
1472 }
1473
/* Loads the unit's configuration via the type-specific ->load() hook, then
 * sets up the implicit machinery every loaded unit needs (slice, mount and
 * startup-set dependencies). Returns 0 on success, negative errno on
 * failure; on failure load_state/load_error are updated accordingly. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are being loaded right now; drop us from the load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than a stub has been processed already. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient unit: flush the properties written so far to disk
         * before they are parsed back like a regular fragment. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after the type-specific load? No config exists. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* Isolating activates exactly one thing, hence it cannot be
                 * combined with multiple OnFailure= units. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* merged_into must be set if and only if we were merged. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1558
/* Evaluates a list of Condition*=/Assert*= entries for 'u'. Semantics: all
 * non-trigger conditions must hold, and — if any trigger ("|") conditions
 * exist — at least one of them must hold too. An empty list is trivially
 * true. 'to_string' renders the condition type for log output. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1; /* -1: no trigger condition seen yet */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                /* r < 0: the check itself failed to run; treated the same
                 * as "condition did not hold" below. */
                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failing non-trigger condition vetoes the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Latch whether any trigger condition succeeded so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and none of them held. */
        return triggered != 0;
}
1602
1603 static bool unit_condition_test(Unit *u) {
1604 assert(u);
1605
1606 dual_timestamp_get(&u->condition_timestamp);
1607 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1608
1609 return u->condition_result;
1610 }
1611
1612 static bool unit_assert_test(Unit *u) {
1613 assert(u);
1614
1615 dual_timestamp_get(&u->assert_timestamp);
1616 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1617
1618 return u->assert_result;
1619 }
1620
/* Prints a status line for this unit to the console via the manager.
 * 'status' is the short bracketed tag; 'unit_status_msg_format' must contain
 * exactly one %s, filled in with the unit's description (hence the
 * format-nonliteral warning suppression). */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1626
1627 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1628 const char *format;
1629 const UnitStatusMessageFormats *format_table;
1630
1631 assert(u);
1632 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1633
1634 if (t != JOB_RELOAD) {
1635 format_table = &UNIT_VTABLE(u)->status_message_formats;
1636 if (format_table) {
1637 format = format_table->starting_stopping[t == JOB_STOP];
1638 if (format)
1639 return format;
1640 }
1641 }
1642
1643 /* Return generic strings */
1644 if (t == JOB_START)
1645 return "Starting %s.";
1646 else if (t == JOB_STOP)
1647 return "Stopping %s.";
1648 else
1649 return "Reloading %s.";
1650 }
1651
1652 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1653 const char *format;
1654
1655 assert(u);
1656
1657 /* Reload status messages have traditionally not been printed to console. */
1658 if (!IN_SET(t, JOB_START, JOB_STOP))
1659 return;
1660
1661 format = unit_get_status_message_format(u, t);
1662
1663 DISABLE_WARNING_FORMAT_NONLITERAL;
1664 unit_status_printf(u, "", format);
1665 REENABLE_WARNING;
1666 }
1667
1668 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1669 const char *format, *mid;
1670 char buf[LINE_MAX];
1671
1672 assert(u);
1673
1674 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1675 return;
1676
1677 if (log_on_console())
1678 return;
1679
1680 /* We log status messages for all units and all operations. */
1681
1682 format = unit_get_status_message_format(u, t);
1683
1684 DISABLE_WARNING_FORMAT_NONLITERAL;
1685 (void) snprintf(buf, sizeof buf, format, unit_description(u));
1686 REENABLE_WARNING;
1687
1688 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1689 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1690 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1691
1692 /* Note that we deliberately use LOG_MESSAGE() instead of
1693 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1694 * closely what is written to screen using the status output,
1695 * which is supposed the highest level, friendliest output
1696 * possible, which means we should avoid the low-level unit
1697 * name. */
1698 log_struct(LOG_INFO,
1699 LOG_MESSAGE("%s", buf),
1700 LOG_UNIT_ID(u),
1701 LOG_UNIT_INVOCATION_ID(u),
1702 mid);
1703 }
1704
/* Emits the start/stop/reload announcement for a unit on both channels:
 * first the journal, then the console. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        unit_status_log_starting_stopping_reloading(u, t);
        unit_status_print_starting_stopping(u, t);
}
1713
1714 int unit_start_limit_test(Unit *u) {
1715 assert(u);
1716
1717 if (ratelimit_below(&u->start_limit)) {
1718 u->start_limit_hit = false;
1719 return 0;
1720 }
1721
1722 log_unit_warning(u, "Start request repeated too quickly.");
1723 u->start_limit_hit = true;
1724
1725 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1726 }
1727
1728 bool unit_shall_confirm_spawn(Unit *u) {
1729 assert(u);
1730
1731 if (manager_is_confirm_spawn_disabled(u->manager))
1732 return false;
1733
1734 /* For some reasons units remaining in the same process group
1735 * as PID 1 fail to acquire the console even if it's not used
1736 * by any process. So skip the confirmation question for them. */
1737 return !unit_get_exec_context(u)->same_pgrp;
1738 }
1739
/* Returns true if every relevant BindsTo= dependency of this unit is
 * currently active (or reloading); logs and returns false otherwise. */
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v; /* hashmap value (dependency info); unused here */

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Only consider BindsTo= deps that are also ordered After= us. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1765
/* Starts the unit (or the unit it follows). Returns the type-specific
 * ->start() result on success, or one of:
 * -EBADR:     This unit type does not support starting.
 * -EALREADY:  Unit is already started.
 * -EAGAIN:    An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO:    Assert failed
 * -EINVAL:    Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK:   The necessary dependencies are not fulfilled.
 * -ESTALE:    This unit has been started before and can't be started a second time
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1852
1853 bool unit_can_start(Unit *u) {
1854 assert(u);
1855
1856 if (u->load_state != UNIT_LOADED)
1857 return false;
1858
1859 if (!unit_supported(u))
1860 return false;
1861
1862 /* Scope units may be started only once */
1863 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1864 return false;
1865
1866 return !!UNIT_VTABLE(u)->start;
1867 }
1868
1869 bool unit_can_isolate(Unit *u) {
1870 assert(u);
1871
1872 return unit_can_start(u) &&
1873 u->allow_isolate;
1874 }
1875
1876 /* Errors:
1877 * -EBADR: This unit type does not support stopping.
1878 * -EALREADY: Unit is already stopped.
1879 * -EAGAIN: An operation is already in progress. Retry later.
1880 */
1881 int unit_stop(Unit *u) {
1882 UnitActiveState state;
1883 Unit *following;
1884
1885 assert(u);
1886
1887 state = unit_active_state(u);
1888 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1889 return -EALREADY;
1890
1891 following = unit_following(u);
1892 if (following) {
1893 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1894 return unit_stop(following);
1895 }
1896
1897 if (!UNIT_VTABLE(u)->stop)
1898 return -EBADR;
1899
1900 unit_add_to_dbus_queue(u);
1901
1902 return UNIT_VTABLE(u)->stop(u);
1903 }
1904
1905 bool unit_can_stop(Unit *u) {
1906 assert(u);
1907
1908 if (!unit_supported(u))
1909 return false;
1910
1911 if (u->perpetual)
1912 return false;
1913
1914 return !!UNIT_VTABLE(u)->stop;
1915 }
1916
/* Reloads the unit (or the unit it follows). Returns the type-specific
 * ->reload() result on success, or:
 * -EBADR:   This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN:  An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only an active unit can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1959
1960 bool unit_can_reload(Unit *u) {
1961 assert(u);
1962
1963 if (UNIT_VTABLE(u)->can_reload)
1964 return UNIT_VTABLE(u)->can_reload(u);
1965
1966 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1967 return true;
1968
1969 return UNIT_VTABLE(u)->reload;
1970 }
1971
1972 bool unit_is_unneeded(Unit *u) {
1973 static const UnitDependency deps[] = {
1974 UNIT_REQUIRED_BY,
1975 UNIT_REQUISITE_OF,
1976 UNIT_WANTED_BY,
1977 UNIT_BOUND_BY,
1978 };
1979 size_t j;
1980
1981 assert(u);
1982
1983 if (!u->stop_when_unneeded)
1984 return false;
1985
1986 /* Don't clean up while the unit is transitioning or is even inactive. */
1987 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1988 return false;
1989 if (u->job)
1990 return false;
1991
1992 for (j = 0; j < ELEMENTSOF(deps); j++) {
1993 Unit *other;
1994 Iterator i;
1995 void *v;
1996
1997 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1998 * restart, then don't clean this one up. */
1999
2000 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
2001 if (u->job)
2002 return false;
2003
2004 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2005 return false;
2006
2007 if (unit_will_restart(other))
2008 return false;
2009 }
2010 }
2011
2012 return true;
2013 }
2014
2015 static void check_unneeded_dependencies(Unit *u) {
2016
2017 static const UnitDependency deps[] = {
2018 UNIT_REQUIRES,
2019 UNIT_REQUISITE,
2020 UNIT_WANTS,
2021 UNIT_BINDS_TO,
2022 };
2023 size_t j;
2024
2025 assert(u);
2026
2027 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2028
2029 for (j = 0; j < ELEMENTSOF(deps); j++) {
2030 Unit *other;
2031 Iterator i;
2032 void *v;
2033
2034 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2035 unit_submit_to_stop_when_unneeded_queue(other);
2036 }
2037 }
2038
/* If this unit is active but one of the units it BindsTo= has gone away,
 * enqueues a stop job for this unit (rate-limited to avoid stop loops). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A pending job means the state is about to change anyway. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* Note: 'other' keeps pointing at the culprit after this
                 * break; the log messages below rely on that. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2089
/* Called when 'u' became active outside of regular job processing: start
 * its Requires=/BindsTo=/Wants= units that aren't already active or
 * activating (skipping those we are ordered After=, presumably handled by
 * the normal job machinery — TODO confirm), and stop Conflicts=/
 * ConflictedBy= units. Errors from manager_add_job() are deliberately
 * ignored: this is best effort. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Hard deps use JOB_REPLACE… */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* …while the weaker Wants= uses JOB_FAIL. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units are taken down in both directions. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2121
2122 static void retroactively_stop_dependencies(Unit *u) {
2123 Unit *other;
2124 Iterator i;
2125 void *v;
2126
2127 assert(u);
2128 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2129
2130 /* Pull down units which are bound to us recursively if enabled */
2131 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2132 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2133 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2134 }
2135
2136 void unit_start_on_failure(Unit *u) {
2137 Unit *other;
2138 Iterator i;
2139 void *v;
2140 int r;
2141
2142 assert(u);
2143
2144 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2145 return;
2146
2147 log_unit_info(u, "Triggering OnFailure= dependencies.");
2148
2149 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2150 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2151
2152 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2153 if (r < 0)
2154 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2155 }
2156 }
2157
2158 void unit_trigger_notify(Unit *u) {
2159 Unit *other;
2160 Iterator i;
2161 void *v;
2162
2163 assert(u);
2164
2165 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2166 if (UNIT_VTABLE(other)->trigger_notify)
2167 UNIT_VTABLE(other)->trigger_notify(other, u);
2168 }
2169
2170 static int unit_log_resources(Unit *u) {
2171
2172 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2173 size_t n_message_parts = 0, n_iovec = 0;
2174 char* message_parts[3 + 1], *t;
2175 nsec_t nsec = NSEC_INFINITY;
2176 CGroupIPAccountingMetric m;
2177 size_t i;
2178 int r;
2179 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2180 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2181 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2182 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2183 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2184 };
2185
2186 assert(u);
2187
2188 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2189 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2190 * information and the complete data in structured fields. */
2191
2192 (void) unit_get_cpu_usage(u, &nsec);
2193 if (nsec != NSEC_INFINITY) {
2194 char buf[FORMAT_TIMESPAN_MAX] = "";
2195
2196 /* Format the CPU time for inclusion in the structured log message */
2197 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2198 r = log_oom();
2199 goto finish;
2200 }
2201 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2202
2203 /* Format the CPU time for inclusion in the human language message string */
2204 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2205 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2206 if (!t) {
2207 r = log_oom();
2208 goto finish;
2209 }
2210
2211 message_parts[n_message_parts++] = t;
2212 }
2213
2214 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2215 char buf[FORMAT_BYTES_MAX] = "";
2216 uint64_t value = UINT64_MAX;
2217
2218 assert(ip_fields[m]);
2219
2220 (void) unit_get_ip_accounting(u, m, &value);
2221 if (value == UINT64_MAX)
2222 continue;
2223
2224 /* Format IP accounting data for inclusion in the structured log message */
2225 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2226 r = log_oom();
2227 goto finish;
2228 }
2229 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2230
2231 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2232 * bytes counters (and not for the packets counters) */
2233 if (m == CGROUP_IP_INGRESS_BYTES)
2234 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2235 format_bytes(buf, sizeof(buf), value),
2236 " IP traffic");
2237 else if (m == CGROUP_IP_EGRESS_BYTES)
2238 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2239 format_bytes(buf, sizeof(buf), value),
2240 " IP traffic");
2241 else
2242 continue;
2243 if (!t) {
2244 r = log_oom();
2245 goto finish;
2246 }
2247
2248 message_parts[n_message_parts++] = t;
2249 }
2250
2251 /* Is there any accounting data available at all? */
2252 if (n_iovec == 0) {
2253 r = 0;
2254 goto finish;
2255 }
2256
2257 if (n_message_parts == 0)
2258 t = strjoina("MESSAGE=", u->id, ": Completed");
2259 else {
2260 _cleanup_free_ char *joined;
2261
2262 message_parts[n_message_parts] = NULL;
2263
2264 joined = strv_join(message_parts, ", ");
2265 if (!joined) {
2266 r = log_oom();
2267 goto finish;
2268 }
2269
2270 t = strjoina("MESSAGE=", u->id, ": ", joined);
2271 }
2272
2273 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2274 * and hence don't increase n_iovec for them */
2275 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2276 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2277
2278 t = strjoina(u->manager->unit_log_field, u->id);
2279 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2280
2281 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2282 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2283
2284 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2285 r = 0;
2286
2287 finish:
2288 for (i = 0; i < n_message_parts; i++)
2289 free(message_parts[i]);
2290
2291 for (i = 0; i < n_iovec; i++)
2292 free(iovec[i].iov_base);
2293
2294 return r;
2295
2296 }
2297
2298 static void unit_update_on_console(Unit *u) {
2299 bool b;
2300
2301 assert(u);
2302
2303 b = unit_needs_console(u);
2304 if (u->on_console == b)
2305 return;
2306
2307 u->on_console = b;
2308 if (b)
2309 manager_ref_console(u->manager);
2310 else
2311 manager_unref_console(u->manager);
2312 }
2313
/* Central state-change hook: every unit type calls this whenever its low-level
 * state machine transitions from 'os' to 'ns'. Responsibilities, in order:
 * timestamp bookkeeping, failed-unit tracking, cgroup/state-file cleanup,
 * console refcounting, settling or invalidating a pending job, retroactive
 * dependency start/stop, audit/plymouth notifications, resource logging,
 * and finally the success/failure emergency actions and queue submissions.
 * The statement order here is load-bearing; do not reorder casually. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                /* inactive→(anything else) marks an "exit from inactive", and vice versa */
                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* Still "activating" is fine; anything else while the job runs is not */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        /* Reload completed; the flags tell us whether it actually worked */
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= is suppressed when the unit is about to be auto-restarted anyway */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2514
/* Register interest in SIGCHLD/exit events for the given PID on behalf of this
 * unit. The manager keeps two kinds of entries in watch_pids: a positive key
 * "pid" mapping to a single Unit (fast path), and a negative key "-pid" mapping
 * to a NULL-terminated Unit* array for the case where several units watch the
 * same PID. Returns 0 on success (including "already watching"), negative errno
 * on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;
                /* NB: the loop above deliberately runs to the end so that 'n' ends up
                 * as the current array length, needed for the reallocation below. */

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, one slot larger plus the NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also track the PID in the unit's own set, for unit_unwatch_all_pids() */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2579
2580 void unit_unwatch_pid(Unit *u, pid_t pid) {
2581 Unit **array;
2582
2583 assert(u);
2584 assert(pid_is_valid(pid));
2585
2586 /* First let's drop the unit in case it's keyed as "pid". */
2587 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2588
2589 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2590 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2591 if (array) {
2592 size_t n, m = 0;
2593
2594 /* Let's iterate through the array, dropping our own entry */
2595 for (n = 0; array[n]; n++)
2596 if (array[n] != u)
2597 array[m++] = array[n];
2598 array[m] = NULL;
2599
2600 if (m == 0) {
2601 /* The array is now empty, remove the entire entry */
2602 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2603 free(array);
2604 }
2605 }
2606
2607 (void) set_remove(u->pids, PID_TO_PTR(pid));
2608 }
2609
2610 void unit_unwatch_all_pids(Unit *u) {
2611 assert(u);
2612
2613 while (!set_isempty(u->pids))
2614 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2615
2616 u->pids = set_free(u->pids);
2617 }
2618
/* Drops watches for PIDs in u->pids that no longer exist, keeping the unit's
 * main and control PIDs watched unconditionally. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                /* Never drop the main/control PID, even if it looks gone */
                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current entry from
                 * u->pids while SET_FOREACH is still iterating — this assumes the set
                 * implementation tolerates removal of the current element; confirm
                 * against the Set/Hashmap iterator contract. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2641
2642 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2643 Unit *u = userdata;
2644
2645 assert(s);
2646 assert(u);
2647
2648 unit_tidy_watch_pids(u);
2649 unit_watch_all_pids(u);
2650
2651 /* If the PID set is empty now, then let's finish this off. */
2652 unit_synthesize_cgroup_empty_event(u);
2653
2654 return 0;
2655 }
2656
2657 int unit_enqueue_rewatch_pids(Unit *u) {
2658 int r;
2659
2660 assert(u);
2661
2662 if (!u->cgroup_path)
2663 return -ENOENT;
2664
2665 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2666 if (r < 0)
2667 return r;
2668 if (r > 0) /* On unified we can use proper notifications */
2669 return 0;
2670
2671 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2672 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2673 * involves issuing kill(pid, 0) on all processes we watch. */
2674
2675 if (!u->rewatch_pids_event_source) {
2676 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2677
2678 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2679 if (r < 0)
2680 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2681
2682 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2683 if (r < 0)
2684 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2685
2686 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2687
2688 u->rewatch_pids_event_source = TAKE_PTR(s);
2689 }
2690
2691 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2692 if (r < 0)
2693 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2694
2695 return 0;
2696 }
2697
2698 void unit_dequeue_rewatch_pids(Unit *u) {
2699 int r;
2700 assert(u);
2701
2702 if (!u->rewatch_pids_event_source)
2703 return;
2704
2705 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2706 if (r < 0)
2707 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2708
2709 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2710 }
2711
2712 bool unit_job_is_applicable(Unit *u, JobType j) {
2713 assert(u);
2714 assert(j >= 0 && j < _JOB_TYPE_MAX);
2715
2716 switch (j) {
2717
2718 case JOB_VERIFY_ACTIVE:
2719 case JOB_START:
2720 case JOB_NOP:
2721 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2722 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2723 * jobs for it. */
2724 return true;
2725
2726 case JOB_STOP:
2727 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2728 * external events), hence it makes no sense to permit enqueing such a request either. */
2729 return !u->perpetual;
2730
2731 case JOB_RESTART:
2732 case JOB_TRY_RESTART:
2733 return unit_can_stop(u) && unit_can_start(u);
2734
2735 case JOB_RELOAD:
2736 case JOB_TRY_RELOAD:
2737 return unit_can_reload(u);
2738
2739 case JOB_RELOAD_OR_START:
2740 return unit_can_reload(u) && unit_can_start(u);
2741
2742 default:
2743 assert_not_reached("Invalid job type");
2744 }
2745 }
2746
2747 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2748 assert(u);
2749
2750 /* Only warn about some unit types */
2751 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2752 return;
2753
2754 if (streq_ptr(u->id, other))
2755 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2756 else
2757 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2758 }
2759
/* Registers 'other' in the dependency hashmap 'h', merging the given origin and
 * destination masks into an existing entry if one is present. Returns 1 if the
 * entry was added or its masks grew, 0 if it already covered both masks, and a
 * negative errno on failure. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* The two masks are packed into the hashmap's value pointer via the
         * UnitDependencyInfo union — this only works if the union fits a void*. */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2806
2807 int unit_add_dependency(
2808 Unit *u,
2809 UnitDependency d,
2810 Unit *other,
2811 bool add_reference,
2812 UnitDependencyMask mask) {
2813
2814 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2815 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2816 [UNIT_WANTS] = UNIT_WANTED_BY,
2817 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2818 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2819 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2820 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2821 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2822 [UNIT_WANTED_BY] = UNIT_WANTS,
2823 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2824 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2825 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2826 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2827 [UNIT_BEFORE] = UNIT_AFTER,
2828 [UNIT_AFTER] = UNIT_BEFORE,
2829 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2830 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2831 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2832 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2833 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2834 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2835 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2836 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2837 };
2838 Unit *original_u = u, *original_other = other;
2839 int r;
2840
2841 assert(u);
2842 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2843 assert(other);
2844
2845 u = unit_follow_merge(u);
2846 other = unit_follow_merge(other);
2847
2848 /* We won't allow dependencies on ourselves. We will not
2849 * consider them an error however. */
2850 if (u == other) {
2851 maybe_warn_about_dependency(original_u, original_other->id, d);
2852 return 0;
2853 }
2854
2855 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2856 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2857 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2858 return 0;
2859 }
2860
2861 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2862 if (r < 0)
2863 return r;
2864
2865 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2866 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2867 if (r < 0)
2868 return r;
2869 }
2870
2871 if (add_reference) {
2872 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2873 if (r < 0)
2874 return r;
2875
2876 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2877 if (r < 0)
2878 return r;
2879 }
2880
2881 unit_add_to_dbus_queue(u);
2882 return 0;
2883 }
2884
2885 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2886 int r;
2887
2888 assert(u);
2889
2890 r = unit_add_dependency(u, d, other, add_reference, mask);
2891 if (r < 0)
2892 return r;
2893
2894 return unit_add_dependency(u, e, other, add_reference, mask);
2895 }
2896
2897 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2898 int r;
2899
2900 assert(u);
2901 assert(name || path);
2902 assert(buf);
2903 assert(ret);
2904
2905 if (!name)
2906 name = basename(path);
2907
2908 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2909 *buf = NULL;
2910 *ret = name;
2911 return 0;
2912 }
2913
2914 if (u->instance)
2915 r = unit_name_replace_instance(name, u->instance, buf);
2916 else {
2917 _cleanup_free_ char *i = NULL;
2918
2919 r = unit_name_to_prefix(u->id, &i);
2920 if (r < 0)
2921 return r;
2922
2923 r = unit_name_replace_instance(name, i, buf);
2924 }
2925 if (r < 0)
2926 return r;
2927
2928 *ret = *buf;
2929 return 0;
2930 }
2931
2932 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2933 _cleanup_free_ char *buf = NULL;
2934 Unit *other;
2935 int r;
2936
2937 assert(u);
2938 assert(name || path);
2939
2940 r = resolve_template(u, name, path, &buf, &name);
2941 if (r < 0)
2942 return r;
2943
2944 r = manager_load_unit(u->manager, name, path, NULL, &other);
2945 if (r < 0)
2946 return r;
2947
2948 return unit_add_dependency(u, d, other, add_reference, mask);
2949 }
2950
2951 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2952 _cleanup_free_ char *buf = NULL;
2953 Unit *other;
2954 int r;
2955
2956 assert(u);
2957 assert(name || path);
2958
2959 r = resolve_template(u, name, path, &buf, &name);
2960 if (r < 0)
2961 return r;
2962
2963 r = manager_load_unit(u->manager, name, path, NULL, &other);
2964 if (r < 0)
2965 return r;
2966
2967 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2968 }
2969
/* Debug helper: export the given unit search path via $SYSTEMD_UNIT_PATH so
 * that freshly spawned managers pick it up. Returns 0 or -errno. */
int set_unit_path(const char *p) {
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2977
2978 char *unit_dbus_path(Unit *u) {
2979 assert(u);
2980
2981 if (!u->id)
2982 return NULL;
2983
2984 return unit_dbus_path_from_name(u->id);
2985 }
2986
2987 char *unit_dbus_path_invocation_id(Unit *u) {
2988 assert(u);
2989
2990 if (sd_id128_is_null(u->invocation_id))
2991 return NULL;
2992
2993 return unit_dbus_path_from_name(u->invocation_id_string);
2994 }
2995
/* Assigns the given slice to the unit. Returns 1 if the slice was (re)set,
 * 0 if it was already set to exactly this slice, and a negative errno when the
 * assignment is not permitted. The order of the checks below determines which
 * error wins when several apply — keep it stable. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        /* Slices derive their parent from their name, never from this field */
        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Moving an already-started unit between slices is not supported */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* The init scope is pinned directly below the root slice */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        /* Setting the same slice again is a NOP */
        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
3032
3033 int unit_set_default_slice(Unit *u) {
3034 _cleanup_free_ char *b = NULL;
3035 const char *slice_name;
3036 Unit *slice;
3037 int r;
3038
3039 assert(u);
3040
3041 if (UNIT_ISSET(u->slice))
3042 return 0;
3043
3044 if (u->instance) {
3045 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3046
3047 /* Implicitly place all instantiated units in their
3048 * own per-template slice */
3049
3050 r = unit_name_to_prefix(u->id, &prefix);
3051 if (r < 0)
3052 return r;
3053
3054 /* The prefix is already escaped, but it might include
3055 * "-" which has a special meaning for slice units,
3056 * hence escape it here extra. */
3057 escaped = unit_name_escape(prefix);
3058 if (!escaped)
3059 return -ENOMEM;
3060
3061 if (MANAGER_IS_SYSTEM(u->manager))
3062 b = strjoin("system-", escaped, ".slice");
3063 else
3064 b = strappend(escaped, ".slice");
3065 if (!b)
3066 return -ENOMEM;
3067
3068 slice_name = b;
3069 } else
3070 slice_name =
3071 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3072 ? SPECIAL_SYSTEM_SLICE
3073 : SPECIAL_ROOT_SLICE;
3074
3075 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3076 if (r < 0)
3077 return r;
3078
3079 return unit_set_slice(u, slice);
3080 }
3081
3082 const char *unit_slice_name(Unit *u) {
3083 assert(u);
3084
3085 if (!UNIT_ISSET(u->slice))
3086 return NULL;
3087
3088 return UNIT_DEREF(u->slice)->id;
3089 }
3090
3091 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3092 _cleanup_free_ char *t = NULL;
3093 int r;
3094
3095 assert(u);
3096 assert(type);
3097 assert(_found);
3098
3099 r = unit_name_change_suffix(u->id, type, &t);
3100 if (r < 0)
3101 return r;
3102 if (unit_has_name(u, t))
3103 return -EINVAL;
3104
3105 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3106 assert(r < 0 || *_found != u);
3107 return r;
3108 }
3109
3110 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3111 const char *name, *old_owner, *new_owner;
3112 Unit *u = userdata;
3113 int r;
3114
3115 assert(message);
3116 assert(u);
3117
3118 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3119 if (r < 0) {
3120 bus_log_parse_error(r);
3121 return 0;
3122 }
3123
3124 old_owner = empty_to_null(old_owner);
3125 new_owner = empty_to_null(new_owner);
3126
3127 if (UNIT_VTABLE(u)->bus_name_owner_change)
3128 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3129
3130 return 0;
3131 }
3132
3133 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3134 const char *match;
3135
3136 assert(u);
3137 assert(bus);
3138 assert(name);
3139
3140 if (u->match_bus_slot)
3141 return -EBUSY;
3142
3143 match = strjoina("type='signal',"
3144 "sender='org.freedesktop.DBus',"
3145 "path='/org/freedesktop/DBus',"
3146 "interface='org.freedesktop.DBus',"
3147 "member='NameOwnerChanged',"
3148 "arg0='", name, "'");
3149
3150 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3151 }
3152
3153 int unit_watch_bus_name(Unit *u, const char *name) {
3154 int r;
3155
3156 assert(u);
3157 assert(name);
3158
3159 /* Watch a specific name on the bus. We only support one unit
3160 * watching each name for now. */
3161
3162 if (u->manager->api_bus) {
3163 /* If the bus is already available, install the match directly.
3164 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3165 r = unit_install_bus_match(u, u->manager->api_bus, name);
3166 if (r < 0)
3167 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3168 }
3169
3170 r = hashmap_put(u->manager->watch_bus, name, u);
3171 if (r < 0) {
3172 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3173 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3174 }
3175
3176 return 0;
3177 }
3178
3179 void unit_unwatch_bus_name(Unit *u, const char *name) {
3180 assert(u);
3181 assert(name);
3182
3183 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3184 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3185 }
3186
3187 bool unit_can_serialize(Unit *u) {
3188 assert(u);
3189
3190 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3191 }
3192
3193 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3194 _cleanup_free_ char *s = NULL;
3195 int r = 0;
3196
3197 assert(f);
3198 assert(key);
3199
3200 if (mask != 0) {
3201 r = cg_mask_to_string(mask, &s);
3202 if (r >= 0) {
3203 fputs(key, f);
3204 fputc('=', f);
3205 fputs(s, f);
3206 fputc('\n', f);
3207 }
3208 }
3209 return r;
3210 }
3211
/* Serialization key names for the per-unit IP accounting counters, indexed by
 * CGroupIPAccountingMetric. NOTE(review): presumably the deserialization side
 * matches on the same strings — keep both in sync when changing these. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3218
/* Serialize the unit's runtime state as "key=value" lines to f (used across
 * daemon reload/reexec), delegating type-specific state to the unit vtable
 * first, and registering any passed fds in the fd set. A single empty line
 * terminates the record. Returns 0 on success, negative errno on failure. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Type-specific state first (service, socket, ...) */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results only make sense once they were evaluated */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        /* cgroup state */
        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* IP accounting counters, one line per available metric */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        /* Both the regular and the nop job are serialized under the "job" key */
        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3300
3301 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3302 assert(u);
3303 assert(f);
3304 assert(key);
3305
3306 if (!value)
3307 return 0;
3308
3309 fputs(key, f);
3310 fputc('=', f);
3311 fputs(value, f);
3312 fputc('\n', f);
3313
3314 return 1;
3315 }
3316
3317 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3318 _cleanup_free_ char *c = NULL;
3319
3320 assert(u);
3321 assert(f);
3322 assert(key);
3323
3324 if (!value)
3325 return 0;
3326
3327 c = cescape(value);
3328 if (!c)
3329 return -ENOMEM;
3330
3331 fputs(key, f);
3332 fputc('=', f);
3333 fputs(c, f);
3334 fputc('\n', f);
3335
3336 return 1;
3337 }
3338
3339 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3340 int copy;
3341
3342 assert(u);
3343 assert(f);
3344 assert(key);
3345
3346 if (fd < 0)
3347 return 0;
3348
3349 copy = fdset_put_dup(fds, fd);
3350 if (copy < 0)
3351 return copy;
3352
3353 fprintf(f, "%s=%i\n", key, copy);
3354 return 1;
3355 }
3356
/* Serializes a single key with a printf()-style formatted value for this unit.
 * Unlike the other serialization helpers this cannot report failure and hence
 * returns nothing. */
void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
        va_list ap;

        assert(u);
        assert(f);
        assert(key);
        assert(format);

        fputs(key, f);
        fputc('=', f);

        /* Let vfprintf() render the value directly into the stream */
        va_start(ap, format);
        vfprintf(f, format, ap);
        va_end(ap);

        fputc('\n', f);
}
3374
3375 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3376 int r;
3377
3378 assert(u);
3379 assert(f);
3380 assert(fds);
3381
3382 for (;;) {
3383 char line[LINE_MAX], *l, *v;
3384 CGroupIPAccountingMetric m;
3385 size_t k;
3386
3387 if (!fgets(line, sizeof(line), f)) {
3388 if (feof(f))
3389 return 0;
3390 return -errno;
3391 }
3392
3393 char_array_0(line);
3394 l = strstrip(line);
3395
3396 /* End marker */
3397 if (isempty(l))
3398 break;
3399
3400 k = strcspn(l, "=");
3401
3402 if (l[k] == '=') {
3403 l[k] = 0;
3404 v = l+k+1;
3405 } else
3406 v = l+k;
3407
3408 if (streq(l, "job")) {
3409 if (v[0] == '\0') {
3410 /* new-style serialized job */
3411 Job *j;
3412
3413 j = job_new_raw(u);
3414 if (!j)
3415 return log_oom();
3416
3417 r = job_deserialize(j, f);
3418 if (r < 0) {
3419 job_free(j);
3420 return r;
3421 }
3422
3423 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3424 if (r < 0) {
3425 job_free(j);
3426 return r;
3427 }
3428
3429 r = job_install_deserialized(j);
3430 if (r < 0) {
3431 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3432 job_free(j);
3433 return r;
3434 }
3435 } else /* legacy for pre-44 */
3436 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3437 continue;
3438 } else if (streq(l, "state-change-timestamp")) {
3439 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3440 continue;
3441 } else if (streq(l, "inactive-exit-timestamp")) {
3442 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3443 continue;
3444 } else if (streq(l, "active-enter-timestamp")) {
3445 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3446 continue;
3447 } else if (streq(l, "active-exit-timestamp")) {
3448 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3449 continue;
3450 } else if (streq(l, "inactive-enter-timestamp")) {
3451 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3452 continue;
3453 } else if (streq(l, "condition-timestamp")) {
3454 dual_timestamp_deserialize(v, &u->condition_timestamp);
3455 continue;
3456 } else if (streq(l, "assert-timestamp")) {
3457 dual_timestamp_deserialize(v, &u->assert_timestamp);
3458 continue;
3459 } else if (streq(l, "condition-result")) {
3460
3461 r = parse_boolean(v);
3462 if (r < 0)
3463 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3464 else
3465 u->condition_result = r;
3466
3467 continue;
3468
3469 } else if (streq(l, "assert-result")) {
3470
3471 r = parse_boolean(v);
3472 if (r < 0)
3473 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3474 else
3475 u->assert_result = r;
3476
3477 continue;
3478
3479 } else if (streq(l, "transient")) {
3480
3481 r = parse_boolean(v);
3482 if (r < 0)
3483 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3484 else
3485 u->transient = r;
3486
3487 continue;
3488
3489 } else if (streq(l, "exported-invocation-id")) {
3490
3491 r = parse_boolean(v);
3492 if (r < 0)
3493 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3494 else
3495 u->exported_invocation_id = r;
3496
3497 continue;
3498
3499 } else if (streq(l, "exported-log-level-max")) {
3500
3501 r = parse_boolean(v);
3502 if (r < 0)
3503 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3504 else
3505 u->exported_log_level_max = r;
3506
3507 continue;
3508
3509 } else if (streq(l, "exported-log-extra-fields")) {
3510
3511 r = parse_boolean(v);
3512 if (r < 0)
3513 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3514 else
3515 u->exported_log_extra_fields = r;
3516
3517 continue;
3518
3519 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3520
3521 r = safe_atou64(v, &u->cpu_usage_base);
3522 if (r < 0)
3523 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3524
3525 continue;
3526
3527 } else if (streq(l, "cpu-usage-last")) {
3528
3529 r = safe_atou64(v, &u->cpu_usage_last);
3530 if (r < 0)
3531 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3532
3533 continue;
3534
3535 } else if (streq(l, "cgroup")) {
3536
3537 r = unit_set_cgroup_path(u, v);
3538 if (r < 0)
3539 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3540
3541 (void) unit_watch_cgroup(u);
3542
3543 continue;
3544 } else if (streq(l, "cgroup-realized")) {
3545 int b;
3546
3547 b = parse_boolean(v);
3548 if (b < 0)
3549 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3550 else
3551 u->cgroup_realized = b;
3552
3553 continue;
3554
3555 } else if (streq(l, "cgroup-realized-mask")) {
3556
3557 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3558 if (r < 0)
3559 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3560 continue;
3561
3562 } else if (streq(l, "cgroup-enabled-mask")) {
3563
3564 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3565 if (r < 0)
3566 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3567 continue;
3568
3569 } else if (streq(l, "cgroup-bpf-realized")) {
3570 int i;
3571
3572 r = safe_atoi(v, &i);
3573 if (r < 0)
3574 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3575 else
3576 u->cgroup_bpf_state =
3577 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3578 i > 0 ? UNIT_CGROUP_BPF_ON :
3579 UNIT_CGROUP_BPF_OFF;
3580
3581 continue;
3582
3583 } else if (streq(l, "ref-uid")) {
3584 uid_t uid;
3585
3586 r = parse_uid(v, &uid);
3587 if (r < 0)
3588 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3589 else
3590 unit_ref_uid_gid(u, uid, GID_INVALID);
3591
3592 continue;
3593
3594 } else if (streq(l, "ref-gid")) {
3595 gid_t gid;
3596
3597 r = parse_gid(v, &gid);
3598 if (r < 0)
3599 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3600 else
3601 unit_ref_uid_gid(u, UID_INVALID, gid);
3602
3603 } else if (streq(l, "ref")) {
3604
3605 r = strv_extend(&u->deserialized_refs, v);
3606 if (r < 0)
3607 log_oom();
3608
3609 continue;
3610 } else if (streq(l, "invocation-id")) {
3611 sd_id128_t id;
3612
3613 r = sd_id128_from_string(v, &id);
3614 if (r < 0)
3615 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3616 else {
3617 r = unit_set_invocation_id(u, id);
3618 if (r < 0)
3619 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3620 }
3621
3622 continue;
3623 }
3624
3625 /* Check if this is an IP accounting metric serialization field */
3626 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3627 if (streq(l, ip_accounting_metric_field[m]))
3628 break;
3629 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3630 uint64_t c;
3631
3632 r = safe_atou64(v, &c);
3633 if (r < 0)
3634 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3635 else
3636 u->ip_accounting_extra[m] = c;
3637 continue;
3638 }
3639
3640 if (unit_can_serialize(u)) {
3641 r = exec_runtime_deserialize_compat(u, l, v, fds);
3642 if (r < 0) {
3643 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3644 continue;
3645 }
3646
3647 /* Returns positive if key was handled by the call */
3648 if (r > 0)
3649 continue;
3650
3651 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3652 if (r < 0)
3653 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3654 }
3655 }
3656
3657 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3658 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3659 * before 228 where the base for timeouts was not persistent across reboots. */
3660
3661 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3662 dual_timestamp_get(&u->state_change_timestamp);
3663
3664 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3665 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3666 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3667 unit_invalidate_cgroup_bpf(u);
3668
3669 return 0;
3670 }
3671
3672 void unit_deserialize_skip(FILE *f) {
3673 assert(f);
3674
3675 /* Skip serialized data for this unit. We don't know what it is. */
3676
3677 for (;;) {
3678 char line[LINE_MAX], *l;
3679
3680 if (!fgets(line, sizeof line, f))
3681 return;
3682
3683 char_array_0(line);
3684 l = strstrip(line);
3685
3686 /* End marker */
3687 if (isempty(l))
3688 return;
3689 }
3690 }
3691
/* Adds dependencies (plus ordering) from u to the .device unit backing the device node "what". If "wants" is
 * true the device additionally gets a Wants= dependency back on u. Returns 0 on success — including the cases
 * where nothing needs to be done — or a negative errno. */
int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
        Unit *device;
        _cleanup_free_ char *e = NULL;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a
         * container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        /* Upgrade Requires= to BindsTo= if the device declares it shall be bound by this unit */
        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        /* In non-system (user) managers only a weak Wants= dependency is created instead of "dep" */
        r = unit_add_two_dependencies(u, UNIT_AFTER,
                                      MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                      device, true, mask);
        if (r < 0)
                return r;

        if (wants) {
                r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
                if (r < 0)
                        return r;
        }

        return 0;
}
3736
/* Brings the unit's runtime state back to life after deserialization: replays saved bus-track references,
 * invokes the unit type's coldplug() callback, and coldplugs the pending job, if any. Remembers the first
 * error encountered but keeps going; returns that error (or 0). */
int unit_coldplug(Unit *u) {
        int r = 0, q;
        char **i;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-add the bus names that were referencing this unit before re-execution */
        STRV_FOREACH(i, u->deserialized_refs) {
                q = bus_unit_track_add_name(u, *i);
                if (q < 0 && r >= 0)
                        r = q;
        }
        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug) {
                q = UNIT_VTABLE(u)->coldplug(u);
                if (q < 0 && r >= 0)
                        r = q;
        }

        if (u->job) {
                q = job_coldplug(u->job);
                if (q < 0 && r >= 0)
                        r = q;
        }

        return r;
}
3770
3771 void unit_catchup(Unit *u) {
3772 assert(u);
3773
3774 if (UNIT_VTABLE(u)->catchup)
3775 UNIT_VTABLE(u)->catchup(u);
3776 }
3777
3778 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3779 struct stat st;
3780
3781 if (!path)
3782 return false;
3783
3784 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3785 * are never out-of-date. */
3786 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3787 return false;
3788
3789 if (stat(path, &st) < 0)
3790 /* What, cannot access this anymore? */
3791 return true;
3792
3793 if (path_masked)
3794 /* For masked files check if they are still so */
3795 return !null_or_empty(&st);
3796 else
3797 /* For non-empty files check the mtime */
3798 return timespec_load(&st.st_mtim) > mtime;
3799
3800 return false;
3801 }
3802
/* Returns true if the unit's on-disk configuration (fragment, source file, or drop-in set) changed since the
 * unit was loaded, i.e. whether a daemon-reload is needed to pick up the changes. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;
        char **path;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* The set of drop-in files currently on disk must also match the set seen at load time */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3830
/* Clears the unit's "failed" state via the type's reset_failed() callback and resets the start rate limit,
 * so the unit may be started again. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        RATELIMIT_RESET(u->start_limit);
        u->start_limit_hit = false;
}
3840
3841 Unit *unit_following(Unit *u) {
3842 assert(u);
3843
3844 if (UNIT_VTABLE(u)->following)
3845 return UNIT_VTABLE(u)->following(u);
3846
3847 return NULL;
3848 }
3849
3850 bool unit_stop_pending(Unit *u) {
3851 assert(u);
3852
3853 /* This call does check the current state of the unit. It's
3854 * hence useful to be called from state change calls of the
3855 * unit itself, where the state isn't updated yet. This is
3856 * different from unit_inactive_or_pending() which checks both
3857 * the current state and for a queued job. */
3858
3859 return u->job && u->job->type == JOB_STOP;
3860 }
3861
3862 bool unit_inactive_or_pending(Unit *u) {
3863 assert(u);
3864
3865 /* Returns true if the unit is inactive or going down */
3866
3867 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3868 return true;
3869
3870 if (unit_stop_pending(u))
3871 return true;
3872
3873 return false;
3874 }
3875
3876 bool unit_active_or_pending(Unit *u) {
3877 assert(u);
3878
3879 /* Returns true if the unit is active or going up */
3880
3881 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3882 return true;
3883
3884 if (u->job &&
3885 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3886 return true;
3887
3888 return false;
3889 }
3890
3891 bool unit_will_restart(Unit *u) {
3892 assert(u);
3893
3894 if (!UNIT_VTABLE(u)->will_restart)
3895 return false;
3896
3897 return UNIT_VTABLE(u)->will_restart(u);
3898 }
3899
3900 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3901 assert(u);
3902 assert(w >= 0 && w < _KILL_WHO_MAX);
3903 assert(SIGNAL_VALID(signo));
3904
3905 if (!UNIT_VTABLE(u)->kill)
3906 return -EOPNOTSUPP;
3907
3908 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3909 }
3910
3911 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3912 _cleanup_set_free_ Set *pid_set = NULL;
3913 int r;
3914
3915 pid_set = set_new(NULL);
3916 if (!pid_set)
3917 return NULL;
3918
3919 /* Exclude the main/control pids from being killed via the cgroup */
3920 if (main_pid > 0) {
3921 r = set_put(pid_set, PID_TO_PTR(main_pid));
3922 if (r < 0)
3923 return NULL;
3924 }
3925
3926 if (control_pid > 0) {
3927 r = set_put(pid_set, PID_TO_PTR(control_pid));
3928 if (r < 0)
3929 return NULL;
3930 }
3931
3932 return TAKE_PTR(pid_set);
3933 }
3934
3935 int unit_kill_common(
3936 Unit *u,
3937 KillWho who,
3938 int signo,
3939 pid_t main_pid,
3940 pid_t control_pid,
3941 sd_bus_error *error) {
3942
3943 int r = 0;
3944 bool killed = false;
3945
3946 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3947 if (main_pid < 0)
3948 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3949 else if (main_pid == 0)
3950 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3951 }
3952
3953 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3954 if (control_pid < 0)
3955 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3956 else if (control_pid == 0)
3957 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3958 }
3959
3960 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3961 if (control_pid > 0) {
3962 if (kill(control_pid, signo) < 0)
3963 r = -errno;
3964 else
3965 killed = true;
3966 }
3967
3968 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3969 if (main_pid > 0) {
3970 if (kill(main_pid, signo) < 0)
3971 r = -errno;
3972 else
3973 killed = true;
3974 }
3975
3976 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3977 _cleanup_set_free_ Set *pid_set = NULL;
3978 int q;
3979
3980 /* Exclude the main/control pids from being killed via the cgroup */
3981 pid_set = unit_pid_set(main_pid, control_pid);
3982 if (!pid_set)
3983 return -ENOMEM;
3984
3985 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3986 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3987 r = q;
3988 else
3989 killed = true;
3990 }
3991
3992 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3993 return -ESRCH;
3994
3995 return r;
3996 }
3997
3998 int unit_following_set(Unit *u, Set **s) {
3999 assert(u);
4000 assert(s);
4001
4002 if (UNIT_VTABLE(u)->following_set)
4003 return UNIT_VTABLE(u)->following_set(u, s);
4004
4005 *s = NULL;
4006 return 0;
4007 }
4008
4009 UnitFileState unit_get_unit_file_state(Unit *u) {
4010 int r;
4011
4012 assert(u);
4013
4014 if (u->unit_file_state < 0 && u->fragment_path) {
4015 r = unit_file_get_state(
4016 u->manager->unit_file_scope,
4017 NULL,
4018 u->id,
4019 &u->unit_file_state);
4020 if (r < 0)
4021 u->unit_file_state = UNIT_FILE_BAD;
4022 }
4023
4024 return u->unit_file_state;
4025 }
4026
4027 int unit_get_unit_file_preset(Unit *u) {
4028 assert(u);
4029
4030 if (u->unit_file_preset < 0 && u->fragment_path)
4031 u->unit_file_preset = unit_file_query_preset(
4032 u->manager->unit_file_scope,
4033 NULL,
4034 basename(u->fragment_path));
4035
4036 return u->unit_file_preset;
4037 }
4038
/* Points ref — a reference owned by "source" — at "target", dropping any previous target first. The reference
 * is linked into the target's refs_by_target list so the target knows who pins it. Returns target. */
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}
4052
/* Releases the reference: unlinks it from the target's refs_by_target list and clears it. Safe to call on an
 * already-unset reference. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
4066
/* Derives a user name for DynamicUser= from the unit name: the unit's prefix if it is a valid user/group
 * name, otherwise "_du" followed by a 16-hex-digit siphash of the prefix. Stores the (always newly allocated)
 * name in *ret; returns 0 on success, negative errno otherwise. */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed hash key, so the derived name is stable across restarts */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n)) {
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
4092
/* Patches manager defaults and option-implied settings into the unit's ExecContext and CGroupContext.
 * Returns 0 on success, negative errno on OOM or if the home directory lookup fails. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= drops the caps for creating device nodes and doing raw I/O */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Synthesize User=/Group= from the unit name, if unset */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4184
4185 ExecContext *unit_get_exec_context(Unit *u) {
4186 size_t offset;
4187 assert(u);
4188
4189 if (u->type < 0)
4190 return NULL;
4191
4192 offset = UNIT_VTABLE(u)->exec_context_offset;
4193 if (offset <= 0)
4194 return NULL;
4195
4196 return (ExecContext*) ((uint8_t*) u + offset);
4197 }
4198
4199 KillContext *unit_get_kill_context(Unit *u) {
4200 size_t offset;
4201 assert(u);
4202
4203 if (u->type < 0)
4204 return NULL;
4205
4206 offset = UNIT_VTABLE(u)->kill_context_offset;
4207 if (offset <= 0)
4208 return NULL;
4209
4210 return (KillContext*) ((uint8_t*) u + offset);
4211 }
4212
4213 CGroupContext *unit_get_cgroup_context(Unit *u) {
4214 size_t offset;
4215
4216 if (u->type < 0)
4217 return NULL;
4218
4219 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4220 if (offset <= 0)
4221 return NULL;
4222
4223 return (CGroupContext*) ((uint8_t*) u + offset);
4224 }
4225
4226 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4227 size_t offset;
4228
4229 if (u->type < 0)
4230 return NULL;
4231
4232 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4233 if (offset <= 0)
4234 return NULL;
4235
4236 return *(ExecRuntime**) ((uint8_t*) u + offset);
4237 }
4238
4239 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4240 assert(u);
4241
4242 if (UNIT_WRITE_FLAGS_NOOP(flags))
4243 return NULL;
4244
4245 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4246 return u->manager->lookup_paths.transient;
4247
4248 if (flags & UNIT_PERSISTENT)
4249 return u->manager->lookup_paths.persistent_control;
4250
4251 if (flags & UNIT_RUNTIME)
4252 return u->manager->lookup_paths.runtime_control;
4253
4254 return NULL;
4255 }
4256
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                free(ret); /* drop the specifier-escaped intermediate, if any; 'a' replaces it */
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        return ret ?: strdup(s);
}
4296
/* Each entry is escaped per 'flags', wrapped in double quotes and appended; entries are separated by a single
 * space. Returns the newly allocated string — an allocated empty string for an empty list — or NULL on OOM. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure we hand back an allocated (possibly empty) string, never NULL, on success */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4337
/* Persists a unit setting: while a transient unit is being created the line is appended to its transient unit
 * file; otherwise it is written as a 50-<name>.conf drop-in in the directory selected by 'flags'. 'data' is
 * escaped per 'flags' and prefixed with the appropriate section header. Returns 0 on success (or no-op flags),
 * negative errno otherwise. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in so unit_need_daemon_reload() stays accurate */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4414
/* printf()-style convenience wrapper around unit_write_setting(). Returns 0 on success (or no-op flags),
 * -ENOMEM if formatting fails, or whatever unit_write_setting() returns. */
int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_setting(u, flags, name, p);
}
4436
/* Converts the unit into a transient one: opens a fresh unit file in the transient lookup directory, makes it
 * the unit's fragment, and resets all previously loaded configuration state. Returns 0 on success,
 * -EOPNOTSUPP if the unit type doesn't support transient units, or a negative errno. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Drop any previously loaded configuration sources — the transient file replaces them all */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4482
4483 static void log_kill(pid_t pid, int sig, void *userdata) {
4484 _cleanup_free_ char *comm = NULL;
4485
4486 (void) get_process_comm(pid, &comm);
4487
4488 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4489 only, like for example systemd's own PAM stub process. */
4490 if (comm && comm[0] == '(')
4491 return;
4492
4493 log_unit_notice(userdata,
4494 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4495 pid,
4496 strna(comm),
4497 signal_to_string(sig));
4498 }
4499
4500 static int operation_to_signal(KillContext *c, KillOperation k) {
4501 assert(c);
4502
4503 switch (k) {
4504
4505 case KILL_TERMINATE:
4506 case KILL_TERMINATE_AND_LOG:
4507 return c->kill_signal;
4508
4509 case KILL_KILL:
4510 return c->final_kill_signal;
4511
4512 case KILL_ABORT:
4513 return SIGABRT;
4514
4515 default:
4516 assert_not_reached("KillOperation unknown");
4517 }
4518 }
4519
/* Kills the processes belonging to this unit per its kill context: signals the main and control processes
 * (unless main_pid_alien marks the main PID as not forked off by us — then we don't wait for it), and,
 * depending on KillMode=, the whole cgroup. Returns > 0 if we killed something worth waiting for, 0 otherwise,
 * negative errno on hard failure. */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SIGHUP is sent in addition when configured, but only for "terminate" operations and when the
         * operation's signal isn't SIGHUP already */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log the kills for the forceful/explicitly-logged cases */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* The whole cgroup is killed for KillMode=control-group always, and for KillMode=mixed on the final
         * SIGKILL stage */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set — the first recursive kill consumed it */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4637
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private copy so we may simplify it in place */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        /* The hashmap now owns the string, disarm the cleanup handler */
        p = NULL;

        /* Walk all prefixes of the path (including the path itself) and register this unit in the
         * manager-wide prefix → unit-set table. */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        /* Key string is owned by the hashmap now */
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4715
4716 int unit_setup_exec_runtime(Unit *u) {
4717 ExecRuntime **rt;
4718 size_t offset;
4719 Unit *other;
4720 Iterator i;
4721 void *v;
4722 int r;
4723
4724 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4725 assert(offset > 0);
4726
4727 /* Check if there already is an ExecRuntime for this unit? */
4728 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4729 if (*rt)
4730 return 0;
4731
4732 /* Try to get it from somebody else */
4733 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4734 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4735 if (r == 1)
4736 return 1;
4737 }
4738
4739 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4740 }
4741
4742 int unit_setup_dynamic_creds(Unit *u) {
4743 ExecContext *ec;
4744 DynamicCreds *dcreds;
4745 size_t offset;
4746
4747 assert(u);
4748
4749 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4750 assert(offset > 0);
4751 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4752
4753 ec = unit_get_exec_context(u);
4754 assert(ec);
4755
4756 if (!ec->dynamic_user)
4757 return 0;
4758
4759 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4760 }
4761
4762 bool unit_type_supported(UnitType t) {
4763 if (_unlikely_(t < 0))
4764 return false;
4765 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4766 return false;
4767
4768 if (!unit_vtable[t]->supported)
4769 return true;
4770
4771 return unit_vtable[t]->supported();
4772 }
4773
4774 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4775 int r;
4776
4777 assert(u);
4778 assert(where);
4779
4780 r = dir_is_empty(where);
4781 if (r > 0 || r == -ENOTDIR)
4782 return;
4783 if (r < 0) {
4784 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4785 return;
4786 }
4787
4788 log_struct(LOG_NOTICE,
4789 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4790 LOG_UNIT_ID(u),
4791 LOG_UNIT_INVOCATION_ID(u),
4792 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4793 "WHERE=%s", where);
4794 }
4795
4796 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4797 _cleanup_free_ char *canonical_where;
4798 int r;
4799
4800 assert(u);
4801 assert(where);
4802
4803 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4804 if (r < 0) {
4805 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4806 return 0;
4807 }
4808
4809 /* We will happily ignore a trailing slash (or any redundant slashes) */
4810 if (path_equal(where, canonical_where))
4811 return 0;
4812
4813 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4814 log_struct(LOG_ERR,
4815 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4816 LOG_UNIT_ID(u),
4817 LOG_UNIT_INVOCATION_ID(u),
4818 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4819 "WHERE=%s", where);
4820
4821 return -ELOOP;
4822 }
4823
4824 bool unit_is_pristine(Unit *u) {
4825 assert(u);
4826
4827 /* Check if the unit already exists or is already around,
4828 * in a number of different ways. Note that to cater for unit
4829 * types such as slice, we are generally fine with units that
4830 * are marked UNIT_LOADED even though nothing was actually
4831 * loaded, as those unit types don't require a file on disk. */
4832
4833 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4834 u->fragment_path ||
4835 u->source_path ||
4836 !strv_isempty(u->dropin_paths) ||
4837 u->job ||
4838 u->merged_into);
4839 }
4840
4841 pid_t unit_control_pid(Unit *u) {
4842 assert(u);
4843
4844 if (UNIT_VTABLE(u)->control_pid)
4845 return UNIT_VTABLE(u)->control_pid(u);
4846
4847 return 0;
4848 }
4849
4850 pid_t unit_main_pid(Unit *u) {
4851 assert(u);
4852
4853 if (UNIT_VTABLE(u)->main_pid)
4854 return UNIT_VTABLE(u)->main_pid(u);
4855
4856 return 0;
4857 }
4858
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time proof that treating a gid_t as a uid_t is safe here */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference held? Then nothing to drop. */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4883
/* Drops this unit's reference on its UID, if any; see unit_unref_uid_internal(). */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4887
/* Drops this unit's reference on its GID, if any. The uid_t* cast is safe per the assert_cc()
 * checks in unit_unref_uid_internal(). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4891
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        /* Compile-time proof that treating a gid_t as a uid_t is safe here */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Already referencing this exact UID? Then this is a no-op. (Checked before the -EBUSY case
         * below, so re-registering the same UID succeeds.) */
        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4929
/* Takes a reference on the given UID for this unit; see unit_ref_uid_internal(). */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4933
/* Takes a reference on the given GID for this unit. The uid_t casts are safe per the assert_cc()
 * checks in unit_ref_uid_internal(). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4937
4938 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4939 int r = 0, q = 0;
4940
4941 assert(u);
4942
4943 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4944
4945 if (uid_is_valid(uid)) {
4946 r = unit_ref_uid(u, uid, clean_ipc);
4947 if (r < 0)
4948 return r;
4949 }
4950
4951 if (gid_is_valid(gid)) {
4952 q = unit_ref_gid(u, gid, clean_ipc);
4953 if (q < 0) {
4954 if (r > 0)
4955 unit_unref_uid(u, false);
4956
4957 return q;
4958 }
4959 }
4960
4961 return r > 0 || q > 0;
4962 }
4963
4964 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4965 ExecContext *c;
4966 int r;
4967
4968 assert(u);
4969
4970 c = unit_get_exec_context(u);
4971
4972 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4973 if (r < 0)
4974 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4975
4976 return r;
4977 }
4978
/* Drops both the UID and GID references of this unit, if any. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4985
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        /* Only announce on the bus if a new reference was actually taken (> 0) */
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4999
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old registration in the manager's lookup table, if there was one */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear the invocation ID" — done via the reset path, reporting success */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Register under the new ID; the key points at u->invocation_id, so it must be set first */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Either intentional clearing or failure: leave the unit with no invocation ID at all */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
5036
/* Generates a fresh random invocation ID and installs it on the unit. Returns 0 on success,
 * negative errno (already logged) on failure. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
5053
/* Fills in an ExecParameters structure from manager-wide and unit-specific state, ahead of
 * spawning a process for this unit. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copy parameters from manager */
        p->environment = u->manager->environment;
        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        /* These two flags only apply when we are the system instance */
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
}
5069
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The ret parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup exists, so the child can be moved into it below */
        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                /* Parent (r > 0), or fork failure (r < 0) */
                return r;

        /* --- child only from here on --- */

        /* Reset signal dispositions inherited from the manager */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Die when the manager goes away */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5100
5101 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5102 assert(u);
5103 assert(d >= 0);
5104 assert(d < _UNIT_DEPENDENCY_MAX);
5105 assert(other);
5106
5107 if (di.origin_mask == 0 && di.destination_mask == 0) {
5108 /* No bit set anymore, let's drop the whole entry */
5109 assert_se(hashmap_remove(u->dependencies[d], other));
5110 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5111 } else
5112 /* Mask was reduced, let's update the entry */
5113 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5114 }
5115
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are iterating,
                 * which would invalidate the iterator. Hence: after every modification we break out of
                 * the loop and restart the iteration from scratch, until a full pass makes no change. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask has no bits in common with 'mask' */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become garbage-collectable now */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5169
5170 static int unit_export_invocation_id(Unit *u) {
5171 const char *p;
5172 int r;
5173
5174 assert(u);
5175
5176 if (u->exported_invocation_id)
5177 return 0;
5178
5179 if (sd_id128_is_null(u->invocation_id))
5180 return 0;
5181
5182 p = strjoina("/run/systemd/units/invocation:", u->id);
5183 r = symlink_atomic(u->invocation_id_string, p);
5184 if (r < 0)
5185 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5186
5187 u->exported_invocation_id = true;
5188 return 0;
5189 }
5190
5191 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5192 const char *p;
5193 char buf[2];
5194 int r;
5195
5196 assert(u);
5197 assert(c);
5198
5199 if (u->exported_log_level_max)
5200 return 0;
5201
5202 if (c->log_level_max < 0)
5203 return 0;
5204
5205 assert(c->log_level_max <= 7);
5206
5207 buf[0] = '0' + c->log_level_max;
5208 buf[1] = 0;
5209
5210 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5211 r = symlink_atomic(buf, p);
5212 if (r < 0)
5213 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5214
5215 u->exported_log_level_max = true;
5216 return 0;
5217 }
5218
/* Exposes LogExtraFields= to journald as a regular file under /run/systemd/units/, serialized as
 * a sequence of (little-endian 64-bit length, payload) records. Written to a temp file first and
 * renamed into place so readers never see a partial file. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        size_t i;
        int r;

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Two iovecs per field: one for the length prefix, one for the payload */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        /* mkostemp creates the file 0600; relax so journald can read it */
        (void) fchmod(fd, 0644);

        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
5272
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        /* Only the system instance exports state; /run/systemd/units/ is system-wide */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (u->manager->test_run_flags != 0)
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't
         * really apply to communication between the journal and systemd, as we assume that these two daemons live in
         * the same namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5308
5309 void unit_unlink_state_files(Unit *u) {
5310 const char *p;
5311
5312 assert(u);
5313
5314 if (!u->id)
5315 return;
5316
5317 if (!MANAGER_IS_SYSTEM(u->manager))
5318 return;
5319
5320 /* Undoes the effect of unit_export_state() */
5321
5322 if (u->exported_invocation_id) {
5323 p = strjoina("/run/systemd/units/invocation:", u->id);
5324 (void) unlink(p);
5325
5326 u->exported_invocation_id = false;
5327 }
5328
5329 if (u->exported_log_level_max) {
5330 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5331 (void) unlink(p);
5332
5333 u->exported_log_level_max = false;
5334 }
5335
5336 if (u->exported_log_extra_fields) {
5337 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5338 (void) unlink(p);
5339
5340 u->exported_log_extra_fields = false;
5341 }
5342 }
5343
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        /* Make sure the unit's cgroup hierarchy exists */
        (void) unit_realize_cgroup(u);

        /* One-shot accounting reset, requested e.g. on unit restart */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        /* Publish invocation ID & friends to /run/systemd/units/ for journald */
        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5371
/* cg_kill_recursive() callback: warns about a process found lingering in the unit's cgroup.
 * 'userdata' is the Unit; 'sig' is unused (the sweep is done with signal 0). */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5385
/* Walks the unit's cgroup and logs a warning for every process still found in it, without
 * killing anything (the sweep uses signal 0). */
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        /* Make sure u->cgroup_path is initialized if possible */
        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5396
5397 bool unit_needs_console(Unit *u) {
5398 ExecContext *ec;
5399 UnitActiveState state;
5400
5401 assert(u);
5402
5403 state = unit_active_state(u);
5404
5405 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5406 return false;
5407
5408 if (UNIT_VTABLE(u)->needs_console)
5409 return UNIT_VTABLE(u)->needs_console(u);
5410
5411 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5412 ec = unit_get_exec_context(u);
5413 if (!ec)
5414 return false;
5415
5416 return exec_context_may_touch_console(ec);
5417 }
5418
5419 const char *unit_label_path(Unit *u) {
5420 const char *p;
5421
5422 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5423 * when validating access checks. */
5424
5425 p = u->source_path ?: u->fragment_path;
5426 if (!p)
5427 return NULL;
5428
5429 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5430 if (path_equal(p, "/dev/null"))
5431 return NULL;
5432
5433 return p;
5434 }
5435
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if attachable, a negative bus error otherwise. */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5463
/* String mappings for the CollectMode enum (CollectMode= unit setting) */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);