src/core/unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6 ***/
7
8 #include <errno.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <sys/prctl.h>
12 #include <sys/stat.h>
13 #include <unistd.h>
14
15 #include "sd-id128.h"
16 #include "sd-messages.h"
17
18 #include "alloc-util.h"
19 #include "all-units.h"
20 #include "bus-common-errors.h"
21 #include "bus-util.h"
22 #include "cgroup-util.h"
23 #include "dbus-unit.h"
24 #include "dbus.h"
25 #include "dropin.h"
26 #include "escape.h"
27 #include "execute.h"
28 #include "fd-util.h"
29 #include "fileio-label.h"
30 #include "format-util.h"
31 #include "fs-util.h"
32 #include "id128-util.h"
33 #include "io-util.h"
34 #include "load-dropin.h"
35 #include "load-fragment.h"
36 #include "log.h"
37 #include "macro.h"
38 #include "missing.h"
39 #include "mkdir.h"
40 #include "parse-util.h"
41 #include "path-util.h"
42 #include "process-util.h"
43 #include "set.h"
44 #include "signal-util.h"
45 #include "sparse-endian.h"
46 #include "special.h"
47 #include "specifier.h"
48 #include "stat-util.h"
49 #include "stdio-util.h"
50 #include "string-table.h"
51 #include "string-util.h"
52 #include "strv.h"
53 #include "umask-util.h"
54 #include "unit-name.h"
55 #include "unit.h"
56 #include "user-util.h"
57 #include "virt.h"
58
59 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
60 [UNIT_SERVICE] = &service_vtable,
61 [UNIT_SOCKET] = &socket_vtable,
62 [UNIT_TARGET] = &target_vtable,
63 [UNIT_DEVICE] = &device_vtable,
64 [UNIT_MOUNT] = &mount_vtable,
65 [UNIT_AUTOMOUNT] = &automount_vtable,
66 [UNIT_SWAP] = &swap_vtable,
67 [UNIT_TIMER] = &timer_vtable,
68 [UNIT_PATH] = &path_vtable,
69 [UNIT_SLICE] = &slice_vtable,
70 [UNIT_SCOPE] = &scope_vtable,
71 };
72
73 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
74
75 Unit *unit_new(Manager *m, size_t size) {
76 Unit *u;
77
78 assert(m);
79 assert(size >= sizeof(Unit));
80
81 u = malloc0(size);
82 if (!u)
83 return NULL;
84
85 u->names = set_new(&string_hash_ops);
86 if (!u->names)
87 return mfree(u);
88
89 u->manager = m;
90 u->type = _UNIT_TYPE_INVALID;
91 u->default_dependencies = true;
92 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
93 u->unit_file_preset = -1;
94 u->on_failure_job_mode = JOB_REPLACE;
95 u->cgroup_inotify_wd = -1;
96 u->job_timeout = USEC_INFINITY;
97 u->job_running_timeout = USEC_INFINITY;
98 u->ref_uid = UID_INVALID;
99 u->ref_gid = GID_INVALID;
100 u->cpu_usage_last = NSEC_INFINITY;
101 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
102
103 u->ip_accounting_ingress_map_fd = -1;
104 u->ip_accounting_egress_map_fd = -1;
105 u->ipv4_allow_map_fd = -1;
106 u->ipv6_allow_map_fd = -1;
107 u->ipv4_deny_map_fd = -1;
108 u->ipv6_deny_map_fd = -1;
109
110 u->last_section_private = -1;
111
112 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
113 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
114
115 return u;
116 }
117
118 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
119 _cleanup_(unit_freep) Unit *u = NULL;
120 int r;
121
122 u = unit_new(m, size);
123 if (!u)
124 return -ENOMEM;
125
126 r = unit_add_name(u, name);
127 if (r < 0)
128 return r;
129
130 *ret = TAKE_PTR(u);
131
132 return r;
133 }
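/* A minimal usage sketch (illustrative only; it assumes the per-type vtable exposes an
 * object_size field, which is how the manager picks the allocation size when instantiating
 * units):
 *
 *     Unit *u;
 *     int r = unit_new_for_name(m, unit_vtable[UNIT_SERVICE]->object_size, "foo.service", &u);
 *     if (r < 0)
 *             return r;
 *
 * On error the half-constructed unit is released by the unit_freep cleanup handler above. */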
134
135 bool unit_has_name(Unit *u, const char *name) {
136 assert(u);
137 assert(name);
138
139 return set_contains(u->names, (char*) name);
140 }
141
142 static void unit_init(Unit *u) {
143 CGroupContext *cc;
144 ExecContext *ec;
145 KillContext *kc;
146
147 assert(u);
148 assert(u->manager);
149 assert(u->type >= 0);
150
151 cc = unit_get_cgroup_context(u);
152 if (cc) {
153 cgroup_context_init(cc);
154
155 /* Copy the manager defaults into the cgroup
156 * context, _before_ the rest of the settings have
157 * been initialized */
158
159 cc->cpu_accounting = u->manager->default_cpu_accounting;
160 cc->io_accounting = u->manager->default_io_accounting;
162 cc->blockio_accounting = u->manager->default_blockio_accounting;
163 cc->memory_accounting = u->manager->default_memory_accounting;
164 cc->tasks_accounting = u->manager->default_tasks_accounting;
165 cc->ip_accounting = u->manager->default_ip_accounting;
166
167 if (u->type != UNIT_SLICE)
168 cc->tasks_max = u->manager->default_tasks_max;
169 }
170
171 ec = unit_get_exec_context(u);
172 if (ec) {
173 exec_context_init(ec);
174
175 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
176 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
177 }
178
179 kc = unit_get_kill_context(u);
180 if (kc)
181 kill_context_init(kc);
182
183 if (UNIT_VTABLE(u)->init)
184 UNIT_VTABLE(u)->init(u);
185 }
186
187 int unit_add_name(Unit *u, const char *text) {
188 _cleanup_free_ char *s = NULL, *i = NULL;
189 UnitType t;
190 int r;
191
192 assert(u);
193 assert(text);
194
195 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
196
197 if (!u->instance)
198 return -EINVAL;
199
200 r = unit_name_replace_instance(text, u->instance, &s);
201 if (r < 0)
202 return r;
203 } else {
204 s = strdup(text);
205 if (!s)
206 return -ENOMEM;
207 }
208
209 if (set_contains(u->names, s))
210 return 0;
211 if (hashmap_contains(u->manager->units, s))
212 return -EEXIST;
213
214 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
215 return -EINVAL;
216
217 t = unit_name_to_type(s);
218 if (t < 0)
219 return -EINVAL;
220
221 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
222 return -EINVAL;
223
224 r = unit_name_to_instance(s, &i);
225 if (r < 0)
226 return r;
227
228 if (i && !unit_type_may_template(t))
229 return -EINVAL;
230
231 /* Ensure that this unit is either instanced or not instanced,
232 * but not both at the same time. Note that aliases with different
233 * instance names are still allowed. */
234 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
235 return -EINVAL;
236
237 if (!unit_type_may_alias(t) && !set_isempty(u->names))
238 return -EEXIST;
239
240 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
241 return -E2BIG;
242
243 r = set_put(u->names, s);
244 if (r < 0)
245 return r;
246 assert(r > 0);
247
248 r = hashmap_put(u->manager->units, s, u);
249 if (r < 0) {
250 (void) set_remove(u->names, s);
251 return r;
252 }
253
254 if (u->type == _UNIT_TYPE_INVALID) {
255 u->type = t;
256 u->id = s;
257 u->instance = TAKE_PTR(i);
258
259 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
260
261 unit_init(u);
262 }
263
264 s = NULL;
265
266 unit_add_to_dbus_queue(u);
267 return 0;
268 }
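/* In other words: a unit may carry several names (aliases). Each accepted name is stored in
 * u->names and registered in manager->units, all pointing at the same Unit object. The first
 * name that is added also fixes the unit's type, id and instance, and runs unit_init(). */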
269
270 int unit_choose_id(Unit *u, const char *name) {
271 _cleanup_free_ char *t = NULL;
272 char *s, *i;
273 int r;
274
275 assert(u);
276 assert(name);
277
278 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
279
280 if (!u->instance)
281 return -EINVAL;
282
283 r = unit_name_replace_instance(name, u->instance, &t);
284 if (r < 0)
285 return r;
286
287 name = t;
288 }
289
290 /* Selects one of the names of this unit as the id */
291 s = set_get(u->names, (char*) name);
292 if (!s)
293 return -ENOENT;
294
295 /* Determine the new instance from the new id */
296 r = unit_name_to_instance(s, &i);
297 if (r < 0)
298 return r;
299
300 u->id = s;
301
302 free(u->instance);
303 u->instance = i;
304
305 unit_add_to_dbus_queue(u);
306
307 return 0;
308 }
309
310 int unit_set_description(Unit *u, const char *description) {
311 int r;
312
313 assert(u);
314
315 r = free_and_strdup(&u->description, empty_to_null(description));
316 if (r < 0)
317 return r;
318 if (r > 0)
319 unit_add_to_dbus_queue(u);
320
321 return 0;
322 }
323
324 bool unit_may_gc(Unit *u) {
325 UnitActiveState state;
326 int r;
327
328 assert(u);
329
330 /* Checks whether the unit is ready to be unloaded for garbage collection.
331 * Returns true when the unit may be collected, and false if there's some
332 * reason to keep it loaded.
333 *
334 * References from other units are *not* checked here. Instead, this is done
335 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
336 */
337
338 if (u->job)
339 return false;
340
341 if (u->nop_job)
342 return false;
343
344 state = unit_active_state(u);
345
346 /* If the unit is inactive or failed, and no job is queued for it, then release its runtime resources */
347 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
348 UNIT_VTABLE(u)->release_resources)
349 UNIT_VTABLE(u)->release_resources(u);
350
351 if (u->perpetual)
352 return false;
353
354 if (sd_bus_track_count(u->bus_track) > 0)
355 return false;
356
357 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
358 switch (u->collect_mode) {
359
360 case COLLECT_INACTIVE:
361 if (state != UNIT_INACTIVE)
362 return false;
363
364 break;
365
366 case COLLECT_INACTIVE_OR_FAILED:
367 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
368 return false;
369
370 break;
371
372 default:
373 assert_not_reached("Unknown garbage collection mode");
374 }
375
376 if (u->cgroup_path) {
377 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
378 * around. Units with active processes should never be collected. */
379
380 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
381 if (r < 0)
382 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
383 if (r <= 0)
384 return false;
385 }
386
387 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
388 return false;
389
390 return true;
391 }
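/* The two collect modes above correspond to the CollectMode= unit setting: "inactive", the
 * default, keeps failed units loaded so they stay visible (e.g. to "systemctl --failed") until
 * reset, while "inactive-or-failed" lets failed units be garbage-collected just like inactive
 * ones. */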
392
393 void unit_add_to_load_queue(Unit *u) {
394 assert(u);
395 assert(u->type != _UNIT_TYPE_INVALID);
396
397 if (u->load_state != UNIT_STUB || u->in_load_queue)
398 return;
399
400 LIST_PREPEND(load_queue, u->manager->load_queue, u);
401 u->in_load_queue = true;
402 }
403
404 void unit_add_to_cleanup_queue(Unit *u) {
405 assert(u);
406
407 if (u->in_cleanup_queue)
408 return;
409
410 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
411 u->in_cleanup_queue = true;
412 }
413
414 void unit_add_to_gc_queue(Unit *u) {
415 assert(u);
416
417 if (u->in_gc_queue || u->in_cleanup_queue)
418 return;
419
420 if (!unit_may_gc(u))
421 return;
422
423 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
424 u->in_gc_queue = true;
425 }
426
427 void unit_add_to_dbus_queue(Unit *u) {
428 assert(u);
429 assert(u->type != _UNIT_TYPE_INVALID);
430
431 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
432 return;
433
434 /* Shortcut things if nobody cares */
435 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
436 sd_bus_track_count(u->bus_track) <= 0 &&
437 set_isempty(u->manager->private_buses)) {
438 u->sent_dbus_new_signal = true;
439 return;
440 }
441
442 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
443 u->in_dbus_queue = true;
444 }
445
446 static void bidi_set_free(Unit *u, Hashmap *h) {
447 Unit *other;
448 Iterator i;
449 void *v;
450
451 assert(u);
452
453 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
454
455 HASHMAP_FOREACH_KEY(v, other, h, i) {
456 UnitDependency d;
457
458 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
459 hashmap_remove(other->dependencies[d], u);
460
461 unit_add_to_gc_queue(other);
462 }
463
464 hashmap_free(h);
465 }
466
467 static void unit_remove_transient(Unit *u) {
468 char **i;
469
470 assert(u);
471
472 if (!u->transient)
473 return;
474
475 if (u->fragment_path)
476 (void) unlink(u->fragment_path);
477
478 STRV_FOREACH(i, u->dropin_paths) {
479 _cleanup_free_ char *p = NULL, *pp = NULL;
480
481 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
482 if (!p)
483 continue;
484
485 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
486 if (!pp)
487 continue;
488
489 /* Only drop transient drop-ins */
490 if (!path_equal(u->manager->lookup_paths.transient, pp))
491 continue;
492
493 (void) unlink(*i);
494 (void) rmdir(p);
495 }
496 }
497
498 static void unit_free_requires_mounts_for(Unit *u) {
499 assert(u);
500
501 for (;;) {
502 _cleanup_free_ char *path;
503
504 path = hashmap_steal_first_key(u->requires_mounts_for);
505 if (!path)
506 break;
507 else {
508 char s[strlen(path) + 1];
509
510 PATH_FOREACH_PREFIX_MORE(s, path) {
511 char *y;
512 Set *x;
513
514 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
515 if (!x)
516 continue;
517
518 (void) set_remove(x, u);
519
520 if (set_isempty(x)) {
521 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
522 free(y);
523 set_free(x);
524 }
525 }
526 }
527 }
528
529 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
530 }
531
532 static void unit_done(Unit *u) {
533 ExecContext *ec;
534 CGroupContext *cc;
535
536 assert(u);
537
538 if (u->type < 0)
539 return;
540
541 if (UNIT_VTABLE(u)->done)
542 UNIT_VTABLE(u)->done(u);
543
544 ec = unit_get_exec_context(u);
545 if (ec)
546 exec_context_done(ec);
547
548 cc = unit_get_cgroup_context(u);
549 if (cc)
550 cgroup_context_done(cc);
551 }
552
553 void unit_free(Unit *u) {
554 UnitDependency d;
555 Iterator i;
556 char *t;
557
558 if (!u)
559 return;
560
561 u->transient_file = safe_fclose(u->transient_file);
562
563 if (!MANAGER_IS_RELOADING(u->manager))
564 unit_remove_transient(u);
565
566 bus_unit_send_removed_signal(u);
567
568 unit_done(u);
569
570 sd_bus_slot_unref(u->match_bus_slot);
571
572 sd_bus_track_unref(u->bus_track);
573 u->deserialized_refs = strv_free(u->deserialized_refs);
574
575 unit_free_requires_mounts_for(u);
576
577 SET_FOREACH(t, u->names, i)
578 hashmap_remove_value(u->manager->units, t, u);
579
580 if (!sd_id128_is_null(u->invocation_id))
581 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
582
583 if (u->job) {
584 Job *j = u->job;
585 job_uninstall(j);
586 job_free(j);
587 }
588
589 if (u->nop_job) {
590 Job *j = u->nop_job;
591 job_uninstall(j);
592 job_free(j);
593 }
594
595 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
596 bidi_set_free(u, u->dependencies[d]);
597
598 if (u->on_console)
599 manager_unref_console(u->manager);
600
601 unit_release_cgroup(u);
602
603 if (!MANAGER_IS_RELOADING(u->manager))
604 unit_unlink_state_files(u);
605
606 unit_unref_uid_gid(u, false);
607
608 (void) manager_update_failed_units(u->manager, u, false);
609 set_remove(u->manager->startup_units, u);
610
611 unit_unwatch_all_pids(u);
612
613 unit_ref_unset(&u->slice);
614 while (u->refs_by_target)
615 unit_ref_unset(u->refs_by_target);
616
617 if (u->type != _UNIT_TYPE_INVALID)
618 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
619
620 if (u->in_load_queue)
621 LIST_REMOVE(load_queue, u->manager->load_queue, u);
622
623 if (u->in_dbus_queue)
624 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
625
626 if (u->in_gc_queue)
627 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
628
629 if (u->in_cgroup_realize_queue)
630 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
631
632 if (u->in_cgroup_empty_queue)
633 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
634
635 if (u->in_cleanup_queue)
636 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
637
638 if (u->in_target_deps_queue)
639 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
640
641 safe_close(u->ip_accounting_ingress_map_fd);
642 safe_close(u->ip_accounting_egress_map_fd);
643
644 safe_close(u->ipv4_allow_map_fd);
645 safe_close(u->ipv6_allow_map_fd);
646 safe_close(u->ipv4_deny_map_fd);
647 safe_close(u->ipv6_deny_map_fd);
648
649 bpf_program_unref(u->ip_bpf_ingress);
650 bpf_program_unref(u->ip_bpf_ingress_installed);
651 bpf_program_unref(u->ip_bpf_egress);
652 bpf_program_unref(u->ip_bpf_egress_installed);
653
654 condition_free_list(u->conditions);
655 condition_free_list(u->asserts);
656
657 free(u->description);
658 strv_free(u->documentation);
659 free(u->fragment_path);
660 free(u->source_path);
661 strv_free(u->dropin_paths);
662 free(u->instance);
663
664 free(u->job_timeout_reboot_arg);
665
666 set_free_free(u->names);
667
668 free(u->reboot_arg);
669
670 free(u);
671 }
672
673 UnitActiveState unit_active_state(Unit *u) {
674 assert(u);
675
676 if (u->load_state == UNIT_MERGED)
677 return unit_active_state(unit_follow_merge(u));
678
679 /* After a reload it might happen that a unit is not correctly
680 * loaded but still has a process around. That's why we won't
681 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
682
683 return UNIT_VTABLE(u)->active_state(u);
684 }
685
686 const char* unit_sub_state_to_string(Unit *u) {
687 assert(u);
688
689 return UNIT_VTABLE(u)->sub_state_to_string(u);
690 }
691
692 static int set_complete_move(Set **s, Set **other) {
693 assert(s);
694 assert(other);
695
696 if (!*other)
697 return 0;
698
699 if (*s)
700 return set_move(*s, *other);
701 else
702 *s = TAKE_PTR(*other);
703
704 return 0;
705 }
706
707 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
708 assert(s);
709 assert(other);
710
711 if (!*other)
712 return 0;
713
714 if (*s)
715 return hashmap_move(*s, *other);
716 else
717 *s = TAKE_PTR(*other);
718
719 return 0;
720 }
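/* Both *_complete_move() helpers transfer the contents of *other into *s: if *s does not exist
 * yet, the whole container is taken over via TAKE_PTR() (leaving *other NULL); otherwise the
 * entries are moved one by one. Callers still free whatever is left behind in *other. */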
721
722 static int merge_names(Unit *u, Unit *other) {
723 char *t;
724 Iterator i;
725 int r;
726
727 assert(u);
728 assert(other);
729
730 r = set_complete_move(&u->names, &other->names);
731 if (r < 0)
732 return r;
733
734 set_free_free(other->names);
735 other->names = NULL;
736 other->id = NULL;
737
738 SET_FOREACH(t, u->names, i)
739 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
740
741 return 0;
742 }
743
744 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
745 unsigned n_reserve;
746
747 assert(u);
748 assert(other);
749 assert(d < _UNIT_DEPENDENCY_MAX);
750
751 /*
752 * If u does not have this dependency set allocated, there is no need
753 * to reserve anything. In that case other's set will be transferred
754 * as a whole to u by complete_move().
755 */
756 if (!u->dependencies[d])
757 return 0;
758
759 /* merge_dependencies() will skip a u-on-u dependency */
760 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
761
762 return hashmap_reserve(u->dependencies[d], n_reserve);
763 }
764
765 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
766 Iterator i;
767 Unit *back;
768 void *v;
769 int r;
770
771 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
772
773 assert(u);
774 assert(other);
775 assert(d < _UNIT_DEPENDENCY_MAX);
776
777 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
778 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
779 UnitDependency k;
780
781 /* Let's now iterate through the dependencies of that dependent unit, looking for
782 * pointers back to 'other', and let's fix them up to point to 'u' instead. */
783
784 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
785 if (back == u) {
786 /* Do not add dependencies between u and itself. */
787 if (hashmap_remove(back->dependencies[k], other))
788 maybe_warn_about_dependency(u, other_id, k);
789 } else {
790 UnitDependencyInfo di_u, di_other, di_merged;
791
792 /* Let's drop this dependency between "back" and "other", and let's create it between
793 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
794 * and any such dependency which might already exist */
795
796 di_other.data = hashmap_get(back->dependencies[k], other);
797 if (!di_other.data)
798 continue; /* dependency isn't set, let's try the next one */
799
800 di_u.data = hashmap_get(back->dependencies[k], u);
801
802 di_merged = (UnitDependencyInfo) {
803 .origin_mask = di_u.origin_mask | di_other.origin_mask,
804 .destination_mask = di_u.destination_mask | di_other.destination_mask,
805 };
806
807 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
808 if (r < 0)
809 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
810 assert(r >= 0);
811
812 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
813 }
814 }
815
816 }
817
818 /* Also do not move dependencies on u to itself */
819 back = hashmap_remove(other->dependencies[d], u);
820 if (back)
821 maybe_warn_about_dependency(u, other_id, d);
822
823 /* The move cannot fail. The caller must have performed a reservation. */
824 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
825
826 other->dependencies[d] = hashmap_free(other->dependencies[d]);
827 }
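/* Note on the storage format relied on above: each dependencies[] hashmap maps a Unit* to a
 * UnitDependencyInfo whose bit masks are packed directly into the hashmap's value pointer
 * (hence the di.data accesses). Merging two entries therefore reduces to OR-ing their masks:
 *
 *     di_merged.origin_mask      = di_u.origin_mask      | di_other.origin_mask;
 *     di_merged.destination_mask = di_u.destination_mask | di_other.destination_mask;
 */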
828
829 int unit_merge(Unit *u, Unit *other) {
830 UnitDependency d;
831 const char *other_id = NULL;
832 int r;
833
834 assert(u);
835 assert(other);
836 assert(u->manager == other->manager);
837 assert(u->type != _UNIT_TYPE_INVALID);
838
839 other = unit_follow_merge(other);
840
841 if (other == u)
842 return 0;
843
844 if (u->type != other->type)
845 return -EINVAL;
846
847 if (!u->instance != !other->instance)
848 return -EINVAL;
849
850 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
851 return -EEXIST;
852
853 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
854 return -EEXIST;
855
856 if (other->job)
857 return -EEXIST;
858
859 if (other->nop_job)
860 return -EEXIST;
861
862 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
863 return -EEXIST;
864
865 if (other->id)
866 other_id = strdupa(other->id);
867
868 /* Make reservations to ensure merge_dependencies() won't fail */
869 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
870 r = reserve_dependencies(u, other, d);
871 /*
872 * We don't roll back reservations if we fail: there is no way
873 * to undo a reservation, and an unused reservation is not a leak.
874 */
875 if (r < 0)
876 return r;
877 }
878
879 /* Merge names */
880 r = merge_names(u, other);
881 if (r < 0)
882 return r;
883
884 /* Redirect all references */
885 while (other->refs_by_target)
886 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
887
888 /* Merge dependencies */
889 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
890 merge_dependencies(u, other, other_id, d);
891
892 other->load_state = UNIT_MERGED;
893 other->merged_into = u;
894
895 /* If there is still some data attached to the other node, we
896 * don't need it anymore, and can free it. */
897 if (other->load_state != UNIT_STUB)
898 if (UNIT_VTABLE(other)->done)
899 UNIT_VTABLE(other)->done(other);
900
901 unit_add_to_dbus_queue(u);
902 unit_add_to_cleanup_queue(other);
903
904 return 0;
905 }
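/* unit_merge() is what collapses two Unit objects that turn out to refer to the same unit (for
 * example an alias and its target that were instantiated separately) into one: the losing object
 * keeps only a UNIT_MERGED load state and a merged_into pointer, which unit_follow_merge() below
 * chases to reach the surviving object. */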
906
907 int unit_merge_by_name(Unit *u, const char *name) {
908 _cleanup_free_ char *s = NULL;
909 Unit *other;
910 int r;
911
912 assert(u);
913 assert(name);
914
915 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
916 if (!u->instance)
917 return -EINVAL;
918
919 r = unit_name_replace_instance(name, u->instance, &s);
920 if (r < 0)
921 return r;
922
923 name = s;
924 }
925
926 other = manager_get_unit(u->manager, name);
927 if (other)
928 return unit_merge(u, other);
929
930 return unit_add_name(u, name);
931 }
932
933 Unit* unit_follow_merge(Unit *u) {
934 assert(u);
935
936 while (u->load_state == UNIT_MERGED)
937 assert_se(u = u->merged_into);
938
939 return u;
940 }
941
942 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
943 ExecDirectoryType dt;
944 char **dp;
945 int r;
946
947 assert(u);
948 assert(c);
949
950 if (c->working_directory) {
951 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
952 if (r < 0)
953 return r;
954 }
955
956 if (c->root_directory) {
957 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
958 if (r < 0)
959 return r;
960 }
961
962 if (c->root_image) {
963 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
964 if (r < 0)
965 return r;
966 }
967
968 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
969 if (!u->manager->prefix[dt])
970 continue;
971
972 STRV_FOREACH(dp, c->directories[dt].paths) {
973 _cleanup_free_ char *p;
974
975 p = strjoin(u->manager->prefix[dt], "/", *dp);
976 if (!p)
977 return -ENOMEM;
978
979 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
980 if (r < 0)
981 return r;
982 }
983 }
984
985 if (!MANAGER_IS_SYSTEM(u->manager))
986 return 0;
987
988 if (c->private_tmp) {
989 const char *p;
990
991 FOREACH_STRING(p, "/tmp", "/var/tmp") {
992 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
993 if (r < 0)
994 return r;
995 }
996
997 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
998 if (r < 0)
999 return r;
1000 }
1001
1002 if (!IN_SET(c->std_output,
1003 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1004 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1005 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1006 !IN_SET(c->std_error,
1007 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1008 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1009 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1010 return 0;
1011
1012 /* If syslog or kernel logging is requested, make sure our own
1013 * logging daemon is run first. */
1014
1015 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
1016 if (r < 0)
1017 return r;
1018
1019 return 0;
1020 }
1021
1022 const char *unit_description(Unit *u) {
1023 assert(u);
1024
1025 if (u->description)
1026 return u->description;
1027
1028 return strna(u->id);
1029 }
1030
1031 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1032 const struct {
1033 UnitDependencyMask mask;
1034 const char *name;
1035 } table[] = {
1036 { UNIT_DEPENDENCY_FILE, "file" },
1037 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1038 { UNIT_DEPENDENCY_DEFAULT, "default" },
1039 { UNIT_DEPENDENCY_UDEV, "udev" },
1040 { UNIT_DEPENDENCY_PATH, "path" },
1041 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1042 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1043 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1044 };
1045 size_t i;
1046
1047 assert(f);
1048 assert(kind);
1049 assert(space);
1050
1051 for (i = 0; i < ELEMENTSOF(table); i++) {
1052
1053 if (mask == 0)
1054 break;
1055
1056 if (FLAGS_SET(mask, table[i].mask)) {
1057 if (*space)
1058 fputc(' ', f);
1059 else
1060 *space = true;
1061
1062 fputs(kind, f);
1063 fputs("-", f);
1064 fputs(table[i].name, f);
1065
1066 mask &= ~table[i].mask;
1067 }
1068 }
1069
1070 assert(mask == 0);
1071 }
1072
1073 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1074 char *t, **j;
1075 UnitDependency d;
1076 Iterator i;
1077 const char *prefix2;
1078 char
1079 timestamp0[FORMAT_TIMESTAMP_MAX],
1080 timestamp1[FORMAT_TIMESTAMP_MAX],
1081 timestamp2[FORMAT_TIMESTAMP_MAX],
1082 timestamp3[FORMAT_TIMESTAMP_MAX],
1083 timestamp4[FORMAT_TIMESTAMP_MAX],
1084 timespan[FORMAT_TIMESPAN_MAX];
1085 Unit *following;
1086 _cleanup_set_free_ Set *following_set = NULL;
1087 const char *n;
1088 CGroupMask m;
1089 int r;
1090
1091 assert(u);
1092 assert(u->type >= 0);
1093
1094 prefix = strempty(prefix);
1095 prefix2 = strjoina(prefix, "\t");
1096
1097 fprintf(f,
1098 "%s-> Unit %s:\n"
1099 "%s\tDescription: %s\n"
1100 "%s\tInstance: %s\n"
1101 "%s\tUnit Load State: %s\n"
1102 "%s\tUnit Active State: %s\n"
1103 "%s\tState Change Timestamp: %s\n"
1104 "%s\tInactive Exit Timestamp: %s\n"
1105 "%s\tActive Enter Timestamp: %s\n"
1106 "%s\tActive Exit Timestamp: %s\n"
1107 "%s\tInactive Enter Timestamp: %s\n"
1108 "%s\tMay GC: %s\n"
1109 "%s\tNeed Daemon Reload: %s\n"
1110 "%s\tTransient: %s\n"
1111 "%s\tPerpetual: %s\n"
1112 "%s\tGarbage Collection Mode: %s\n"
1113 "%s\tSlice: %s\n"
1114 "%s\tCGroup: %s\n"
1115 "%s\tCGroup realized: %s\n",
1116 prefix, u->id,
1117 prefix, unit_description(u),
1118 prefix, strna(u->instance),
1119 prefix, unit_load_state_to_string(u->load_state),
1120 prefix, unit_active_state_to_string(unit_active_state(u)),
1121 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1122 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1123 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1124 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1125 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1126 prefix, yes_no(unit_may_gc(u)),
1127 prefix, yes_no(unit_need_daemon_reload(u)),
1128 prefix, yes_no(u->transient),
1129 prefix, yes_no(u->perpetual),
1130 prefix, collect_mode_to_string(u->collect_mode),
1131 prefix, strna(unit_slice_name(u)),
1132 prefix, strna(u->cgroup_path),
1133 prefix, yes_no(u->cgroup_realized));
1134
1135 if (u->cgroup_realized_mask != 0) {
1136 _cleanup_free_ char *s = NULL;
1137 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1138 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1139 }
1140 if (u->cgroup_enabled_mask != 0) {
1141 _cleanup_free_ char *s = NULL;
1142 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1143 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1144 }
1145 m = unit_get_own_mask(u);
1146 if (m != 0) {
1147 _cleanup_free_ char *s = NULL;
1148 (void) cg_mask_to_string(m, &s);
1149 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1150 }
1151 m = unit_get_members_mask(u);
1152 if (m != 0) {
1153 _cleanup_free_ char *s = NULL;
1154 (void) cg_mask_to_string(m, &s);
1155 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1156 }
1157
1158 SET_FOREACH(t, u->names, i)
1159 fprintf(f, "%s\tName: %s\n", prefix, t);
1160
1161 if (!sd_id128_is_null(u->invocation_id))
1162 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1163 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1164
1165 STRV_FOREACH(j, u->documentation)
1166 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1167
1168 following = unit_following(u);
1169 if (following)
1170 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1171
1172 r = unit_following_set(u, &following_set);
1173 if (r >= 0) {
1174 Unit *other;
1175
1176 SET_FOREACH(other, following_set, i)
1177 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1178 }
1179
1180 if (u->fragment_path)
1181 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1182
1183 if (u->source_path)
1184 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1185
1186 STRV_FOREACH(j, u->dropin_paths)
1187 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1188
1189 if (u->failure_action != EMERGENCY_ACTION_NONE)
1190 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1191 if (u->success_action != EMERGENCY_ACTION_NONE)
1192 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1193
1194 if (u->job_timeout != USEC_INFINITY)
1195 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1196
1197 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1198 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1199
1200 if (u->job_timeout_reboot_arg)
1201 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1202
1203 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1204 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1205
1206 if (dual_timestamp_is_set(&u->condition_timestamp))
1207 fprintf(f,
1208 "%s\tCondition Timestamp: %s\n"
1209 "%s\tCondition Result: %s\n",
1210 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1211 prefix, yes_no(u->condition_result));
1212
1213 if (dual_timestamp_is_set(&u->assert_timestamp))
1214 fprintf(f,
1215 "%s\tAssert Timestamp: %s\n"
1216 "%s\tAssert Result: %s\n",
1217 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1218 prefix, yes_no(u->assert_result));
1219
1220 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1221 UnitDependencyInfo di;
1222 Unit *other;
1223
1224 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1225 bool space = false;
1226
1227 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1228
1229 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1230 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1231
1232 fputs(")\n", f);
1233 }
1234 }
1235
1236 if (!hashmap_isempty(u->requires_mounts_for)) {
1237 UnitDependencyInfo di;
1238 const char *path;
1239
1240 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1241 bool space = false;
1242
1243 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1244
1245 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1246 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1247
1248 fputs(")\n", f);
1249 }
1250 }
1251
1252 if (u->load_state == UNIT_LOADED) {
1253
1254 fprintf(f,
1255 "%s\tStopWhenUnneeded: %s\n"
1256 "%s\tRefuseManualStart: %s\n"
1257 "%s\tRefuseManualStop: %s\n"
1258 "%s\tDefaultDependencies: %s\n"
1259 "%s\tOnFailureJobMode: %s\n"
1260 "%s\tIgnoreOnIsolate: %s\n",
1261 prefix, yes_no(u->stop_when_unneeded),
1262 prefix, yes_no(u->refuse_manual_start),
1263 prefix, yes_no(u->refuse_manual_stop),
1264 prefix, yes_no(u->default_dependencies),
1265 prefix, job_mode_to_string(u->on_failure_job_mode),
1266 prefix, yes_no(u->ignore_on_isolate));
1267
1268 if (UNIT_VTABLE(u)->dump)
1269 UNIT_VTABLE(u)->dump(u, f, prefix2);
1270
1271 } else if (u->load_state == UNIT_MERGED)
1272 fprintf(f,
1273 "%s\tMerged into: %s\n",
1274 prefix, u->merged_into->id);
1275 else if (u->load_state == UNIT_ERROR)
1276 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1277
1278 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1279 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1280
1281 if (u->job)
1282 job_dump(u->job, f, prefix2);
1283
1284 if (u->nop_job)
1285 job_dump(u->nop_job, f, prefix2);
1286 }
1287
1288 /* Common implementation for multiple backends */
1289 int unit_load_fragment_and_dropin(Unit *u) {
1290 int r;
1291
1292 assert(u);
1293
1294 /* Load a .{service,socket,...} file */
1295 r = unit_load_fragment(u);
1296 if (r < 0)
1297 return r;
1298
1299 if (u->load_state == UNIT_STUB)
1300 return -ENOENT;
1301
1302 /* Load drop-in directory data. If u is an alias, we might be reloading the
1303 * target unit needlessly. But we cannot be sure which drop-ins have already
1304 * been loaded and which have not, at least without doing complicated book-keeping,
1305 * so let's always reread all drop-ins. */
1306 return unit_load_dropin(unit_follow_merge(u));
1307 }
1308
1309 /* Common implementation for multiple backends */
1310 int unit_load_fragment_and_dropin_optional(Unit *u) {
1311 int r;
1312
1313 assert(u);
1314
1315 /* Same as unit_load_fragment_and_dropin(), but whether
1316 * something can be loaded or not doesn't matter. */
1317
1318 /* Load a .service/.socket/.slice/… file */
1319 r = unit_load_fragment(u);
1320 if (r < 0)
1321 return r;
1322
1323 if (u->load_state == UNIT_STUB)
1324 u->load_state = UNIT_LOADED;
1325
1326 /* Load drop-in directory data */
1327 return unit_load_dropin(unit_follow_merge(u));
1328 }
1329
1330 void unit_add_to_target_deps_queue(Unit *u) {
1331 Manager *m = u->manager;
1332
1333 assert(u);
1334
1335 if (u->in_target_deps_queue)
1336 return;
1337
1338 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1339 u->in_target_deps_queue = true;
1340 }
1341
1342 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1343 assert(u);
1344 assert(target);
1345
1346 if (target->type != UNIT_TARGET)
1347 return 0;
1348
1349 /* Only add the dependency if both units are loaded, so that
1350 * the loop check below is reliable. */
1351 if (u->load_state != UNIT_LOADED ||
1352 target->load_state != UNIT_LOADED)
1353 return 0;
1354
1355 /* If either side wants no automatic dependencies, then let's
1356 * skip this */
1357 if (!u->default_dependencies ||
1358 !target->default_dependencies)
1359 return 0;
1360
1361 /* Don't create loops */
1362 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1363 return 0;
1364
1365 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1366 }
1367
1368 static int unit_add_slice_dependencies(Unit *u) {
1369 UnitDependencyMask mask;
1370 assert(u);
1371
1372 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1373 return 0;
1374
1375 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1376 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1377 relationship). */
1378 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1379
1380 if (UNIT_ISSET(u->slice))
1381 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1382
1383 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1384 return 0;
1385
1386 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1387 }
1388
1389 static int unit_add_mount_dependencies(Unit *u) {
1390 UnitDependencyInfo di;
1391 const char *path;
1392 Iterator i;
1393 int r;
1394
1395 assert(u);
1396
1397 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1398 char prefix[strlen(path) + 1];
1399
1400 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1401 _cleanup_free_ char *p = NULL;
1402 Unit *m;
1403
1404 r = unit_name_from_path(prefix, ".mount", &p);
1405 if (r < 0)
1406 return r;
1407
1408 m = manager_get_unit(u->manager, p);
1409 if (!m) {
1410 /* Make sure to load the mount unit if
1411 * it exists. If so the dependencies
1412 * on this unit will be added later
1413 * during the loading of the mount
1414 * unit. */
1415 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1416 continue;
1417 }
1418 if (m == u)
1419 continue;
1420
1421 if (m->load_state != UNIT_LOADED)
1422 continue;
1423
1424 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1425 if (r < 0)
1426 return r;
1427
1428 if (m->fragment_path) {
1429 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1430 if (r < 0)
1431 return r;
1432 }
1433 }
1434 }
1435
1436 return 0;
1437 }
1438
1439 static int unit_add_startup_units(Unit *u) {
1440 CGroupContext *c;
1441 int r;
1442
1443 c = unit_get_cgroup_context(u);
1444 if (!c)
1445 return 0;
1446
1447 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1448 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1449 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1450 return 0;
1451
1452 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1453 if (r < 0)
1454 return r;
1455
1456 return set_put(u->manager->startup_units, u);
1457 }
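/* Units end up in manager->startup_units when they carry any of the Startup* resource settings
 * (StartupCPUShares=, StartupIOWeight=, StartupBlockIOWeight=); keeping them in a set lets the
 * manager refresh their cgroup parameters when it transitions between the startup and regular
 * states. */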
1458
1459 int unit_load(Unit *u) {
1460 int r;
1461
1462 assert(u);
1463
1464 if (u->in_load_queue) {
1465 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1466 u->in_load_queue = false;
1467 }
1468
1469 if (u->type == _UNIT_TYPE_INVALID)
1470 return -EINVAL;
1471
1472 if (u->load_state != UNIT_STUB)
1473 return 0;
1474
1475 if (u->transient_file) {
1476 r = fflush_and_check(u->transient_file);
1477 if (r < 0)
1478 goto fail;
1479
1480 u->transient_file = safe_fclose(u->transient_file);
1481 u->fragment_mtime = now(CLOCK_REALTIME);
1482 }
1483
1484 if (UNIT_VTABLE(u)->load) {
1485 r = UNIT_VTABLE(u)->load(u);
1486 if (r < 0)
1487 goto fail;
1488 }
1489
1490 if (u->load_state == UNIT_STUB) {
1491 r = -ENOENT;
1492 goto fail;
1493 }
1494
1495 if (u->load_state == UNIT_LOADED) {
1496 unit_add_to_target_deps_queue(u);
1497
1498 r = unit_add_slice_dependencies(u);
1499 if (r < 0)
1500 goto fail;
1501
1502 r = unit_add_mount_dependencies(u);
1503 if (r < 0)
1504 goto fail;
1505
1506 r = unit_add_startup_units(u);
1507 if (r < 0)
1508 goto fail;
1509
1510 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1511 log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
1512 r = -EINVAL;
1513 goto fail;
1514 }
1515
1516 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1517 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, so it has no effect.");
1518
1519 unit_update_cgroup_members_masks(u);
1520 }
1521
1522 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1523
1524 unit_add_to_dbus_queue(unit_follow_merge(u));
1525 unit_add_to_gc_queue(u);
1526
1527 return 0;
1528
1529 fail:
1530 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1531 u->load_error = r;
1532 unit_add_to_dbus_queue(u);
1533 unit_add_to_gc_queue(u);
1534
1535 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1536
1537 return r;
1538 }
1539
1540 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1541 Condition *c;
1542 int triggered = -1;
1543
1544 assert(u);
1545 assert(to_string);
1546
1547 /* If the condition list is empty, then it is true */
1548 if (!first)
1549 return true;
1550
1551 /* Otherwise, we return true if all of the non-trigger conditions apply
1552 * and, if there are any trigger conditions at all, at least one of
1553 * them applies. */
1554 LIST_FOREACH(conditions, c, first) {
1555 int r;
1556
1557 r = condition_test(c);
1558 if (r < 0)
1559 log_unit_warning(u,
1560 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1561 to_string(c->type),
1562 c->trigger ? "|" : "",
1563 c->negate ? "!" : "",
1564 c->parameter);
1565 else
1566 log_unit_debug(u,
1567 "%s=%s%s%s %s.",
1568 to_string(c->type),
1569 c->trigger ? "|" : "",
1570 c->negate ? "!" : "",
1571 c->parameter,
1572 condition_result_to_string(c->result));
1573
1574 if (!c->trigger && r <= 0)
1575 return false;
1576
1577 if (c->trigger && triggered <= 0)
1578 triggered = r > 0;
1579 }
1580
1581 return triggered != 0;
1582 }
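/* Worked example of the trigger ("|") logic above, for a unit with these conditions (the paths
 * are made up for illustration):
 *
 *     ConditionPathExists=/etc/foo.conf      <- non-trigger: must pass
 *     ConditionPathExists=|/run/bar          <- trigger: at least one of
 *     ConditionPathExists=|/run/baz          <- these two must pass
 *
 * The list evaluates to true only if /etc/foo.conf exists and at least one of /run/bar or
 * /run/baz exists. With no trigger conditions at all, "triggered" stays at -1 and the
 * non-trigger results alone decide. */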
1583
1584 static bool unit_condition_test(Unit *u) {
1585 assert(u);
1586
1587 dual_timestamp_get(&u->condition_timestamp);
1588 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1589
1590 return u->condition_result;
1591 }
1592
1593 static bool unit_assert_test(Unit *u) {
1594 assert(u);
1595
1596 dual_timestamp_get(&u->assert_timestamp);
1597 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1598
1599 return u->assert_result;
1600 }
1601
1602 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1603 DISABLE_WARNING_FORMAT_NONLITERAL;
1604 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1605 REENABLE_WARNING;
1606 }
1607
1608 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1609 const char *format;
1610 const UnitStatusMessageFormats *format_table;
1611
1612 assert(u);
1613 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1614
1615 if (t != JOB_RELOAD) {
1616 format_table = &UNIT_VTABLE(u)->status_message_formats;
1617 if (format_table) {
1618 format = format_table->starting_stopping[t == JOB_STOP];
1619 if (format)
1620 return format;
1621 }
1622 }
1623
1624 /* Return generic strings */
1625 if (t == JOB_START)
1626 return "Starting %s.";
1627 else if (t == JOB_STOP)
1628 return "Stopping %s.";
1629 else
1630 return "Reloading %s.";
1631 }
1632
1633 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1634 const char *format;
1635
1636 assert(u);
1637
1638 /* Reload status messages have traditionally not been printed to console. */
1639 if (!IN_SET(t, JOB_START, JOB_STOP))
1640 return;
1641
1642 format = unit_get_status_message_format(u, t);
1643
1644 DISABLE_WARNING_FORMAT_NONLITERAL;
1645 unit_status_printf(u, "", format);
1646 REENABLE_WARNING;
1647 }
1648
1649 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1650 const char *format, *mid;
1651 char buf[LINE_MAX];
1652
1653 assert(u);
1654
1655 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1656 return;
1657
1658 if (log_on_console())
1659 return;
1660
1661 /* We log status messages for all units and all operations. */
1662
1663 format = unit_get_status_message_format(u, t);
1664
1665 DISABLE_WARNING_FORMAT_NONLITERAL;
1666 (void) snprintf(buf, sizeof buf, format, unit_description(u));
1667 REENABLE_WARNING;
1668
1669 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1670 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1671 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1672
1673 /* Note that we deliberately use LOG_MESSAGE() instead of
1674 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1675 * closely what is written to screen using the status output,
1676 * which is supposed to be the highest level, friendliest output
1677 * possible, which means we should avoid the low-level unit
1678 * name. */
1679 log_struct(LOG_INFO,
1680 LOG_MESSAGE("%s", buf),
1681 LOG_UNIT_ID(u),
1682 LOG_UNIT_INVOCATION_ID(u),
1683 mid);
1684 }
1685
1686 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1687 assert(u);
1688 assert(t >= 0);
1689 assert(t < _JOB_TYPE_MAX);
1690
1691 unit_status_log_starting_stopping_reloading(u, t);
1692 unit_status_print_starting_stopping(u, t);
1693 }
1694
1695 int unit_start_limit_test(Unit *u) {
1696 assert(u);
1697
1698 if (ratelimit_below(&u->start_limit)) {
1699 u->start_limit_hit = false;
1700 return 0;
1701 }
1702
1703 log_unit_warning(u, "Start request repeated too quickly.");
1704 u->start_limit_hit = true;
1705
1706 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1707 }
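/* The start_limit ratelimit consulted here is the one initialized in unit_new() from the
 * manager's default_start_limit_interval/_burst (per-unit overrides exist via the
 * StartLimitIntervalSec= and StartLimitBurst= settings). Once the burst is exceeded within the
 * interval, start_limit_hit is latched and the configured start_limit_action, if any, is carried
 * out as an emergency action. */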
1708
1709 bool unit_shall_confirm_spawn(Unit *u) {
1710 assert(u);
1711
1712 if (manager_is_confirm_spawn_disabled(u->manager))
1713 return false;
1714
1715 /* For some reason, units remaining in the same process group
1716 * as PID 1 fail to acquire the console even if it's not used
1717 * by any process. So skip the confirmation question for them. */
1718 return !unit_get_exec_context(u)->same_pgrp;
1719 }
1720
1721 static bool unit_verify_deps(Unit *u) {
1722 Unit *other;
1723 Iterator j;
1724 void *v;
1725
1726 assert(u);
1727
1728 /* Checks whether all BindsTo= dependencies of this unit are fulfilled, but only those that are also
1729 * combined with After=. We do not check Requires= or Requisite= here, as they should only have an effect
1730 * on job processing, and none afterwards. We also don't check BindsTo= dependencies that are not used in
1731 * conjunction with After=, as for them any such check would make things entirely racy. */
1732
1733 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1734
1735 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1736 continue;
1737
1738 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1739 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1740 return false;
1741 }
1742 }
1743
1744 return true;
1745 }
1746
1747 /* Errors:
1748 * -EBADR: This unit type does not support starting.
1749 * -EALREADY: Unit is already started.
1750 * -EAGAIN: An operation is already in progress. Retry later.
1751 * -ECANCELED: Too many requests for now.
1752 * -EPROTO: Assert failed.
1753 * -EINVAL: Unit not loaded.
1754 * -EOPNOTSUPP: Unit type not supported.
1755 * -ENOLINK: The necessary dependencies are not fulfilled.
1756 * -ESTALE: This unit has been started before and can't be started a second time
1757 */
1758 int unit_start(Unit *u) {
1759 UnitActiveState state;
1760 Unit *following;
1761
1762 assert(u);
1763
1764 /* If this is already started, then this will succeed. Note
1765 * that this will even succeed if this unit is not startable
1766 * by the user. This is relied on to detect when we need to
1767 * wait for units and when waiting is finished. */
1768 state = unit_active_state(u);
1769 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1770 return -EALREADY;
1771
1772 /* Units that aren't loaded cannot be started */
1773 if (u->load_state != UNIT_LOADED)
1774 return -EINVAL;
1775
1776 /* Refuse starting scope units more than once */
1777 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1778 return -ESTALE;
1779
1780 /* If the conditions failed, don't do anything at all. If we
1781 * are already activating, this call might still be useful to
1782 * speed up activation in case there is some hold-off time,
1783 * but we don't want to recheck the condition in that case. */
1784 if (state != UNIT_ACTIVATING &&
1785 !unit_condition_test(u)) {
1786 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1787 return -EALREADY;
1788 }
1789
1790 /* If the asserts failed, fail the entire job */
1791 if (state != UNIT_ACTIVATING &&
1792 !unit_assert_test(u)) {
1793 log_unit_notice(u, "Starting requested but asserts failed.");
1794 return -EPROTO;
1795 }
1796
1797 /* Units of types that aren't supported cannot be
1798 * started. Note that we do this test only after the condition
1799 * checks, so that we return condition check errors
1800 * (which are usually not considered a true failure) rather than
1801 * "not supported" errors (which are considered a failure).
1802 */
1803 if (!unit_supported(u))
1804 return -EOPNOTSUPP;
1805
1806 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1807 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1808 * effect anymore, due to a reload or due to a failed condition. */
1809 if (!unit_verify_deps(u))
1810 return -ENOLINK;
1811
1812 /* Forward to the main object, if we aren't it. */
1813 following = unit_following(u);
1814 if (following) {
1815 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1816 return unit_start(following);
1817 }
1818
1819 /* If it is stopped, but we cannot start it, then fail */
1820 if (!UNIT_VTABLE(u)->start)
1821 return -EBADR;
1822
1823 /* We don't suppress calls to ->start() here when we are
1824 * already starting, to allow this request to be used as a
1825 * "hurry up" call, for example when the unit is in some "auto
1826 * restart" state where it waits for a holdoff timer to elapse
1827 * before it will start again. */
1828
1829 unit_add_to_dbus_queue(u);
1830
1831 return UNIT_VTABLE(u)->start(u);
1832 }
1833
1834 bool unit_can_start(Unit *u) {
1835 assert(u);
1836
1837 if (u->load_state != UNIT_LOADED)
1838 return false;
1839
1840 if (!unit_supported(u))
1841 return false;
1842
1843 /* Scope units may be started only once */
1844 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1845 return false;
1846
1847 return !!UNIT_VTABLE(u)->start;
1848 }
1849
1850 bool unit_can_isolate(Unit *u) {
1851 assert(u);
1852
1853 return unit_can_start(u) &&
1854 u->allow_isolate;
1855 }
1856
1857 /* Errors:
1858 * -EBADR: This unit type does not support stopping.
1859 * -EALREADY: Unit is already stopped.
1860 * -EAGAIN: An operation is already in progress. Retry later.
1861 */
1862 int unit_stop(Unit *u) {
1863 UnitActiveState state;
1864 Unit *following;
1865
1866 assert(u);
1867
1868 state = unit_active_state(u);
1869 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1870 return -EALREADY;
1871
1872 following = unit_following(u);
1873 if (following) {
1874 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1875 return unit_stop(following);
1876 }
1877
1878 if (!UNIT_VTABLE(u)->stop)
1879 return -EBADR;
1880
1881 unit_add_to_dbus_queue(u);
1882
1883 return UNIT_VTABLE(u)->stop(u);
1884 }
1885
1886 bool unit_can_stop(Unit *u) {
1887 assert(u);
1888
1889 if (!unit_supported(u))
1890 return false;
1891
1892 if (u->perpetual)
1893 return false;
1894
1895 return !!UNIT_VTABLE(u)->stop;
1896 }
1897
1898 /* Errors:
1899 * -EBADR: This unit type does not support reloading.
1900 * -ENOEXEC: Unit is not started.
1901 * -EAGAIN: An operation is already in progress. Retry later.
1902 */
1903 int unit_reload(Unit *u) {
1904 UnitActiveState state;
1905 Unit *following;
1906
1907 assert(u);
1908
1909 if (u->load_state != UNIT_LOADED)
1910 return -EINVAL;
1911
1912 if (!unit_can_reload(u))
1913 return -EBADR;
1914
1915 state = unit_active_state(u);
1916 if (state == UNIT_RELOADING)
1917 return -EALREADY;
1918
1919 if (state != UNIT_ACTIVE) {
1920 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1921 return -ENOEXEC;
1922 }
1923
1924 following = unit_following(u);
1925 if (following) {
1926 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1927 return unit_reload(following);
1928 }
1929
1930 unit_add_to_dbus_queue(u);
1931
1932 if (!UNIT_VTABLE(u)->reload) {
1933 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1934 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1935 return 0;
1936 }
1937
1938 return UNIT_VTABLE(u)->reload(u);
1939 }
1940
1941 bool unit_can_reload(Unit *u) {
1942 assert(u);
1943
1944 if (UNIT_VTABLE(u)->can_reload)
1945 return UNIT_VTABLE(u)->can_reload(u);
1946
1947 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1948 return true;
1949
1950 return UNIT_VTABLE(u)->reload;
1951 }
1952
1953 static void unit_check_unneeded(Unit *u) {
1954
1955 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1956
1957 static const UnitDependency needed_dependencies[] = {
1958 UNIT_REQUIRED_BY,
1959 UNIT_REQUISITE_OF,
1960 UNIT_WANTED_BY,
1961 UNIT_BOUND_BY,
1962 };
1963
1964 unsigned j;
1965 int r;
1966
1967 assert(u);
1968
1969 /* If this unit shall be shut down when unneeded, then do
1970 * so. */
1971
1972 if (!u->stop_when_unneeded)
1973 return;
1974
1975 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1976 return;
1977
1978 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
1979 Unit *other;
1980 Iterator i;
1981 void *v;
1982
1983 HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
1984 if (unit_active_or_pending(other) || unit_will_restart(other))
1985 return;
1986 }
1987
1988 /* If stopping a unit fails continuously, we might enter a stop
1989 * loop here, hence rate-limit how often we act on the unit
1990 * being unneeded. */
1991 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
1992 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1993 return;
1994 }
1995
1996 log_unit_info(u, "Unit not needed anymore. Stopping.");
1997
1998 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1999 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2000 if (r < 0)
2001 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2002 }
2003
2004 static void unit_check_binds_to(Unit *u) {
2005 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2006 bool stop = false;
2007 Unit *other;
2008 Iterator i;
2009 void *v;
2010 int r;
2011
2012 assert(u);
2013
2014 if (u->job)
2015 return;
2016
2017 if (unit_active_state(u) != UNIT_ACTIVE)
2018 return;
2019
2020 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2021 if (other->job)
2022 continue;
2023
2024 if (!other->coldplugged)
2025 /* We might yet create a job for the other unit… */
2026 continue;
2027
2028 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2029 continue;
2030
2031 stop = true;
2032 break;
2033 }
2034
2035 if (!stop)
2036 return;
2037
2038 /* If stopping a unit fails continuously, we might enter a stop
2039 * loop here, hence rate-limit how often we react to the bound-to
2040 * unit being gone. */
2041 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2042 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2043 return;
2044 }
2045
2046 assert(other);
2047 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2048
2049 /* A unit we need to run is gone. Sniff. Let's stop this. */
2050 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2051 if (r < 0)
2052 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2053 }
2054
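/* The unit became active without us having enqueued a job for it. Retroactively enqueue start jobs for its
 * Requires=, BindsTo= and Wants= dependencies (unless we are ordered after them), and stop jobs for units
 * listed in Conflicts= or ConflictedBy=. */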
2055 static void retroactively_start_dependencies(Unit *u) {
2056 Iterator i;
2057 Unit *other;
2058 void *v;
2059
2060 assert(u);
2061 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2062
2063 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2064 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2065 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2066 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2067
2068 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2069 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2070 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2071 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2072
2073 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2074 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2075 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2076 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2077
2078 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2079 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2080 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2081
2082 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2083 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2084 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2085 }
2086
2087 static void retroactively_stop_dependencies(Unit *u) {
2088 Unit *other;
2089 Iterator i;
2090 void *v;
2091
2092 assert(u);
2093 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2094
2095 /* Pull down units which are bound to us recursively if enabled */
2096 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2097 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2098 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2099 }
2100
2101 static void check_unneeded_dependencies(Unit *u) {
2102 Unit *other;
2103 Iterator i;
2104 void *v;
2105
2106 assert(u);
2107 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2108
2109 /* Garbage collect services that might not be needed anymore, if enabled */
2110 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2111 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2112 unit_check_unneeded(other);
2113 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2114 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2115 unit_check_unneeded(other);
2116 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2117 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2118 unit_check_unneeded(other);
2119 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2120 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2121 unit_check_unneeded(other);
2122 }
2123
2124 void unit_start_on_failure(Unit *u) {
2125 Unit *other;
2126 Iterator i;
2127 void *v;
2128 int r;
2129
2130 assert(u);
2131
2132 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2133 return;
2134
2135 log_unit_info(u, "Triggering OnFailure= dependencies.");
2136
2137 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2138 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2139
2140 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2141 if (r < 0)
2142 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2143 }
2144 }
2145
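/* Let every unit that triggers us (i.e. is listed in our TriggeredBy= dependencies, such as the .path,
 * .socket or .timer unit that activates us) know about our state change. */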
2146 void unit_trigger_notify(Unit *u) {
2147 Unit *other;
2148 Iterator i;
2149 void *v;
2150
2151 assert(u);
2152
2153 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2154 if (UNIT_VTABLE(other)->trigger_notify)
2155 UNIT_VTABLE(other)->trigger_notify(other, u);
2156 }
2157
2158 static int unit_log_resources(Unit *u) {
2159
2160 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2161 size_t n_message_parts = 0, n_iovec = 0;
2162 char* message_parts[3 + 1], *t; /* at most three parts (CPU time, IP ingress, IP egress) plus a NULL terminator */
2163 nsec_t nsec = NSEC_INFINITY;
2164 CGroupIPAccountingMetric m;
2165 size_t i;
2166 int r;
2167 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2168 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2169 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2170 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2171 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2172 };
2173
2174 assert(u);
2175
2176 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2177 * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2178 * information and the complete data in structured fields. */
2179
2180 (void) unit_get_cpu_usage(u, &nsec);
2181 if (nsec != NSEC_INFINITY) {
2182 char buf[FORMAT_TIMESPAN_MAX] = "";
2183
2184 /* Format the CPU time for inclusion in the structured log message */
2185 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2186 r = log_oom();
2187 goto finish;
2188 }
2189 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2190
2191 /* Format the CPU time for inclusion in the human language message string */
2192 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2193 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2194 if (!t) {
2195 r = log_oom();
2196 goto finish;
2197 }
2198
2199 message_parts[n_message_parts++] = t;
2200 }
2201
2202 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2203 char buf[FORMAT_BYTES_MAX] = "";
2204 uint64_t value = UINT64_MAX;
2205
2206 assert(ip_fields[m]);
2207
2208 (void) unit_get_ip_accounting(u, m, &value);
2209 if (value == UINT64_MAX)
2210 continue;
2211
2212 /* Format IP accounting data for inclusion in the structured log message */
2213 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2214 r = log_oom();
2215 goto finish;
2216 }
2217 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2218
2219 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2220 * bytes counters (and not for the packets counters) */
2221 if (m == CGROUP_IP_INGRESS_BYTES)
2222 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2223 format_bytes(buf, sizeof(buf), value),
2224 " IP traffic");
2225 else if (m == CGROUP_IP_EGRESS_BYTES)
2226 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2227 format_bytes(buf, sizeof(buf), value),
2228 " IP traffic");
2229 else
2230 continue;
2231 if (!t) {
2232 r = log_oom();
2233 goto finish;
2234 }
2235
2236 message_parts[n_message_parts++] = t;
2237 }
2238
2239 /* Is there any accounting data available at all? */
2240 if (n_iovec == 0) {
2241 r = 0;
2242 goto finish;
2243 }
2244
2245 if (n_message_parts == 0)
2246 t = strjoina("MESSAGE=", u->id, ": Completed");
2247 else {
2248 _cleanup_free_ char *joined = NULL;
2249
2250 message_parts[n_message_parts] = NULL;
2251
2252 joined = strv_join(message_parts, ", ");
2253 if (!joined) {
2254 r = log_oom();
2255 goto finish;
2256 }
2257
2258 t = strjoina("MESSAGE=", u->id, ": ", joined);
2259 }
2260
2261 /* The following four fields are allocated on the stack or are static strings; we hence don't want to free
2262 * them, and don't increase n_iovec for them either. */
2263 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2264 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2265
2266 t = strjoina(u->manager->unit_log_field, u->id);
2267 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2268
2269 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2270 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2271
2272 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2273 r = 0;
2274
2275 finish:
2276 for (i = 0; i < n_message_parts; i++)
2277 free(message_parts[i]);
2278
2279 for (i = 0; i < n_iovec; i++)
2280 free(iovec[i].iov_base);
2281
2282 return r;
2283
2284 }
2285
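/* Keep the manager's console reference count in sync with whether this unit currently needs the console. */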
2286 static void unit_update_on_console(Unit *u) {
2287 bool b;
2288
2289 assert(u);
2290
2291 b = unit_needs_console(u);
2292 if (u->on_console == b)
2293 return;
2294
2295 u->on_console = b;
2296 if (b)
2297 manager_ref_console(u->manager);
2298 else
2299 manager_unref_console(u->manager);
2300
2301 }
2302
2303 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2304 bool unexpected;
2305 Manager *m;
2306
2307 assert(u);
2308 assert(os < _UNIT_ACTIVE_STATE_MAX);
2309 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2310
2311 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2312 * UnitActiveState! That means that ns == os is expected behavior here. For example: if a mount point is
2313 * remounted, this function will be called too! */
2314
2315 m = u->manager;
2316
2317 /* Update timestamps for state changes */
2318 if (!MANAGER_IS_RELOADING(m)) {
2319 dual_timestamp_get(&u->state_change_timestamp);
2320
2321 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2322 u->inactive_exit_timestamp = u->state_change_timestamp;
2323 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2324 u->inactive_enter_timestamp = u->state_change_timestamp;
2325
2326 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2327 u->active_enter_timestamp = u->state_change_timestamp;
2328 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2329 u->active_exit_timestamp = u->state_change_timestamp;
2330 }
2331
2332 /* Keep track of failed units */
2333 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
2334
2335 /* Make sure the cgroup and state files are always removed when we become inactive */
2336 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2337 unit_prune_cgroup(u);
2338 unit_unlink_state_files(u);
2339 }
2340
2341 unit_update_on_console(u);
2342
2343 if (u->job) {
2344 unexpected = false;
2345
2346 if (u->job->state == JOB_WAITING)
2347
2348 /* So we reached a different state for this
2349 * job. Let's see if we can run it now if it
2350 * failed previously due to EAGAIN. */
2351 job_add_to_run_queue(u->job);
2352
2353 /* Let's check whether this state change constitutes a
2354 * finished job, or maybe contradicts a running job and
2355 * hence needs to invalidate jobs. */
2356
2357 switch (u->job->type) {
2358
2359 case JOB_START:
2360 case JOB_VERIFY_ACTIVE:
2361
2362 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2363 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2364 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2365 unexpected = true;
2366
2367 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2368 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2369 }
2370
2371 break;
2372
2373 case JOB_RELOAD:
2374 case JOB_RELOAD_OR_START:
2375 case JOB_TRY_RELOAD:
2376
2377 if (u->job->state == JOB_RUNNING) {
2378 if (ns == UNIT_ACTIVE)
2379 job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2380 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2381 unexpected = true;
2382
2383 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2384 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2385 }
2386 }
2387
2388 break;
2389
2390 case JOB_STOP:
2391 case JOB_RESTART:
2392 case JOB_TRY_RESTART:
2393
2394 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2395 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2396 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2397 unexpected = true;
2398 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2399 }
2400
2401 break;
2402
2403 default:
2404 assert_not_reached("Job type unknown");
2405 }
2406
2407 } else
2408 unexpected = true;
2409
2410 if (!MANAGER_IS_RELOADING(m)) {
2411
2412 /* If this state change happened without being
2413 * requested by a job, then let's retroactively start
2414 * or stop dependencies. We skip that step when
2415 * deserializing, since we don't want to create any
2416 * additional jobs just because something is already
2417 * activated. */
2418
2419 if (unexpected) {
2420 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2421 retroactively_start_dependencies(u);
2422 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2423 retroactively_stop_dependencies(u);
2424 }
2425
2426 /* Stop unneeded units regardless of whether going down was expected or not */
2427 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2428 check_unneeded_dependencies(u);
2429
2430 if (ns != os && ns == UNIT_FAILED) {
2431 log_unit_debug(u, "Unit entered failed state.");
2432
2433 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2434 unit_start_on_failure(u);
2435 }
2436 }
2437
2438 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2439
2440 if (u->type == UNIT_SERVICE &&
2441 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2442 !MANAGER_IS_RELOADING(m)) {
2443 /* Write audit record if we have just finished starting up */
2444 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2445 u->in_audit = true;
2446 }
2447
2448 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2449 manager_send_unit_plymouth(m, u);
2450
2451 } else {
2452
2453 if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2454 !UNIT_IS_INACTIVE_OR_FAILED(os)
2455 && !MANAGER_IS_RELOADING(m)) {
2456
2457 /* This unit just stopped/failed. */
2458 if (u->type == UNIT_SERVICE) {
2459
2460 /* Hmm, if there was no start record written,
2461 * write it now, so that we always have a nice
2462 * pair */
2463 if (!u->in_audit) {
2464 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2465
2466 if (ns == UNIT_INACTIVE)
2467 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2468 } else
2469 /* Write audit record if we have just finished shutting down */
2470 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2471
2472 u->in_audit = false;
2473 }
2474
2475 /* Write a log message about consumed resources */
2476 unit_log_resources(u);
2477 }
2478 }
2479
2480 manager_recheck_journal(m);
2481 manager_recheck_dbus(m);
2482
2483 unit_trigger_notify(u);
2484
2485 if (!MANAGER_IS_RELOADING(u->manager)) {
2486 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2487 unit_check_unneeded(u);
2488
2489 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2490 * something BindsTo= a Type=oneshot unit, as these units go directly from starting to inactive,
2491 * without ever entering the "started" state.) */
2492 unit_check_binds_to(u);
2493
2494 if (os != UNIT_FAILED && ns == UNIT_FAILED)
2495 (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
2496 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
2497 (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
2498 }
2499
2500 unit_add_to_dbus_queue(u);
2501 unit_add_to_gc_queue(u);
2502 }
2503
2504 int unit_watch_pid(Unit *u, pid_t pid) {
2505 int r;
2506
2507 assert(u);
2508 assert(pid_is_valid(pid));
2509
2510 /* Watch a specific PID */
2511
2512 r = set_ensure_allocated(&u->pids, NULL);
2513 if (r < 0)
2514 return r;
2515
2516 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2517 if (r < 0)
2518 return r;
2519
2520 /* First try, let's add the unit keyed by "pid". */
2521 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2522 if (r == -EEXIST) {
2523 Unit **array;
2524 bool found = false;
2525 size_t n = 0;
2526
2527 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2528 * to an array of Units rather than just a Unit) lists us already. */
2529
2530 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2531 if (array)
2532 for (; array[n]; n++)
2533 if (array[n] == u)
2534 found = true;
2535
2536 if (found) /* Found it already? if so, do nothing */
2537 r = 0;
2538 else {
2539 Unit **new_array;
2540
2541 /* Allocate a new array */
2542 new_array = new(Unit*, n + 2);
2543 if (!new_array)
2544 return -ENOMEM;
2545
2546 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2547 new_array[n] = u;
2548 new_array[n+1] = NULL;
2549
2550 /* Add or replace the old array */
2551 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2552 if (r < 0) {
2553 free(new_array);
2554 return r;
2555 }
2556
2557 free(array);
2558 }
2559 } else if (r < 0)
2560 return r;
2561
2562 r = set_put(u->pids, PID_TO_PTR(pid));
2563 if (r < 0)
2564 return r;
2565
2566 return 0;
2567 }
2568
2569 void unit_unwatch_pid(Unit *u, pid_t pid) {
2570 Unit **array;
2571
2572 assert(u);
2573 assert(pid_is_valid(pid));
2574
2575 /* First let's drop the unit in case it's keyed as "pid". */
2576 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2577
2578 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2579 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2580 if (array) {
2581 size_t n, m = 0;
2582
2583 /* Let's iterate through the array, dropping our own entry */
2584 for (n = 0; array[n]; n++)
2585 if (array[n] != u)
2586 array[m++] = array[n];
2587 array[m] = NULL;
2588
2589 if (m == 0) {
2590 /* The array is now empty, remove the entire entry */
2591 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2592 free(array);
2593 }
2594 }
2595
2596 (void) set_remove(u->pids, PID_TO_PTR(pid));
2597 }
2598
2599 void unit_unwatch_all_pids(Unit *u) {
2600 assert(u);
2601
2602 while (!set_isempty(u->pids))
2603 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2604
2605 u->pids = set_free(u->pids);
2606 }
2607
2608 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2609 Iterator i;
2610 void *e;
2611
2612 assert(u);
2613
2614 /* Cleans dead PIDs from our list */
2615
2616 SET_FOREACH(e, u->pids, i) {
2617 pid_t pid = PTR_TO_PID(e);
2618
2619 if (pid == except1 || pid == except2)
2620 continue;
2621
2622 if (!pid_is_unwaited(pid))
2623 unit_unwatch_pid(u, pid);
2624 }
2625 }
2626
2627 bool unit_job_is_applicable(Unit *u, JobType j) {
2628 assert(u);
2629 assert(j >= 0 && j < _JOB_TYPE_MAX);
2630
2631 switch (j) {
2632
2633 case JOB_VERIFY_ACTIVE:
2634 case JOB_START:
2635 case JOB_NOP:
2636 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2637 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2638 * jobs for them. */
2639 return true;
2640
2641 case JOB_STOP:
2642 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2643 * external events), hence it makes no sense to permit enqueuing such a request either. */
2644 return !u->perpetual;
2645
2646 case JOB_RESTART:
2647 case JOB_TRY_RESTART:
2648 return unit_can_stop(u) && unit_can_start(u);
2649
2650 case JOB_RELOAD:
2651 case JOB_TRY_RELOAD:
2652 return unit_can_reload(u);
2653
2654 case JOB_RELOAD_OR_START:
2655 return unit_can_reload(u) && unit_can_start(u);
2656
2657 default:
2658 assert_not_reached("Invalid job type");
2659 }
2660 }
2661
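/* Warn when a dependency is dropped because it would point back at the unit itself, which typically
 * happens after two units have been merged. */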
2662 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2663 assert(u);
2664
2665 /* Only warn about some unit types */
2666 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2667 return;
2668
2669 if (streq_ptr(u->id, other))
2670 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2671 else
2672 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2673 }
2674
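/* Register 'other' in the dependency hashmap 'h', merging the given origin/destination masks into any
 * existing entry. The masks record where a dependency came from (unit file, udev rule, implicit
 * dependency, and so on), so that it can later be dropped again together with its source. Returns 1 if
 * the entry was added or extended, 0 if nothing changed. */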
2675 static int unit_add_dependency_hashmap(
2676 Hashmap **h,
2677 Unit *other,
2678 UnitDependencyMask origin_mask,
2679 UnitDependencyMask destination_mask) {
2680
2681 UnitDependencyInfo info;
2682 int r;
2683
2684 assert(h);
2685 assert(other);
2686 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2687 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2688 assert(origin_mask > 0 || destination_mask > 0);
2689
2690 r = hashmap_ensure_allocated(h, NULL);
2691 if (r < 0)
2692 return r;
2693
2694 assert_cc(sizeof(void*) == sizeof(info));
2695
2696 info.data = hashmap_get(*h, other);
2697 if (info.data) {
2698 /* Entry already exists. Add in our mask. */
2699
2700 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2701 FLAGS_SET(destination_mask, info.destination_mask))
2702 return 0; /* NOP */
2703
2704 info.origin_mask |= origin_mask;
2705 info.destination_mask |= destination_mask;
2706
2707 r = hashmap_update(*h, other, info.data);
2708 } else {
2709 info = (UnitDependencyInfo) {
2710 .origin_mask = origin_mask,
2711 .destination_mask = destination_mask,
2712 };
2713
2714 r = hashmap_put(*h, other, info.data);
2715 }
2716 if (r < 0)
2717 return r;
2718
2719 return 1;
2720 }
2721
2722 int unit_add_dependency(
2723 Unit *u,
2724 UnitDependency d,
2725 Unit *other,
2726 bool add_reference,
2727 UnitDependencyMask mask) {
2728
2729 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2730 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2731 [UNIT_WANTS] = UNIT_WANTED_BY,
2732 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2733 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2734 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2735 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2736 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2737 [UNIT_WANTED_BY] = UNIT_WANTS,
2738 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2739 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2740 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2741 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2742 [UNIT_BEFORE] = UNIT_AFTER,
2743 [UNIT_AFTER] = UNIT_BEFORE,
2744 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2745 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2746 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2747 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2748 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2749 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2750 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2751 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2752 };
2753 Unit *original_u = u, *original_other = other;
2754 int r;
2755
2756 assert(u);
2757 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2758 assert(other);
2759
2760 u = unit_follow_merge(u);
2761 other = unit_follow_merge(other);
2762
2763 /* We won't allow dependencies on ourselves. We will not
2764 * consider them an error, however. */
2765 if (u == other) {
2766 maybe_warn_about_dependency(original_u, original_other->id, d);
2767 return 0;
2768 }
2769
2770 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2771 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2772 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2773 return 0;
2774 }
2775
2776 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2777 if (r < 0)
2778 return r;
2779
2780 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2781 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2782 if (r < 0)
2783 return r;
2784 }
2785
2786 if (add_reference) {
2787 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2788 if (r < 0)
2789 return r;
2790
2791 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2792 if (r < 0)
2793 return r;
2794 }
2795
2796 unit_add_to_dbus_queue(u);
2797 return 0;
2798 }
2799
2800 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2801 int r;
2802
2803 assert(u);
2804
2805 r = unit_add_dependency(u, d, other, add_reference, mask);
2806 if (r < 0)
2807 return r;
2808
2809 return unit_add_dependency(u, e, other, add_reference, mask);
2810 }
2811
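/* If 'name' (or the basename of 'path', if no name is given) is a template, instantiate it: use our own
 * instance name, or our unit name prefix if we are not an instance ourselves. *buf holds the allocated
 * string, if any, and *ret points to the name to use. */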
2812 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2813 int r;
2814
2815 assert(u);
2816 assert(name || path);
2817 assert(buf);
2818 assert(ret);
2819
2820 if (!name)
2821 name = basename(path);
2822
2823 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2824 *buf = NULL;
2825 *ret = name;
2826 return 0;
2827 }
2828
2829 if (u->instance)
2830 r = unit_name_replace_instance(name, u->instance, buf);
2831 else {
2832 _cleanup_free_ char *i = NULL;
2833
2834 r = unit_name_to_prefix(u->id, &i);
2835 if (r < 0)
2836 return r;
2837
2838 r = unit_name_replace_instance(name, i, buf);
2839 }
2840 if (r < 0)
2841 return r;
2842
2843 *ret = *buf;
2844 return 0;
2845 }
2846
2847 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2848 _cleanup_free_ char *buf = NULL;
2849 Unit *other;
2850 int r;
2851
2852 assert(u);
2853 assert(name || path);
2854
2855 r = resolve_template(u, name, path, &buf, &name);
2856 if (r < 0)
2857 return r;
2858
2859 r = manager_load_unit(u->manager, name, path, NULL, &other);
2860 if (r < 0)
2861 return r;
2862
2863 return unit_add_dependency(u, d, other, add_reference, mask);
2864 }
2865
2866 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2867 _cleanup_free_ char *buf = NULL;
2868 Unit *other;
2869 int r;
2870
2871 assert(u);
2872 assert(name || path);
2873
2874 r = resolve_template(u, name, path, &buf, &name);
2875 if (r < 0)
2876 return r;
2877
2878 r = manager_load_unit(u->manager, name, path, NULL, &other);
2879 if (r < 0)
2880 return r;
2881
2882 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2883 }
2884
2885 int set_unit_path(const char *p) {
2886 /* This is mostly for debug purposes */
2887 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2888 return -errno;
2889
2890 return 0;
2891 }
2892
2893 char *unit_dbus_path(Unit *u) {
2894 assert(u);
2895
2896 if (!u->id)
2897 return NULL;
2898
2899 return unit_dbus_path_from_name(u->id);
2900 }
2901
2902 char *unit_dbus_path_invocation_id(Unit *u) {
2903 assert(u);
2904
2905 if (sd_id128_is_null(u->invocation_id))
2906 return NULL;
2907
2908 return unit_dbus_path_from_name(u->invocation_id_string);
2909 }
2910
2911 int unit_set_slice(Unit *u, Unit *slice) {
2912 assert(u);
2913 assert(slice);
2914
2915 /* Sets the unit slice if it has not been set before. Is extra
2916 * careful to only allow this for units that actually have a
2917 * cgroup context. Also, we don't allow setting this for slices
2918 * (since the parent slice is derived from the name). Make
2919 * sure the unit we set is actually a slice. */
2920
2921 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2922 return -EOPNOTSUPP;
2923
2924 if (u->type == UNIT_SLICE)
2925 return -EINVAL;
2926
2927 if (unit_active_state(u) != UNIT_INACTIVE)
2928 return -EBUSY;
2929
2930 if (slice->type != UNIT_SLICE)
2931 return -EINVAL;
2932
2933 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2934 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2935 return -EPERM;
2936
2937 if (UNIT_DEREF(u->slice) == slice)
2938 return 0;
2939
2940 /* Disallow slice changes if @u is already bound to cgroups */
2941 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2942 return -EBUSY;
2943
2944 unit_ref_set(&u->slice, u, slice);
2945 return 1;
2946 }
2947
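/* Pick a default slice for this unit if none was configured explicitly: instantiated units get an
 * implicit per-template slice, everything else ends up in system.slice (or in the root slice for the
 * user manager and for init.scope). */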
2948 int unit_set_default_slice(Unit *u) {
2949 _cleanup_free_ char *b = NULL;
2950 const char *slice_name;
2951 Unit *slice;
2952 int r;
2953
2954 assert(u);
2955
2956 if (UNIT_ISSET(u->slice))
2957 return 0;
2958
2959 if (u->instance) {
2960 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2961
2962 /* Implicitly place all instantiated units in their
2963 * own per-template slice */
2964
2965 r = unit_name_to_prefix(u->id, &prefix);
2966 if (r < 0)
2967 return r;
2968
2969 /* The prefix is already escaped, but it might include
2970 * "-" which has a special meaning for slice units,
2971 * hence escape it once more here. */
2972 escaped = unit_name_escape(prefix);
2973 if (!escaped)
2974 return -ENOMEM;
2975
2976 if (MANAGER_IS_SYSTEM(u->manager))
2977 b = strjoin("system-", escaped, ".slice");
2978 else
2979 b = strappend(escaped, ".slice");
2980 if (!b)
2981 return -ENOMEM;
2982
2983 slice_name = b;
2984 } else
2985 slice_name =
2986 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2987 ? SPECIAL_SYSTEM_SLICE
2988 : SPECIAL_ROOT_SLICE;
2989
2990 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2991 if (r < 0)
2992 return r;
2993
2994 return unit_set_slice(u, slice);
2995 }
2996
2997 const char *unit_slice_name(Unit *u) {
2998 assert(u);
2999
3000 if (!UNIT_ISSET(u->slice))
3001 return NULL;
3002
3003 return UNIT_DEREF(u->slice)->id;
3004 }
3005
3006 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3007 _cleanup_free_ char *t = NULL;
3008 int r;
3009
3010 assert(u);
3011 assert(type);
3012 assert(_found);
3013
3014 r = unit_name_change_suffix(u->id, type, &t);
3015 if (r < 0)
3016 return r;
3017 if (unit_has_name(u, t))
3018 return -EINVAL;
3019
3020 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3021 assert(r < 0 || *_found != u);
3022 return r;
3023 }
3024
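/* D-Bus callback for the NameOwnerChanged signal installed by unit_install_bus_match(): forward
 * ownership changes of the watched bus name to the unit type, e.g. so that a service with BusName= can
 * track it. */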
3025 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3026 const char *name, *old_owner, *new_owner;
3027 Unit *u = userdata;
3028 int r;
3029
3030 assert(message);
3031 assert(u);
3032
3033 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3034 if (r < 0) {
3035 bus_log_parse_error(r);
3036 return 0;
3037 }
3038
3039 old_owner = empty_to_null(old_owner);
3040 new_owner = empty_to_null(new_owner);
3041
3042 if (UNIT_VTABLE(u)->bus_name_owner_change)
3043 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3044
3045 return 0;
3046 }
3047
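/* Install a bus match so we get told whenever the owner of the given well-known bus name changes. */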
3048 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3049 const char *match;
3050
3051 assert(u);
3052 assert(bus);
3053 assert(name);
3054
3055 if (u->match_bus_slot)
3056 return -EBUSY;
3057
3058 match = strjoina("type='signal',"
3059 "sender='org.freedesktop.DBus',"
3060 "path='/org/freedesktop/DBus',"
3061 "interface='org.freedesktop.DBus',"
3062 "member='NameOwnerChanged',"
3063 "arg0='", name, "'");
3064
3065 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3066 }
3067
3068 int unit_watch_bus_name(Unit *u, const char *name) {
3069 int r;
3070
3071 assert(u);
3072 assert(name);
3073
3074 /* Watch a specific name on the bus. We only support one unit
3075 * watching each name for now. */
3076
3077 if (u->manager->api_bus) {
3078 /* If the bus is already available, install the match directly.
3079 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3080 r = unit_install_bus_match(u, u->manager->api_bus, name);
3081 if (r < 0)
3082 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3083 }
3084
3085 r = hashmap_put(u->manager->watch_bus, name, u);
3086 if (r < 0) {
3087 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3088 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3089 }
3090
3091 return 0;
3092 }
3093
3094 void unit_unwatch_bus_name(Unit *u, const char *name) {
3095 assert(u);
3096 assert(name);
3097
3098 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3099 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3100 }
3101
3102 bool unit_can_serialize(Unit *u) {
3103 assert(u);
3104
3105 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3106 }
3107
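/* Serialize a cgroup controller mask as a list of controller names under the given key, but only if the
 * mask is non-empty. */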
3108 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3109 _cleanup_free_ char *s = NULL;
3110 int r = 0;
3111
3112 assert(f);
3113 assert(key);
3114
3115 if (mask != 0) {
3116 r = cg_mask_to_string(mask, &s);
3117 if (r >= 0) {
3118 fputs(key, f);
3119 fputc('=', f);
3120 fputs(s, f);
3121 fputc('\n', f);
3122 }
3123 }
3124 return r;
3125 }
3126
3127 static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3128 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3129 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3130 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3131 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3132 };
3133
3134 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3135 CGroupIPAccountingMetric m;
3136 int r;
3137
3138 assert(u);
3139 assert(f);
3140 assert(fds);
3141
3142 if (unit_can_serialize(u)) {
3143 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3144 if (r < 0)
3145 return r;
3146 }
3147
3148 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
3149
3150 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3151 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
3152 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
3153 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3154
3155 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
3156 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
3157
3158 if (dual_timestamp_is_set(&u->condition_timestamp))
3159 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
3160
3161 if (dual_timestamp_is_set(&u->assert_timestamp))
3162 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
3163
3164 unit_serialize_item(u, f, "transient", yes_no(u->transient));
3165
3166 unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
3167 unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
3168 unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));
3169
3170 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3171 if (u->cpu_usage_last != NSEC_INFINITY)
3172 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3173
3174 if (u->cgroup_path)
3175 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
3176 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
3177 (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3178 (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3179 unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);
3180
3181 if (uid_is_valid(u->ref_uid))
3182 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
3183 if (gid_is_valid(u->ref_gid))
3184 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
3185
3186 if (!sd_id128_is_null(u->invocation_id))
3187 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3188
3189 bus_track_serialize(u->bus_track, f, "ref");
3190
3191 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3192 uint64_t v;
3193
3194 r = unit_get_ip_accounting(u, m, &v);
3195 if (r >= 0)
3196 unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
3197 }
3198
3199 if (serialize_jobs) {
3200 if (u->job) {
3201 fprintf(f, "job\n");
3202 job_serialize(u->job, f);
3203 }
3204
3205 if (u->nop_job) {
3206 fprintf(f, "job\n");
3207 job_serialize(u->nop_job, f);
3208 }
3209 }
3210
3211 /* End marker */
3212 fputc('\n', f);
3213 return 0;
3214 }
3215
3216 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3217 assert(u);
3218 assert(f);
3219 assert(key);
3220
3221 if (!value)
3222 return 0;
3223
3224 fputs(key, f);
3225 fputc('=', f);
3226 fputs(value, f);
3227 fputc('\n', f);
3228
3229 return 1;
3230 }
3231
3232 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3233 _cleanup_free_ char *c = NULL;
3234
3235 assert(u);
3236 assert(f);
3237 assert(key);
3238
3239 if (!value)
3240 return 0;
3241
3242 c = cescape(value);
3243 if (!c)
3244 return -ENOMEM;
3245
3246 fputs(key, f);
3247 fputc('=', f);
3248 fputs(c, f);
3249 fputc('\n', f);
3250
3251 return 1;
3252 }
3253
3254 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3255 int copy;
3256
3257 assert(u);
3258 assert(f);
3259 assert(key);
3260
3261 if (fd < 0)
3262 return 0;
3263
3264 copy = fdset_put_dup(fds, fd);
3265 if (copy < 0)
3266 return copy;
3267
3268 fprintf(f, "%s=%i\n", key, copy);
3269 return 1;
3270 }
3271
3272 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3273 va_list ap;
3274
3275 assert(u);
3276 assert(f);
3277 assert(key);
3278 assert(format);
3279
3280 fputs(key, f);
3281 fputc('=', f);
3282
3283 va_start(ap, format);
3284 vfprintf(f, format, ap);
3285 va_end(ap);
3286
3287 fputc('\n', f);
3288 }
3289
3290 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3291 int r;
3292
3293 assert(u);
3294 assert(f);
3295 assert(fds);
3296
3297 for (;;) {
3298 char line[LINE_MAX], *l, *v;
3299 CGroupIPAccountingMetric m;
3300 size_t k;
3301
3302 if (!fgets(line, sizeof(line), f)) {
3303 if (feof(f))
3304 return 0;
3305 return -errno;
3306 }
3307
3308 char_array_0(line);
3309 l = strstrip(line);
3310
3311 /* End marker */
3312 if (isempty(l))
3313 break;
3314
3315 k = strcspn(l, "=");
3316
3317 if (l[k] == '=') {
3318 l[k] = 0;
3319 v = l+k+1;
3320 } else
3321 v = l+k;
3322
3323 if (streq(l, "job")) {
3324 if (v[0] == '\0') {
3325 /* new-style serialized job */
3326 Job *j;
3327
3328 j = job_new_raw(u);
3329 if (!j)
3330 return log_oom();
3331
3332 r = job_deserialize(j, f);
3333 if (r < 0) {
3334 job_free(j);
3335 return r;
3336 }
3337
3338 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3339 if (r < 0) {
3340 job_free(j);
3341 return r;
3342 }
3343
3344 r = job_install_deserialized(j);
3345 if (r < 0) {
3346 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3347 job_free(j);
3348 return r;
3349 }
3350 } else /* legacy for pre-44 */
3351 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3352 continue;
3353 } else if (streq(l, "state-change-timestamp")) {
3354 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3355 continue;
3356 } else if (streq(l, "inactive-exit-timestamp")) {
3357 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3358 continue;
3359 } else if (streq(l, "active-enter-timestamp")) {
3360 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3361 continue;
3362 } else if (streq(l, "active-exit-timestamp")) {
3363 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3364 continue;
3365 } else if (streq(l, "inactive-enter-timestamp")) {
3366 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3367 continue;
3368 } else if (streq(l, "condition-timestamp")) {
3369 dual_timestamp_deserialize(v, &u->condition_timestamp);
3370 continue;
3371 } else if (streq(l, "assert-timestamp")) {
3372 dual_timestamp_deserialize(v, &u->assert_timestamp);
3373 continue;
3374 } else if (streq(l, "condition-result")) {
3375
3376 r = parse_boolean(v);
3377 if (r < 0)
3378 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3379 else
3380 u->condition_result = r;
3381
3382 continue;
3383
3384 } else if (streq(l, "assert-result")) {
3385
3386 r = parse_boolean(v);
3387 if (r < 0)
3388 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3389 else
3390 u->assert_result = r;
3391
3392 continue;
3393
3394 } else if (streq(l, "transient")) {
3395
3396 r = parse_boolean(v);
3397 if (r < 0)
3398 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3399 else
3400 u->transient = r;
3401
3402 continue;
3403
3404 } else if (streq(l, "exported-invocation-id")) {
3405
3406 r = parse_boolean(v);
3407 if (r < 0)
3408 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3409 else
3410 u->exported_invocation_id = r;
3411
3412 continue;
3413
3414 } else if (streq(l, "exported-log-level-max")) {
3415
3416 r = parse_boolean(v);
3417 if (r < 0)
3418 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3419 else
3420 u->exported_log_level_max = r;
3421
3422 continue;
3423
3424 } else if (streq(l, "exported-log-extra-fields")) {
3425
3426 r = parse_boolean(v);
3427 if (r < 0)
3428 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3429 else
3430 u->exported_log_extra_fields = r;
3431
3432 continue;
3433
3434 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3435
3436 r = safe_atou64(v, &u->cpu_usage_base);
3437 if (r < 0)
3438 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3439
3440 continue;
3441
3442 } else if (streq(l, "cpu-usage-last")) {
3443
3444 r = safe_atou64(v, &u->cpu_usage_last);
3445 if (r < 0)
3446 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3447
3448 continue;
3449
3450 } else if (streq(l, "cgroup")) {
3451
3452 r = unit_set_cgroup_path(u, v);
3453 if (r < 0)
3454 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3455
3456 (void) unit_watch_cgroup(u);
3457
3458 continue;
3459 } else if (streq(l, "cgroup-realized")) {
3460 int b;
3461
3462 b = parse_boolean(v);
3463 if (b < 0)
3464 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3465 else
3466 u->cgroup_realized = b;
3467
3468 continue;
3469
3470 } else if (streq(l, "cgroup-realized-mask")) {
3471
3472 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3473 if (r < 0)
3474 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3475 continue;
3476
3477 } else if (streq(l, "cgroup-enabled-mask")) {
3478
3479 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3480 if (r < 0)
3481 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3482 continue;
3483
3484 } else if (streq(l, "cgroup-bpf-realized")) {
3485 int i;
3486
3487 r = safe_atoi(v, &i);
3488 if (r < 0)
3489 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3490 else
3491 u->cgroup_bpf_state =
3492 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3493 i > 0 ? UNIT_CGROUP_BPF_ON :
3494 UNIT_CGROUP_BPF_OFF;
3495
3496 continue;
3497
3498 } else if (streq(l, "ref-uid")) {
3499 uid_t uid;
3500
3501 r = parse_uid(v, &uid);
3502 if (r < 0)
3503 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3504 else
3505 unit_ref_uid_gid(u, uid, GID_INVALID);
3506
3507 continue;
3508
3509 } else if (streq(l, "ref-gid")) {
3510 gid_t gid;
3511
3512 r = parse_gid(v, &gid);
3513 if (r < 0)
3514 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3515 else
3516 unit_ref_uid_gid(u, UID_INVALID, gid);
3517 continue;
3518 } else if (streq(l, "ref")) {
3519
3520 r = strv_extend(&u->deserialized_refs, v);
3521 if (r < 0)
3522 log_oom();
3523
3524 continue;
3525 } else if (streq(l, "invocation-id")) {
3526 sd_id128_t id;
3527
3528 r = sd_id128_from_string(v, &id);
3529 if (r < 0)
3530 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3531 else {
3532 r = unit_set_invocation_id(u, id);
3533 if (r < 0)
3534 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3535 }
3536
3537 continue;
3538 }
3539
3540 /* Check if this is an IP accounting metric serialization field */
3541 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3542 if (streq(l, ip_accounting_metric_field[m]))
3543 break;
3544 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3545 uint64_t c;
3546
3547 r = safe_atou64(v, &c);
3548 if (r < 0)
3549 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3550 else
3551 u->ip_accounting_extra[m] = c;
3552 continue;
3553 }
3554
3555 if (unit_can_serialize(u)) {
3556 r = exec_runtime_deserialize_compat(u, l, v, fds);
3557 if (r < 0) {
3558 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3559 continue;
3560 }
3561
3562 /* Returns positive if key was handled by the call */
3563 if (r > 0)
3564 continue;
3565
3566 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3567 if (r < 0)
3568 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3569 }
3570 }
3571
3572 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3573 * useful so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3574 * before 228 where the base for timeouts was not persistent across reboots. */
3575
3576 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3577 dual_timestamp_get(&u->state_change_timestamp);
3578
3579 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3580 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3581 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3582 unit_invalidate_cgroup_bpf(u);
3583
3584 return 0;
3585 }
3586
3587 void unit_deserialize_skip(FILE *f) {
3588 assert(f);
3589
3590 /* Skip serialized data for this unit. We don't know what it is. */
3591
3592 for (;;) {
3593 char line[LINE_MAX], *l;
3594
3595 if (!fgets(line, sizeof line, f))
3596 return;
3597
3598 char_array_0(line);
3599 l = strstrip(line);
3600
3601 /* End marker */
3602 if (isempty(l))
3603 return;
3604 }
3605 }
3606
3607 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3608 Unit *device;
3609 _cleanup_free_ char *e = NULL;
3610 int r;
3611
3612 assert(u);
3613
3614 /* Adds in links to the device node that this unit is based on */
3615 if (isempty(what))
3616 return 0;
3617
3618 if (!is_device_path(what))
3619 return 0;
3620
3621 /* When device units aren't supported (such as in a
3622 * container), don't create dependencies on them. */
3623 if (!unit_type_supported(UNIT_DEVICE))
3624 return 0;
3625
3626 r = unit_name_from_path(what, ".device", &e);
3627 if (r < 0)
3628 return r;
3629
3630 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3631 if (r < 0)
3632 return r;
3633
3634 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3635 dep = UNIT_BINDS_TO;
3636
3637 r = unit_add_two_dependencies(u, UNIT_AFTER,
3638 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3639 device, true, mask);
3640 if (r < 0)
3641 return r;
3642
3643 if (wants) {
3644 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3645 if (r < 0)
3646 return r;
3647 }
3648
3649 return 0;
3650 }
3651
3652 int unit_coldplug(Unit *u) {
3653 int r = 0, q;
3654 char **i;
3655
3656 assert(u);
3657
3658 /* Make sure we don't enter a loop when coldplugging
3659 * recursively. */
3660 if (u->coldplugged)
3661 return 0;
3662
3663 u->coldplugged = true;
3664
3665 STRV_FOREACH(i, u->deserialized_refs) {
3666 q = bus_unit_track_add_name(u, *i);
3667 if (q < 0 && r >= 0)
3668 r = q;
3669 }
3670 u->deserialized_refs = strv_free(u->deserialized_refs);
3671
3672 if (UNIT_VTABLE(u)->coldplug) {
3673 q = UNIT_VTABLE(u)->coldplug(u);
3674 if (q < 0 && r >= 0)
3675 r = q;
3676 }
3677
3678 if (u->job) {
3679 q = job_coldplug(u->job);
3680 if (q < 0 && r >= 0)
3681 r = q;
3682 }
3683
3684 return r;
3685 }
3686
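/* Returns true if the file at 'path' changed since we loaded it: for regular unit files, if its mtime is
 * newer than the one we recorded; for masked units, if the file is no longer masked. If the file cannot
 * be stat()ed anymore we consider it changed, too. Files on /proc and /sys are assumed to be watched and
 * therefore never out-of-date. */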
3687 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3688 struct stat st;
3689
3690 if (!path)
3691 return false;
3692
3693 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3694 * are never out-of-date. */
3695 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3696 return false;
3697
3698 if (stat(path, &st) < 0)
3699 /* What, cannot access this anymore? */
3700 return true;
3701
3702 if (path_masked)
3703 /* For masked files check whether they are still masked */
3704 return !null_or_empty(&st);
3705 else
3706 /* For non-empty files check the mtime */
3707 return timespec_load(&st.st_mtim) > mtime;
3708
3710 }
3711
3712 bool unit_need_daemon_reload(Unit *u) {
3713 _cleanup_strv_free_ char **t = NULL;
3714 char **path;
3715
3716 assert(u);
3717
3718 /* For unit files, we allow masking… */
3719 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3720 u->load_state == UNIT_MASKED))
3721 return true;
3722
3723 /* Source paths should not be masked… */
3724 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3725 return true;
3726
3727 if (u->load_state == UNIT_LOADED)
3728 (void) unit_find_dropin_paths(u, &t);
3729 if (!strv_equal(u->dropin_paths, t))
3730 return true;
3731
3732 /* … any drop-ins that are masked are simply omitted from the list. */
3733 STRV_FOREACH(path, u->dropin_paths)
3734 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3735 return true;
3736
3737 return false;
3738 }
3739
3740 void unit_reset_failed(Unit *u) {
3741 assert(u);
3742
3743 if (UNIT_VTABLE(u)->reset_failed)
3744 UNIT_VTABLE(u)->reset_failed(u);
3745
3746 RATELIMIT_RESET(u->start_limit);
3747 u->start_limit_hit = false;
3748 }
3749
3750 Unit *unit_following(Unit *u) {
3751 assert(u);
3752
3753 if (UNIT_VTABLE(u)->following)
3754 return UNIT_VTABLE(u)->following(u);
3755
3756 return NULL;
3757 }
3758
3759 bool unit_stop_pending(Unit *u) {
3760 assert(u);
3761
3762 /* This call does check the current state of the unit. It's
3763 * hence useful to be called from state change calls of the
3764 * unit itself, where the state isn't updated yet. This is
3765 * different from unit_inactive_or_pending() which checks both
3766 * the current state and for a queued job. */
3767
3768 return u->job && u->job->type == JOB_STOP;
3769 }
3770
3771 bool unit_inactive_or_pending(Unit *u) {
3772 assert(u);
3773
3774 /* Returns true if the unit is inactive or going down */
3775
3776 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3777 return true;
3778
3779 if (unit_stop_pending(u))
3780 return true;
3781
3782 return false;
3783 }
3784
3785 bool unit_active_or_pending(Unit *u) {
3786 assert(u);
3787
3788 /* Returns true if the unit is active or going up */
3789
3790 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3791 return true;
3792
3793 if (u->job &&
3794 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3795 return true;
3796
3797 return false;
3798 }
3799
3800 bool unit_will_restart(Unit *u) {
3801 assert(u);
3802
3803 if (!UNIT_VTABLE(u)->will_restart)
3804 return false;
3805
3806 return UNIT_VTABLE(u)->will_restart(u);
3807 }
3808
3809 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3810 assert(u);
3811 assert(w >= 0 && w < _KILL_WHO_MAX);
3812 assert(SIGNAL_VALID(signo));
3813
3814 if (!UNIT_VTABLE(u)->kill)
3815 return -EOPNOTSUPP;
3816
3817 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3818 }
3819
3820 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3821 _cleanup_set_free_ Set *pid_set = NULL;
3822 int r;
3823
3824 pid_set = set_new(NULL);
3825 if (!pid_set)
3826 return NULL;
3827
3828 /* Exclude the main/control pids from being killed via the cgroup */
3829 if (main_pid > 0) {
3830 r = set_put(pid_set, PID_TO_PTR(main_pid));
3831 if (r < 0)
3832 return NULL;
3833 }
3834
3835 if (control_pid > 0) {
3836 r = set_put(pid_set, PID_TO_PTR(control_pid));
3837 if (r < 0)
3838 return NULL;
3839 }
3840
3841 return TAKE_PTR(pid_set);
3842 }
3843
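/* Shared helper behind the per-type kill() implementations for unit types with a main and/or control
 * process: depending on 'who', deliver the signal to the main PID, the control PID and/or all remaining
 * processes in the unit's cgroup (the main and control PIDs are excluded from the cgroup sweep since
 * they are signalled directly). */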
3844 int unit_kill_common(
3845 Unit *u,
3846 KillWho who,
3847 int signo,
3848 pid_t main_pid,
3849 pid_t control_pid,
3850 sd_bus_error *error) {
3851
3852 int r = 0;
3853 bool killed = false;
3854
3855 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3856 if (main_pid < 0)
3857 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3858 else if (main_pid == 0)
3859 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3860 }
3861
3862 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3863 if (control_pid < 0)
3864 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3865 else if (control_pid == 0)
3866 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3867 }
3868
3869 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3870 if (control_pid > 0) {
3871 if (kill(control_pid, signo) < 0)
3872 r = -errno;
3873 else
3874 killed = true;
3875 }
3876
3877 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3878 if (main_pid > 0) {
3879 if (kill(main_pid, signo) < 0)
3880 r = -errno;
3881 else
3882 killed = true;
3883 }
3884
3885 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3886 _cleanup_set_free_ Set *pid_set = NULL;
3887 int q;
3888
3889 /* Exclude the main/control pids from being killed via the cgroup */
3890 pid_set = unit_pid_set(main_pid, control_pid);
3891 if (!pid_set)
3892 return -ENOMEM;
3893
3894 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3895 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3896 r = q;
3897 else
3898 killed = true;
3899 }
3900
3901 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3902 return -ESRCH;
3903
3904 return r;
3905 }
3906
3907 int unit_following_set(Unit *u, Set **s) {
3908 assert(u);
3909 assert(s);
3910
3911 if (UNIT_VTABLE(u)->following_set)
3912 return UNIT_VTABLE(u)->following_set(u, s);
3913
3914 *s = NULL;
3915 return 0;
3916 }
3917
3918 UnitFileState unit_get_unit_file_state(Unit *u) {
3919 int r;
3920
3921 assert(u);
3922
3923 if (u->unit_file_state < 0 && u->fragment_path) {
3924 r = unit_file_get_state(
3925 u->manager->unit_file_scope,
3926 NULL,
3927 u->id,
3928 &u->unit_file_state);
3929 if (r < 0)
3930 u->unit_file_state = UNIT_FILE_BAD;
3931 }
3932
3933 return u->unit_file_state;
3934 }
3935
3936 int unit_get_unit_file_preset(Unit *u) {
3937 assert(u);
3938
3939 if (u->unit_file_preset < 0 && u->fragment_path)
3940 u->unit_file_preset = unit_file_query_preset(
3941 u->manager->unit_file_scope,
3942 NULL,
3943 basename(u->fragment_path));
3944
3945 return u->unit_file_preset;
3946 }
3947
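/* Make 'ref', owned by 'source', point at 'target', and register it on the target's list of references
 * so the target knows it is being pinned. Any previous target of the reference is dropped first. */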
3948 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3949 assert(ref);
3950 assert(source);
3951 assert(target);
3952
3953 if (ref->target)
3954 unit_ref_unset(ref);
3955
3956 ref->source = source;
3957 ref->target = target;
3958 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3959 return target;
3960 }
3961
3962 void unit_ref_unset(UnitRef *ref) {
3963 assert(ref);
3964
3965 if (!ref->target)
3966 return;
3967
3968 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3969 * be unreferenced now. */
3970 unit_add_to_gc_queue(ref->target);
3971
3972 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3973 ref->source = ref->target = NULL;
3974 }
3975
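/* Derive a user name from the unit name, for DynamicUser=: use the unit name prefix directly if it is a
 * valid user name, otherwise fall back to a hashed "_du"-prefixed name. */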
3976 static int user_from_unit_name(Unit *u, char **ret) {
3977
3978 static const uint8_t hash_key[] = {
3979 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3980 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3981 };
3982
3983 _cleanup_free_ char *n = NULL;
3984 int r;
3985
3986 r = unit_name_to_prefix(u->id, &n);
3987 if (r < 0)
3988 return r;
3989
3990 if (valid_user_group_name(n)) {
3991 *ret = TAKE_PTR(n);
3992 return 0;
3993 }
3994
3995 /* If we can't use the unit name as a user name, then let's hash it and use that */
3996 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3997 return -ENOMEM;
3998
3999 return 0;
4000 }
4001
4002 int unit_patch_contexts(Unit *u) {
4003 CGroupContext *cc;
4004 ExecContext *ec;
4005 unsigned i;
4006 int r;
4007
4008 assert(u);
4009
4010 /* Patch the manager defaults into the exec and cgroup
4011 * contexts, _after_ the rest of the settings have been
4012 * initialized */
4013
4014 ec = unit_get_exec_context(u);
4015 if (ec) {
4016 /* This only copies in the ones that need memory */
4017 for (i = 0; i < _RLIMIT_MAX; i++)
4018 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4019 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4020 if (!ec->rlimit[i])
4021 return -ENOMEM;
4022 }
4023
4024 if (MANAGER_IS_USER(u->manager) &&
4025 !ec->working_directory) {
4026
4027 r = get_home_dir(&ec->working_directory);
4028 if (r < 0)
4029 return r;
4030
4031 /* Allow user services to run, even if the
4032 * home directory is missing */
4033 ec->working_directory_missing_ok = true;
4034 }
4035
4036 if (ec->private_devices)
4037 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4038
4039 if (ec->protect_kernel_modules)
4040 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4041
4042 if (ec->dynamic_user) {
4043 if (!ec->user) {
4044 r = user_from_unit_name(u, &ec->user);
4045 if (r < 0)
4046 return r;
4047 }
4048
4049 if (!ec->group) {
4050 ec->group = strdup(ec->user);
4051 if (!ec->group)
4052 return -ENOMEM;
4053 }
4054
4055 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4056 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4057
4058 ec->private_tmp = true;
4059 ec->remove_ipc = true;
4060 ec->protect_system = PROTECT_SYSTEM_STRICT;
4061 if (ec->protect_home == PROTECT_HOME_NO)
4062 ec->protect_home = PROTECT_HOME_READ_ONLY;
4063 }
4064 }
4065
4066 cc = unit_get_cgroup_context(u);
4067 if (cc) {
4068
4069 if (ec &&
4070 ec->private_devices &&
4071 cc->device_policy == CGROUP_AUTO)
4072 cc->device_policy = CGROUP_CLOSED;
4073 }
4074
4075 return 0;
4076 }
4077
4078 ExecContext *unit_get_exec_context(Unit *u) {
4079 size_t offset;
4080 assert(u);
4081
4082 if (u->type < 0)
4083 return NULL;
4084
4085 offset = UNIT_VTABLE(u)->exec_context_offset;
4086 if (offset <= 0)
4087 return NULL;
4088
4089 return (ExecContext*) ((uint8_t*) u + offset);
4090 }
4091
4092 KillContext *unit_get_kill_context(Unit *u) {
4093 size_t offset;
4094 assert(u);
4095
4096 if (u->type < 0)
4097 return NULL;
4098
4099 offset = UNIT_VTABLE(u)->kill_context_offset;
4100 if (offset <= 0)
4101 return NULL;
4102
4103 return (KillContext*) ((uint8_t*) u + offset);
4104 }
4105
4106 CGroupContext *unit_get_cgroup_context(Unit *u) {
4107 size_t offset;
4108
4109 if (u->type < 0)
4110 return NULL;
4111
4112 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4113 if (offset <= 0)
4114 return NULL;
4115
4116 return (CGroupContext*) ((uint8_t*) u + offset);
4117 }
4118
4119 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4120 size_t offset;
4121
4122 if (u->type < 0)
4123 return NULL;
4124
4125 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4126 if (offset <= 0)
4127 return NULL;
4128
4129 return *(ExecRuntime**) ((uint8_t*) u + offset);
4130 }
4131
4132 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
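        /* Picks the directory to write a drop-in (or transient) setting to, based on the write flags. For the system
         * manager these lookup paths typically resolve to /run/systemd/transient, /etc/systemd/system.control and
         * /run/systemd/system.control respectively (illustrative defaults, not derived from this file). */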
4133 assert(u);
4134
4135 if (UNIT_WRITE_FLAGS_NOOP(flags))
4136 return NULL;
4137
4138 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4139 return u->manager->lookup_paths.transient;
4140
4141 if (flags & UNIT_PERSISTENT)
4142 return u->manager->lookup_paths.persistent_control;
4143
4144 if (flags & UNIT_RUNTIME)
4145 return u->manager->lookup_paths.runtime_control;
4146
4147 return NULL;
4148 }
4149
4150 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4151 char *ret = NULL;
4152
4153 if (!s)
4154 return NULL;
4155
4156 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4157 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4158 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4159 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4160 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4161 * allocations. */
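        /* A hypothetical caller might hence use it like this:
         *
         *     _cleanup_free_ char *buf = NULL;
         *     const char *e = unit_escape_setting(value, UNIT_ESCAPE_SPECIFIERS|UNIT_ESCAPE_C, &buf);
         *     if (!e)
         *             return -ENOMEM;
         *     fprintf(f, "Description=%s\n", e);
         *
         * Here only 'buf' is freed (automatically); 'e' either points to 'buf' or to the original 'value'. */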
4162
4163 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4164 ret = specifier_escape(s);
4165 if (!ret)
4166 return NULL;
4167
4168 s = ret;
4169 }
4170
4171 if (flags & UNIT_ESCAPE_C) {
4172 char *a;
4173
4174 a = cescape(s);
4175 free(ret);
4176 if (!a)
4177 return NULL;
4178
4179 ret = a;
4180 }
4181
4182 if (buf) {
4183 *buf = ret;
4184 return ret ?: (char*) s;
4185 }
4186
4187 return ret ?: strdup(s);
4188 }
4189
4190 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4191 _cleanup_free_ char *result = NULL;
4192 size_t n = 0, allocated = 0;
4193 char **i;
4194
4195 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4196 * way suitable for ExecStart= stanzas */
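        /* For example (hypothetical input), the list { "echo", "hello world" } is turned into the single string
         *     "echo" "hello world"
         * i.e. every entry is escaped according to 'flags' and wrapped in double quotes, separated by spaces. */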
4197
4198 STRV_FOREACH(i, l) {
4199 _cleanup_free_ char *buf = NULL;
4200 const char *p;
4201 size_t a;
4202 char *q;
4203
4204 p = unit_escape_setting(*i, flags, &buf);
4205 if (!p)
4206 return NULL;
4207
4208 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4209 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4210 return NULL;
4211
4212 q = result + n;
4213 if (n > 0)
4214 *(q++) = ' ';
4215
4216 *(q++) = '"';
4217 q = stpcpy(q, p);
4218 *(q++) = '"';
4219
4220 n += a;
4221 }
4222
4223 if (!GREEDY_REALLOC(result, allocated, n + 1))
4224 return NULL;
4225
4226 result[n] = 0;
4227
4228 return TAKE_PTR(result);
4229 }
4230
4231 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4232 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4233 const char *dir, *wrapped;
4234 int r;
4235
4236 assert(u);
4237 assert(name);
4238 assert(data);
4239
4240 if (UNIT_WRITE_FLAGS_NOOP(flags))
4241 return 0;
4242
4243 data = unit_escape_setting(data, flags, &escaped);
4244 if (!data)
4245 return -ENOMEM;
4246
4247         /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4248 * previous section header is the same */
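        /* E.g. for a hypothetical transient .service unit, a run of consecutive private settings ends up under a
         * single "[Service]" header, and a subsequent non-private setting then opens a fresh "[Unit]" section. */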
4249
4250 if (flags & UNIT_PRIVATE) {
4251 if (!UNIT_VTABLE(u)->private_section)
4252 return -EINVAL;
4253
4254 if (!u->transient_file || u->last_section_private < 0)
4255 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4256 else if (u->last_section_private == 0)
4257 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4258 } else {
4259 if (!u->transient_file || u->last_section_private < 0)
4260 data = strjoina("[Unit]\n", data);
4261 else if (u->last_section_private > 0)
4262 data = strjoina("\n[Unit]\n", data);
4263 }
4264
4265 if (u->transient_file) {
4266                 /* When a transient unit file is still being created, let's not create a new drop-in but instead
4267 * write to the transient unit file. */
4268 fputs(data, u->transient_file);
4269
4270 if (!endswith(data, "\n"))
4271 fputc('\n', u->transient_file);
4272
4273 /* Remember which section we wrote this entry to */
4274 u->last_section_private = !!(flags & UNIT_PRIVATE);
4275 return 0;
4276 }
4277
4278 dir = unit_drop_in_dir(u, flags);
4279 if (!dir)
4280 return -EINVAL;
4281
4282 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4283 "# or an equivalent operation. Do not edit.\n",
4284 data,
4285 "\n");
4286
4287 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4288 if (r < 0)
4289 return r;
4290
4291 (void) mkdir_p_label(p, 0755);
4292 r = write_string_file_atomic_label(q, wrapped);
4293 if (r < 0)
4294 return r;
4295
4296 r = strv_push(&u->dropin_paths, q);
4297 if (r < 0)
4298 return r;
4299 q = NULL;
4300
4301 strv_uniq(u->dropin_paths);
4302
4303 u->dropin_mtime = now(CLOCK_REALTIME);
4304
4305 return 0;
4306 }
4307
4308 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4309 _cleanup_free_ char *p = NULL;
4310 va_list ap;
4311 int r;
4312
4313 assert(u);
4314 assert(name);
4315 assert(format);
4316
4317 if (UNIT_WRITE_FLAGS_NOOP(flags))
4318 return 0;
4319
4320 va_start(ap, format);
4321 r = vasprintf(&p, format, ap);
4322 va_end(ap);
4323
4324 if (r < 0)
4325 return -ENOMEM;
4326
4327 return unit_write_setting(u, flags, name, p);
4328 }
4329
4330 int unit_make_transient(Unit *u) {
4331 _cleanup_free_ char *path = NULL;
4332 FILE *f;
4333
4334 assert(u);
4335
4336 if (!UNIT_VTABLE(u)->can_transient)
4337 return -EOPNOTSUPP;
4338
4339 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4340
4341 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
4342 if (!path)
4343 return -ENOMEM;
4344
4345 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4346 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4347
4348 RUN_WITH_UMASK(0022) {
4349 f = fopen(path, "we");
4350 if (!f)
4351 return -errno;
4352 }
4353
4354 safe_fclose(u->transient_file);
4355 u->transient_file = f;
4356
4357 free_and_replace(u->fragment_path, path);
4358
4359 u->source_path = mfree(u->source_path);
4360 u->dropin_paths = strv_free(u->dropin_paths);
4361 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4362
4363 u->load_state = UNIT_STUB;
4364 u->load_error = 0;
4365 u->transient = true;
4366
4367 unit_add_to_dbus_queue(u);
4368 unit_add_to_gc_queue(u);
4369
4370 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4371 u->transient_file);
4372
4373 return 0;
4374 }
4375
4376 static void log_kill(pid_t pid, int sig, void *userdata) {
4377 _cleanup_free_ char *comm = NULL;
4378
4379 (void) get_process_comm(pid, &comm);
4380
4381 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4382 only, like for example systemd's own PAM stub process. */
4383 if (comm && comm[0] == '(')
4384 return;
4385
4386 log_unit_notice(userdata,
4387 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4388 pid,
4389 strna(comm),
4390 signal_to_string(sig));
4391 }
4392
4393 static int operation_to_signal(KillContext *c, KillOperation k) {
4394 assert(c);
4395
4396 switch (k) {
4397
4398 case KILL_TERMINATE:
4399 case KILL_TERMINATE_AND_LOG:
4400 return c->kill_signal;
4401
4402 case KILL_KILL:
4403 return SIGKILL;
4404
4405 case KILL_ABORT:
4406 return SIGABRT;
4407
4408 default:
4409 assert_not_reached("KillOperation unknown");
4410 }
4411 }
4412
4413 int unit_kill_context(
4414 Unit *u,
4415 KillContext *c,
4416 KillOperation k,
4417 pid_t main_pid,
4418 pid_t control_pid,
4419 bool main_pid_alien) {
4420
4421 bool wait_for_exit = false, send_sighup;
4422 cg_kill_log_func_t log_func = NULL;
4423 int sig, r;
4424
4425 assert(u);
4426 assert(c);
4427
4428 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4429          * Returns > 0 if we killed something worth waiting for, 0 otherwise, and a negative error code on failure. */
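        /* Rough flow, as implemented below: pick the signal matching the requested operation, deliver it to the known
         * main and control PIDs, and for KillMode=control-group (or KillMode=mixed on the final SIGKILL) sweep the
         * whole cgroup too, optionally following up with SIGHUP if SendSIGHUP= is set. */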
4430
4431 if (c->kill_mode == KILL_NONE)
4432 return 0;
4433
4434 sig = operation_to_signal(c, k);
4435
4436 send_sighup =
4437 c->send_sighup &&
4438 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4439 sig != SIGHUP;
4440
4441 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4442 log_func = log_kill;
4443
4444 if (main_pid > 0) {
4445 if (log_func)
4446 log_func(main_pid, sig, u);
4447
4448 r = kill_and_sigcont(main_pid, sig);
4449 if (r < 0 && r != -ESRCH) {
4450 _cleanup_free_ char *comm = NULL;
4451 (void) get_process_comm(main_pid, &comm);
4452
4453 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4454 } else {
4455 if (!main_pid_alien)
4456 wait_for_exit = true;
4457
4458 if (r != -ESRCH && send_sighup)
4459 (void) kill(main_pid, SIGHUP);
4460 }
4461 }
4462
4463 if (control_pid > 0) {
4464 if (log_func)
4465 log_func(control_pid, sig, u);
4466
4467 r = kill_and_sigcont(control_pid, sig);
4468 if (r < 0 && r != -ESRCH) {
4469 _cleanup_free_ char *comm = NULL;
4470 (void) get_process_comm(control_pid, &comm);
4471
4472 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4473 } else {
4474 wait_for_exit = true;
4475
4476 if (r != -ESRCH && send_sighup)
4477 (void) kill(control_pid, SIGHUP);
4478 }
4479 }
4480
4481 if (u->cgroup_path &&
4482 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4483 _cleanup_set_free_ Set *pid_set = NULL;
4484
4485 /* Exclude the main/control pids from being killed via the cgroup */
4486 pid_set = unit_pid_set(main_pid, control_pid);
4487 if (!pid_set)
4488 return -ENOMEM;
4489
4490 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4491 sig,
4492 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4493 pid_set,
4494 log_func, u);
4495 if (r < 0) {
4496 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4497 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4498
4499 } else if (r > 0) {
4500
4501 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4502 * we are running in a container or if this is a delegation unit, simply because cgroup
4503 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4504 * of containers it can be confused easily by left-over directories in the cgroup — which
4505 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4506 * there we get proper events. Hence rely on them. */
4507
4508 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4509 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4510 wait_for_exit = true;
4511
4512 if (send_sighup) {
4513 set_free(pid_set);
4514
4515 pid_set = unit_pid_set(main_pid, control_pid);
4516 if (!pid_set)
4517 return -ENOMEM;
4518
4519 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4520 SIGHUP,
4521 CGROUP_IGNORE_SELF,
4522 pid_set,
4523 NULL, NULL);
4524 }
4525 }
4526 }
4527
4528 return wait_for_exit;
4529 }
4530
4531 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4532 _cleanup_free_ char *p = NULL;
4533 char *prefix;
4534 UnitDependencyInfo di;
4535 int r;
4536
4537 assert(u);
4538 assert(path);
4539
4540 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4541          * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4542 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4543 * determine which units to make themselves a dependency of. */
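        /* For instance (hypothetical path), registering "/var/lib/foo" records this unit under "/var/lib/foo" itself
         * and under every parent prefix up to the root in the manager-wide units_requiring_mounts_for table, so that
         * a mount unit appearing for any of these paths can find it. */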
4544
4545 if (!path_is_absolute(path))
4546 return -EINVAL;
4547
4548 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4549 if (r < 0)
4550 return r;
4551
4552 p = strdup(path);
4553 if (!p)
4554 return -ENOMEM;
4555
4556 path = path_simplify(p, false);
4557
4558 if (!path_is_normalized(path))
4559 return -EPERM;
4560
4561 if (hashmap_contains(u->requires_mounts_for, path))
4562 return 0;
4563
4564 di = (UnitDependencyInfo) {
4565 .origin_mask = mask
4566 };
4567
4568 r = hashmap_put(u->requires_mounts_for, path, di.data);
4569 if (r < 0)
4570 return r;
4571 p = NULL;
4572
4573 prefix = alloca(strlen(path) + 1);
4574 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4575 Set *x;
4576
4577 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4578 if (!x) {
4579 _cleanup_free_ char *q = NULL;
4580
4581 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4582 if (r < 0)
4583 return r;
4584
4585 q = strdup(prefix);
4586 if (!q)
4587 return -ENOMEM;
4588
4589 x = set_new(NULL);
4590 if (!x)
4591 return -ENOMEM;
4592
4593 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4594 if (r < 0) {
4595 set_free(x);
4596 return r;
4597 }
4598 q = NULL;
4599 }
4600
4601 r = set_put(x, u);
4602 if (r < 0)
4603 return r;
4604 }
4605
4606 return 0;
4607 }
4608
4609 int unit_setup_exec_runtime(Unit *u) {
4610 ExecRuntime **rt;
4611 size_t offset;
4612 Unit *other;
4613 Iterator i;
4614 void *v;
4615 int r;
4616
4617 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4618 assert(offset > 0);
4619
4620         /* Check whether there already is an ExecRuntime for this unit */
4621 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4622 if (*rt)
4623 return 0;
4624
4625 /* Try to get it from somebody else */
4626 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4627 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4628 if (r == 1)
4629 return 1;
4630 }
4631
4632 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4633 }
4634
4635 int unit_setup_dynamic_creds(Unit *u) {
4636 ExecContext *ec;
4637 DynamicCreds *dcreds;
4638 size_t offset;
4639
4640 assert(u);
4641
4642 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4643 assert(offset > 0);
4644 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4645
4646 ec = unit_get_exec_context(u);
4647 assert(ec);
4648
4649 if (!ec->dynamic_user)
4650 return 0;
4651
4652 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4653 }
4654
4655 bool unit_type_supported(UnitType t) {
4656 if (_unlikely_(t < 0))
4657 return false;
4658 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4659 return false;
4660
4661 if (!unit_vtable[t]->supported)
4662 return true;
4663
4664 return unit_vtable[t]->supported();
4665 }
4666
4667 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4668 int r;
4669
4670 assert(u);
4671 assert(where);
4672
4673 r = dir_is_empty(where);
4674 if (r > 0 || r == -ENOTDIR)
4675 return;
4676 if (r < 0) {
4677 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4678 return;
4679 }
4680
4681 log_struct(LOG_NOTICE,
4682 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4683 LOG_UNIT_ID(u),
4684 LOG_UNIT_INVOCATION_ID(u),
4685 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4686 "WHERE=%s", where);
4687 }
4688
4689 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4690         _cleanup_free_ char *canonical_where = NULL;
4691 int r;
4692
4693 assert(u);
4694 assert(where);
4695
4696 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4697 if (r < 0) {
4698 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4699 return 0;
4700 }
4701
4702 /* We will happily ignore a trailing slash (or any redundant slashes) */
4703 if (path_equal(where, canonical_where))
4704 return 0;
4705
4706 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4707 log_struct(LOG_ERR,
4708 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4709 LOG_UNIT_ID(u),
4710 LOG_UNIT_INVOCATION_ID(u),
4711 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4712 "WHERE=%s", where);
4713
4714 return -ELOOP;
4715 }
4716
4717 bool unit_is_pristine(Unit *u) {
4718 assert(u);
4719
4720 /* Check if the unit already exists or is already around,
4721 * in a number of different ways. Note that to cater for unit
4722 * types such as slice, we are generally fine with units that
4723 * are marked UNIT_LOADED even though nothing was actually
4724 * loaded, as those unit types don't require a file on disk. */
4725
4726         return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
4727                !u->fragment_path &&
4728                !u->source_path &&
4729                strv_isempty(u->dropin_paths) &&
4730                !u->job &&
4731                !u->merged_into;
4732 }
4733
4734 pid_t unit_control_pid(Unit *u) {
4735 assert(u);
4736
4737 if (UNIT_VTABLE(u)->control_pid)
4738 return UNIT_VTABLE(u)->control_pid(u);
4739
4740 return 0;
4741 }
4742
4743 pid_t unit_main_pid(Unit *u) {
4744 assert(u);
4745
4746 if (UNIT_VTABLE(u)->main_pid)
4747 return UNIT_VTABLE(u)->main_pid(u);
4748
4749 return 0;
4750 }
4751
4752 static void unit_unref_uid_internal(
4753 Unit *u,
4754 uid_t *ref_uid,
4755 bool destroy_now,
4756 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4757
4758 assert(u);
4759 assert(ref_uid);
4760 assert(_manager_unref_uid);
4761
4762 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4763          * gid_t are actually the same type, with the same validity rules.
4764 *
4765 * Drops a reference to UID/GID from a unit. */
4766
4767 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4768 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4769
4770 if (!uid_is_valid(*ref_uid))
4771 return;
4772
4773 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4774 *ref_uid = UID_INVALID;
4775 }
4776
4777 void unit_unref_uid(Unit *u, bool destroy_now) {
4778 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4779 }
4780
4781 void unit_unref_gid(Unit *u, bool destroy_now) {
4782 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4783 }
4784
4785 static int unit_ref_uid_internal(
4786 Unit *u,
4787 uid_t *ref_uid,
4788 uid_t uid,
4789 bool clean_ipc,
4790 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4791
4792 int r;
4793
4794 assert(u);
4795 assert(ref_uid);
4796 assert(uid_is_valid(uid));
4797 assert(_manager_ref_uid);
4798
4799         /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4800 * are actually the same type, and have the same validity rules.
4801 *
4802 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4803 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4804 * drops to zero. */
4805
4806 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4807 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4808
4809 if (*ref_uid == uid)
4810 return 0;
4811
4812 if (uid_is_valid(*ref_uid)) /* Already set? */
4813 return -EBUSY;
4814
4815 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4816 if (r < 0)
4817 return r;
4818
4819 *ref_uid = uid;
4820 return 1;
4821 }
4822
4823 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4824 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4825 }
4826
4827 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4828 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4829 }
4830
4831 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4832 int r = 0, q = 0;
4833
4834 assert(u);
4835
4836 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4837
4838 if (uid_is_valid(uid)) {
4839 r = unit_ref_uid(u, uid, clean_ipc);
4840 if (r < 0)
4841 return r;
4842 }
4843
4844 if (gid_is_valid(gid)) {
4845 q = unit_ref_gid(u, gid, clean_ipc);
4846 if (q < 0) {
4847 if (r > 0)
4848 unit_unref_uid(u, false);
4849
4850 return q;
4851 }
4852 }
4853
4854 return r > 0 || q > 0;
4855 }
4856
4857 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4858 ExecContext *c;
4859 int r;
4860
4861 assert(u);
4862
4863 c = unit_get_exec_context(u);
4864
4865 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4866 if (r < 0)
4867 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4868
4869 return r;
4870 }
4871
4872 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4873 assert(u);
4874
4875 unit_unref_uid(u, destroy_now);
4876 unit_unref_gid(u, destroy_now);
4877 }
4878
4879 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4880 int r;
4881
4882 assert(u);
4883
4884         /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group name
4885 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4886 * objects when no service references the UID/GID anymore. */
4887
4888 r = unit_ref_uid_gid(u, uid, gid);
4889 if (r > 0)
4890 bus_unit_send_change_signal(u);
4891 }
4892
4893 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4894 int r;
4895
4896 assert(u);
4897
4898 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4899
4900 if (sd_id128_equal(u->invocation_id, id))
4901 return 0;
4902
4903 if (!sd_id128_is_null(u->invocation_id))
4904 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4905
4906 if (sd_id128_is_null(id)) {
4907 r = 0;
4908 goto reset;
4909 }
4910
4911 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4912 if (r < 0)
4913 goto reset;
4914
4915 u->invocation_id = id;
4916 sd_id128_to_string(id, u->invocation_id_string);
4917
4918 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4919 if (r < 0)
4920 goto reset;
4921
4922 return 0;
4923
4924 reset:
4925 u->invocation_id = SD_ID128_NULL;
4926 u->invocation_id_string[0] = 0;
4927 return r;
4928 }
4929
4930 int unit_acquire_invocation_id(Unit *u) {
4931 sd_id128_t id;
4932 int r;
4933
4934 assert(u);
4935
4936 r = sd_id128_randomize(&id);
4937 if (r < 0)
4938 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4939
4940 r = unit_set_invocation_id(u, id);
4941 if (r < 0)
4942 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4943
4944 return 0;
4945 }
4946
4947 void unit_set_exec_params(Unit *u, ExecParameters *p) {
4948 assert(u);
4949 assert(p);
4950
4951 /* Copy parameters from manager */
4952 p->environment = u->manager->environment;
4953 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
4954 p->cgroup_supported = u->manager->cgroup_supported;
4955 p->prefix = u->manager->prefix;
4956 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
4957
4958         /* Copy parameters from unit */
4959 p->cgroup_path = u->cgroup_path;
4960 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
4961 }
4962
4963 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
4964 int r;
4965
4966 assert(u);
4967 assert(ret);
4968
4969 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
4970          * and > 0 in the parent. The 'ret' parameter is always filled in with the child's PID. */
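        /* A hypothetical caller would typically do:
         *
         *     pid_t pid;
         *     r = unit_fork_helper_process(u, "(sd-example)", &pid);
         *     if (r < 0)
         *             return r;
         *     if (r == 0) {
         *             ... do the work inside the unit's cgroup ...
         *             _exit(EXIT_SUCCESS);
         *     }
         *     ... parent: watch or remember 'pid' ...
         */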
4971
4972 (void) unit_realize_cgroup(u);
4973
4974 r = safe_fork(name, FORK_REOPEN_LOG, ret);
4975 if (r != 0)
4976 return r;
4977
4978 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
4979 (void) ignore_signals(SIGPIPE, -1);
4980
4981 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
4982
4983 if (u->cgroup_path) {
4984 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
4985 if (r < 0) {
4986 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
4987 _exit(EXIT_CGROUP);
4988 }
4989 }
4990
4991 return 0;
4992 }
4993
4994 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
4995 assert(u);
4996 assert(d >= 0);
4997 assert(d < _UNIT_DEPENDENCY_MAX);
4998 assert(other);
4999
5000 if (di.origin_mask == 0 && di.destination_mask == 0) {
5001 /* No bit set anymore, let's drop the whole entry */
5002 assert_se(hashmap_remove(u->dependencies[d], other));
5003 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5004 } else
5005 /* Mask was reduced, let's update the entry */
5006 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5007 }
5008
5009 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5010 UnitDependency d;
5011
5012 assert(u);
5013
5014 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5015
5016 if (mask == 0)
5017 return;
5018
5019 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5020 bool done;
5021
5022 do {
5023 UnitDependencyInfo di;
5024 Unit *other;
5025 Iterator i;
5026
5027 done = true;
5028
5029 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5030 UnitDependency q;
5031
5032 if ((di.origin_mask & ~mask) == di.origin_mask)
5033 continue;
5034 di.origin_mask &= ~mask;
5035 unit_update_dependency_mask(u, d, other, di);
5036
5037 /* We updated the dependency from our unit to the other unit now. But most dependencies
5038 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5039 * all dependency types on the other unit and delete all those which point to us and
5040 * have the right mask set. */
5041
5042 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5043 UnitDependencyInfo dj;
5044
5045 dj.data = hashmap_get(other->dependencies[q], u);
5046 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5047 continue;
5048 dj.destination_mask &= ~mask;
5049
5050 unit_update_dependency_mask(other, q, u, dj);
5051 }
5052
5053 unit_add_to_gc_queue(other);
5054
5055 done = false;
5056 break;
5057 }
5058
5059 } while (!done);
5060 }
5061 }
5062
5063 static int unit_export_invocation_id(Unit *u) {
5064 const char *p;
5065 int r;
5066
5067 assert(u);
5068
5069 if (u->exported_invocation_id)
5070 return 0;
5071
5072 if (sd_id128_is_null(u->invocation_id))
5073 return 0;
5074
5075 p = strjoina("/run/systemd/units/invocation:", u->id);
5076 r = symlink_atomic(u->invocation_id_string, p);
5077 if (r < 0)
5078 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5079
5080 u->exported_invocation_id = true;
5081 return 0;
5082 }
5083
5084 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5085 const char *p;
5086 char buf[2];
5087 int r;
5088
5089 assert(u);
5090 assert(c);
5091
5092 if (u->exported_log_level_max)
5093 return 0;
5094
5095 if (c->log_level_max < 0)
5096 return 0;
5097
5098 assert(c->log_level_max <= 7);
5099
5100 buf[0] = '0' + c->log_level_max;
5101 buf[1] = 0;
5102
5103 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5104 r = symlink_atomic(buf, p);
5105 if (r < 0)
5106 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5107
5108 u->exported_log_level_max = true;
5109 return 0;
5110 }
5111
5112 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5113 _cleanup_close_ int fd = -1;
5114 struct iovec *iovec;
5115 const char *p;
5116 char *pattern;
5117 le64_t *sizes;
5118 ssize_t n;
5119 size_t i;
5120 int r;
5121
5122 if (u->exported_log_extra_fields)
5123 return 0;
5124
5125 if (c->n_log_extra_fields <= 0)
5126 return 0;
5127
5128 sizes = newa(le64_t, c->n_log_extra_fields);
5129 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5130
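        /* The temporary file written below is a flat sequence of records: for each extra field a little-endian
         * 64-bit length followed by the raw field data, all submitted in a single writev(). */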
5131 for (i = 0; i < c->n_log_extra_fields; i++) {
5132 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5133
5134 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5135 iovec[i*2+1] = c->log_extra_fields[i];
5136 }
5137
5138 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5139 pattern = strjoina(p, ".XXXXXX");
5140
5141 fd = mkostemp_safe(pattern);
5142 if (fd < 0)
5143 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5144
5145 n = writev(fd, iovec, c->n_log_extra_fields*2);
5146 if (n < 0) {
5147 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5148 goto fail;
5149 }
5150
5151 (void) fchmod(fd, 0644);
5152
5153 if (rename(pattern, p) < 0) {
5154 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5155 goto fail;
5156 }
5157
5158 u->exported_log_extra_fields = true;
5159 return 0;
5160
5161 fail:
5162 (void) unlink(pattern);
5163 return r;
5164 }
5165
5166 void unit_export_state_files(Unit *u) {
5167 const ExecContext *c;
5168
5169 assert(u);
5170
5171 if (!u->id)
5172 return;
5173
5174 if (!MANAGER_IS_SYSTEM(u->manager))
5175 return;
5176
5177 if (u->manager->test_run_flags != 0)
5178 return;
5179
5180 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5181 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5182 * the IPC system itself and PID 1 also log to the journal.
5183 *
5184          * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as an
5185 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5186 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5187 * namespace at least.
5188 *
5189 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5190 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5191 * them with one. */
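        /* Concretely, for a hypothetical unit "foo.service" this may create:
         *     /run/systemd/units/invocation:foo.service        symlink to the 32-character invocation ID
         *     /run/systemd/units/log-level-max:foo.service     symlink to a single digit 0..7
         *     /run/systemd/units/log-extra-fields:foo.service  regular file with the serialized extra fields */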
5192
5193 (void) unit_export_invocation_id(u);
5194
5195 c = unit_get_exec_context(u);
5196 if (c) {
5197 (void) unit_export_log_level_max(u, c);
5198 (void) unit_export_log_extra_fields(u, c);
5199 }
5200 }
5201
5202 void unit_unlink_state_files(Unit *u) {
5203 const char *p;
5204
5205 assert(u);
5206
5207 if (!u->id)
5208 return;
5209
5210 if (!MANAGER_IS_SYSTEM(u->manager))
5211 return;
5212
5213 /* Undoes the effect of unit_export_state() */
5214
5215 if (u->exported_invocation_id) {
5216 p = strjoina("/run/systemd/units/invocation:", u->id);
5217 (void) unlink(p);
5218
5219 u->exported_invocation_id = false;
5220 }
5221
5222 if (u->exported_log_level_max) {
5223 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5224 (void) unlink(p);
5225
5226 u->exported_log_level_max = false;
5227 }
5228
5229 if (u->exported_log_extra_fields) {
5230                 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5231 (void) unlink(p);
5232
5233 u->exported_log_extra_fields = false;
5234 }
5235 }
5236
5237 int unit_prepare_exec(Unit *u) {
5238 int r;
5239
5240 assert(u);
5241
5242         /* Prepares everything so that we can fork off a process for this unit */
5243
5244 (void) unit_realize_cgroup(u);
5245
5246 if (u->reset_accounting) {
5247 (void) unit_reset_cpu_accounting(u);
5248 (void) unit_reset_ip_accounting(u);
5249 u->reset_accounting = false;
5250 }
5251
5252 unit_export_state_files(u);
5253
5254 r = unit_setup_exec_runtime(u);
5255 if (r < 0)
5256 return r;
5257
5258 r = unit_setup_dynamic_creds(u);
5259 if (r < 0)
5260 return r;
5261
5262 return 0;
5263 }
5264
5265 static void log_leftover(pid_t pid, int sig, void *userdata) {
5266 _cleanup_free_ char *comm = NULL;
5267
5268 (void) get_process_comm(pid, &comm);
5269
5270 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5271 return;
5272
5273 log_unit_warning(userdata,
5274 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5275 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5276 pid, strna(comm));
5277 }
5278
5279 void unit_warn_leftover_processes(Unit *u) {
5280 assert(u);
5281
5282 (void) unit_pick_cgroup_path(u);
5283
5284 if (!u->cgroup_path)
5285 return;
5286
5287 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5288 }
5289
5290 bool unit_needs_console(Unit *u) {
5291 ExecContext *ec;
5292 UnitActiveState state;
5293
5294 assert(u);
5295
5296 state = unit_active_state(u);
5297
5298 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5299 return false;
5300
5301 if (UNIT_VTABLE(u)->needs_console)
5302 return UNIT_VTABLE(u)->needs_console(u);
5303
5304 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5305 ec = unit_get_exec_context(u);
5306 if (!ec)
5307 return false;
5308
5309 return exec_context_may_touch_console(ec);
5310 }
5311
5312 const char *unit_label_path(Unit *u) {
5313 const char *p;
5314
5315 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5316 * when validating access checks. */
5317
5318 p = u->source_path ?: u->fragment_path;
5319 if (!p)
5320 return NULL;
5321
5322 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5323 if (path_equal(p, "/dev/null"))
5324 return NULL;
5325
5326 return p;
5327 }
5328
5329 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5330 int r;
5331
5332 assert(u);
5333
5334 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5335 * and not a kernel thread either */
5336
5337 /* First, a simple range check */
5338 if (!pid_is_valid(pid))
5339 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5340
5341 /* Some extra safety check */
5342 if (pid == 1 || pid == getpid_cached())
5343                 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5344
5345 /* Don't even begin to bother with kernel threads */
5346 r = is_kernel_thread(pid);
5347 if (r == -ESRCH)
5348 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5349 if (r < 0)
5350 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5351 if (r > 0)
5352 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5353
5354 return 0;
5355 }
5356
5357 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5358 [COLLECT_INACTIVE] = "inactive",
5359 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5360 };
5361
5362 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);