]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
pid1: include the source unit in UnitRef
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/prctl.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "alloc-util.h"
32 #include "bus-common-errors.h"
33 #include "bus-util.h"
34 #include "cgroup-util.h"
35 #include "dbus-unit.h"
36 #include "dbus.h"
37 #include "dropin.h"
38 #include "escape.h"
39 #include "execute.h"
40 #include "fd-util.h"
41 #include "fileio-label.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "id128-util.h"
45 #include "io-util.h"
46 #include "load-dropin.h"
47 #include "load-fragment.h"
48 #include "log.h"
49 #include "macro.h"
50 #include "missing.h"
51 #include "mkdir.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "process-util.h"
55 #include "set.h"
56 #include "signal-util.h"
57 #include "sparse-endian.h"
58 #include "special.h"
59 #include "specifier.h"
60 #include "stat-util.h"
61 #include "stdio-util.h"
62 #include "string-table.h"
63 #include "string-util.h"
64 #include "strv.h"
65 #include "umask-util.h"
66 #include "unit-name.h"
67 #include "unit.h"
68 #include "user-util.h"
69 #include "virt.h"
70
/* Table of per-type method tables, indexed by unit type. Every concrete unit
 * type implements its own vtable; lookups from generic code go through the
 * UNIT_VTABLE() macro. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
84
85 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
86
/* Allocates and zero-initializes a new Unit object of 'size' bytes (concrete
 * unit types embed Unit at the start of their own struct, hence
 * size >= sizeof(Unit)) and presets every field whose default is non-zero.
 * Returns NULL on OOM. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        /* A unit may be known under several names (aliases); they live here. */
        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID; /* type is determined by the first name added */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* All BPF accounting/firewall map fds start out closed. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start-limit settings come from the manager's configured defaults;
         * the auto-stop rate limit is a fixed 16-per-10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
129
130 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
131 Unit *u;
132 int r;
133
134 u = unit_new(m, size);
135 if (!u)
136 return -ENOMEM;
137
138 r = unit_add_name(u, name);
139 if (r < 0) {
140 unit_free(u);
141 return r;
142 }
143
144 *ret = u;
145 return r;
146 }
147
148 bool unit_has_name(Unit *u, const char *name) {
149 assert(u);
150 assert(name);
151
152 return set_contains(u->names, (char*) name);
153 }
154
155 static void unit_init(Unit *u) {
156 CGroupContext *cc;
157 ExecContext *ec;
158 KillContext *kc;
159
160 assert(u);
161 assert(u->manager);
162 assert(u->type >= 0);
163
164 cc = unit_get_cgroup_context(u);
165 if (cc) {
166 cgroup_context_init(cc);
167
168 /* Copy in the manager defaults into the cgroup
169 * context, _before_ the rest of the settings have
170 * been initialized */
171
172 cc->cpu_accounting = u->manager->default_cpu_accounting;
173 cc->io_accounting = u->manager->default_io_accounting;
174 cc->ip_accounting = u->manager->default_ip_accounting;
175 cc->blockio_accounting = u->manager->default_blockio_accounting;
176 cc->memory_accounting = u->manager->default_memory_accounting;
177 cc->tasks_accounting = u->manager->default_tasks_accounting;
178 cc->ip_accounting = u->manager->default_ip_accounting;
179
180 if (u->type != UNIT_SLICE)
181 cc->tasks_max = u->manager->default_tasks_max;
182 }
183
184 ec = unit_get_exec_context(u);
185 if (ec) {
186 exec_context_init(ec);
187
188 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
189 EXEC_KEYRING_PRIVATE : EXEC_KEYRING_INHERIT;
190 }
191
192 kc = unit_get_kill_context(u);
193 if (kc)
194 kill_context_init(kc);
195
196 if (UNIT_VTABLE(u)->init)
197 UNIT_VTABLE(u)->init(u);
198 }
199
/* Registers an additional name for this unit. 'text' may be a template name
 * (foo@.service), in which case the unit's own instance string is filled in.
 * The name must be valid, unused by other units, and agree with this unit's
 * type and instance-ness. If it is the unit's very first name it also fixes
 * the unit's type/id/instance and runs unit_init(). Returns 0 on success
 * (including if the name was already set on this unit), negative errno
 * otherwise. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template can only be instantiated if we carry an instance. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Already taken by a different unit? */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must share the same unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* An instance name for a type that doesn't do templates? */
        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        /* Additional names (aliases) only for types that support them. */
        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both data structures agree. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* This is the first name: it determines type, id and instance. */
                u->type = t;
                u->id = s;
                u->instance = i;

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);

                i = NULL; /* ownership transferred to u->instance */
        }

        s = NULL; /* ownership transferred to u->names / manager->units */

        unit_add_to_dbus_queue(u);
        return 0;
}
284
/* Switches the unit's primary id to one of its already-registered names.
 * 'name' may be a template, in which case the unit's own instance string is
 * filled in first. The unit's instance field is re-derived from the new id. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* u->id points into u->names, no ownership change needed here. */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
324
325 int unit_set_description(Unit *u, const char *description) {
326 int r;
327
328 assert(u);
329
330 r = free_and_strdup(&u->description, empty_to_null(description));
331 if (r < 0)
332 return r;
333 if (r > 0)
334 unit_add_to_dbus_queue(u);
335
336 return 0;
337 }
338
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * Side effect: releases the unit's runtime resources when it is
         * inactive/failed, regardless of the final verdict. */

        /* A pending job (real or nop) keeps the unit alive. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected. */
        if (u->perpetual)
                return false;

        /* Other units still hold references on us. */
        if (u->refs_by_target)
                return false;

        /* Bus clients are still tracking this unit. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        /* Finally give the unit type implementation a veto. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
406
407 void unit_add_to_load_queue(Unit *u) {
408 assert(u);
409 assert(u->type != _UNIT_TYPE_INVALID);
410
411 if (u->load_state != UNIT_STUB || u->in_load_queue)
412 return;
413
414 LIST_PREPEND(load_queue, u->manager->load_queue, u);
415 u->in_load_queue = true;
416 }
417
418 void unit_add_to_cleanup_queue(Unit *u) {
419 assert(u);
420
421 if (u->in_cleanup_queue)
422 return;
423
424 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
425 u->in_cleanup_queue = true;
426 }
427
428 void unit_add_to_gc_queue(Unit *u) {
429 assert(u);
430
431 if (u->in_gc_queue || u->in_cleanup_queue)
432 return;
433
434 if (!unit_may_gc(u))
435 return;
436
437 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
438 u->in_gc_queue = true;
439 }
440
441 void unit_add_to_dbus_queue(Unit *u) {
442 assert(u);
443 assert(u->type != _UNIT_TYPE_INVALID);
444
445 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
446 return;
447
448 /* Shortcut things if nobody cares */
449 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
450 sd_bus_track_count(u->bus_track) <= 0 &&
451 set_isempty(u->manager->private_buses)) {
452 u->sent_dbus_new_signal = true;
453 return;
454 }
455
456 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
457 u->in_dbus_queue = true;
458 }
459
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Drop 'u' from every dependency set of the peer unit. */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                /* The peer just lost a reference; it may be collectable now. */
                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
480
/* Deletes the on-disk configuration of a transient unit: its fragment file
 * and any drop-ins that live below the transient lookup path (including their
 * now-empty per-unit directories). No-op for non-transient units. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                /* Errors here are ignored on purpose: best-effort cleanup. */
                (void) unlink(*i);
                (void) rmdir(p);
        }
}
511
/* Removes this unit from the manager's units_requiring_mounts_for index —
 * one entry exists per path prefix of every registered path — and then
 * releases the unit's own requires_mounts_for hashmap. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* We own the keys we steal here; _cleanup_free_ releases them. */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Each path was indexed under all of its prefixes;
                         * walk them and drop ourselves from each prefix set. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix? Drop
                                 * the whole entry, including the key string
                                 * 'y' the hashmap owned. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
545
546 static void unit_done(Unit *u) {
547 ExecContext *ec;
548 CGroupContext *cc;
549
550 assert(u);
551
552 if (u->type < 0)
553 return;
554
555 if (UNIT_VTABLE(u)->done)
556 UNIT_VTABLE(u)->done(u);
557
558 ec = unit_get_exec_context(u);
559 if (ec)
560 exec_context_done(ec);
561
562 cc = unit_get_cgroup_context(u);
563 if (cc)
564 cgroup_context_done(cc);
565 }
566
/* Destroys a unit: detaches it from the manager's registries and queues,
 * uninstalls any pending jobs, severs all dependency links in both
 * directions, releases cgroup/BPF resources and frees all memory.
 * Safe to call with NULL. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* Keep transient unit files on disk across daemon reloads. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop every one of our names from the manager's unit registry. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free pending jobs. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Sever dependency links in both directions. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        /* Unlink from every manager-side list/queue we may be on. */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        /* Keep serialized state files around across reloads, too. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* The set owns the name strings (u->id points into it). */
        set_free_free(u->names);

        unit_unwatch_all_pids(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->reboot_arg);

        /* Drop our reference on the slice, and everyone else's on us. */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Close all BPF map fds (safe_close() tolerates -1). */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_egress);

        free(u);
}
681
682 UnitActiveState unit_active_state(Unit *u) {
683 assert(u);
684
685 if (u->load_state == UNIT_MERGED)
686 return unit_active_state(unit_follow_merge(u));
687
688 /* After a reload it might happen that a unit is not correctly
689 * loaded but still has a process around. That's why we won't
690 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
691
692 return UNIT_VTABLE(u)->active_state(u);
693 }
694
/* Returns the type-specific, human-readable sub-state string of the unit
 * (as implemented by the unit type's vtable). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
700
701 static int set_complete_move(Set **s, Set **other) {
702 assert(s);
703 assert(other);
704
705 if (!other)
706 return 0;
707
708 if (*s)
709 return set_move(*s, *other);
710 else {
711 *s = *other;
712 *other = NULL;
713 }
714
715 return 0;
716 }
717
718 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
719 assert(s);
720 assert(other);
721
722 if (!*other)
723 return 0;
724
725 if (*s)
726 return hashmap_move(*s, *other);
727 else {
728 *s = *other;
729 *other = NULL;
730 }
731
732 return 0;
733 }
734
/* Transfers all names of 'other' over to 'u' and repoints the manager's unit
 * registry entries for every name (old and newly acquired) at 'u'. 'other'
 * is left without names and without an id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* other->names may still exist (partial move target); its remaining
         * strings are freed with it. other->id pointed into that set. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* hashmap_replace() == 0 here because every key already exists. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
756
/* Pre-allocates room in u's dependency hashmap of type 'd' for everything
 * merge_dependencies() will later move over from 'other', so that the move
 * itself cannot fail with OOM. */
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}
777
/* Merges all dependencies of type 'd' of the unit 'other' into the deps of
 * the unit 'u', fixing up the back-pointers held by peer units along the way.
 * Caller must have called reserve_dependencies() first so the final move
 * cannot fail. 'other_id' is other's (stack-copied) id, for log messages. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Replace the 'other' key with 'u', carrying the merged masks. */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
841
/* Merges unit 'other' into unit 'u': names, incoming references and all
 * dependencies are transferred to 'u'; 'other' is left in UNIT_MERGED state
 * pointing at 'u' and queued for cleanup. Only permitted while 'other' is
 * unloaded (stub/not-found), inactive and job-free, and only for unit types
 * that support aliases. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        /* Types and instance-ness must match. */
        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Stack-copy the id: merge_names() below frees other's name strings. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
919
920 int unit_merge_by_name(Unit *u, const char *name) {
921 _cleanup_free_ char *s = NULL;
922 Unit *other;
923 int r;
924
925 assert(u);
926 assert(name);
927
928 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
929 if (!u->instance)
930 return -EINVAL;
931
932 r = unit_name_replace_instance(name, u->instance, &s);
933 if (r < 0)
934 return r;
935
936 name = s;
937 }
938
939 other = manager_get_unit(u->manager, name);
940 if (other)
941 return unit_merge(u, other);
942
943 return unit_add_name(u, name);
944 }
945
946 Unit* unit_follow_merge(Unit *u) {
947 assert(u);
948
949 while (u->load_state == UNIT_MERGED)
950 assert_se(u = u->merged_into);
951
952 return u;
953 }
954
/* Adds the implicit dependencies that follow from an exec context 'c': mount
 * requirements for the configured directories/images, and — for the system
 * instance only — ordering after tmpfiles-setup (PrivateTmp=) and journald
 * (when stdout/stderr go to syslog/kmsg/journal). */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Exec directories (RuntimeDirectory= and friends), resolved against
         * the per-type prefix directories of this manager instance. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below only applies to the system instance. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1034
1035 const char *unit_description(Unit *u) {
1036 assert(u);
1037
1038 if (u->description)
1039 return u->description;
1040
1041 return strna(u->id);
1042 }
1043
/* Prints each bit of 'mask' as a "<kind>-<name>" token to f. *space tracks
 * whether a separating space is needed and is set once anything was printed.
 * Every bit of the mask must be covered by the table (asserted at the end). */
static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE, "file" },
                { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
                { UNIT_DEPENDENCY_DEFAULT, "default" },
                { UNIT_DEPENDENCY_UDEV, "udev" },
                { UNIT_DEPENDENCY_PATH, "path" },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
                { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                /* All bits consumed already? Then we are done early. */
                if (mask == 0)
                        break;

                if ((mask & table[i].mask) == table[i].mask) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        /* Clear the bits we just printed. */
                        mask &= ~table[i].mask;
                }
        }

        /* Any leftover bit means the table above is missing an entry. */
        assert(mask == 0);
}
1085
1086 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1087 char *t, **j;
1088 UnitDependency d;
1089 Iterator i;
1090 const char *prefix2;
1091 char
1092 timestamp0[FORMAT_TIMESTAMP_MAX],
1093 timestamp1[FORMAT_TIMESTAMP_MAX],
1094 timestamp2[FORMAT_TIMESTAMP_MAX],
1095 timestamp3[FORMAT_TIMESTAMP_MAX],
1096 timestamp4[FORMAT_TIMESTAMP_MAX],
1097 timespan[FORMAT_TIMESPAN_MAX];
1098 Unit *following;
1099 _cleanup_set_free_ Set *following_set = NULL;
1100 const char *n;
1101 CGroupMask m;
1102 int r;
1103
1104 assert(u);
1105 assert(u->type >= 0);
1106
1107 prefix = strempty(prefix);
1108 prefix2 = strjoina(prefix, "\t");
1109
1110 fprintf(f,
1111 "%s-> Unit %s:\n"
1112 "%s\tDescription: %s\n"
1113 "%s\tInstance: %s\n"
1114 "%s\tUnit Load State: %s\n"
1115 "%s\tUnit Active State: %s\n"
1116 "%s\tState Change Timestamp: %s\n"
1117 "%s\tInactive Exit Timestamp: %s\n"
1118 "%s\tActive Enter Timestamp: %s\n"
1119 "%s\tActive Exit Timestamp: %s\n"
1120 "%s\tInactive Enter Timestamp: %s\n"
1121 "%s\tMay GC: %s\n"
1122 "%s\tNeed Daemon Reload: %s\n"
1123 "%s\tTransient: %s\n"
1124 "%s\tPerpetual: %s\n"
1125 "%s\tGarbage Collection Mode: %s\n"
1126 "%s\tSlice: %s\n"
1127 "%s\tCGroup: %s\n"
1128 "%s\tCGroup realized: %s\n",
1129 prefix, u->id,
1130 prefix, unit_description(u),
1131 prefix, strna(u->instance),
1132 prefix, unit_load_state_to_string(u->load_state),
1133 prefix, unit_active_state_to_string(unit_active_state(u)),
1134 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1135 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1136 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1137 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1138 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1139 prefix, yes_no(unit_may_gc(u)),
1140 prefix, yes_no(unit_need_daemon_reload(u)),
1141 prefix, yes_no(u->transient),
1142 prefix, yes_no(u->perpetual),
1143 prefix, collect_mode_to_string(u->collect_mode),
1144 prefix, strna(unit_slice_name(u)),
1145 prefix, strna(u->cgroup_path),
1146 prefix, yes_no(u->cgroup_realized));
1147
1148 if (u->cgroup_realized_mask != 0) {
1149 _cleanup_free_ char *s = NULL;
1150 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1151 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1152 }
1153 if (u->cgroup_enabled_mask != 0) {
1154 _cleanup_free_ char *s = NULL;
1155 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1156 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1157 }
1158 m = unit_get_own_mask(u);
1159 if (m != 0) {
1160 _cleanup_free_ char *s = NULL;
1161 (void) cg_mask_to_string(m, &s);
1162 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1163 }
1164 m = unit_get_members_mask(u);
1165 if (m != 0) {
1166 _cleanup_free_ char *s = NULL;
1167 (void) cg_mask_to_string(m, &s);
1168 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1169 }
1170
1171 SET_FOREACH(t, u->names, i)
1172 fprintf(f, "%s\tName: %s\n", prefix, t);
1173
1174 if (!sd_id128_is_null(u->invocation_id))
1175 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1176 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1177
1178 STRV_FOREACH(j, u->documentation)
1179 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1180
1181 following = unit_following(u);
1182 if (following)
1183 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1184
1185 r = unit_following_set(u, &following_set);
1186 if (r >= 0) {
1187 Unit *other;
1188
1189 SET_FOREACH(other, following_set, i)
1190 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1191 }
1192
1193 if (u->fragment_path)
1194 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1195
1196 if (u->source_path)
1197 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1198
1199 STRV_FOREACH(j, u->dropin_paths)
1200 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1201
1202 if (u->failure_action != EMERGENCY_ACTION_NONE)
1203 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1204 if (u->success_action != EMERGENCY_ACTION_NONE)
1205 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1206
1207 if (u->job_timeout != USEC_INFINITY)
1208 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1209
1210 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1211 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1212
1213 if (u->job_timeout_reboot_arg)
1214 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1215
1216 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1217 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1218
1219 if (dual_timestamp_is_set(&u->condition_timestamp))
1220 fprintf(f,
1221 "%s\tCondition Timestamp: %s\n"
1222 "%s\tCondition Result: %s\n",
1223 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1224 prefix, yes_no(u->condition_result));
1225
1226 if (dual_timestamp_is_set(&u->assert_timestamp))
1227 fprintf(f,
1228 "%s\tAssert Timestamp: %s\n"
1229 "%s\tAssert Result: %s\n",
1230 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1231 prefix, yes_no(u->assert_result));
1232
1233 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1234 UnitDependencyInfo di;
1235 Unit *other;
1236
1237 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1238 bool space = false;
1239
1240 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1241
1242 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1243 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1244
1245 fputs(")\n", f);
1246 }
1247 }
1248
1249 if (!hashmap_isempty(u->requires_mounts_for)) {
1250 UnitDependencyInfo di;
1251 const char *path;
1252
1253 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1254 bool space = false;
1255
1256 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1257
1258 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1259 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1260
1261 fputs(")\n", f);
1262 }
1263 }
1264
1265 if (u->load_state == UNIT_LOADED) {
1266
1267 fprintf(f,
1268 "%s\tStopWhenUnneeded: %s\n"
1269 "%s\tRefuseManualStart: %s\n"
1270 "%s\tRefuseManualStop: %s\n"
1271 "%s\tDefaultDependencies: %s\n"
1272 "%s\tOnFailureJobMode: %s\n"
1273 "%s\tIgnoreOnIsolate: %s\n",
1274 prefix, yes_no(u->stop_when_unneeded),
1275 prefix, yes_no(u->refuse_manual_start),
1276 prefix, yes_no(u->refuse_manual_stop),
1277 prefix, yes_no(u->default_dependencies),
1278 prefix, job_mode_to_string(u->on_failure_job_mode),
1279 prefix, yes_no(u->ignore_on_isolate));
1280
1281 if (UNIT_VTABLE(u)->dump)
1282 UNIT_VTABLE(u)->dump(u, f, prefix2);
1283
1284 } else if (u->load_state == UNIT_MERGED)
1285 fprintf(f,
1286 "%s\tMerged into: %s\n",
1287 prefix, u->merged_into->id);
1288 else if (u->load_state == UNIT_ERROR)
1289 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1290
1291 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1292 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1293
1294 if (u->job)
1295 job_dump(u->job, f, prefix2);
1296
1297 if (u->nop_job)
1298 job_dump(u->nop_job, f, prefix2);
1299 }
1300
1301 /* Common implementation for multiple backends */
1302 int unit_load_fragment_and_dropin(Unit *u) {
1303 int r;
1304
1305 assert(u);
1306
1307 /* Load a .{service,socket,...} file */
1308 r = unit_load_fragment(u);
1309 if (r < 0)
1310 return r;
1311
1312 if (u->load_state == UNIT_STUB)
1313 return -ENOENT;
1314
1315 /* Load drop-in directory data. If u is an alias, we might be reloading the
1316 * target unit needlessly. But we cannot be sure which drops-ins have already
1317 * been loaded and which not, at least without doing complicated book-keeping,
1318 * so let's always reread all drop-ins. */
1319 return unit_load_dropin(unit_follow_merge(u));
1320 }
1321
1322 /* Common implementation for multiple backends */
1323 int unit_load_fragment_and_dropin_optional(Unit *u) {
1324 int r;
1325
1326 assert(u);
1327
1328 /* Same as unit_load_fragment_and_dropin(), but whether
1329 * something can be loaded or not doesn't matter. */
1330
1331 /* Load a .service file */
1332 r = unit_load_fragment(u);
1333 if (r < 0)
1334 return r;
1335
1336 if (u->load_state == UNIT_STUB)
1337 u->load_state = UNIT_LOADED;
1338
1339 /* Load drop-in directory data */
1340 return unit_load_dropin(unit_follow_merge(u));
1341 }
1342
1343 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1344 assert(u);
1345 assert(target);
1346
1347 if (target->type != UNIT_TARGET)
1348 return 0;
1349
1350 /* Only add the dependency if both units are loaded, so that
1351 * that loop check below is reliable */
1352 if (u->load_state != UNIT_LOADED ||
1353 target->load_state != UNIT_LOADED)
1354 return 0;
1355
1356 /* If either side wants no automatic dependencies, then let's
1357 * skip this */
1358 if (!u->default_dependencies ||
1359 !target->default_dependencies)
1360 return 0;
1361
1362 /* Don't create loops */
1363 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1364 return 0;
1365
1366 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1367 }
1368
1369 static int unit_add_target_dependencies(Unit *u) {
1370
1371 static const UnitDependency deps[] = {
1372 UNIT_REQUIRED_BY,
1373 UNIT_REQUISITE_OF,
1374 UNIT_WANTED_BY,
1375 UNIT_BOUND_BY
1376 };
1377
1378 unsigned k;
1379 int r = 0;
1380
1381 assert(u);
1382
1383 for (k = 0; k < ELEMENTSOF(deps); k++) {
1384 Unit *target;
1385 Iterator i;
1386 void *v;
1387
1388 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1389 r = unit_add_default_target_dependency(u, target);
1390 if (r < 0)
1391 return r;
1392 }
1393 }
1394
1395 return r;
1396 }
1397
1398 static int unit_add_slice_dependencies(Unit *u) {
1399 UnitDependencyMask mask;
1400 assert(u);
1401
1402 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1403 return 0;
1404
1405 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1406 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1407 relationship). */
1408 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1409
1410 if (UNIT_ISSET(u->slice))
1411 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1412
1413 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1414 return 0;
1415
1416 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1417 }
1418
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        /* For each path in RequiresMountsFor=, add After= (and possibly Requires=) dependencies on the
         * .mount units covering that path and each of its parent directories. */
        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1]; /* VLA: large enough for any prefix of 'path' */

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* Don't add a dependency on ourselves. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Ordering against the mount unit is always added, carrying over the origin mask
                         * of the RequiresMountsFor= entry. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* A hard requirement is only added when the mount unit is backed by an actual unit
                         * file (has a fragment path). */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1468
1469 static int unit_add_startup_units(Unit *u) {
1470 CGroupContext *c;
1471 int r;
1472
1473 c = unit_get_cgroup_context(u);
1474 if (!c)
1475 return 0;
1476
1477 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1478 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1479 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1480 return 0;
1481
1482 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1483 if (r < 0)
1484 return r;
1485
1486 return set_put(u->manager->startup_units, u);
1487 }
1488
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* If the unit is still on the manager's load queue, take it off — we are loading it right now. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/errored earlier)? Then there's nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* For transient units, flush the properties collected so far out to disk before the type-specific
         * loader parses them back in below. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Dispatch to the unit-type-specific loader (service, socket, …). */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* If the loader didn't advance the state, no configuration was found at all. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {

                /* Add the implicit dependencies that follow from the parsed configuration; all of these
                 * must run only after the fragment has been fully loaded. */
                r = unit_add_target_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* Isolate jobs are exclusive per transaction, hence multiple OnFailure= units cannot be
                 * combined with OnFailureJobMode=isolate. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Sanity check: merged_into must be set if and only if we are in the merged state. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* If loading failed while we were still a stub, the configuration simply wasn't found; anything
         * else is a hard load error. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1572
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1; /* tri-state: -1 = no trigger conditions seen, 0 = seen but none matched, 1 = one matched */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failed (or errored, r < 0) non-trigger condition fails the whole list immediately. */
                if (!c->trigger && r <= 0)
                        return false;

                /* For trigger ("|") conditions, remember whether at least one succeeded so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* Succeed unless trigger conditions existed and none of them matched (triggered == 0). */
        return triggered != 0;
}
1616
1617 static bool unit_condition_test(Unit *u) {
1618 assert(u);
1619
1620 dual_timestamp_get(&u->condition_timestamp);
1621 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1622
1623 return u->condition_result;
1624 }
1625
1626 static bool unit_assert_test(Unit *u) {
1627 assert(u);
1628
1629 dual_timestamp_get(&u->assert_timestamp);
1630 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1631
1632 return u->assert_result;
1633 }
1634
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        /* Emits a console status line for this unit. The format string comes from the unit type's status
         * message table and contains a single %s taking the unit description, hence the format-nonliteral
         * warning is deliberately suppressed around the call. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1640
1641 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1642 const char *format;
1643 const UnitStatusMessageFormats *format_table;
1644
1645 assert(u);
1646 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1647
1648 if (t != JOB_RELOAD) {
1649 format_table = &UNIT_VTABLE(u)->status_message_formats;
1650 if (format_table) {
1651 format = format_table->starting_stopping[t == JOB_STOP];
1652 if (format)
1653 return format;
1654 }
1655 }
1656
1657 /* Return generic strings */
1658 if (t == JOB_START)
1659 return "Starting %s.";
1660 else if (t == JOB_STOP)
1661 return "Stopping %s.";
1662 else
1663 return "Reloading %s.";
1664 }
1665
1666 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1667 const char *format;
1668
1669 assert(u);
1670
1671 /* Reload status messages have traditionally not been printed to console. */
1672 if (!IN_SET(t, JOB_START, JOB_STOP))
1673 return;
1674
1675 format = unit_get_status_message_format(u, t);
1676
1677 DISABLE_WARNING_FORMAT_NONLITERAL;
1678 unit_status_printf(u, "", format);
1679 REENABLE_WARNING;
1680 }
1681
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        /* Writes a structured journal record announcing that this unit is starting/stopping/reloading. */
        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* If logging already goes to the console, the console status line covers this — avoid emitting the
         * same message twice. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* NOTE(review): this assumes the formatted description fits in LINE_MAX — confirm xsprintf()'s
         * behavior on truncation matches expectations here. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        xsprintf(buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the journal message ID matching the operation, so records can be indexed by it. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid,
                   NULL);
}
1719
1720 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1721 assert(u);
1722 assert(t >= 0);
1723 assert(t < _JOB_TYPE_MAX);
1724
1725 unit_status_log_starting_stopping_reloading(u, t);
1726 unit_status_print_starting_stopping(u, t);
1727 }
1728
1729 int unit_start_limit_test(Unit *u) {
1730 assert(u);
1731
1732 if (ratelimit_test(&u->start_limit)) {
1733 u->start_limit_hit = false;
1734 return 0;
1735 }
1736
1737 log_unit_warning(u, "Start request repeated too quickly.");
1738 u->start_limit_hit = true;
1739
1740 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1741 }
1742
bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        /* Whether to ask the interactive confirmation question before spawning processes for this unit. */
        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reasons units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        /* NOTE(review): unit_get_exec_context() is dereferenced without a NULL check — presumably callers
         * only invoke this for units that carry an exec context; confirm. */
        return !unit_get_exec_context(u)->same_pgrp;
}
1754
1755 static bool unit_verify_deps(Unit *u) {
1756 Unit *other;
1757 Iterator j;
1758 void *v;
1759
1760 assert(u);
1761
1762 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1763 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1764 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1765 * conjunction with After= as for them any such check would make things entirely racy. */
1766
1767 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1768
1769 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1770 continue;
1771
1772 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1773 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1774 return false;
1775 }
1776 }
1777
1778 return true;
1779 }
1780
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                /* Same code as the 'already started' case above — so callers presumably treat a failed
                 * condition as a successful no-op start rather than a failure. */
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1862
1863 bool unit_can_start(Unit *u) {
1864 assert(u);
1865
1866 if (u->load_state != UNIT_LOADED)
1867 return false;
1868
1869 if (!unit_supported(u))
1870 return false;
1871
1872 return !!UNIT_VTABLE(u)->start;
1873 }
1874
1875 bool unit_can_isolate(Unit *u) {
1876 assert(u);
1877
1878 return unit_can_start(u) &&
1879 u->allow_isolate;
1880 }
1881
1882 /* Errors:
1883 * -EBADR: This unit type does not support stopping.
1884 * -EALREADY: Unit is already stopped.
1885 * -EAGAIN: An operation is already in progress. Retry later.
1886 */
1887 int unit_stop(Unit *u) {
1888 UnitActiveState state;
1889 Unit *following;
1890
1891 assert(u);
1892
1893 state = unit_active_state(u);
1894 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1895 return -EALREADY;
1896
1897 following = unit_following(u);
1898 if (following) {
1899 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1900 return unit_stop(following);
1901 }
1902
1903 if (!UNIT_VTABLE(u)->stop)
1904 return -EBADR;
1905
1906 unit_add_to_dbus_queue(u);
1907
1908 return UNIT_VTABLE(u)->stop(u);
1909 }
1910
1911 bool unit_can_stop(Unit *u) {
1912 assert(u);
1913
1914 if (!unit_supported(u))
1915 return false;
1916
1917 if (u->perpetual)
1918 return false;
1919
1920 return !!UNIT_VTABLE(u)->stop;
1921 }
1922
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* Only fully loaded units may be reloaded. */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        /* A reload is already in flight. */
        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only active units can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1965
1966 bool unit_can_reload(Unit *u) {
1967 assert(u);
1968
1969 if (UNIT_VTABLE(u)->can_reload)
1970 return UNIT_VTABLE(u)->can_reload(u);
1971
1972 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1973 return true;
1974
1975 return UNIT_VTABLE(u)->reload;
1976 }
1977
static void unit_check_unneeded(Unit *u) {

        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        /* The reverse dependency types through which another unit marks this one as "needed". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        /* Only consider stopping if we are up (or on our way up). */
        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        /* If any unit that needs us is still active, has a pending job, or is about to be restarted, we
         * are still needed — bail out. */
        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other) || unit_will_restart(other))
                                return;
        }

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2028
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* Stops this unit if any of the units it is bound to via BindsTo= has gone inactive. */

        /* A job is already pending for us — let that play out first. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                /* A pending job on the other unit might still bring it up — don't judge yet. */
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* Note: we break with 'other' still pointing at the offending unit, so the log messages
                 * below can name it. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2079
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Called when this unit came up outside of regular job control: pull in its requirement/want
         * dependencies and stop its conflicts, mirroring what a start job would have done. All
         * manager_add_job() results are deliberately ignored — this is best-effort. Units we are ordered
         * After= are skipped for the start jobs, presumably because the ordering could no longer be
         * honored; confirm. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= dependencies are enqueued with JOB_FAIL rather than JOB_REPLACE, matching their weaker
         * semantics. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conversely, stop everything we conflict with (in either direction). */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2111
2112 static void retroactively_stop_dependencies(Unit *u) {
2113 Unit *other;
2114 Iterator i;
2115 void *v;
2116
2117 assert(u);
2118 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2119
2120 /* Pull down units which are bound to us recursively if enabled */
2121 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2122 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2123 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2124 }
2125
2126 static void check_unneeded_dependencies(Unit *u) {
2127 Unit *other;
2128 Iterator i;
2129 void *v;
2130
2131 assert(u);
2132 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2133
2134 /* Garbage collect services that might not be needed anymore, if enabled */
2135 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2136 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2137 unit_check_unneeded(other);
2138 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2139 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2140 unit_check_unneeded(other);
2141 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2142 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2143 unit_check_unneeded(other);
2144 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2145 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2146 unit_check_unneeded(other);
2147 }
2148
2149 void unit_start_on_failure(Unit *u) {
2150 Unit *other;
2151 Iterator i;
2152 void *v;
2153
2154 assert(u);
2155
2156 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2157 return;
2158
2159 log_unit_info(u, "Triggering OnFailure= dependencies.");
2160
2161 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2162 int r;
2163
2164 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2165 if (r < 0)
2166 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2167 }
2168 }
2169
2170 void unit_trigger_notify(Unit *u) {
2171 Unit *other;
2172 Iterator i;
2173 void *v;
2174
2175 assert(u);
2176
2177 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2178 if (UNIT_VTABLE(other)->trigger_notify)
2179 UNIT_VTABLE(other)->trigger_notify(other, u);
2180 }
2181
2182 static int unit_log_resources(Unit *u) {
2183
2184 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2185 size_t n_message_parts = 0, n_iovec = 0;
2186 char* message_parts[3 + 1], *t;
2187 nsec_t nsec = NSEC_INFINITY;
2188 CGroupIPAccountingMetric m;
2189 size_t i;
2190 int r;
2191 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2192 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2193 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2194 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2195 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2196 };
2197
2198 assert(u);
2199
2200 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2201 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2202 * information and the complete data in structured fields. */
2203
2204 (void) unit_get_cpu_usage(u, &nsec);
2205 if (nsec != NSEC_INFINITY) {
2206 char buf[FORMAT_TIMESPAN_MAX] = "";
2207
2208 /* Format the CPU time for inclusion in the structured log message */
2209 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2210 r = log_oom();
2211 goto finish;
2212 }
2213 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2214
2215 /* Format the CPU time for inclusion in the human language message string */
2216 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2217 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2218 if (!t) {
2219 r = log_oom();
2220 goto finish;
2221 }
2222
2223 message_parts[n_message_parts++] = t;
2224 }
2225
2226 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2227 char buf[FORMAT_BYTES_MAX] = "";
2228 uint64_t value = UINT64_MAX;
2229
2230 assert(ip_fields[m]);
2231
2232 (void) unit_get_ip_accounting(u, m, &value);
2233 if (value == UINT64_MAX)
2234 continue;
2235
2236 /* Format IP accounting data for inclusion in the structured log message */
2237 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2238 r = log_oom();
2239 goto finish;
2240 }
2241 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2242
2243 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2244 * bytes counters (and not for the packets counters) */
2245 if (m == CGROUP_IP_INGRESS_BYTES)
2246 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2247 format_bytes(buf, sizeof(buf), value),
2248 " IP traffic");
2249 else if (m == CGROUP_IP_EGRESS_BYTES)
2250 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2251 format_bytes(buf, sizeof(buf), value),
2252 " IP traffic");
2253 else
2254 continue;
2255 if (!t) {
2256 r = log_oom();
2257 goto finish;
2258 }
2259
2260 message_parts[n_message_parts++] = t;
2261 }
2262
2263 /* Is there any accounting data available at all? */
2264 if (n_iovec == 0) {
2265 r = 0;
2266 goto finish;
2267 }
2268
2269 if (n_message_parts == 0)
2270 t = strjoina("MESSAGE=", u->id, ": Completed");
2271 else {
2272 _cleanup_free_ char *joined;
2273
2274 message_parts[n_message_parts] = NULL;
2275
2276 joined = strv_join(message_parts, ", ");
2277 if (!joined) {
2278 r = log_oom();
2279 goto finish;
2280 }
2281
2282 t = strjoina("MESSAGE=", u->id, ": ", joined);
2283 }
2284
2285 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2286 * and hence don't increase n_iovec for them */
2287 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2288 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2289
2290 t = strjoina(u->manager->unit_log_field, u->id);
2291 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2292
2293 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2294 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2295
2296 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2297 r = 0;
2298
2299 finish:
2300 for (i = 0; i < n_message_parts; i++)
2301 free(message_parts[i]);
2302
2303 for (i = 0; i < n_iovec; i++)
2304 free(iovec[i].iov_base);
2305
2306 return r;
2307
2308 }
2309
2310 static void unit_update_on_console(Unit *u) {
2311 bool b;
2312
2313 assert(u);
2314
2315 b = unit_needs_console(u);
2316 if (u->on_console == b)
2317 return;
2318
2319 u->on_console = b;
2320 if (b)
2321 manager_ref_console(u->manager);
2322 else
2323 manager_unref_console(u->manager);
2324
2325 }
2326
/* Central state-change hook: invoked by the unit type implementations for every low-level unit state
 * transition. Updates state timestamps, finishes or invalidates jobs waiting on the unit, retroactively
 * starts/stops dependencies for unexpected transitions, emits audit/plymouth/resource-log records and
 * schedules follow-up checks (unneeded, BindsTo=, emergency actions). */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        Manager *m;
        bool unexpected;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes,
         * even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected
         * behavior here. For example: if a mount point is remounted
         * this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* The unit moved somewhere other than "activating" while a start job was
                                 * running — this is not what the job asked for. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job is pending for this unit, hence any state change is by definition unexpected */
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        /* Some names are special */
        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
                        /* The bus might have just become available,
                         * hence try to connect to it, if we aren't
                         * yet connected. */
                        bus_init(m, true);

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {
                /* We don't care about D-Bus going down here, since we'll get an asynchronous notification for it
                 * anyway. */

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for
                 * being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed
                 * has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these
                 * units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2537
/* Starts watching the specified PID on behalf of this unit (SIGCHLD handling). A PID may be watched by
 * several units: the manager's watch_pids hashmap maps the positive PID to the first watching unit, and the
 * negated PID to a NULL-terminated array of any further watching units. Returns 0 on success (including when
 * we were already watching), negative errno on error. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, one entry larger than the old one (n is the old length,
                         * possibly 0), plus room for the NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* memcpy_safe() tolerates array == NULL when n == 0 */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also remember the PID in our own per-unit set */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2602
2603 void unit_unwatch_pid(Unit *u, pid_t pid) {
2604 Unit **array;
2605
2606 assert(u);
2607 assert(pid_is_valid(pid));
2608
2609 /* First let's drop the unit in case it's keyed as "pid". */
2610 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2611
2612 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2613 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2614 if (array) {
2615 size_t n, m = 0;
2616
2617 /* Let's iterate through the array, dropping our own entry */
2618 for (n = 0; array[n]; n++)
2619 if (array[n] != u)
2620 array[m++] = array[n];
2621 array[m] = NULL;
2622
2623 if (m == 0) {
2624 /* The array is now empty, remove the entire entry */
2625 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2626 free(array);
2627 }
2628 }
2629
2630 (void) set_remove(u->pids, PID_TO_PTR(pid));
2631 }
2632
2633 void unit_unwatch_all_pids(Unit *u) {
2634 assert(u);
2635
2636 while (!set_isempty(u->pids))
2637 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2638
2639 u->pids = set_free(u->pids);
2640 }
2641
/* Drops watches for PIDs that are no longer waitable (i.e. fully reaped), except for the two PIDs passed in,
 * which are kept unconditionally (callers typically pass the main and control PID, which are tracked
 * separately). */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the entry the iterator currently points at —
                 * assumes SET_FOREACH tolerates removal of the current entry, as systemd's hashmap-backed
                 * iterators do. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2660
2661 bool unit_job_is_applicable(Unit *u, JobType j) {
2662 assert(u);
2663 assert(j >= 0 && j < _JOB_TYPE_MAX);
2664
2665 switch (j) {
2666
2667 case JOB_VERIFY_ACTIVE:
2668 case JOB_START:
2669 case JOB_NOP:
2670 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2671 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2672 * jobs for it. */
2673 return true;
2674
2675 case JOB_STOP:
2676 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2677 * external events), hence it makes no sense to permit enqueing such a request either. */
2678 return !u->perpetual;
2679
2680 case JOB_RESTART:
2681 case JOB_TRY_RESTART:
2682 return unit_can_stop(u) && unit_can_start(u);
2683
2684 case JOB_RELOAD:
2685 case JOB_TRY_RELOAD:
2686 return unit_can_reload(u);
2687
2688 case JOB_RELOAD_OR_START:
2689 return unit_can_reload(u) && unit_can_start(u);
2690
2691 default:
2692 assert_not_reached("Invalid job type");
2693 }
2694 }
2695
2696 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2697 assert(u);
2698
2699 /* Only warn about some unit types */
2700 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2701 return;
2702
2703 if (streq_ptr(u->id, other))
2704 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2705 else
2706 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2707 }
2708
2709 static int unit_add_dependency_hashmap(
2710 Hashmap **h,
2711 Unit *other,
2712 UnitDependencyMask origin_mask,
2713 UnitDependencyMask destination_mask) {
2714
2715 UnitDependencyInfo info;
2716 int r;
2717
2718 assert(h);
2719 assert(other);
2720 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2721 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2722 assert(origin_mask > 0 || destination_mask > 0);
2723
2724 r = hashmap_ensure_allocated(h, NULL);
2725 if (r < 0)
2726 return r;
2727
2728 assert_cc(sizeof(void*) == sizeof(info));
2729
2730 info.data = hashmap_get(*h, other);
2731 if (info.data) {
2732 /* Entry already exists. Add in our mask. */
2733
2734 if ((info.origin_mask & origin_mask) == info.origin_mask &&
2735 (info.destination_mask & destination_mask) == info.destination_mask)
2736 return 0; /* NOP */
2737
2738 info.origin_mask |= origin_mask;
2739 info.destination_mask |= destination_mask;
2740
2741 r = hashmap_update(*h, other, info.data);
2742 } else {
2743 info = (UnitDependencyInfo) {
2744 .origin_mask = origin_mask,
2745 .destination_mask = destination_mask,
2746 };
2747
2748 r = hashmap_put(*h, other, info.data);
2749 }
2750 if (r < 0)
2751 return r;
2752
2753 return 1;
2754 }
2755
2756 int unit_add_dependency(
2757 Unit *u,
2758 UnitDependency d,
2759 Unit *other,
2760 bool add_reference,
2761 UnitDependencyMask mask) {
2762
2763 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2764 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2765 [UNIT_WANTS] = UNIT_WANTED_BY,
2766 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2767 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2768 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2769 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2770 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2771 [UNIT_WANTED_BY] = UNIT_WANTS,
2772 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2773 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2774 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2775 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2776 [UNIT_BEFORE] = UNIT_AFTER,
2777 [UNIT_AFTER] = UNIT_BEFORE,
2778 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2779 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2780 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2781 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2782 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2783 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2784 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2785 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2786 };
2787 Unit *original_u = u, *original_other = other;
2788 int r;
2789
2790 assert(u);
2791 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2792 assert(other);
2793
2794 u = unit_follow_merge(u);
2795 other = unit_follow_merge(other);
2796
2797 /* We won't allow dependencies on ourselves. We will not
2798 * consider them an error however. */
2799 if (u == other) {
2800 maybe_warn_about_dependency(original_u, original_other->id, d);
2801 return 0;
2802 }
2803
2804 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2805 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2806 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2807 return 0;
2808 }
2809
2810 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2811 if (r < 0)
2812 return r;
2813
2814 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2815 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2816 if (r < 0)
2817 return r;
2818 }
2819
2820 if (add_reference) {
2821 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2822 if (r < 0)
2823 return r;
2824
2825 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2826 if (r < 0)
2827 return r;
2828 }
2829
2830 unit_add_to_dbus_queue(u);
2831 return 0;
2832 }
2833
2834 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2835 int r;
2836
2837 assert(u);
2838
2839 r = unit_add_dependency(u, d, other, add_reference, mask);
2840 if (r < 0)
2841 return r;
2842
2843 return unit_add_dependency(u, e, other, add_reference, mask);
2844 }
2845
2846 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2847 int r;
2848
2849 assert(u);
2850 assert(name || path);
2851 assert(buf);
2852 assert(ret);
2853
2854 if (!name)
2855 name = basename(path);
2856
2857 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2858 *buf = NULL;
2859 *ret = name;
2860 return 0;
2861 }
2862
2863 if (u->instance)
2864 r = unit_name_replace_instance(name, u->instance, buf);
2865 else {
2866 _cleanup_free_ char *i = NULL;
2867
2868 r = unit_name_to_prefix(u->id, &i);
2869 if (r < 0)
2870 return r;
2871
2872 r = unit_name_replace_instance(name, i, buf);
2873 }
2874 if (r < 0)
2875 return r;
2876
2877 *ret = *buf;
2878 return 0;
2879 }
2880
2881 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2882 _cleanup_free_ char *buf = NULL;
2883 Unit *other;
2884 int r;
2885
2886 assert(u);
2887 assert(name || path);
2888
2889 r = resolve_template(u, name, path, &buf, &name);
2890 if (r < 0)
2891 return r;
2892
2893 r = manager_load_unit(u->manager, name, path, NULL, &other);
2894 if (r < 0)
2895 return r;
2896
2897 return unit_add_dependency(u, d, other, add_reference, mask);
2898 }
2899
2900 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2901 _cleanup_free_ char *buf = NULL;
2902 Unit *other;
2903 int r;
2904
2905 assert(u);
2906 assert(name || path);
2907
2908 r = resolve_template(u, name, path, &buf, &name);
2909 if (r < 0)
2910 return r;
2911
2912 r = manager_load_unit(u->manager, name, path, NULL, &other);
2913 if (r < 0)
2914 return r;
2915
2916 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2917 }
2918
int set_unit_path(const char *p) {
        int r;

        /* This is mostly for debug purposes: points the manager at an alternative unit search path via the
         * environment. Returns 0 on success, negative errno on failure. */
        r = setenv("SYSTEMD_UNIT_PATH", p, 1);
        return r < 0 ? -errno : 0;
}
2926
2927 char *unit_dbus_path(Unit *u) {
2928 assert(u);
2929
2930 if (!u->id)
2931 return NULL;
2932
2933 return unit_dbus_path_from_name(u->id);
2934 }
2935
2936 char *unit_dbus_path_invocation_id(Unit *u) {
2937 assert(u);
2938
2939 if (sd_id128_is_null(u->invocation_id))
2940 return NULL;
2941
2942 return unit_dbus_path_from_name(u->invocation_id_string);
2943 }
2944
/* Assigns "slice" as the slice of "u". Returns 1 if the slice was set or changed, 0 if it was already this
 * slice, and a negative errno if the assignment is not permitted. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Once a unit left the inactive state its cgroup placement is fixed */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope may only ever live in the root slice */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
2981
2982 int unit_set_default_slice(Unit *u) {
2983 _cleanup_free_ char *b = NULL;
2984 const char *slice_name;
2985 Unit *slice;
2986 int r;
2987
2988 assert(u);
2989
2990 if (UNIT_ISSET(u->slice))
2991 return 0;
2992
2993 if (u->instance) {
2994 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2995
2996 /* Implicitly place all instantiated units in their
2997 * own per-template slice */
2998
2999 r = unit_name_to_prefix(u->id, &prefix);
3000 if (r < 0)
3001 return r;
3002
3003 /* The prefix is already escaped, but it might include
3004 * "-" which has a special meaning for slice units,
3005 * hence escape it here extra. */
3006 escaped = unit_name_escape(prefix);
3007 if (!escaped)
3008 return -ENOMEM;
3009
3010 if (MANAGER_IS_SYSTEM(u->manager))
3011 b = strjoin("system-", escaped, ".slice");
3012 else
3013 b = strappend(escaped, ".slice");
3014 if (!b)
3015 return -ENOMEM;
3016
3017 slice_name = b;
3018 } else
3019 slice_name =
3020 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3021 ? SPECIAL_SYSTEM_SLICE
3022 : SPECIAL_ROOT_SLICE;
3023
3024 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3025 if (r < 0)
3026 return r;
3027
3028 return unit_set_slice(u, slice);
3029 }
3030
3031 const char *unit_slice_name(Unit *u) {
3032 assert(u);
3033
3034 if (!UNIT_ISSET(u->slice))
3035 return NULL;
3036
3037 return UNIT_DEREF(u->slice)->id;
3038 }
3039
3040 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3041 _cleanup_free_ char *t = NULL;
3042 int r;
3043
3044 assert(u);
3045 assert(type);
3046 assert(_found);
3047
3048 r = unit_name_change_suffix(u->id, type, &t);
3049 if (r < 0)
3050 return r;
3051 if (unit_has_name(u, t))
3052 return -EINVAL;
3053
3054 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3055 assert(r < 0 || *_found != u);
3056 return r;
3057 }
3058
3059 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3060 const char *name, *old_owner, *new_owner;
3061 Unit *u = userdata;
3062 int r;
3063
3064 assert(message);
3065 assert(u);
3066
3067 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3068 if (r < 0) {
3069 bus_log_parse_error(r);
3070 return 0;
3071 }
3072
3073 old_owner = empty_to_null(old_owner);
3074 new_owner = empty_to_null(new_owner);
3075
3076 if (UNIT_VTABLE(u)->bus_name_owner_change)
3077 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3078
3079 return 0;
3080 }
3081
3082 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3083 const char *match;
3084
3085 assert(u);
3086 assert(bus);
3087 assert(name);
3088
3089 if (u->match_bus_slot)
3090 return -EBUSY;
3091
3092 match = strjoina("type='signal',"
3093 "sender='org.freedesktop.DBus',"
3094 "path='/org/freedesktop/DBus',"
3095 "interface='org.freedesktop.DBus',"
3096 "member='NameOwnerChanged',"
3097 "arg0='", name, "'");
3098
3099 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3100 }
3101
3102 int unit_watch_bus_name(Unit *u, const char *name) {
3103 int r;
3104
3105 assert(u);
3106 assert(name);
3107
3108 /* Watch a specific name on the bus. We only support one unit
3109 * watching each name for now. */
3110
3111 if (u->manager->api_bus) {
3112 /* If the bus is already available, install the match directly.
3113 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3114 r = unit_install_bus_match(u, u->manager->api_bus, name);
3115 if (r < 0)
3116 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3117 }
3118
3119 r = hashmap_put(u->manager->watch_bus, name, u);
3120 if (r < 0) {
3121 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3122 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3123 }
3124
3125 return 0;
3126 }
3127
3128 void unit_unwatch_bus_name(Unit *u, const char *name) {
3129 assert(u);
3130 assert(name);
3131
3132 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3133 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3134 }
3135
3136 bool unit_can_serialize(Unit *u) {
3137 assert(u);
3138
3139 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3140 }
3141
3142 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3143 _cleanup_free_ char *s = NULL;
3144 int r = 0;
3145
3146 assert(f);
3147 assert(key);
3148
3149 if (mask != 0) {
3150 r = cg_mask_to_string(mask, &s);
3151 if (r >= 0) {
3152 fputs(key, f);
3153 fputc('=', f);
3154 fputs(s, f);
3155 fputc('\n', f);
3156 }
3157 }
3158 return r;
3159 }
3160
/* Serialization key names for the per-unit IP accounting counters, indexed by metric. Used by
 * unit_serialize() below; must stay in sync with the matching deserialization logic. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3167
/* Serializes the generic unit state (timestamps, condition/assert results, cgroup and accounting data, bus
 * track refs and optionally pending jobs) to "f" as "key=value" lines, terminated by an empty line. File
 * descriptors that need to survive are stashed in "fds". Used across daemon re-execution/reload. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Let the unit type serialize its own private state first */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful if they were ever evaluated */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* Serialize the IP accounting counters only for metrics that are actually available */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3249
3250 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3251 assert(u);
3252 assert(f);
3253 assert(key);
3254
3255 if (!value)
3256 return 0;
3257
3258 fputs(key, f);
3259 fputc('=', f);
3260 fputs(value, f);
3261 fputc('\n', f);
3262
3263 return 1;
3264 }
3265
3266 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3267 _cleanup_free_ char *c = NULL;
3268
3269 assert(u);
3270 assert(f);
3271 assert(key);
3272
3273 if (!value)
3274 return 0;
3275
3276 c = cescape(value);
3277 if (!c)
3278 return -ENOMEM;
3279
3280 fputs(key, f);
3281 fputc('=', f);
3282 fputs(c, f);
3283 fputc('\n', f);
3284
3285 return 1;
3286 }
3287
3288 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3289 int copy;
3290
3291 assert(u);
3292 assert(f);
3293 assert(key);
3294
3295 if (fd < 0)
3296 return 0;
3297
3298 copy = fdset_put_dup(fds, fd);
3299 if (copy < 0)
3300 return copy;
3301
3302 fprintf(f, "%s=%i\n", key, copy);
3303 return 1;
3304 }
3305
3306 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3307 va_list ap;
3308
3309 assert(u);
3310 assert(f);
3311 assert(key);
3312 assert(format);
3313
3314 fputs(key, f);
3315 fputc('=', f);
3316
3317 va_start(ap, format);
3318 vfprintf(f, format, ap);
3319 va_end(ap);
3320
3321 fputc('\n', f);
3322 }
3323
3324 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3325 int r;
3326
3327 assert(u);
3328 assert(f);
3329 assert(fds);
3330
3331 for (;;) {
3332 char line[LINE_MAX], *l, *v;
3333 CGroupIPAccountingMetric m;
3334 size_t k;
3335
3336 if (!fgets(line, sizeof(line), f)) {
3337 if (feof(f))
3338 return 0;
3339 return -errno;
3340 }
3341
3342 char_array_0(line);
3343 l = strstrip(line);
3344
3345 /* End marker */
3346 if (isempty(l))
3347 break;
3348
3349 k = strcspn(l, "=");
3350
3351 if (l[k] == '=') {
3352 l[k] = 0;
3353 v = l+k+1;
3354 } else
3355 v = l+k;
3356
3357 if (streq(l, "job")) {
3358 if (v[0] == '\0') {
3359 /* new-style serialized job */
3360 Job *j;
3361
3362 j = job_new_raw(u);
3363 if (!j)
3364 return log_oom();
3365
3366 r = job_deserialize(j, f);
3367 if (r < 0) {
3368 job_free(j);
3369 return r;
3370 }
3371
3372 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3373 if (r < 0) {
3374 job_free(j);
3375 return r;
3376 }
3377
3378 r = job_install_deserialized(j);
3379 if (r < 0) {
3380 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3381 job_free(j);
3382 return r;
3383 }
3384 } else /* legacy for pre-44 */
3385 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3386 continue;
3387 } else if (streq(l, "state-change-timestamp")) {
3388 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3389 continue;
3390 } else if (streq(l, "inactive-exit-timestamp")) {
3391 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3392 continue;
3393 } else if (streq(l, "active-enter-timestamp")) {
3394 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3395 continue;
3396 } else if (streq(l, "active-exit-timestamp")) {
3397 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3398 continue;
3399 } else if (streq(l, "inactive-enter-timestamp")) {
3400 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3401 continue;
3402 } else if (streq(l, "condition-timestamp")) {
3403 dual_timestamp_deserialize(v, &u->condition_timestamp);
3404 continue;
3405 } else if (streq(l, "assert-timestamp")) {
3406 dual_timestamp_deserialize(v, &u->assert_timestamp);
3407 continue;
3408 } else if (streq(l, "condition-result")) {
3409
3410 r = parse_boolean(v);
3411 if (r < 0)
3412 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3413 else
3414 u->condition_result = r;
3415
3416 continue;
3417
3418 } else if (streq(l, "assert-result")) {
3419
3420 r = parse_boolean(v);
3421 if (r < 0)
3422 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3423 else
3424 u->assert_result = r;
3425
3426 continue;
3427
3428 } else if (streq(l, "transient")) {
3429
3430 r = parse_boolean(v);
3431 if (r < 0)
3432 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3433 else
3434 u->transient = r;
3435
3436 continue;
3437
3438 } else if (streq(l, "exported-invocation-id")) {
3439
3440 r = parse_boolean(v);
3441 if (r < 0)
3442 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3443 else
3444 u->exported_invocation_id = r;
3445
3446 continue;
3447
3448 } else if (streq(l, "exported-log-level-max")) {
3449
3450 r = parse_boolean(v);
3451 if (r < 0)
3452 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3453 else
3454 u->exported_log_level_max = r;
3455
3456 continue;
3457
3458 } else if (streq(l, "exported-log-extra-fields")) {
3459
3460 r = parse_boolean(v);
3461 if (r < 0)
3462 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3463 else
3464 u->exported_log_extra_fields = r;
3465
3466 continue;
3467
3468 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3469
3470 r = safe_atou64(v, &u->cpu_usage_base);
3471 if (r < 0)
3472 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3473
3474 continue;
3475
3476 } else if (streq(l, "cpu-usage-last")) {
3477
3478 r = safe_atou64(v, &u->cpu_usage_last);
3479 if (r < 0)
3480 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3481
3482 continue;
3483
3484 } else if (streq(l, "cgroup")) {
3485
3486 r = unit_set_cgroup_path(u, v);
3487 if (r < 0)
3488 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3489
3490 (void) unit_watch_cgroup(u);
3491
3492 continue;
3493 } else if (streq(l, "cgroup-realized")) {
3494 int b;
3495
3496 b = parse_boolean(v);
3497 if (b < 0)
3498 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3499 else
3500 u->cgroup_realized = b;
3501
3502 continue;
3503
3504 } else if (streq(l, "cgroup-realized-mask")) {
3505
3506 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3507 if (r < 0)
3508 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3509 continue;
3510
3511 } else if (streq(l, "cgroup-enabled-mask")) {
3512
3513 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3514 if (r < 0)
3515 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3516 continue;
3517
3518 } else if (streq(l, "cgroup-bpf-realized")) {
3519 int i;
3520
3521 r = safe_atoi(v, &i);
3522 if (r < 0)
3523 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3524 else
3525 u->cgroup_bpf_state =
3526 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3527 i > 0 ? UNIT_CGROUP_BPF_ON :
3528 UNIT_CGROUP_BPF_OFF;
3529
3530 continue;
3531
3532 } else if (streq(l, "ref-uid")) {
3533 uid_t uid;
3534
3535 r = parse_uid(v, &uid);
3536 if (r < 0)
3537 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3538 else
3539 unit_ref_uid_gid(u, uid, GID_INVALID);
3540
3541 continue;
3542
3543 } else if (streq(l, "ref-gid")) {
3544 gid_t gid;
3545
3546 r = parse_gid(v, &gid);
3547 if (r < 0)
3548 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3549 else
3550 unit_ref_uid_gid(u, UID_INVALID, gid);
3551
3552 } else if (streq(l, "ref")) {
3553
3554 r = strv_extend(&u->deserialized_refs, v);
3555 if (r < 0)
3556 log_oom();
3557
3558 continue;
3559 } else if (streq(l, "invocation-id")) {
3560 sd_id128_t id;
3561
3562 r = sd_id128_from_string(v, &id);
3563 if (r < 0)
3564 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3565 else {
3566 r = unit_set_invocation_id(u, id);
3567 if (r < 0)
3568 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3569 }
3570
3571 continue;
3572 }
3573
3574 /* Check if this is an IP accounting metric serialization field */
3575 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3576 if (streq(l, ip_accounting_metric_field[m]))
3577 break;
3578 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3579 uint64_t c;
3580
3581 r = safe_atou64(v, &c);
3582 if (r < 0)
3583 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3584 else
3585 u->ip_accounting_extra[m] = c;
3586 continue;
3587 }
3588
3589 if (unit_can_serialize(u)) {
3590 r = exec_runtime_deserialize_compat(u, l, v, fds);
3591 if (r < 0) {
3592 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3593 continue;
3594 }
3595
3596 /* Returns positive if key was handled by the call */
3597 if (r > 0)
3598 continue;
3599
3600 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3601 if (r < 0)
3602 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3603 }
3604 }
3605
3606 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3607 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3608 * before 228 where the base for timeouts was not persistent across reboots. */
3609
3610 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3611 dual_timestamp_get(&u->state_change_timestamp);
3612
3613 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3614 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3615 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3616 unit_invalidate_cgroup_bpf(u);
3617
3618 return 0;
3619 }
3620
3621 void unit_deserialize_skip(FILE *f) {
3622 assert(f);
3623
3624 /* Skip serialized data for this unit. We don't know what it is. */
3625
3626 for (;;) {
3627 char line[LINE_MAX], *l;
3628
3629 if (!fgets(line, sizeof line, f))
3630 return;
3631
3632 char_array_0(line);
3633 l = strstrip(line);
3634
3635 /* End marker */
3636 if (isempty(l))
3637 return;
3638 }
3639 }
3640
3641
3642 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3643 Unit *device;
3644 _cleanup_free_ char *e = NULL;
3645 int r;
3646
3647 assert(u);
3648
3649 /* Adds in links to the device node that this unit is based on */
3650 if (isempty(what))
3651 return 0;
3652
3653 if (!is_device_path(what))
3654 return 0;
3655
3656 /* When device units aren't supported (such as in a
3657 * container), don't create dependencies on them. */
3658 if (!unit_type_supported(UNIT_DEVICE))
3659 return 0;
3660
3661 r = unit_name_from_path(what, ".device", &e);
3662 if (r < 0)
3663 return r;
3664
3665 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3666 if (r < 0)
3667 return r;
3668
3669 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3670 dep = UNIT_BINDS_TO;
3671
3672 r = unit_add_two_dependencies(u, UNIT_AFTER,
3673 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3674 device, true, mask);
3675 if (r < 0)
3676 return r;
3677
3678 if (wants) {
3679 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3680 if (r < 0)
3681 return r;
3682 }
3683
3684 return 0;
3685 }
3686
3687 int unit_coldplug(Unit *u) {
3688 int r = 0, q;
3689 char **i;
3690
3691 assert(u);
3692
3693 /* Make sure we don't enter a loop, when coldplugging
3694 * recursively. */
3695 if (u->coldplugged)
3696 return 0;
3697
3698 u->coldplugged = true;
3699
3700 STRV_FOREACH(i, u->deserialized_refs) {
3701 q = bus_unit_track_add_name(u, *i);
3702 if (q < 0 && r >= 0)
3703 r = q;
3704 }
3705 u->deserialized_refs = strv_free(u->deserialized_refs);
3706
3707 if (UNIT_VTABLE(u)->coldplug) {
3708 q = UNIT_VTABLE(u)->coldplug(u);
3709 if (q < 0 && r >= 0)
3710 r = q;
3711 }
3712
3713 if (u->job) {
3714 q = job_coldplug(u->job);
3715 if (q < 0 && r >= 0)
3716 r = q;
3717 }
3718
3719 return r;
3720 }
3721
3722 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3723 struct stat st;
3724
3725 if (!path)
3726 return false;
3727
3728 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3729 * are never out-of-date. */
3730 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3731 return false;
3732
3733 if (stat(path, &st) < 0)
3734 /* What, cannot access this anymore? */
3735 return true;
3736
3737 if (path_masked)
3738 /* For masked files check if they are still so */
3739 return !null_or_empty(&st);
3740 else
3741 /* For non-empty files check the mtime */
3742 return timespec_load(&st.st_mtim) > mtime;
3743
3744 return false;
3745 }
3746
3747 bool unit_need_daemon_reload(Unit *u) {
3748 _cleanup_strv_free_ char **t = NULL;
3749 char **path;
3750
3751 assert(u);
3752
3753 /* For unit files, we allow masking… */
3754 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3755 u->load_state == UNIT_MASKED))
3756 return true;
3757
3758 /* Source paths should not be masked… */
3759 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3760 return true;
3761
3762 if (u->load_state == UNIT_LOADED)
3763 (void) unit_find_dropin_paths(u, &t);
3764 if (!strv_equal(u->dropin_paths, t))
3765 return true;
3766
3767 /* … any drop-ins that are masked are simply omitted from the list. */
3768 STRV_FOREACH(path, u->dropin_paths)
3769 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3770 return true;
3771
3772 return false;
3773 }
3774
3775 void unit_reset_failed(Unit *u) {
3776 assert(u);
3777
3778 if (UNIT_VTABLE(u)->reset_failed)
3779 UNIT_VTABLE(u)->reset_failed(u);
3780
3781 RATELIMIT_RESET(u->start_limit);
3782 u->start_limit_hit = false;
3783 }
3784
3785 Unit *unit_following(Unit *u) {
3786 assert(u);
3787
3788 if (UNIT_VTABLE(u)->following)
3789 return UNIT_VTABLE(u)->following(u);
3790
3791 return NULL;
3792 }
3793
3794 bool unit_stop_pending(Unit *u) {
3795 assert(u);
3796
3797 /* This call does check the current state of the unit. It's
3798 * hence useful to be called from state change calls of the
3799 * unit itself, where the state isn't updated yet. This is
3800 * different from unit_inactive_or_pending() which checks both
3801 * the current state and for a queued job. */
3802
3803 return u->job && u->job->type == JOB_STOP;
3804 }
3805
3806 bool unit_inactive_or_pending(Unit *u) {
3807 assert(u);
3808
3809 /* Returns true if the unit is inactive or going down */
3810
3811 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3812 return true;
3813
3814 if (unit_stop_pending(u))
3815 return true;
3816
3817 return false;
3818 }
3819
3820 bool unit_active_or_pending(Unit *u) {
3821 assert(u);
3822
3823 /* Returns true if the unit is active or going up */
3824
3825 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3826 return true;
3827
3828 if (u->job &&
3829 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3830 return true;
3831
3832 return false;
3833 }
3834
3835 bool unit_will_restart(Unit *u) {
3836 assert(u);
3837
3838 if (!UNIT_VTABLE(u)->will_restart)
3839 return false;
3840
3841 return UNIT_VTABLE(u)->will_restart(u);
3842 }
3843
3844 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3845 assert(u);
3846 assert(w >= 0 && w < _KILL_WHO_MAX);
3847 assert(SIGNAL_VALID(signo));
3848
3849 if (!UNIT_VTABLE(u)->kill)
3850 return -EOPNOTSUPP;
3851
3852 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3853 }
3854
3855 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3856 Set *pid_set;
3857 int r;
3858
3859 pid_set = set_new(NULL);
3860 if (!pid_set)
3861 return NULL;
3862
3863 /* Exclude the main/control pids from being killed via the cgroup */
3864 if (main_pid > 0) {
3865 r = set_put(pid_set, PID_TO_PTR(main_pid));
3866 if (r < 0)
3867 goto fail;
3868 }
3869
3870 if (control_pid > 0) {
3871 r = set_put(pid_set, PID_TO_PTR(control_pid));
3872 if (r < 0)
3873 goto fail;
3874 }
3875
3876 return pid_set;
3877
3878 fail:
3879 set_free(pid_set);
3880 return NULL;
3881 }
3882
3883 int unit_kill_common(
3884 Unit *u,
3885 KillWho who,
3886 int signo,
3887 pid_t main_pid,
3888 pid_t control_pid,
3889 sd_bus_error *error) {
3890
3891 int r = 0;
3892 bool killed = false;
3893
3894 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3895 if (main_pid < 0)
3896 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3897 else if (main_pid == 0)
3898 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3899 }
3900
3901 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3902 if (control_pid < 0)
3903 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3904 else if (control_pid == 0)
3905 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3906 }
3907
3908 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3909 if (control_pid > 0) {
3910 if (kill(control_pid, signo) < 0)
3911 r = -errno;
3912 else
3913 killed = true;
3914 }
3915
3916 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3917 if (main_pid > 0) {
3918 if (kill(main_pid, signo) < 0)
3919 r = -errno;
3920 else
3921 killed = true;
3922 }
3923
3924 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3925 _cleanup_set_free_ Set *pid_set = NULL;
3926 int q;
3927
3928 /* Exclude the main/control pids from being killed via the cgroup */
3929 pid_set = unit_pid_set(main_pid, control_pid);
3930 if (!pid_set)
3931 return -ENOMEM;
3932
3933 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3934 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3935 r = q;
3936 else
3937 killed = true;
3938 }
3939
3940 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3941 return -ESRCH;
3942
3943 return r;
3944 }
3945
3946 int unit_following_set(Unit *u, Set **s) {
3947 assert(u);
3948 assert(s);
3949
3950 if (UNIT_VTABLE(u)->following_set)
3951 return UNIT_VTABLE(u)->following_set(u, s);
3952
3953 *s = NULL;
3954 return 0;
3955 }
3956
3957 UnitFileState unit_get_unit_file_state(Unit *u) {
3958 int r;
3959
3960 assert(u);
3961
3962 if (u->unit_file_state < 0 && u->fragment_path) {
3963 r = unit_file_get_state(
3964 u->manager->unit_file_scope,
3965 NULL,
3966 u->id,
3967 &u->unit_file_state);
3968 if (r < 0)
3969 u->unit_file_state = UNIT_FILE_BAD;
3970 }
3971
3972 return u->unit_file_state;
3973 }
3974
3975 int unit_get_unit_file_preset(Unit *u) {
3976 assert(u);
3977
3978 if (u->unit_file_preset < 0 && u->fragment_path)
3979 u->unit_file_preset = unit_file_query_preset(
3980 u->manager->unit_file_scope,
3981 NULL,
3982 basename(u->fragment_path));
3983
3984 return u->unit_file_preset;
3985 }
3986
3987 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3988 assert(ref);
3989 assert(source);
3990 assert(target);
3991
3992 if (ref->target)
3993 unit_ref_unset(ref);
3994
3995 ref->source = source;
3996 ref->target = target;
3997 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3998 return target;
3999 }
4000
4001 void unit_ref_unset(UnitRef *ref) {
4002 assert(ref);
4003
4004 if (!ref->target)
4005 return;
4006
4007 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4008 * be unreferenced now. */
4009 unit_add_to_gc_queue(ref->target);
4010
4011 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4012 ref->source = ref->target = NULL;
4013 }
4014
4015 static int user_from_unit_name(Unit *u, char **ret) {
4016
4017 static const uint8_t hash_key[] = {
4018 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4019 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4020 };
4021
4022 _cleanup_free_ char *n = NULL;
4023 int r;
4024
4025 r = unit_name_to_prefix(u->id, &n);
4026 if (r < 0)
4027 return r;
4028
4029 if (valid_user_group_name(n)) {
4030 *ret = n;
4031 n = NULL;
4032 return 0;
4033 }
4034
4035 /* If we can't use the unit name as a user name, then let's hash it and use that */
4036 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4037 return -ENOMEM;
4038
4039 return 0;
4040 }
4041
/* Applies manager-wide defaults and implied settings to the unit's exec and cgroup
 * contexts. Must run after all explicit settings have been parsed, since it only
 * fills in what the unit did not configure itself. Returns 0 or a negative errno
 * (allocation / home-dir lookup failures only). */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User-session services default to running in $HOME */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the caps for creating device nodes and raw I/O */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Synthesize user/group names from the unit name if unset */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* Mirror PrivateDevices= into the cgroup device policy unless configured explicitly */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4117
4118 ExecContext *unit_get_exec_context(Unit *u) {
4119 size_t offset;
4120 assert(u);
4121
4122 if (u->type < 0)
4123 return NULL;
4124
4125 offset = UNIT_VTABLE(u)->exec_context_offset;
4126 if (offset <= 0)
4127 return NULL;
4128
4129 return (ExecContext*) ((uint8_t*) u + offset);
4130 }
4131
4132 KillContext *unit_get_kill_context(Unit *u) {
4133 size_t offset;
4134 assert(u);
4135
4136 if (u->type < 0)
4137 return NULL;
4138
4139 offset = UNIT_VTABLE(u)->kill_context_offset;
4140 if (offset <= 0)
4141 return NULL;
4142
4143 return (KillContext*) ((uint8_t*) u + offset);
4144 }
4145
4146 CGroupContext *unit_get_cgroup_context(Unit *u) {
4147 size_t offset;
4148
4149 if (u->type < 0)
4150 return NULL;
4151
4152 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4153 if (offset <= 0)
4154 return NULL;
4155
4156 return (CGroupContext*) ((uint8_t*) u + offset);
4157 }
4158
4159 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4160 size_t offset;
4161
4162 if (u->type < 0)
4163 return NULL;
4164
4165 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4166 if (offset <= 0)
4167 return NULL;
4168
4169 return *(ExecRuntime**) ((uint8_t*) u + offset);
4170 }
4171
4172 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4173 assert(u);
4174
4175 if (UNIT_WRITE_FLAGS_NOOP(flags))
4176 return NULL;
4177
4178 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4179 return u->manager->lookup_paths.transient;
4180
4181 if (flags & UNIT_PERSISTENT)
4182 return u->manager->lookup_paths.persistent_control;
4183
4184 if (flags & UNIT_RUNTIME)
4185 return u->manager->lookup_paths.runtime_control;
4186
4187 return NULL;
4188 }
4189
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                /* Continue escaping from the intermediate copy */
                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                /* Drop the specifier-escaped intermediate (free(NULL) is a no-op) */
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* ret is NULL here iff no escaping was performed; then hand back the input as-is */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No out-buffer: caller always owns the result, so duplicate when nothing was escaped */
        return ret ?: strdup(s);
}
4229
/* Escapes each entry of @l per @flags, double-quotes it, and joins all entries with
 * single spaces. Returns a newly allocated string (empty string for an empty list),
 * or NULL on allocation failure. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i, *ret;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append at the current end of the accumulated string */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure room for the trailing NUL even for an empty input list */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        /* Transfer ownership out of the cleanup variable */
        ret = result;
        result = NULL;

        return ret;
}
4273
/* Persists a single setting for @u: while a transient unit file is being created the
 * data is appended to it (with section headers deduplicated); otherwise a 50-<name>
 * drop-in file is written in the directory selected by @flags. @data is escaped per
 * @flags first. Returns 0 on success (including the no-op flags case), negative
 * errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private: < 0 = nothing written yet, 0 = [Unit] was last, > 0 = private section was last */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p = drop-in directory path, q = drop-in file path */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Record the new drop-in path on the unit; ownership moves to the strv */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        /* Remember when we wrote, so unit_need_daemon_reload() doesn't trigger spuriously */
        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4350
4351 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4352 _cleanup_free_ char *p = NULL;
4353 va_list ap;
4354 int r;
4355
4356 assert(u);
4357 assert(name);
4358 assert(format);
4359
4360 if (UNIT_WRITE_FLAGS_NOOP(flags))
4361 return 0;
4362
4363 va_start(ap, format);
4364 r = vasprintf(&p, format, ap);
4365 va_end(ap);
4366
4367 if (r < 0)
4368 return -ENOMEM;
4369
4370 return unit_write_setting(u, flags, name, p);
4371 }
4372
4373 int unit_make_transient(Unit *u) {
4374 _cleanup_free_ char *path = NULL;
4375 FILE *f;
4376
4377 assert(u);
4378
4379 if (!UNIT_VTABLE(u)->can_transient)
4380 return -EOPNOTSUPP;
4381
4382 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4383
4384 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
4385 if (!path)
4386 return -ENOMEM;
4387
4388 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4389 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4390
4391 RUN_WITH_UMASK(0022) {
4392 f = fopen(path, "we");
4393 if (!f)
4394 return -errno;
4395 }
4396
4397 safe_fclose(u->transient_file);
4398 u->transient_file = f;
4399
4400 free_and_replace(u->fragment_path, path);
4401
4402 u->source_path = mfree(u->source_path);
4403 u->dropin_paths = strv_free(u->dropin_paths);
4404 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4405
4406 u->load_state = UNIT_STUB;
4407 u->load_error = 0;
4408 u->transient = true;
4409
4410 unit_add_to_dbus_queue(u);
4411 unit_add_to_gc_queue(u);
4412
4413 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4414 u->transient_file);
4415
4416 return 0;
4417 }
4418
4419 static void log_kill(pid_t pid, int sig, void *userdata) {
4420 _cleanup_free_ char *comm = NULL;
4421
4422 (void) get_process_comm(pid, &comm);
4423
4424 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4425 only, like for example systemd's own PAM stub process. */
4426 if (comm && comm[0] == '(')
4427 return;
4428
4429 log_unit_notice(userdata,
4430 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4431 pid,
4432 strna(comm),
4433 signal_to_string(sig));
4434 }
4435
4436 static int operation_to_signal(KillContext *c, KillOperation k) {
4437 assert(c);
4438
4439 switch (k) {
4440
4441 case KILL_TERMINATE:
4442 case KILL_TERMINATE_AND_LOG:
4443 return c->kill_signal;
4444
4445 case KILL_KILL:
4446 return SIGKILL;
4447
4448 case KILL_ABORT:
4449 return SIGABRT;
4450
4451 default:
4452 assert_not_reached("KillOperation unknown");
4453 }
4454 }
4455
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= only applies to terminate operations, and is pointless if the
         * signal we are sending already is SIGHUP */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each kill, except for a plain first-round SIGTERM */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves; alien main PIDs are not our children */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Kill the rest of the cgroup for control-group mode, or for mixed mode in the final (SIGKILL) round */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we
                         * will not wait for the cgroup members to die
                         * if we are running in a container or if this
                         * is a delegation unit, simply because cgroup
                         * notification is unreliable in these
                         * cases. It doesn't work at all in
                         * containers, and outside of containers it
                         * can be confused easily by left-over
                         * directories in the cgroup — which however
                         * should not exist in non-delegated units. On
                         * the unified hierarchy that's different,
                         * there we get proper events. Hence rely on
                         * them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !UNIT_CGROUP_BOOL(u, delegate)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set, since cg_kill_recursive() consumed entries from it */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                /* Best-effort: deliberately ignore errors here */
                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4580
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        char prefix[strlen(path) + 1], *p;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &string_hash_ops);
        if (r < 0)
                return r;

        /* Work on a normalized private copy; it is freed manually on every error path
         * below since ownership moves into the hashmap on success. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path_kill_slashes(p);

        /* Refuse paths with "." / ".." components — they cannot be matched against mount units */
        if (!path_is_normalized(p)) {
                free(p);
                return -EPERM;
        }

        /* Already registered? */
        if (hashmap_contains(u->requires_mounts_for, p)) {
                free(p);
                return 0;
        }

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of p */
        r = hashmap_put(u->requires_mounts_for, p, di.data);
        if (r < 0) {
                free(p);
                return r;
        }

        /* Register this unit in the manager's reverse map for every prefix of the path
         * (e.g. /a/b/c also registers under /a/b, /a and /), so mount units appearing
         * later can find us. */
        PATH_FOREACH_PREFIX_MORE(prefix, p) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        char *q;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x) {
                                free(q);
                                return -ENOMEM;
                        }

                        /* The manager hashmap takes ownership of both the key q and the set x */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                free(q);
                                set_free(x);
                                return r;
                        }
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4663
4664 int unit_setup_exec_runtime(Unit *u) {
4665 ExecRuntime **rt;
4666 size_t offset;
4667 Unit *other;
4668 Iterator i;
4669 void *v;
4670 int r;
4671
4672 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4673 assert(offset > 0);
4674
4675 /* Check if there already is an ExecRuntime for this unit? */
4676 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4677 if (*rt)
4678 return 0;
4679
4680 /* Try to get it from somebody else */
4681 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4682 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4683 if (r == 1)
4684 return 1;
4685 }
4686
4687 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4688 }
4689
/* Acquires a DynamicCreds object for this unit if its exec context enables DynamicUser=.
 * Returns 0 when dynamic users are not in use, otherwise the result of dynamic_creds_acquire(). */
int unit_setup_dynamic_creds(Unit *u) {
        ExecContext *ec;
        DynamicCreds *dcreds;
        size_t offset;

        assert(u);

        /* Locate the type-specific DynamicCreds member via the vtable-provided offset */
        offset = UNIT_VTABLE(u)->dynamic_creds_offset;
        assert(offset > 0);
        dcreds = (DynamicCreds*) ((uint8_t*) u + offset);

        ec = unit_get_exec_context(u);
        assert(ec);

        if (!ec->dynamic_user)
                return 0;

        return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
}
4709
4710 bool unit_type_supported(UnitType t) {
4711 if (_unlikely_(t < 0))
4712 return false;
4713 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4714 return false;
4715
4716 if (!unit_vtable[t]->supported)
4717 return true;
4718
4719 return unit_vtable[t]->supported();
4720 }
4721
/* Logs a notice if the directory we are about to mount over is not empty. Best-effort: check
 * failures are logged as warnings and otherwise ignored. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = dir_is_empty(where);
        if (r > 0 || r == -ENOTDIR) /* empty, or not a directory at all: nothing to warn about */
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where,
                   NULL);
}
4744
4745 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4746 _cleanup_free_ char *canonical_where;
4747 int r;
4748
4749 assert(u);
4750 assert(where);
4751
4752 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4753 if (r < 0) {
4754 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4755 return 0;
4756 }
4757
4758 /* We will happily ignore a trailing slash (or any redundant slashes) */
4759 if (path_equal(where, canonical_where))
4760 return 0;
4761
4762 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4763 log_struct(LOG_ERR,
4764 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4765 LOG_UNIT_ID(u),
4766 LOG_UNIT_INVOCATION_ID(u),
4767 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4768 "WHERE=%s", where,
4769 NULL);
4770
4771 return -ELOOP;
4772 }
4773
4774 bool unit_is_pristine(Unit *u) {
4775 assert(u);
4776
4777 /* Check if the unit already exists or is already around,
4778 * in a number of different ways. Note that to cater for unit
4779 * types such as slice, we are generally fine with units that
4780 * are marked UNIT_LOADED even though nothing was
4781 * actually loaded, as those unit types don't require a file
4782 * on disk to validly load. */
4783
4784 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4785 u->fragment_path ||
4786 u->source_path ||
4787 !strv_isempty(u->dropin_paths) ||
4788 u->job ||
4789 u->merged_into);
4790 }
4791
4792 pid_t unit_control_pid(Unit *u) {
4793 assert(u);
4794
4795 if (UNIT_VTABLE(u)->control_pid)
4796 return UNIT_VTABLE(u)->control_pid(u);
4797
4798 return 0;
4799 }
4800
4801 pid_t unit_main_pid(Unit *u) {
4802 assert(u);
4803
4804 if (UNIT_VTABLE(u)->main_pid)
4805 return UNIT_VTABLE(u)->main_pid(u);
4806
4807 return 0;
4808 }
4809
/* Shared implementation behind unit_unref_uid() and unit_unref_gid(): drops the unit's reference
 * on *ref_uid (if valid) via the supplied manager callback and invalidates the stored value. */
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same time, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4834
/* Drops the unit's UID reference; if destroy_now, the UID's IPC objects may be cleaned up immediately. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4838
/* Drops the unit's GID reference; the uid_t* cast is safe as the internal helper static-asserts
 * that uid_t and gid_t have identical size and invalid value. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4842
/* Shared implementation behind unit_ref_uid() and unit_ref_gid(): records a reference on 'uid' in
 * *ref_uid via the supplied manager callback. Returns 1 if a new reference was taken, 0 if it was
 * already held, -EBUSY if a different UID/GID is already referenced, or a manager error. */
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4880
/* Takes a reference on 'uid' for this unit; see unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4884
/* Takes a reference on 'gid' for this unit; the casts are safe as uid_t and gid_t are
 * static-asserted to be layout-compatible in the internal helper. */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4888
/* References both a UID and a GID in one go; either both references are taken or neither.
 * Invalid (unset) UID/GID values are skipped. Returns > 0 if at least one new reference was
 * taken, 0 if nothing changed, negative errno on failure. */
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, so failure leaves no half-state */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
4914
/* Public wrapper around unit_ref_uid_gid_internal() that derives the clean_ipc flag from the
 * unit's exec context (RemoveIPC=). Failures are logged and reported but non-fatal to callers
 * that choose to ignore them. */
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        return r;
}
4929
/* Drops both the UID and GID references of the unit in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4936
/* Called when a forked-off process reports the UID/GID its user/group names resolved to. */
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4950
/* Installs 'id' as the unit's invocation ID and indexes the unit by it in the manager's
 * units_by_invocation_id hashmap. Passing a null ID clears the current one. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old hashmap entry before installing the new ID */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Must be set before hashmap_put(), which keys on &u->invocation_id */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On any failure (or when clearing), leave the unit with no invocation ID at all */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4987
4988 int unit_acquire_invocation_id(Unit *u) {
4989 sd_id128_t id;
4990 int r;
4991
4992 assert(u);
4993
4994 r = sd_id128_randomize(&id);
4995 if (r < 0)
4996 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4997
4998 r = unit_set_invocation_id(u, id);
4999 if (r < 0)
5000 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5001
5002 return 0;
5003 }
5004
/* Copies unit-level execution settings (cgroup path, delegation flag) into the ExecParameters
 * that will be handed to a forked-off process. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, UNIT_CGROUP_BOOL(u, delegate));
}
5012
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup exists before the child tries to join it */
        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* --- child only from here on --- */

        /* Restore default signal dispositions and make sure we die with PID 1 */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5043
/* Updates (or removes, if both masks dropped to zero) the dependency-ownership entry for 'other'
 * in u->dependencies[d]. The entry is expected to exist; assert_se enforces that. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
5058
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* Restart iteration after each removal, since unit_update_dependency_mask() may
                 * modify the hashmap we are iterating — mutating under HASHMAP_FOREACH_KEY would
                 * invalidate the iterator. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask has no bits in 'mask' */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may now be eligible for garbage collection */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5112
/* Exports the unit's invocation ID as a symlink under /run/systemd/units/ so journald can read it
 * cheaply. No-op if already exported or no invocation ID is set. */
static int unit_export_invocation_id(Unit *u) {
        const char *p;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        p = strjoina("/run/systemd/units/invocation:", u->id);
        r = symlink_atomic(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5133
/* Exports the unit's LogLevelMax= setting (a single digit 0..7) as a symlink under
 * /run/systemd/units/. No-op if already exported or the setting is unset (< 0). */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_level_max)
                return 0;

        if (c->log_level_max < 0)
                return 0;

        assert(c->log_level_max <= 7);

        /* Encode the level as a one-character string */
        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5161
/* Serializes the unit's LogExtraFields= entries (as length-prefixed records: le64 size followed by
 * the field data) into /run/systemd/units/log-extra-fields:<unit>, written via a temp file and
 * atomic rename. No-op if already exported or no extra fields are configured. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        size_t i;
        int r;

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Stack-allocated scratch: one size prefix plus one data iovec per field */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        /* NOTE(review): only n < 0 is treated as failure; a short write (n < total bytes) would
         * silently leave a truncated file — confirm whether that can occur here. */
        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        /* Atomically replace any previous export */
        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
5215
/* Best-effort export of unit properties to /run/systemd/units/ for journald's benefit.
 * Only done for the system instance and units with an ID. */
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5248
5249 void unit_unlink_state_files(Unit *u) {
5250 const char *p;
5251
5252 assert(u);
5253
5254 if (!u->id)
5255 return;
5256
5257 if (!MANAGER_IS_SYSTEM(u->manager))
5258 return;
5259
5260 /* Undoes the effect of unit_export_state() */
5261
5262 if (u->exported_invocation_id) {
5263 p = strjoina("/run/systemd/units/invocation:", u->id);
5264 (void) unlink(p);
5265
5266 u->exported_invocation_id = false;
5267 }
5268
5269 if (u->exported_log_level_max) {
5270 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5271 (void) unlink(p);
5272
5273 u->exported_log_level_max = false;
5274 }
5275
5276 if (u->exported_log_extra_fields) {
5277 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5278 (void) unlink(p);
5279
5280 u->exported_log_extra_fields = false;
5281 }
5282 }
5283
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Apply any pending accounting reset before the new process starts */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5311
/* cg_kill_recursive() callback: warns about processes found lingering in the unit's cgroup before
 * the unit is started. 'userdata' is the Unit. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5325
/* Walks the unit's cgroup (signal 0, i.e. no actual kill) and logs a warning for every left-over
 * process found via the log_leftover() callback. */
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5336
5337 bool unit_needs_console(Unit *u) {
5338 ExecContext *ec;
5339 UnitActiveState state;
5340
5341 assert(u);
5342
5343 state = unit_active_state(u);
5344
5345 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5346 return false;
5347
5348 if (UNIT_VTABLE(u)->needs_console)
5349 return UNIT_VTABLE(u)->needs_console(u);
5350
5351 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5352 ec = unit_get_exec_context(u);
5353 if (!ec)
5354 return false;
5355
5356 return exec_context_may_touch_console(ec);
5357 }
5358
5359 const char *unit_label_path(Unit *u) {
5360 const char *p;
5361
5362 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5363 * when validating access checks. */
5364
5365 p = u->source_path ?: u->fragment_path;
5366 if (!p)
5367 return NULL;
5368
5369 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5370 if (path_equal(p, "/dev/null"))
5371 return NULL;
5372
5373 return p;
5374 }
5375
/* Maps CollectMode enum values to their configuration-file string names. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

/* Generates collect_mode_to_string() / collect_mode_from_string() from the table above. */
DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);