/* Source: thirdparty/systemd.git, src/core/unit.c (captured via git.ipfire.org gitweb;
 * page title at capture time: "bootspec: fix debug message about default entry"). */
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/stat.h>
25 #include <unistd.h>
26
27 #include "sd-id128.h"
28 #include "sd-messages.h"
29
30 #include "alloc-util.h"
31 #include "bus-common-errors.h"
32 #include "bus-util.h"
33 #include "cgroup-util.h"
34 #include "dbus-unit.h"
35 #include "dbus.h"
36 #include "dropin.h"
37 #include "escape.h"
38 #include "execute.h"
39 #include "fd-util.h"
40 #include "fileio-label.h"
41 #include "format-util.h"
42 #include "fs-util.h"
43 #include "id128-util.h"
44 #include "io-util.h"
45 #include "load-dropin.h"
46 #include "load-fragment.h"
47 #include "log.h"
48 #include "macro.h"
49 #include "missing.h"
50 #include "mkdir.h"
51 #include "parse-util.h"
52 #include "path-util.h"
53 #include "process-util.h"
54 #include "set.h"
55 #include "signal-util.h"
56 #include "sparse-endian.h"
57 #include "special.h"
58 #include "specifier.h"
59 #include "stat-util.h"
60 #include "stdio-util.h"
61 #include "string-table.h"
62 #include "string-util.h"
63 #include "strv.h"
64 #include "umask-util.h"
65 #include "unit-name.h"
66 #include "unit.h"
67 #include "user-util.h"
68 #include "virt.h"
69
/* Dispatch table mapping each UnitType to the vtable implementing that type's
 * operations. Indexed directly by UnitType; looked up via UNIT_VTABLE(). */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
83
84 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
85
/* Allocates and zero-initializes a new Unit object of 'size' bytes (concrete
 * unit types embed Unit as their first member, hence size >= sizeof(Unit)),
 * then fills in all fields whose neutral value is not zero.
 * Returns NULL on allocation failure. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u); /* name set allocation failed; free u, return NULL */

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID; /* real type is set later, when the first name is added */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* BPF map fds start out closed (-1 = no fd) */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limit follows manager-wide defaults; auto-stop ratelimit is fixed at 16 per 10s */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
128
129 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
130 Unit *u;
131 int r;
132
133 u = unit_new(m, size);
134 if (!u)
135 return -ENOMEM;
136
137 r = unit_add_name(u, name);
138 if (r < 0) {
139 unit_free(u);
140 return r;
141 }
142
143 *ret = u;
144 return r;
145 }
146
147 bool unit_has_name(Unit *u, const char *name) {
148 assert(u);
149 assert(name);
150
151 return set_contains(u->names, (char*) name);
152 }
153
154 static void unit_init(Unit *u) {
155 CGroupContext *cc;
156 ExecContext *ec;
157 KillContext *kc;
158
159 assert(u);
160 assert(u->manager);
161 assert(u->type >= 0);
162
163 cc = unit_get_cgroup_context(u);
164 if (cc) {
165 cgroup_context_init(cc);
166
167 /* Copy in the manager defaults into the cgroup
168 * context, _before_ the rest of the settings have
169 * been initialized */
170
171 cc->cpu_accounting = u->manager->default_cpu_accounting;
172 cc->io_accounting = u->manager->default_io_accounting;
173 cc->ip_accounting = u->manager->default_ip_accounting;
174 cc->blockio_accounting = u->manager->default_blockio_accounting;
175 cc->memory_accounting = u->manager->default_memory_accounting;
176 cc->tasks_accounting = u->manager->default_tasks_accounting;
177 cc->ip_accounting = u->manager->default_ip_accounting;
178
179 if (u->type != UNIT_SLICE)
180 cc->tasks_max = u->manager->default_tasks_max;
181 }
182
183 ec = unit_get_exec_context(u);
184 if (ec) {
185 exec_context_init(ec);
186
187 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
188 EXEC_KEYRING_PRIVATE : EXEC_KEYRING_INHERIT;
189 }
190
191 kc = unit_get_kill_context(u);
192 if (kc)
193 kill_context_init(kc);
194
195 if (UNIT_VTABLE(u)->init)
196 UNIT_VTABLE(u)->init(u);
197 }
198
/* Registers an additional name (alias) for this unit and makes it resolvable
 * through the manager's lookup table. Template names ("foo@.service") are
 * first instantiated with this unit's instance string. The first name added
 * to a fresh unit also fixes its type, id and instance and triggers
 * type-specific initialization.
 *
 * Returns 0 on success (or if the name was already attached to this unit),
 * -EEXIST if the name belongs to another unit or aliasing is not permitted
 * for the type, -EINVAL on invalid or mismatching names, -E2BIG when the
 * manager-wide name limit is reached, or other negative errno on OOM. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be added to an instantiated unit */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Name already ours? Nothing to do. Taken by somebody else? Refuse. */
        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both tables stay consistent */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name ever: it determines type, id and instance */
                u->type = t;
                u->id = s;
                u->instance = i;

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);

                i = NULL; /* ownership moved to u->instance, don't auto-free */
        }

        s = NULL; /* ownership moved to u->names (and possibly u->id) */

        unit_add_to_dbus_queue(u);
        return 0;
}
283
/* Makes 'name' — which must already be one of this unit's registered names —
 * the unit's primary id and updates the instance string accordingly. Template
 * names are instantiated first. Returns -ENOENT if the name is not registered
 * for this unit, -EINVAL for a template name on a non-instantiated unit, or
 * other negative errno on failure. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s; /* not a copy — points into u->names, which owns the string */

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
323
324 int unit_set_description(Unit *u, const char *description) {
325 int r;
326
327 assert(u);
328
329 r = free_and_strdup(&u->description, empty_to_null(description));
330 if (r < 0)
331 return r;
332 if (r > 0)
333 unit_add_to_dbus_queue(u);
334
335 return 0;
336 }
337
/* GC predicate: returns true when the unit shall stay loaded, false when it
 * may be collected. Note the side effect: for inactive/failed units without
 * jobs, type-specific runtime resources are released here. */
bool unit_check_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true, when the unit shall
         * stay around, false if there's no reason to keep it loaded. */

        /* Pending jobs always pin the unit */
        if (u->job)
                return true;

        if (u->nop_job)
                return true;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected */
        if (u->perpetual)
                return true;

        /* Neither are units still referenced by others, or by bus clients */
        if (u->refs)
                return true;

        if (sd_bus_track_count(u->bus_track) > 0)
                return true;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return true;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return true;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0) /* on error (r < 0), err on the side of keeping the unit */
                        return true;
        }

        /* Finally, give the unit type a chance to veto collection */
        if (UNIT_VTABLE(u)->check_gc)
                if (UNIT_VTABLE(u)->check_gc(u))
                        return true;

        return false;
}
405
406 void unit_add_to_load_queue(Unit *u) {
407 assert(u);
408 assert(u->type != _UNIT_TYPE_INVALID);
409
410 if (u->load_state != UNIT_STUB || u->in_load_queue)
411 return;
412
413 LIST_PREPEND(load_queue, u->manager->load_queue, u);
414 u->in_load_queue = true;
415 }
416
417 void unit_add_to_cleanup_queue(Unit *u) {
418 assert(u);
419
420 if (u->in_cleanup_queue)
421 return;
422
423 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
424 u->in_cleanup_queue = true;
425 }
426
427 void unit_add_to_gc_queue(Unit *u) {
428 assert(u);
429
430 if (u->in_gc_queue || u->in_cleanup_queue)
431 return;
432
433 if (unit_check_gc(u))
434 return;
435
436 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
437 u->in_gc_queue = true;
438 }
439
440 void unit_add_to_dbus_queue(Unit *u) {
441 assert(u);
442 assert(u->type != _UNIT_TYPE_INVALID);
443
444 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
445 return;
446
447 /* Shortcut things if nobody cares */
448 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
449 sd_bus_track_count(u->bus_track) <= 0 &&
450 set_isempty(u->manager->private_buses)) {
451 u->sent_dbus_new_signal = true;
452 return;
453 }
454
455 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
456 u->in_dbus_queue = true;
457 }
458
/* Frees one of the unit's dependency hashmaps 'h', first removing the reverse
 * pointers to 'u' from every referenced unit so no dangling entries remain.
 * Each affected unit is queued for GC, since losing a dependency may make it
 * collectable. */
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Drop 'u' from every dependency set of the peer, regardless of type */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
479
/* Deletes the on-disk configuration of a transient unit: its fragment file
 * and any drop-ins that live below the transient lookup path (plus their
 * now-empty per-unit directories). No-op for non-transient units. All
 * unlink/rmdir failures are deliberately ignored (best effort). */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* fails harmlessly if the directory is not yet empty */
        }
}
510
/* Empties and frees u->requires_mounts_for, also detaching the unit from the
 * manager's reverse index (units_requiring_mounts_for), which maps every path
 * prefix of each required mount path to the set of units requiring it. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1]; /* VLA scratch buffer, large enough for every prefix */

                        /* Walk all prefixes of 'path' ("/", "/foo", "/foo/bar", ...) */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y receives the hashmap's own copy of the key, needed for freeing below */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* We were the last unit for this prefix: drop the whole entry */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
544
545 static void unit_done(Unit *u) {
546 ExecContext *ec;
547 CGroupContext *cc;
548
549 assert(u);
550
551 if (u->type < 0)
552 return;
553
554 if (UNIT_VTABLE(u)->done)
555 UNIT_VTABLE(u)->done(u);
556
557 ec = unit_get_exec_context(u);
558 if (ec)
559 exec_context_done(ec);
560
561 cc = unit_get_cgroup_context(u);
562 if (cc)
563 cgroup_context_done(cc);
564 }
565
/* Destroys a unit: detaches it from the manager (name tables, type lists,
 * every work queue), cancels its jobs, drops all dependencies in both
 * directions, releases cgroup/BPF resources and frees all owned memory.
 * Safe to call with NULL. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* During a daemon reload, transient unit files must survive on disk */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Remove all of our names from the manager's lookup table */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any pending jobs */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Drop all dependencies, fixing up the reverse pointers in peers */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Dequeue from every work queue we might be enqueued in */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        unit_release_cgroup(u);

        /* Like the transient config above, state files survive reloads */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names); /* also frees u->id: it points into this set (see unit_add_name) */

        unit_unwatch_all_pids(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->reboot_arg);

        unit_ref_unset(&u->slice);

        /* Detach everybody who still holds a reference to us */
        while (u->refs)
                unit_ref_unset(u->refs);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_egress);

        free(u);
}
678
679 UnitActiveState unit_active_state(Unit *u) {
680 assert(u);
681
682 if (u->load_state == UNIT_MERGED)
683 return unit_active_state(unit_follow_merge(u));
684
685 /* After a reload it might happen that a unit is not correctly
686 * loaded but still has a process around. That's why we won't
687 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
688
689 return UNIT_VTABLE(u)->active_state(u);
690 }
691
692 const char* unit_sub_state_to_string(Unit *u) {
693 assert(u);
694
695 return UNIT_VTABLE(u)->sub_state_to_string(u);
696 }
697
698 static int set_complete_move(Set **s, Set **other) {
699 assert(s);
700 assert(other);
701
702 if (!other)
703 return 0;
704
705 if (*s)
706 return set_move(*s, *other);
707 else {
708 *s = *other;
709 *other = NULL;
710 }
711
712 return 0;
713 }
714
715 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
716 assert(s);
717 assert(other);
718
719 if (!*other)
720 return 0;
721
722 if (*s)
723 return hashmap_move(*s, *other);
724 else {
725 *s = *other;
726 *other = NULL;
727 }
728
729 return 0;
730 }
731
/* Transfers all names of 'other' to 'u' and repoints the manager's name
 * lookup table at 'u'. Afterwards 'other' has no names and no id anymore.
 * Returns 0 on success, negative errno on OOM. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Free whatever (now-empty) shell of the set is left over */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Make every one of our names resolve to 'u' in the manager's table */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
753
754 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
755 unsigned n_reserve;
756
757 assert(u);
758 assert(other);
759 assert(d < _UNIT_DEPENDENCY_MAX);
760
761 /*
762 * If u does not have this dependency set allocated, there is no need
763 * to reserve anything. In that case other's set will be transferred
764 * as a whole to u by complete_move().
765 */
766 if (!u->dependencies[d])
767 return 0;
768
769 /* merge_dependencies() will skip a u-on-u dependency */
770 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
771
772 return hashmap_reserve(u->dependencies[d], n_reserve);
773 }
774
/* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u',
 * fixing up reverse pointers in third-party units so they reference 'u' instead of
 * 'other'. Where both units already had an edge to the same peer, the origin/destination
 * bit masks are OR-ed together. The caller must have called reserve_dependencies()
 * beforehand, so the final complete_move() cannot fail. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                /* OR the masks of the existing u-edge (if any) and the other-edge being moved */
                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
838
/* Merges unit 'other' into 'u': transfers names, references and dependencies,
 * then marks 'other' as merged and queues it for cleanup. Both units must be
 * of the same (alias-capable) type and 'other' must be idle — no jobs, not
 * active, and still a stub or not-found. Returns 0 on success (including the
 * trivial case other == u), -EINVAL/-EEXIST when merging is not possible. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both must be instantiated, or both not */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Stack copy of the id: the original string is freed during merge_names() */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs)
                unit_ref_set(other->refs, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was just set to UNIT_MERGED above, so this
         * condition is always true; presumably it was meant to test the state
         * from before the assignment — confirm intent upstream. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
916
917 int unit_merge_by_name(Unit *u, const char *name) {
918 _cleanup_free_ char *s = NULL;
919 Unit *other;
920 int r;
921
922 assert(u);
923 assert(name);
924
925 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
926 if (!u->instance)
927 return -EINVAL;
928
929 r = unit_name_replace_instance(name, u->instance, &s);
930 if (r < 0)
931 return r;
932
933 name = s;
934 }
935
936 other = manager_get_unit(u->manager, name);
937 if (other)
938 return unit_merge(u, other);
939
940 return unit_add_name(u, name);
941 }
942
943 Unit* unit_follow_merge(Unit *u) {
944 assert(u);
945
946 while (u->load_state == UNIT_MERGED)
947 assert_se(u = u->merged_into);
948
949 return u;
950 }
951
/* Adds the implicit dependencies an ExecContext implies for its unit:
 * RequiresMountsFor= on working/root directories, root image and configured
 * runtime/state/cache/log directories; for system-manager units additionally
 * tmpfiles-setup ordering when PrivateTmp= is on, and journald-socket
 * ordering when output goes to the journal/kmsg/syslog.
 * Returns 0 on success, negative errno on failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require mounts for each configured exec directory, resolved against
         * the manager's per-type prefix (e.g. state dirs under /var/lib) */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system instance */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1031
1032 const char *unit_description(Unit *u) {
1033 assert(u);
1034
1035 if (u->description)
1036 return u->description;
1037
1038 return strna(u->id);
1039 }
1040
1041 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1042 const struct {
1043 UnitDependencyMask mask;
1044 const char *name;
1045 } table[] = {
1046 { UNIT_DEPENDENCY_FILE, "file" },
1047 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1048 { UNIT_DEPENDENCY_DEFAULT, "default" },
1049 { UNIT_DEPENDENCY_UDEV, "udev" },
1050 { UNIT_DEPENDENCY_PATH, "path" },
1051 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1052 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1053 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1054 };
1055 size_t i;
1056
1057 assert(f);
1058 assert(kind);
1059 assert(space);
1060
1061 for (i = 0; i < ELEMENTSOF(table); i++) {
1062
1063 if (mask == 0)
1064 break;
1065
1066 if ((mask & table[i].mask) == table[i].mask) {
1067 if (*space)
1068 fputc(' ', f);
1069 else
1070 *space = true;
1071
1072 fputs(kind, f);
1073 fputs("-", f);
1074 fputs(table[i].name, f);
1075
1076 mask &= ~table[i].mask;
1077 }
1078 }
1079
1080 assert(mask == 0);
1081 }
1082
1083 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1084 char *t, **j;
1085 UnitDependency d;
1086 Iterator i;
1087 const char *prefix2;
1088 char
1089 timestamp0[FORMAT_TIMESTAMP_MAX],
1090 timestamp1[FORMAT_TIMESTAMP_MAX],
1091 timestamp2[FORMAT_TIMESTAMP_MAX],
1092 timestamp3[FORMAT_TIMESTAMP_MAX],
1093 timestamp4[FORMAT_TIMESTAMP_MAX],
1094 timespan[FORMAT_TIMESPAN_MAX];
1095 Unit *following;
1096 _cleanup_set_free_ Set *following_set = NULL;
1097 const char *n;
1098 CGroupMask m;
1099 int r;
1100
1101 assert(u);
1102 assert(u->type >= 0);
1103
1104 prefix = strempty(prefix);
1105 prefix2 = strjoina(prefix, "\t");
1106
1107 fprintf(f,
1108 "%s-> Unit %s:\n"
1109 "%s\tDescription: %s\n"
1110 "%s\tInstance: %s\n"
1111 "%s\tUnit Load State: %s\n"
1112 "%s\tUnit Active State: %s\n"
1113 "%s\tState Change Timestamp: %s\n"
1114 "%s\tInactive Exit Timestamp: %s\n"
1115 "%s\tActive Enter Timestamp: %s\n"
1116 "%s\tActive Exit Timestamp: %s\n"
1117 "%s\tInactive Enter Timestamp: %s\n"
1118 "%s\tGC Check Good: %s\n"
1119 "%s\tNeed Daemon Reload: %s\n"
1120 "%s\tTransient: %s\n"
1121 "%s\tPerpetual: %s\n"
1122 "%s\tGarbage Collection Mode: %s\n"
1123 "%s\tSlice: %s\n"
1124 "%s\tCGroup: %s\n"
1125 "%s\tCGroup realized: %s\n",
1126 prefix, u->id,
1127 prefix, unit_description(u),
1128 prefix, strna(u->instance),
1129 prefix, unit_load_state_to_string(u->load_state),
1130 prefix, unit_active_state_to_string(unit_active_state(u)),
1131 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1132 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1133 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1134 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1135 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1136 prefix, yes_no(unit_check_gc(u)),
1137 prefix, yes_no(unit_need_daemon_reload(u)),
1138 prefix, yes_no(u->transient),
1139 prefix, yes_no(u->perpetual),
1140 prefix, collect_mode_to_string(u->collect_mode),
1141 prefix, strna(unit_slice_name(u)),
1142 prefix, strna(u->cgroup_path),
1143 prefix, yes_no(u->cgroup_realized));
1144
1145 if (u->cgroup_realized_mask != 0) {
1146 _cleanup_free_ char *s = NULL;
1147 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1148 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1149 }
1150 if (u->cgroup_enabled_mask != 0) {
1151 _cleanup_free_ char *s = NULL;
1152 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1153 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1154 }
1155 m = unit_get_own_mask(u);
1156 if (m != 0) {
1157 _cleanup_free_ char *s = NULL;
1158 (void) cg_mask_to_string(m, &s);
1159 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1160 }
1161 m = unit_get_members_mask(u);
1162 if (m != 0) {
1163 _cleanup_free_ char *s = NULL;
1164 (void) cg_mask_to_string(m, &s);
1165 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1166 }
1167
1168 SET_FOREACH(t, u->names, i)
1169 fprintf(f, "%s\tName: %s\n", prefix, t);
1170
1171 if (!sd_id128_is_null(u->invocation_id))
1172 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1173 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1174
1175 STRV_FOREACH(j, u->documentation)
1176 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1177
1178 following = unit_following(u);
1179 if (following)
1180 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1181
1182 r = unit_following_set(u, &following_set);
1183 if (r >= 0) {
1184 Unit *other;
1185
1186 SET_FOREACH(other, following_set, i)
1187 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1188 }
1189
1190 if (u->fragment_path)
1191 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1192
1193 if (u->source_path)
1194 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1195
1196 STRV_FOREACH(j, u->dropin_paths)
1197 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1198
1199 if (u->failure_action != EMERGENCY_ACTION_NONE)
1200 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1201 if (u->success_action != EMERGENCY_ACTION_NONE)
1202 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1203
1204 if (u->job_timeout != USEC_INFINITY)
1205 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1206
1207 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1208 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1209
1210 if (u->job_timeout_reboot_arg)
1211 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1212
1213 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1214 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1215
1216 if (dual_timestamp_is_set(&u->condition_timestamp))
1217 fprintf(f,
1218 "%s\tCondition Timestamp: %s\n"
1219 "%s\tCondition Result: %s\n",
1220 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1221 prefix, yes_no(u->condition_result));
1222
1223 if (dual_timestamp_is_set(&u->assert_timestamp))
1224 fprintf(f,
1225 "%s\tAssert Timestamp: %s\n"
1226 "%s\tAssert Result: %s\n",
1227 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1228 prefix, yes_no(u->assert_result));
1229
1230 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1231 UnitDependencyInfo di;
1232 Unit *other;
1233
1234 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1235 bool space = false;
1236
1237 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1238
1239 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1240 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1241
1242 fputs(")\n", f);
1243 }
1244 }
1245
1246 if (!hashmap_isempty(u->requires_mounts_for)) {
1247 UnitDependencyInfo di;
1248 const char *path;
1249
1250 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1251 bool space = false;
1252
1253 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1254
1255 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1256 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1257
1258 fputs(")\n", f);
1259 }
1260 }
1261
1262 if (u->load_state == UNIT_LOADED) {
1263
1264 fprintf(f,
1265 "%s\tStopWhenUnneeded: %s\n"
1266 "%s\tRefuseManualStart: %s\n"
1267 "%s\tRefuseManualStop: %s\n"
1268 "%s\tDefaultDependencies: %s\n"
1269 "%s\tOnFailureJobMode: %s\n"
1270 "%s\tIgnoreOnIsolate: %s\n",
1271 prefix, yes_no(u->stop_when_unneeded),
1272 prefix, yes_no(u->refuse_manual_start),
1273 prefix, yes_no(u->refuse_manual_stop),
1274 prefix, yes_no(u->default_dependencies),
1275 prefix, job_mode_to_string(u->on_failure_job_mode),
1276 prefix, yes_no(u->ignore_on_isolate));
1277
1278 if (UNIT_VTABLE(u)->dump)
1279 UNIT_VTABLE(u)->dump(u, f, prefix2);
1280
1281 } else if (u->load_state == UNIT_MERGED)
1282 fprintf(f,
1283 "%s\tMerged into: %s\n",
1284 prefix, u->merged_into->id);
1285 else if (u->load_state == UNIT_ERROR)
1286 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1287
1288 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1289 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1290
1291 if (u->job)
1292 job_dump(u->job, f, prefix2);
1293
1294 if (u->nop_job)
1295 job_dump(u->nop_job, f, prefix2);
1296 }
1297
1298 /* Common implementation for multiple backends */
1299 int unit_load_fragment_and_dropin(Unit *u) {
1300 int r;
1301
1302 assert(u);
1303
1304 /* Load a .{service,socket,...} file */
1305 r = unit_load_fragment(u);
1306 if (r < 0)
1307 return r;
1308
1309 if (u->load_state == UNIT_STUB)
1310 return -ENOENT;
1311
1312 /* Load drop-in directory data. If u is an alias, we might be reloading the
1313 * target unit needlessly. But we cannot be sure which drops-ins have already
1314 * been loaded and which not, at least without doing complicated book-keeping,
1315 * so let's always reread all drop-ins. */
1316 return unit_load_dropin(unit_follow_merge(u));
1317 }
1318
1319 /* Common implementation for multiple backends */
1320 int unit_load_fragment_and_dropin_optional(Unit *u) {
1321 int r;
1322
1323 assert(u);
1324
1325 /* Same as unit_load_fragment_and_dropin(), but whether
1326 * something can be loaded or not doesn't matter. */
1327
1328 /* Load a .service file */
1329 r = unit_load_fragment(u);
1330 if (r < 0)
1331 return r;
1332
1333 if (u->load_state == UNIT_STUB)
1334 u->load_state = UNIT_LOADED;
1335
1336 /* Load drop-in directory data */
1337 return unit_load_dropin(unit_follow_merge(u));
1338 }
1339
1340 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1341 assert(u);
1342 assert(target);
1343
1344 if (target->type != UNIT_TARGET)
1345 return 0;
1346
1347 /* Only add the dependency if both units are loaded, so that
1348 * that loop check below is reliable */
1349 if (u->load_state != UNIT_LOADED ||
1350 target->load_state != UNIT_LOADED)
1351 return 0;
1352
1353 /* If either side wants no automatic dependencies, then let's
1354 * skip this */
1355 if (!u->default_dependencies ||
1356 !target->default_dependencies)
1357 return 0;
1358
1359 /* Don't create loops */
1360 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1361 return 0;
1362
1363 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1364 }
1365
1366 static int unit_add_target_dependencies(Unit *u) {
1367
1368 static const UnitDependency deps[] = {
1369 UNIT_REQUIRED_BY,
1370 UNIT_REQUISITE_OF,
1371 UNIT_WANTED_BY,
1372 UNIT_BOUND_BY
1373 };
1374
1375 unsigned k;
1376 int r = 0;
1377
1378 assert(u);
1379
1380 for (k = 0; k < ELEMENTSOF(deps); k++) {
1381 Unit *target;
1382 Iterator i;
1383 void *v;
1384
1385 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1386 r = unit_add_default_target_dependency(u, target);
1387 if (r < 0)
1388 return r;
1389 }
1390 }
1391
1392 return r;
1393 }
1394
1395 static int unit_add_slice_dependencies(Unit *u) {
1396 UnitDependencyMask mask;
1397 assert(u);
1398
1399 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1400 return 0;
1401
1402 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1403 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1404 relationship). */
1405 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1406
1407 if (UNIT_ISSET(u->slice))
1408 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1409
1410 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1411 return 0;
1412
1413 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1414 }
1415
/* For every path listed in RequiresMountsFor=, add After= (and, where the
 * mount unit is backed by a fragment, Requires=) dependencies on the .mount
 * units covering the path and each of its parent directories. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer, large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk all prefixes of the path, from "/" up to the path itself. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        /* Translate the directory into a mount unit name, e.g. "/foo" -> "foo.mount". */
                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. Failure is ignored: best-effort. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Order us after the mount unit; the origin mask records
                         * where the RequiresMountsFor= entry came from. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only mounts with a fragment (i.e. not synthesized purely
                         * from /proc/self/mountinfo) get a hard Requires=. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1465
1466 static int unit_add_startup_units(Unit *u) {
1467 CGroupContext *c;
1468 int r;
1469
1470 c = unit_get_cgroup_context(u);
1471 if (!c)
1472 return 0;
1473
1474 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1475 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1476 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1477 return 0;
1478
1479 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1480 if (r < 0)
1481 return r;
1482
1483 return set_put(u->manager->startup_units, u);
1484 }
1485
/* Fully load a unit currently in stub state: run the type-specific loader,
 * then install implicit dependencies (targets, slice, mounts) and register
 * the unit in the startup set. On failure the unit enters UNIT_NOT_FOUND or
 * UNIT_ERROR and the error code is stored in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are being loaded explicitly now; drop ourselves from the load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/masked/failed earlier): nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient unit: flush the settings written so far to disk, so that
         * the type-specific loader below reads them like a regular fragment. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* The loader ran but found no configuration at all. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {

                r = unit_add_target_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set if and only if we are in merged state. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* If we never got past stub state record NOT_FOUND, otherwise a generic load error. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1569
/* Evaluate a list of conditions (or asserts — to_string picks the keyword
 * used in log output). Semantics: every non-trigger condition must pass, and
 * if any trigger ("|") conditions exist, at least one of them must pass. An
 * empty list counts as true. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1; /* -1: no trigger condition encountered yet */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        /* Evaluation errors (r < 0) are logged and then treated
                         * like a failed condition by the checks below. */
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failed (or errored) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Remember whether at least one trigger condition passed so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* Fail only if trigger conditions existed and none of them passed. */
        return triggered != 0;
}
1613
1614 static bool unit_condition_test(Unit *u) {
1615 assert(u);
1616
1617 dual_timestamp_get(&u->condition_timestamp);
1618 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1619
1620 return u->condition_result;
1621 }
1622
1623 static bool unit_assert_test(Unit *u) {
1624 assert(u);
1625
1626 dual_timestamp_get(&u->assert_timestamp);
1627 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1628
1629 return u->assert_result;
1630 }
1631
/* Print a console status line for the unit, filling the unit's description
 * into the caller-supplied format string. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        /* The format string is chosen at runtime, hence suppress -Wformat-nonliteral here. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1637
1638 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1639 const char *format;
1640 const UnitStatusMessageFormats *format_table;
1641
1642 assert(u);
1643 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1644
1645 if (t != JOB_RELOAD) {
1646 format_table = &UNIT_VTABLE(u)->status_message_formats;
1647 if (format_table) {
1648 format = format_table->starting_stopping[t == JOB_STOP];
1649 if (format)
1650 return format;
1651 }
1652 }
1653
1654 /* Return generic strings */
1655 if (t == JOB_START)
1656 return "Starting %s.";
1657 else if (t == JOB_STOP)
1658 return "Stopping %s.";
1659 else
1660 return "Reloading %s.";
1661 }
1662
1663 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1664 const char *format;
1665
1666 assert(u);
1667
1668 /* Reload status messages have traditionally not been printed to console. */
1669 if (!IN_SET(t, JOB_START, JOB_STOP))
1670 return;
1671
1672 format = unit_get_status_message_format(u, t);
1673
1674 DISABLE_WARNING_FORMAT_NONLITERAL;
1675 unit_status_printf(u, "", format);
1676 REENABLE_WARNING;
1677 }
1678
1679 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1680 const char *format, *mid;
1681 char buf[LINE_MAX];
1682
1683 assert(u);
1684
1685 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1686 return;
1687
1688 if (log_on_console())
1689 return;
1690
1691 /* We log status messages for all units and all operations. */
1692
1693 format = unit_get_status_message_format(u, t);
1694
1695 DISABLE_WARNING_FORMAT_NONLITERAL;
1696 xsprintf(buf, format, unit_description(u));
1697 REENABLE_WARNING;
1698
1699 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1700 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1701 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1702
1703 /* Note that we deliberately use LOG_MESSAGE() instead of
1704 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1705 * closely what is written to screen using the status output,
1706 * which is supposed the highest level, friendliest output
1707 * possible, which means we should avoid the low-level unit
1708 * name. */
1709 log_struct(LOG_INFO,
1710 LOG_MESSAGE("%s", buf),
1711 LOG_UNIT_ID(u),
1712 LOG_UNIT_INVOCATION_ID(u),
1713 mid,
1714 NULL);
1715 }
1716
/* Announce a start/stop/reload state transition both in the journal and on
 * the console status line. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        unit_status_log_starting_stopping_reloading(u, t);
        unit_status_print_starting_stopping(u, t);
}
1725
1726 int unit_start_limit_test(Unit *u) {
1727 assert(u);
1728
1729 if (ratelimit_test(&u->start_limit)) {
1730 u->start_limit_hit = false;
1731 return 0;
1732 }
1733
1734 log_unit_warning(u, "Start request repeated too quickly.");
1735 u->start_limit_hit = true;
1736
1737 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1738 }
1739
1740 bool unit_shall_confirm_spawn(Unit *u) {
1741 assert(u);
1742
1743 if (manager_is_confirm_spawn_disabled(u->manager))
1744 return false;
1745
1746 /* For some reasons units remaining in the same process group
1747 * as PID 1 fail to acquire the console even if it's not used
1748 * by any process. So skip the confirmation question for them. */
1749 return !unit_get_exec_context(u)->same_pgrp;
1750 }
1751
1752 static bool unit_verify_deps(Unit *u) {
1753 Unit *other;
1754 Iterator j;
1755 void *v;
1756
1757 assert(u);
1758
1759 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1760 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1761 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1762 * conjunction with After= as for them any such check would make things entirely racy. */
1763
1764 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1765
1766 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1767 continue;
1768
1769 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1770 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1771 return false;
1772 }
1773 }
1774
1775 return true;
1776 }
1777
/* Start the unit, running condition/assert checks first and redirecting to
 * the followed unit where applicable.
 *
 * Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1859
1860 bool unit_can_start(Unit *u) {
1861 assert(u);
1862
1863 if (u->load_state != UNIT_LOADED)
1864 return false;
1865
1866 if (!unit_supported(u))
1867 return false;
1868
1869 return !!UNIT_VTABLE(u)->start;
1870 }
1871
1872 bool unit_can_isolate(Unit *u) {
1873 assert(u);
1874
1875 return unit_can_start(u) &&
1876 u->allow_isolate;
1877 }
1878
1879 /* Errors:
1880 * -EBADR: This unit type does not support stopping.
1881 * -EALREADY: Unit is already stopped.
1882 * -EAGAIN: An operation is already in progress. Retry later.
1883 */
1884 int unit_stop(Unit *u) {
1885 UnitActiveState state;
1886 Unit *following;
1887
1888 assert(u);
1889
1890 state = unit_active_state(u);
1891 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1892 return -EALREADY;
1893
1894 following = unit_following(u);
1895 if (following) {
1896 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1897 return unit_stop(following);
1898 }
1899
1900 if (!UNIT_VTABLE(u)->stop)
1901 return -EBADR;
1902
1903 unit_add_to_dbus_queue(u);
1904
1905 return UNIT_VTABLE(u)->stop(u);
1906 }
1907
1908 bool unit_can_stop(Unit *u) {
1909 assert(u);
1910
1911 if (!unit_supported(u))
1912 return false;
1913
1914 if (u->perpetual)
1915 return false;
1916
1917 return !!UNIT_VTABLE(u)->stop;
1918 }
1919
/* Reload the unit, redirecting to the followed unit where applicable.
 *
 * Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only active units can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1962
1963 bool unit_can_reload(Unit *u) {
1964 assert(u);
1965
1966 if (UNIT_VTABLE(u)->can_reload)
1967 return UNIT_VTABLE(u)->can_reload(u);
1968
1969 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1970 return true;
1971
1972 return UNIT_VTABLE(u)->reload;
1973 }
1974
/* If StopWhenUnneeded=yes and no unit that needs this one is active or
 * pending anymore, enqueue a stop job for it (rate-limited). */
static void unit_check_unneeded(Unit *u) {

        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        /* The reverse dependency types that count as "being needed by". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* Still needed if any dependent unit is active, has a pending
                 * job, or is about to be restarted. */
                HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other) || unit_will_restart(other))
                                return;
        }

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2025
/* If any unit this one is bound to (BindsTo=) has gone down, enqueue a stop
 * job for this unit as well (rate-limited). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A queued job will sort this out on its own. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        /* On break, 'other' keeps pointing at the offending unit — it is used
         * in the log messages below. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        /* stop == true guarantees the loop broke with 'other' set. */
        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2076
2077 static void retroactively_start_dependencies(Unit *u) {
2078 Iterator i;
2079 Unit *other;
2080 void *v;
2081
2082 assert(u);
2083 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2084
2085 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2086 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2087 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2088 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2089
2090 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2091 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2092 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2093 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2094
2095 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2096 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2097 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2098 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2099
2100 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2101 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2102 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2103
2104 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2106 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2107 }
2108
2109 static void retroactively_stop_dependencies(Unit *u) {
2110 Unit *other;
2111 Iterator i;
2112 void *v;
2113
2114 assert(u);
2115 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2116
2117 /* Pull down units which are bound to us recursively if enabled */
2118 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2119 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2120 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2121 }
2122
2123 static void check_unneeded_dependencies(Unit *u) {
2124 Unit *other;
2125 Iterator i;
2126 void *v;
2127
2128 assert(u);
2129 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2130
2131 /* Garbage collect services that might not be needed anymore, if enabled */
2132 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2133 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2134 unit_check_unneeded(other);
2135 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2136 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2137 unit_check_unneeded(other);
2138 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2139 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2140 unit_check_unneeded(other);
2141 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2142 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2143 unit_check_unneeded(other);
2144 }
2145
2146 void unit_start_on_failure(Unit *u) {
2147 Unit *other;
2148 Iterator i;
2149 void *v;
2150
2151 assert(u);
2152
2153 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2154 return;
2155
2156 log_unit_info(u, "Triggering OnFailure= dependencies.");
2157
2158 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2159 int r;
2160
2161 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2162 if (r < 0)
2163 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2164 }
2165 }
2166
2167 void unit_trigger_notify(Unit *u) {
2168 Unit *other;
2169 Iterator i;
2170 void *v;
2171
2172 assert(u);
2173
2174 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2175 if (UNIT_VTABLE(other)->trigger_notify)
2176 UNIT_VTABLE(other)->trigger_notify(other, u);
2177 }
2178
2179 static int unit_log_resources(Unit *u) {
2180
2181 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2182 size_t n_message_parts = 0, n_iovec = 0;
2183 char* message_parts[3 + 1], *t;
2184 nsec_t nsec = NSEC_INFINITY;
2185 CGroupIPAccountingMetric m;
2186 size_t i;
2187 int r;
2188 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2189 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2190 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2191 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2192 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2193 };
2194
2195 assert(u);
2196
2197 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2198 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2199 * information and the complete data in structured fields. */
2200
2201 (void) unit_get_cpu_usage(u, &nsec);
2202 if (nsec != NSEC_INFINITY) {
2203 char buf[FORMAT_TIMESPAN_MAX] = "";
2204
2205 /* Format the CPU time for inclusion in the structured log message */
2206 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2207 r = log_oom();
2208 goto finish;
2209 }
2210 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2211
2212 /* Format the CPU time for inclusion in the human language message string */
2213 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2214 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2215 if (!t) {
2216 r = log_oom();
2217 goto finish;
2218 }
2219
2220 message_parts[n_message_parts++] = t;
2221 }
2222
2223 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2224 char buf[FORMAT_BYTES_MAX] = "";
2225 uint64_t value = UINT64_MAX;
2226
2227 assert(ip_fields[m]);
2228
2229 (void) unit_get_ip_accounting(u, m, &value);
2230 if (value == UINT64_MAX)
2231 continue;
2232
2233 /* Format IP accounting data for inclusion in the structured log message */
2234 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2235 r = log_oom();
2236 goto finish;
2237 }
2238 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2239
2240 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2241 * bytes counters (and not for the packets counters) */
2242 if (m == CGROUP_IP_INGRESS_BYTES)
2243 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2244 format_bytes(buf, sizeof(buf), value),
2245 " IP traffic");
2246 else if (m == CGROUP_IP_EGRESS_BYTES)
2247 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2248 format_bytes(buf, sizeof(buf), value),
2249 " IP traffic");
2250 else
2251 continue;
2252 if (!t) {
2253 r = log_oom();
2254 goto finish;
2255 }
2256
2257 message_parts[n_message_parts++] = t;
2258 }
2259
2260 /* Is there any accounting data available at all? */
2261 if (n_iovec == 0) {
2262 r = 0;
2263 goto finish;
2264 }
2265
2266 if (n_message_parts == 0)
2267 t = strjoina("MESSAGE=", u->id, ": Completed");
2268 else {
2269 _cleanup_free_ char *joined;
2270
2271 message_parts[n_message_parts] = NULL;
2272
2273 joined = strv_join(message_parts, ", ");
2274 if (!joined) {
2275 r = log_oom();
2276 goto finish;
2277 }
2278
2279 t = strjoina("MESSAGE=", u->id, ": ", joined);
2280 }
2281
2282 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2283 * and hence don't increase n_iovec for them */
2284 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2285 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2286
2287 t = strjoina(u->manager->unit_log_field, u->id);
2288 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2289
2290 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2291 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2292
2293 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2294 r = 0;
2295
2296 finish:
2297 for (i = 0; i < n_message_parts; i++)
2298 free(message_parts[i]);
2299
2300 for (i = 0; i < n_iovec; i++)
2301 free(iovec[i].iov_base);
2302
2303 return r;
2304
2305 }
2306
/* Called by the unit-type implementations on every low-level state transition. Propagates the change
 * into the rest of the manager: updates timestamps, completes or invalidates pending jobs,
 * retroactively starts/stops dependencies, and emits audit, plymouth and journal events. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        Manager *m;
        bool unexpected;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes,
         * even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected
         * behavior here. For example: if a mount point is remounted
         * this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        /* Keep the manager's count of console-touching units up to date.
         * Note that this doesn't apply to RemainAfterExit services exiting
         * successfully, since there's no change of state in that case. Which is
         * why it is handled in service_set_state() */
        if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                ExecContext *ec;

                ec = unit_get_exec_context(u);
                if (ec && exec_context_may_touch_console(ec)) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                m->n_on_console--;

                                if (m->n_on_console == 0)
                                        /* unset no_console_output flag, since the console is free */
                                        m->no_console_output = false;
                        } else
                                m->n_on_console++;
                }
        }

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        /* Some names are special */
        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
                        /* The bus might have just become available,
                         * hence try to connect to it, if we aren't
                         * yet connected. */
                        bus_init(m, true);

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {
                /* We don't care about D-Bus going down here, since we'll get an asynchronous notification for it
                 * anyway. */

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for
                 * being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed
                 * has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these
                 * units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        /* Finally, let the bus machinery and the GC know about the new state */
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2534
2535 int unit_watch_pid(Unit *u, pid_t pid) {
2536 int q, r;
2537
2538 assert(u);
2539 assert(pid >= 1);
2540
2541 /* Watch a specific PID. We only support one or two units
2542 * watching each PID for now, not more. */
2543
2544 r = set_ensure_allocated(&u->pids, NULL);
2545 if (r < 0)
2546 return r;
2547
2548 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2549 if (r < 0)
2550 return r;
2551
2552 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2553 if (r == -EEXIST) {
2554 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2555 if (r < 0)
2556 return r;
2557
2558 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2559 }
2560
2561 q = set_put(u->pids, PID_TO_PTR(pid));
2562 if (q < 0)
2563 return q;
2564
2565 return r;
2566 }
2567
2568 void unit_unwatch_pid(Unit *u, pid_t pid) {
2569 assert(u);
2570 assert(pid >= 1);
2571
2572 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2573 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2574 (void) set_remove(u->pids, PID_TO_PTR(pid));
2575 }
2576
2577 void unit_unwatch_all_pids(Unit *u) {
2578 assert(u);
2579
2580 while (!set_isempty(u->pids))
2581 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2582
2583 u->pids = set_free(u->pids);
2584 }
2585
/* Drops all watched PIDs that no longer refer to an unwaited-for process, keeping except1/except2
 * untouched regardless. NOTE(review): unit_unwatch_pid() removes from the very set being iterated;
 * this relies on SET_FOREACH tolerating removal of the current entry — confirm against the set
 * implementation before restructuring. */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                /* PIDs the caller wants to keep are skipped unconditionally */
                if (pid == except1 || pid == except2)
                        continue;

                /* !pid_is_unwaited() means the process has already been reaped (or never existed) */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2604
2605 bool unit_job_is_applicable(Unit *u, JobType j) {
2606 assert(u);
2607 assert(j >= 0 && j < _JOB_TYPE_MAX);
2608
2609 switch (j) {
2610
2611 case JOB_VERIFY_ACTIVE:
2612 case JOB_START:
2613 case JOB_NOP:
2614 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2615 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2616 * jobs for it. */
2617 return true;
2618
2619 case JOB_STOP:
2620 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2621 * external events), hence it makes no sense to permit enqueing such a request either. */
2622 return !u->perpetual;
2623
2624 case JOB_RESTART:
2625 case JOB_TRY_RESTART:
2626 return unit_can_stop(u) && unit_can_start(u);
2627
2628 case JOB_RELOAD:
2629 case JOB_TRY_RELOAD:
2630 return unit_can_reload(u);
2631
2632 case JOB_RELOAD_OR_START:
2633 return unit_can_reload(u) && unit_can_start(u);
2634
2635 default:
2636 assert_not_reached("Invalid job type");
2637 }
2638 }
2639
2640 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2641 assert(u);
2642
2643 /* Only warn about some unit types */
2644 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2645 return;
2646
2647 if (streq_ptr(u->id, other))
2648 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2649 else
2650 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2651 }
2652
2653 static int unit_add_dependency_hashmap(
2654 Hashmap **h,
2655 Unit *other,
2656 UnitDependencyMask origin_mask,
2657 UnitDependencyMask destination_mask) {
2658
2659 UnitDependencyInfo info;
2660 int r;
2661
2662 assert(h);
2663 assert(other);
2664 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2665 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2666 assert(origin_mask > 0 || destination_mask > 0);
2667
2668 r = hashmap_ensure_allocated(h, NULL);
2669 if (r < 0)
2670 return r;
2671
2672 assert_cc(sizeof(void*) == sizeof(info));
2673
2674 info.data = hashmap_get(*h, other);
2675 if (info.data) {
2676 /* Entry already exists. Add in our mask. */
2677
2678 if ((info.origin_mask & origin_mask) == info.origin_mask &&
2679 (info.destination_mask & destination_mask) == info.destination_mask)
2680 return 0; /* NOP */
2681
2682 info.origin_mask |= origin_mask;
2683 info.destination_mask |= destination_mask;
2684
2685 r = hashmap_update(*h, other, info.data);
2686 } else {
2687 info = (UnitDependencyInfo) {
2688 .origin_mask = origin_mask,
2689 .destination_mask = destination_mask,
2690 };
2691
2692 r = hashmap_put(*h, other, info.data);
2693 }
2694 if (r < 0)
2695 return r;
2696
2697 return 1;
2698 }
2699
2700 int unit_add_dependency(
2701 Unit *u,
2702 UnitDependency d,
2703 Unit *other,
2704 bool add_reference,
2705 UnitDependencyMask mask) {
2706
2707 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2708 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2709 [UNIT_WANTS] = UNIT_WANTED_BY,
2710 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2711 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2712 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2713 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2714 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2715 [UNIT_WANTED_BY] = UNIT_WANTS,
2716 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2717 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2718 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2719 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2720 [UNIT_BEFORE] = UNIT_AFTER,
2721 [UNIT_AFTER] = UNIT_BEFORE,
2722 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2723 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2724 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2725 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2726 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2727 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2728 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2729 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2730 };
2731 Unit *original_u = u, *original_other = other;
2732 int r;
2733
2734 assert(u);
2735 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2736 assert(other);
2737
2738 u = unit_follow_merge(u);
2739 other = unit_follow_merge(other);
2740
2741 /* We won't allow dependencies on ourselves. We will not
2742 * consider them an error however. */
2743 if (u == other) {
2744 maybe_warn_about_dependency(original_u, original_other->id, d);
2745 return 0;
2746 }
2747
2748 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2749 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2750 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2751 return 0;
2752 }
2753
2754 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2755 if (r < 0)
2756 return r;
2757
2758 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2759 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2760 if (r < 0)
2761 return r;
2762 }
2763
2764 if (add_reference) {
2765 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2766 if (r < 0)
2767 return r;
2768
2769 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2770 if (r < 0)
2771 return r;
2772 }
2773
2774 unit_add_to_dbus_queue(u);
2775 return 0;
2776 }
2777
2778 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2779 int r;
2780
2781 assert(u);
2782
2783 r = unit_add_dependency(u, d, other, add_reference, mask);
2784 if (r < 0)
2785 return r;
2786
2787 return unit_add_dependency(u, e, other, add_reference, mask);
2788 }
2789
2790 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2791 int r;
2792
2793 assert(u);
2794 assert(name || path);
2795 assert(buf);
2796 assert(ret);
2797
2798 if (!name)
2799 name = basename(path);
2800
2801 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2802 *buf = NULL;
2803 *ret = name;
2804 return 0;
2805 }
2806
2807 if (u->instance)
2808 r = unit_name_replace_instance(name, u->instance, buf);
2809 else {
2810 _cleanup_free_ char *i = NULL;
2811
2812 r = unit_name_to_prefix(u->id, &i);
2813 if (r < 0)
2814 return r;
2815
2816 r = unit_name_replace_instance(name, i, buf);
2817 }
2818 if (r < 0)
2819 return r;
2820
2821 *ret = *buf;
2822 return 0;
2823 }
2824
2825 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2826 _cleanup_free_ char *buf = NULL;
2827 Unit *other;
2828 int r;
2829
2830 assert(u);
2831 assert(name || path);
2832
2833 r = resolve_template(u, name, path, &buf, &name);
2834 if (r < 0)
2835 return r;
2836
2837 r = manager_load_unit(u->manager, name, path, NULL, &other);
2838 if (r < 0)
2839 return r;
2840
2841 return unit_add_dependency(u, d, other, add_reference, mask);
2842 }
2843
2844 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2845 _cleanup_free_ char *buf = NULL;
2846 Unit *other;
2847 int r;
2848
2849 assert(u);
2850 assert(name || path);
2851
2852 r = resolve_template(u, name, path, &buf, &name);
2853 if (r < 0)
2854 return r;
2855
2856 r = manager_load_unit(u->manager, name, path, NULL, &other);
2857 if (r < 0)
2858 return r;
2859
2860 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2861 }
2862
/* Overrides the unit search path via $SYSTEMD_UNIT_PATH. This is mostly for debug purposes.
 * Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2870
2871 char *unit_dbus_path(Unit *u) {
2872 assert(u);
2873
2874 if (!u->id)
2875 return NULL;
2876
2877 return unit_dbus_path_from_name(u->id);
2878 }
2879
2880 char *unit_dbus_path_invocation_id(Unit *u) {
2881 assert(u);
2882
2883 if (sd_id128_is_null(u->invocation_id))
2884 return NULL;
2885
2886 return unit_dbus_path_from_name(u->invocation_id_string);
2887 }
2888
/* Assigns 'slice' as the slice of u. Returns 1 when the reference was (re)set, 0 when it already
 * pointed at the same slice, or a negative errno-style code when the assignment is not permitted.
 * The order of the checks below matters: each rejection maps to a distinct error code. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        /* Only unit types with a cgroup context can live in a slice */
        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        /* A slice's parent is derived from its name, never set explicitly */
        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Too late once the unit has left the inactive state */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        /* The target must itself be a slice unit */
        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope must remain directly below the root slice */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_unset(&u->slice);
        unit_ref_set(&u->slice, slice);
        return 1;
}
2926
2927 int unit_set_default_slice(Unit *u) {
2928 _cleanup_free_ char *b = NULL;
2929 const char *slice_name;
2930 Unit *slice;
2931 int r;
2932
2933 assert(u);
2934
2935 if (UNIT_ISSET(u->slice))
2936 return 0;
2937
2938 if (u->instance) {
2939 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2940
2941 /* Implicitly place all instantiated units in their
2942 * own per-template slice */
2943
2944 r = unit_name_to_prefix(u->id, &prefix);
2945 if (r < 0)
2946 return r;
2947
2948 /* The prefix is already escaped, but it might include
2949 * "-" which has a special meaning for slice units,
2950 * hence escape it here extra. */
2951 escaped = unit_name_escape(prefix);
2952 if (!escaped)
2953 return -ENOMEM;
2954
2955 if (MANAGER_IS_SYSTEM(u->manager))
2956 b = strjoin("system-", escaped, ".slice");
2957 else
2958 b = strappend(escaped, ".slice");
2959 if (!b)
2960 return -ENOMEM;
2961
2962 slice_name = b;
2963 } else
2964 slice_name =
2965 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2966 ? SPECIAL_SYSTEM_SLICE
2967 : SPECIAL_ROOT_SLICE;
2968
2969 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2970 if (r < 0)
2971 return r;
2972
2973 return unit_set_slice(u, slice);
2974 }
2975
2976 const char *unit_slice_name(Unit *u) {
2977 assert(u);
2978
2979 if (!UNIT_ISSET(u->slice))
2980 return NULL;
2981
2982 return UNIT_DEREF(u->slice)->id;
2983 }
2984
2985 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2986 _cleanup_free_ char *t = NULL;
2987 int r;
2988
2989 assert(u);
2990 assert(type);
2991 assert(_found);
2992
2993 r = unit_name_change_suffix(u->id, type, &t);
2994 if (r < 0)
2995 return r;
2996 if (unit_has_name(u, t))
2997 return -EINVAL;
2998
2999 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3000 assert(r < 0 || *_found != u);
3001 return r;
3002 }
3003
3004 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3005 const char *name, *old_owner, *new_owner;
3006 Unit *u = userdata;
3007 int r;
3008
3009 assert(message);
3010 assert(u);
3011
3012 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3013 if (r < 0) {
3014 bus_log_parse_error(r);
3015 return 0;
3016 }
3017
3018 old_owner = isempty(old_owner) ? NULL : old_owner;
3019 new_owner = isempty(new_owner) ? NULL : new_owner;
3020
3021 if (UNIT_VTABLE(u)->bus_name_owner_change)
3022 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3023
3024 return 0;
3025 }
3026
3027 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3028 const char *match;
3029
3030 assert(u);
3031 assert(bus);
3032 assert(name);
3033
3034 if (u->match_bus_slot)
3035 return -EBUSY;
3036
3037 match = strjoina("type='signal',"
3038 "sender='org.freedesktop.DBus',"
3039 "path='/org/freedesktop/DBus',"
3040 "interface='org.freedesktop.DBus',"
3041 "member='NameOwnerChanged',"
3042 "arg0='", name, "'");
3043
3044 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
3045 }
3046
3047 int unit_watch_bus_name(Unit *u, const char *name) {
3048 int r;
3049
3050 assert(u);
3051 assert(name);
3052
3053 /* Watch a specific name on the bus. We only support one unit
3054 * watching each name for now. */
3055
3056 if (u->manager->api_bus) {
3057 /* If the bus is already available, install the match directly.
3058 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3059 r = unit_install_bus_match(u, u->manager->api_bus, name);
3060 if (r < 0)
3061 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3062 }
3063
3064 r = hashmap_put(u->manager->watch_bus, name, u);
3065 if (r < 0) {
3066 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3067 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3068 }
3069
3070 return 0;
3071 }
3072
3073 void unit_unwatch_bus_name(Unit *u, const char *name) {
3074 assert(u);
3075 assert(name);
3076
3077 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3078 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3079 }
3080
3081 bool unit_can_serialize(Unit *u) {
3082 assert(u);
3083
3084 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3085 }
3086
3087 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3088 _cleanup_free_ char *s = NULL;
3089 int r = 0;
3090
3091 assert(f);
3092 assert(key);
3093
3094 if (mask != 0) {
3095 r = cg_mask_to_string(mask, &s);
3096 if (r >= 0) {
3097 fputs(key, f);
3098 fputc('=', f);
3099 fputs(s, f);
3100 fputc('\n', f);
3101 }
3102 }
3103 return r;
3104 }
3105
/* Serialization keys for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric.
 * NOTE(review): presumably parsed back by unit_deserialize() — keep both sides in sync. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3112
/* Serializes the unit's runtime state to f as "key=value" lines terminated by an empty line, for
 * daemon reexecution/reload; fds that must survive are registered in fds. Read back by
 * unit_deserialize(), so keys here must match that parser. With serialize_jobs == false any pending
 * jobs are skipped. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Unit-type specific state first, including the exec runtime if there is one */
        if (unit_can_serialize(u)) {
                ExecRuntime *rt;

                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;

                rt = unit_get_exec_runtime(u);
                if (rt) {
                        r = exec_runtime_serialize(u, rt, f, fds);
                        if (r < 0)
                                return r;
                }
        }

        /* Generic per-unit timestamps */
        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results only make sense once they were evaluated at least once */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        /* CPU accounting state; cpu-usage-last is skipped while unset (NSEC_INFINITY) */
        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        /* cgroup state */
        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        /* Bus clients holding a reference on this unit */
        bus_track_serialize(u->bus_track, f, "ref");

        /* IP accounting counters, only those that are currently readable */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                /* Both the installed job and the nop job are stored under the same "job" key;
                 * NOTE(review): presumably job_serialize() records enough to tell them apart on
                 * deserialization — confirm in job_deserialize(). */
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3203
3204 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3205 assert(u);
3206 assert(f);
3207 assert(key);
3208
3209 if (!value)
3210 return 0;
3211
3212 fputs(key, f);
3213 fputc('=', f);
3214 fputs(value, f);
3215 fputc('\n', f);
3216
3217 return 1;
3218 }
3219
3220 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3221 _cleanup_free_ char *c = NULL;
3222
3223 assert(u);
3224 assert(f);
3225 assert(key);
3226
3227 if (!value)
3228 return 0;
3229
3230 c = cescape(value);
3231 if (!c)
3232 return -ENOMEM;
3233
3234 fputs(key, f);
3235 fputc('=', f);
3236 fputs(c, f);
3237 fputc('\n', f);
3238
3239 return 1;
3240 }
3241
3242 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3243 int copy;
3244
3245 assert(u);
3246 assert(f);
3247 assert(key);
3248
3249 if (fd < 0)
3250 return 0;
3251
3252 copy = fdset_put_dup(fds, fd);
3253 if (copy < 0)
3254 return copy;
3255
3256 fprintf(f, "%s=%i\n", key, copy);
3257 return 1;
3258 }
3259
3260 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3261 va_list ap;
3262
3263 assert(u);
3264 assert(f);
3265 assert(key);
3266 assert(format);
3267
3268 fputs(key, f);
3269 fputc('=', f);
3270
3271 va_start(ap, format);
3272 vfprintf(f, format, ap);
3273 va_end(ap);
3274
3275 fputc('\n', f);
3276 }
3277
3278 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3279 ExecRuntime **rt = NULL;
3280 size_t offset;
3281 int r;
3282
3283 assert(u);
3284 assert(f);
3285 assert(fds);
3286
3287 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3288 if (offset > 0)
3289 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3290
3291 for (;;) {
3292 char line[LINE_MAX], *l, *v;
3293 CGroupIPAccountingMetric m;
3294 size_t k;
3295
3296 if (!fgets(line, sizeof(line), f)) {
3297 if (feof(f))
3298 return 0;
3299 return -errno;
3300 }
3301
3302 char_array_0(line);
3303 l = strstrip(line);
3304
3305 /* End marker */
3306 if (isempty(l))
3307 break;
3308
3309 k = strcspn(l, "=");
3310
3311 if (l[k] == '=') {
3312 l[k] = 0;
3313 v = l+k+1;
3314 } else
3315 v = l+k;
3316
3317 if (streq(l, "job")) {
3318 if (v[0] == '\0') {
3319 /* new-style serialized job */
3320 Job *j;
3321
3322 j = job_new_raw(u);
3323 if (!j)
3324 return log_oom();
3325
3326 r = job_deserialize(j, f);
3327 if (r < 0) {
3328 job_free(j);
3329 return r;
3330 }
3331
3332 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3333 if (r < 0) {
3334 job_free(j);
3335 return r;
3336 }
3337
3338 r = job_install_deserialized(j);
3339 if (r < 0) {
3340 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3341 job_free(j);
3342 return r;
3343 }
3344 } else /* legacy for pre-44 */
3345 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3346 continue;
3347 } else if (streq(l, "state-change-timestamp")) {
3348 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3349 continue;
3350 } else if (streq(l, "inactive-exit-timestamp")) {
3351 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3352 continue;
3353 } else if (streq(l, "active-enter-timestamp")) {
3354 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3355 continue;
3356 } else if (streq(l, "active-exit-timestamp")) {
3357 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3358 continue;
3359 } else if (streq(l, "inactive-enter-timestamp")) {
3360 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3361 continue;
3362 } else if (streq(l, "condition-timestamp")) {
3363 dual_timestamp_deserialize(v, &u->condition_timestamp);
3364 continue;
3365 } else if (streq(l, "assert-timestamp")) {
3366 dual_timestamp_deserialize(v, &u->assert_timestamp);
3367 continue;
3368 } else if (streq(l, "condition-result")) {
3369
3370 r = parse_boolean(v);
3371 if (r < 0)
3372 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3373 else
3374 u->condition_result = r;
3375
3376 continue;
3377
3378 } else if (streq(l, "assert-result")) {
3379
3380 r = parse_boolean(v);
3381 if (r < 0)
3382 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3383 else
3384 u->assert_result = r;
3385
3386 continue;
3387
3388 } else if (streq(l, "transient")) {
3389
3390 r = parse_boolean(v);
3391 if (r < 0)
3392 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3393 else
3394 u->transient = r;
3395
3396 continue;
3397
3398 } else if (streq(l, "exported-invocation-id")) {
3399
3400 r = parse_boolean(v);
3401 if (r < 0)
3402 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3403 else
3404 u->exported_invocation_id = r;
3405
3406 continue;
3407
3408 } else if (streq(l, "exported-log-level-max")) {
3409
3410 r = parse_boolean(v);
3411 if (r < 0)
3412 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3413 else
3414 u->exported_log_level_max = r;
3415
3416 continue;
3417
3418 } else if (streq(l, "exported-log-extra-fields")) {
3419
3420 r = parse_boolean(v);
3421 if (r < 0)
3422 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3423 else
3424 u->exported_log_extra_fields = r;
3425
3426 continue;
3427
3428 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3429
3430 r = safe_atou64(v, &u->cpu_usage_base);
3431 if (r < 0)
3432 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3433
3434 continue;
3435
3436 } else if (streq(l, "cpu-usage-last")) {
3437
3438 r = safe_atou64(v, &u->cpu_usage_last);
3439 if (r < 0)
3440 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3441
3442 continue;
3443
3444 } else if (streq(l, "cgroup")) {
3445
3446 r = unit_set_cgroup_path(u, v);
3447 if (r < 0)
3448 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3449
3450 (void) unit_watch_cgroup(u);
3451
3452 continue;
3453 } else if (streq(l, "cgroup-realized")) {
3454 int b;
3455
3456 b = parse_boolean(v);
3457 if (b < 0)
3458 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3459 else
3460 u->cgroup_realized = b;
3461
3462 continue;
3463
3464 } else if (streq(l, "cgroup-realized-mask")) {
3465
3466 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3467 if (r < 0)
3468 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3469 continue;
3470
3471 } else if (streq(l, "cgroup-enabled-mask")) {
3472
3473 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3474 if (r < 0)
3475 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3476 continue;
3477
3478 } else if (streq(l, "cgroup-bpf-realized")) {
3479 int i;
3480
3481 r = safe_atoi(v, &i);
3482 if (r < 0)
3483 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3484 else
3485 u->cgroup_bpf_state =
3486 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3487 i > 0 ? UNIT_CGROUP_BPF_ON :
3488 UNIT_CGROUP_BPF_OFF;
3489
3490 continue;
3491
3492 } else if (streq(l, "ref-uid")) {
3493 uid_t uid;
3494
3495 r = parse_uid(v, &uid);
3496 if (r < 0)
3497 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3498 else
3499 unit_ref_uid_gid(u, uid, GID_INVALID);
3500
3501 continue;
3502
3503 } else if (streq(l, "ref-gid")) {
3504 gid_t gid;
3505
3506 r = parse_gid(v, &gid);
3507 if (r < 0)
3508 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3509 else
3510 unit_ref_uid_gid(u, UID_INVALID, gid);
3511
3512 } else if (streq(l, "ref")) {
3513
3514 r = strv_extend(&u->deserialized_refs, v);
3515 if (r < 0)
3516 log_oom();
3517
3518 continue;
3519 } else if (streq(l, "invocation-id")) {
3520 sd_id128_t id;
3521
3522 r = sd_id128_from_string(v, &id);
3523 if (r < 0)
3524 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3525 else {
3526 r = unit_set_invocation_id(u, id);
3527 if (r < 0)
3528 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3529 }
3530
3531 continue;
3532 }
3533
3534 /* Check if this is an IP accounting metric serialization field */
3535 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3536 if (streq(l, ip_accounting_metric_field[m]))
3537 break;
3538 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3539 uint64_t c;
3540
3541 r = safe_atou64(v, &c);
3542 if (r < 0)
3543 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3544 else
3545 u->ip_accounting_extra[m] = c;
3546 continue;
3547 }
3548
3549 if (unit_can_serialize(u)) {
3550 if (rt) {
3551 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3552 if (r < 0) {
3553 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3554 continue;
3555 }
3556
3557 /* Returns positive if key was handled by the call */
3558 if (r > 0)
3559 continue;
3560 }
3561
3562 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3563 if (r < 0)
3564 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3565 }
3566 }
3567
3568 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3569 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3570 * before 228 where the base for timeouts was not persistent across reboots. */
3571
3572 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3573 dual_timestamp_get(&u->state_change_timestamp);
3574
3575 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3576 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3577 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3578 unit_invalidate_cgroup_bpf(u);
3579
3580 return 0;
3581 }
3582
3583 void unit_deserialize_skip(FILE *f) {
3584 assert(f);
3585
3586 /* Skip serialized data for this unit. We don't know what it is. */
3587
3588 for (;;) {
3589 char line[LINE_MAX], *l;
3590
3591 if (!fgets(line, sizeof line, f))
3592 return;
3593
3594 char_array_0(line);
3595 l = strstrip(line);
3596
3597 /* End marker */
3598 if (isempty(l))
3599 return;
3600 }
3601 }
3602
3603
/* Adds dependency links between unit 'u' and the .device unit backing the device node 'what'.
 * 'dep' is the requirement dependency to add in system mode (downgraded to Wants= in user mode);
 * if 'wants' is true the device additionally pulls in this unit via a reverse Wants=.
 * Returns 0 on success or when there is nothing to do, negative errno on failure. */
int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
        Unit *device;
        _cleanup_free_ char *e = NULL;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a
         * container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        /* This loads (or looks up) the device unit the dependency shall point to. */
        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        /* Upgrade Requires= to BindsTo= if the device asks to strictly bind its consumers to it. */
        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        /* Always order after the device; the requirement dependency only applies in system mode. */
        r = unit_add_two_dependencies(u, UNIT_AFTER,
                                      MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                      device, true, mask);
        if (r < 0)
                return r;

        if (wants) {
                r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
                if (r < 0)
                        return r;
        }

        return 0;
}
3648
/* Brings a deserialized unit back to life after a daemon reload/re-exec: re-establishes D-Bus name
 * tracking, runs the type-specific coldplug() hook, and coldplugs any pending job. Remembers the
 * first error encountered but keeps going, so as much state as possible is restored. */
int unit_coldplug(Unit *u) {
        int r = 0, q;
        char **i;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging
         * recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-register the bus names recorded during deserialization, then drop the temporary list. */
        STRV_FOREACH(i, u->deserialized_refs) {
                q = bus_unit_track_add_name(u, *i);
                if (q < 0 && r >= 0)
                        r = q;
        }
        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug) {
                q = UNIT_VTABLE(u)->coldplug(u);
                if (q < 0 && r >= 0)
                        r = q;
        }

        if (u->job) {
                q = job_coldplug(u->job);
                if (q < 0 && r >= 0)
                        r = q;
        }

        return r;
}
3683
3684 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3685 struct stat st;
3686
3687 if (!path)
3688 return false;
3689
3690 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3691 * are never out-of-date. */
3692 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3693 return false;
3694
3695 if (stat(path, &st) < 0)
3696 /* What, cannot access this anymore? */
3697 return true;
3698
3699 if (path_masked)
3700 /* For masked files check if they are still so */
3701 return !null_or_empty(&st);
3702 else
3703 /* For non-empty files check the mtime */
3704 return timespec_load(&st.st_mtim) > mtime;
3705
3706 return false;
3707 }
3708
/* Returns true if the unit's on-disk configuration (fragment, source file, or drop-ins) changed
 * since it was loaded, i.e. whether a daemon-reload is needed for this unit to be up-to-date. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;
        char **path;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the current set of drop-in files with the loaded one; for units that aren't loaded
         * t stays NULL, which must then also match. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3736
/* Clears the unit's failure state: invokes the type-specific reset hook (if any) and resets the
 * start rate limiter so the unit may be started again. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        /* Also unwedge units that hit their start rate limit. */
        RATELIMIT_RESET(u->start_limit);
        u->start_limit_hit = false;
}
3746
3747 Unit *unit_following(Unit *u) {
3748 assert(u);
3749
3750 if (UNIT_VTABLE(u)->following)
3751 return UNIT_VTABLE(u)->following(u);
3752
3753 return NULL;
3754 }
3755
3756 bool unit_stop_pending(Unit *u) {
3757 assert(u);
3758
3759 /* This call does check the current state of the unit. It's
3760 * hence useful to be called from state change calls of the
3761 * unit itself, where the state isn't updated yet. This is
3762 * different from unit_inactive_or_pending() which checks both
3763 * the current state and for a queued job. */
3764
3765 return u->job && u->job->type == JOB_STOP;
3766 }
3767
3768 bool unit_inactive_or_pending(Unit *u) {
3769 assert(u);
3770
3771 /* Returns true if the unit is inactive or going down */
3772
3773 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3774 return true;
3775
3776 if (unit_stop_pending(u))
3777 return true;
3778
3779 return false;
3780 }
3781
3782 bool unit_active_or_pending(Unit *u) {
3783 assert(u);
3784
3785 /* Returns true if the unit is active or going up */
3786
3787 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3788 return true;
3789
3790 if (u->job &&
3791 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3792 return true;
3793
3794 return false;
3795 }
3796
3797 bool unit_will_restart(Unit *u) {
3798 assert(u);
3799
3800 if (!UNIT_VTABLE(u)->will_restart)
3801 return false;
3802
3803 return UNIT_VTABLE(u)->will_restart(u);
3804 }
3805
3806 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3807 assert(u);
3808 assert(w >= 0 && w < _KILL_WHO_MAX);
3809 assert(SIGNAL_VALID(signo));
3810
3811 if (!UNIT_VTABLE(u)->kill)
3812 return -EOPNOTSUPP;
3813
3814 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3815 }
3816
3817 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3818 Set *pid_set;
3819 int r;
3820
3821 pid_set = set_new(NULL);
3822 if (!pid_set)
3823 return NULL;
3824
3825 /* Exclude the main/control pids from being killed via the cgroup */
3826 if (main_pid > 0) {
3827 r = set_put(pid_set, PID_TO_PTR(main_pid));
3828 if (r < 0)
3829 goto fail;
3830 }
3831
3832 if (control_pid > 0) {
3833 r = set_put(pid_set, PID_TO_PTR(control_pid));
3834 if (r < 0)
3835 goto fail;
3836 }
3837
3838 return pid_set;
3839
3840 fail:
3841 set_free(pid_set);
3842 return NULL;
3843 }
3844
3845 int unit_kill_common(
3846 Unit *u,
3847 KillWho who,
3848 int signo,
3849 pid_t main_pid,
3850 pid_t control_pid,
3851 sd_bus_error *error) {
3852
3853 int r = 0;
3854 bool killed = false;
3855
3856 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3857 if (main_pid < 0)
3858 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3859 else if (main_pid == 0)
3860 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3861 }
3862
3863 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3864 if (control_pid < 0)
3865 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3866 else if (control_pid == 0)
3867 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3868 }
3869
3870 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3871 if (control_pid > 0) {
3872 if (kill(control_pid, signo) < 0)
3873 r = -errno;
3874 else
3875 killed = true;
3876 }
3877
3878 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3879 if (main_pid > 0) {
3880 if (kill(main_pid, signo) < 0)
3881 r = -errno;
3882 else
3883 killed = true;
3884 }
3885
3886 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3887 _cleanup_set_free_ Set *pid_set = NULL;
3888 int q;
3889
3890 /* Exclude the main/control pids from being killed via the cgroup */
3891 pid_set = unit_pid_set(main_pid, control_pid);
3892 if (!pid_set)
3893 return -ENOMEM;
3894
3895 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3896 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3897 r = q;
3898 else
3899 killed = true;
3900 }
3901
3902 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3903 return -ESRCH;
3904
3905 return r;
3906 }
3907
3908 int unit_following_set(Unit *u, Set **s) {
3909 assert(u);
3910 assert(s);
3911
3912 if (UNIT_VTABLE(u)->following_set)
3913 return UNIT_VTABLE(u)->following_set(u, s);
3914
3915 *s = NULL;
3916 return 0;
3917 }
3918
/* Returns the enable/disable state of the unit file backing this unit. The state is queried lazily
 * on first call and cached in u->unit_file_state; units without a fragment keep their initial
 * (negative) value. */
UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_state < 0 && u->fragment_path) {
                r = unit_file_get_state(
                                u->manager->unit_file_scope,
                                NULL,
                                basename(u->fragment_path),
                                &u->unit_file_state);
                if (r < 0)
                        /* Cache the failure too so we don't re-query on every call. */
                        u->unit_file_state = UNIT_FILE_BAD;
        }

        return u->unit_file_state;
}
3936
/* Returns the preset verdict (enable/disable policy) for the unit file backing this unit, queried
 * lazily on first call and cached in u->unit_file_preset. Negative values mean "not yet determined"
 * or an error from the query. */
int unit_get_unit_file_preset(Unit *u) {
        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path)
                u->unit_file_preset = unit_file_query_preset(
                                u->manager->unit_file_scope,
                                NULL,
                                basename(u->fragment_path));

        return u->unit_file_preset;
}
3948
/* Points 'ref' at unit 'u', dropping any unit it referenced before, and links the ref into the
 * unit's intrusive list of references (which keeps the unit from being garbage-collected).
 * Returns 'u' for convenience. */
Unit* unit_ref_set(UnitRef *ref, Unit *u) {
        assert(ref);
        assert(u);

        if (ref->unit)
                unit_ref_unset(ref);

        ref->unit = u;
        LIST_PREPEND(refs, u->refs, ref);
        return u;
}
3960
/* Drops the reference held by 'ref' (no-op if it holds none), unlinking it from the referenced
 * unit's list and queueing that unit for garbage collection. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->unit)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->unit);

        LIST_REMOVE(refs, ref->unit->refs, ref);
        ref->unit = NULL;
}
3974
/* Derives a DynamicUser= user name from the unit name: the unit's prefix when it is a valid
 * user/group name, otherwise "_du" followed by a siphash of the prefix. On success returns 0 and
 * stores a newly allocated name in *ret (caller frees). */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed hash key, so that the fallback name for a given unit is stable across runs. */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n)) {
                /* Transfer ownership to the caller; clearing n prevents the cleanup from freeing it. */
                *ret = n;
                n = NULL;
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
4001
/* Patches manager-wide defaults (rlimits, user-mode working directory) and the implications of
 * certain settings (PrivateDevices=, ProtectKernelModules=, DynamicUser=) into the unit's exec and
 * cgroup contexts. Must run after the unit's own configuration has been fully loaded.
 * Returns 0 on success, negative errno on allocation or lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities to create device nodes or do raw I/O. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Derive user/group names from the unit name when not configured explicitly. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* PrivateDevices= also implies tightening the cgroup device policy (unless overridden). */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4077
/* Returns a pointer to the unit's embedded ExecContext, or NULL if the unit type has none
 * (i.e. its vtable declares no exec_context_offset). */
ExecContext *unit_get_exec_context(Unit *u) {
        size_t offset;
        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->exec_context_offset;
        if (offset <= 0)
                return NULL;

        /* The context lives at a type-specific offset inside the unit structure. */
        return (ExecContext*) ((uint8_t*) u + offset);
}
4091
/* Returns a pointer to the unit's embedded KillContext, or NULL if the unit type has none
 * (i.e. its vtable declares no kill_context_offset). */
KillContext *unit_get_kill_context(Unit *u) {
        size_t offset;
        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->kill_context_offset;
        if (offset <= 0)
                return NULL;

        /* The context lives at a type-specific offset inside the unit structure. */
        return (KillContext*) ((uint8_t*) u + offset);
}
4105
4106 CGroupContext *unit_get_cgroup_context(Unit *u) {
4107 size_t offset;
4108
4109 if (u->type < 0)
4110 return NULL;
4111
4112 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4113 if (offset <= 0)
4114 return NULL;
4115
4116 return (CGroupContext*) ((uint8_t*) u + offset);
4117 }
4118
4119 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4120 size_t offset;
4121
4122 if (u->type < 0)
4123 return NULL;
4124
4125 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4126 if (offset <= 0)
4127 return NULL;
4128
4129 return *(ExecRuntime**) ((uint8_t*) u + offset);
4130 }
4131
/* Returns the directory that drop-ins for this unit shall be written to, based on 'flags'
 * (persistent vs. runtime), or NULL when nothing shall be written. Transient units always win and
 * get their drop-ins redirected to the transient directory. */
static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
        assert(u);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return NULL;

        if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
                return u->manager->lookup_paths.transient;

        if (flags & UNIT_PERSISTENT)
                return u->manager->lookup_paths.persistent_control;

        if (flags & UNIT_RUNTIME)
                return u->manager->lookup_paths.runtime_control;

        return NULL;
}
4149
/* Escapes setting value 's' (specifier and/or C escaping per 'flags'). See the comment below for
 * the ownership contract around 'buf'. Returns NULL on allocation failure. */
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* cescape() copies from s, hence the intermediate specifier-escaped buffer (if any)
                 * can be released afterwards. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        return ret ?: strdup(s);
}
4189
/* Escapes each string in 'l' (per 'flags'), wraps it in double quotes and joins them with single
 * spaces. Returns a newly allocated string (empty for an empty list), or NULL on failure. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i, *ret;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure the result is NUL-terminated even when the list was empty. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        /* Transfer ownership out of the cleanup variable. */
        ret = result;
        result = NULL;

        return ret;
}
4233
/* Writes a single setting for unit 'u': while a transient unit file is being created the data is
 * appended to it, otherwise it is written to a 50-<name>.conf drop-in in the directory selected by
 * 'flags'. The appropriate section header ([Unit] or the type's private section) is prefixed as
 * needed. Returns 0 on success, negative errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* On success the strv takes ownership of q, so disarm the cleanup. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        /* Record the write time so the drop-in we just created isn't flagged as changed on disk. */
        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4310
/* printf()-style convenience wrapper around unit_write_setting(). Returns 0 on success, -ENOMEM on
 * formatting failure, or whatever unit_write_setting() returns. */
int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        /* vasprintf() returns a negative value on failure, leaving p undefined */
        if (r < 0)
                return -ENOMEM;

        return unit_write_setting(u, flags, name, p);
}
4332
/* Switches a unit into "transient" mode: opens a fresh unit file in the transient lookup path for
 * subsequent unit_write_setting() calls to append to, and discards any previously loaded
 * configuration. Returns 0 on success, -EOPNOTSUPP for unit types that can't be transient,
 * negative errno on I/O or allocation failure. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget any previously loaded configuration sources. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4378
/* cg_kill_log_func_t callback: logs at notice level that 'pid' is about to be killed with 'sig'.
 * 'userdata' is the Unit the process belongs to. */
static void log_kill(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
           only, like for example systemd's own PAM stub process. */
        if (comm && comm[0] == '(')
                return;

        log_unit_notice(userdata,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid,
                        strna(comm),
                        signal_to_string(sig));
}
4395
4396 static int operation_to_signal(KillContext *c, KillOperation k) {
4397 assert(c);
4398
4399 switch (k) {
4400
4401 case KILL_TERMINATE:
4402 case KILL_TERMINATE_AND_LOG:
4403 return c->kill_signal;
4404
4405 case KILL_KILL:
4406 return SIGKILL;
4407
4408 case KILL_ABORT:
4409 return SIGABRT;
4410
4411 default:
4412 assert_not_reached("KillOperation unknown");
4413 }
4414 }
4415
/* Kills the processes of a unit according to kill context 'c' and operation 'k' (terminate, final
 * kill, or abort): signals the main and control processes, and — for KillMode=control-group, or
 * =mixed on the final kill — the rest of the cgroup, optionally following up with SIGHUP.
 * 'main_pid_alien' marks a main PID we did not fork ourselves (so it can't be waited for).
 * Returns > 0 if we killed something worth waiting for, 0 otherwise, negative errno on failure. */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* Follow up with SIGHUP only on regular termination, and only if the main signal isn't SIGHUP already. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed PID, except for the quiet first-try SIGTERM case. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we
                         * will not wait for the cgroup members to die
                         * if we are running in a container or if this
                         * is a delegation unit, simply because cgroup
                         * notification is unreliable in these
                         * cases. It doesn't work at all in
                         * containers, and outside of containers it
                         * can be confused easily by left-over
                         * directories in the cgroup — which however
                         * should not exist in non-delegated units. On
                         * the unified hierarchy that's different,
                         * there we get proper events. Hence rely on
                         * them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !UNIT_CGROUP_BOOL(u, delegate)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set and deliver SIGHUP to the rest of the cgroup. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4540
/* Registers 'path' (and all its prefixes) as required mounts for unit 'u'. Returns 0 on success or
 * if the path is already registered, -EINVAL for relative paths, -EPERM for non-normalized paths,
 * -ENOMEM/other negative errno on failure. */
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        char prefix[strlen(path) + 1], *p;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &string_hash_ops);
        if (r < 0)
                return r;

        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path_kill_slashes(p);

        if (!path_is_normalized(p)) {
                free(p);
                return -EPERM;
        }

        if (hashmap_contains(u->requires_mounts_for, p)) {
                free(p);
                return 0;
        }

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of p. */
        r = hashmap_put(u->requires_mounts_for, p, di.data);
        if (r < 0) {
                free(p);
                return r;
        }

        /* Enter the path and each of its ancestor directories into the manager-global prefix table. */
        PATH_FOREACH_PREFIX_MORE(prefix, p) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        char *q;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x) {
                                free(q);
                                return -ENOMEM;
                        }

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                free(q);
                                set_free(x);
                                return r;
                        }
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4623
4624 int unit_setup_exec_runtime(Unit *u) {
4625 ExecRuntime **rt;
4626 size_t offset;
4627 Unit *other;
4628 Iterator i;
4629 void *v;
4630
4631 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4632 assert(offset > 0);
4633
4634 /* Check if there already is an ExecRuntime for this unit? */
4635 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4636 if (*rt)
4637 return 0;
4638
4639 /* Try to get it from somebody else */
4640 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4641
4642 *rt = unit_get_exec_runtime(other);
4643 if (*rt) {
4644 exec_runtime_ref(*rt);
4645 return 0;
4646 }
4647 }
4648
4649 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
4650 }
4651
4652 int unit_setup_dynamic_creds(Unit *u) {
4653 ExecContext *ec;
4654 DynamicCreds *dcreds;
4655 size_t offset;
4656
4657 assert(u);
4658
4659 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4660 assert(offset > 0);
4661 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4662
4663 ec = unit_get_exec_context(u);
4664 assert(ec);
4665
4666 if (!ec->dynamic_user)
4667 return 0;
4668
4669 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4670 }
4671
4672 bool unit_type_supported(UnitType t) {
4673 if (_unlikely_(t < 0))
4674 return false;
4675 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4676 return false;
4677
4678 if (!unit_vtable[t]->supported)
4679 return true;
4680
4681 return unit_vtable[t]->supported();
4682 }
4683
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        /* Logs a structured notice if the directory we are about to mount over already contains files,
         * since mounting will hide that content. Best-effort: check failures only produce a warning. */

        r = dir_is_empty(where);
        if (r > 0)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        /* r == 0: directory exists and is not empty. The trailing NULL terminates the vararg list. */
        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where,
                   NULL);
}
4706
int unit_fail_if_symlink(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        /* Returns -ELOOP (after logging) if 'where' is a symlink, 0 otherwise. Errors while checking are
         * deliberately ignored (treated as "not a symlink"), as this is a best-effort safety check. */

        r = is_symlink(where);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
                return 0;
        }
        if (r == 0)
                return 0;

        /* The trailing NULL terminates the log_struct() vararg list. */
        log_struct(LOG_ERR,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
                   "WHERE=%s", where,
                   NULL);

        return -ELOOP;
}
4731
4732 bool unit_is_pristine(Unit *u) {
4733 assert(u);
4734
4735 /* Check if the unit already exists or is already around,
4736 * in a number of different ways. Note that to cater for unit
4737 * types such as slice, we are generally fine with units that
4738 * are marked UNIT_LOADED even though nothing was
4739 * actually loaded, as those unit types don't require a file
4740 * on disk to validly load. */
4741
4742 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4743 u->fragment_path ||
4744 u->source_path ||
4745 !strv_isempty(u->dropin_paths) ||
4746 u->job ||
4747 u->merged_into);
4748 }
4749
4750 pid_t unit_control_pid(Unit *u) {
4751 assert(u);
4752
4753 if (UNIT_VTABLE(u)->control_pid)
4754 return UNIT_VTABLE(u)->control_pid(u);
4755
4756 return 0;
4757 }
4758
4759 pid_t unit_main_pid(Unit *u) {
4760 assert(u);
4761
4762 if (UNIT_VTABLE(u)->main_pid)
4763 return UNIT_VTABLE(u)->main_pid(u);
4764
4765 return 0;
4766 }
4767
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference held: nothing to drop. */
        if (!uid_is_valid(*ref_uid))
                return;

        /* Release the manager-level reference, then mark ours as gone. */
        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4792
void unit_unref_uid(Unit *u, bool destroy_now) {
        /* Drops the unit's UID reference; see unit_unref_uid_internal() for the semantics of destroy_now. */
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4796
void unit_unref_gid(Unit *u, bool destroy_now) {
        /* Drops the unit's GID reference. The cast is safe: uid_t and gid_t are asserted to have identical
         * size and invalid-value encoding in unit_unref_uid_internal(). */
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4800
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Already referencing this very UID: idempotent success. */
        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        /* Record the reference; > 0 tells callers a new reference was actually taken. */
        *ref_uid = uid;
        return 1;
}
4838
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        /* Takes a reference on 'uid' for this unit. Returns 1 if a new reference was taken, 0 if it was
         * already held, -EBUSY if a different UID is referenced, or another negative errno. */
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4842
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        /* GID variant of unit_ref_uid(). The casts rely on uid_t and gid_t having identical size and
         * validity rules, which unit_ref_uid_internal() asserts at compile time. */
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4846
4847 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4848 int r = 0, q = 0;
4849
4850 assert(u);
4851
4852 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4853
4854 if (uid_is_valid(uid)) {
4855 r = unit_ref_uid(u, uid, clean_ipc);
4856 if (r < 0)
4857 return r;
4858 }
4859
4860 if (gid_is_valid(gid)) {
4861 q = unit_ref_gid(u, gid, clean_ipc);
4862 if (q < 0) {
4863 if (r > 0)
4864 unit_unref_uid(u, false);
4865
4866 return q;
4867 }
4868 }
4869
4870 return r > 0 || q > 0;
4871 }
4872
4873 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4874 ExecContext *c;
4875 int r;
4876
4877 assert(u);
4878
4879 c = unit_get_exec_context(u);
4880
4881 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4882 if (r < 0)
4883 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4884
4885 return r;
4886 }
4887
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        /* Drops both the UID and the GID reference of the unit, if any are held. */
        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4894
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)  /* Only signal a change when a reference was actually (newly) taken. */
                bus_unit_send_change_signal(u);
}
4908
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old registration from the manager's lookup table, if any. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID simply clears the current one. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Register under the new ID; note the hashmap key aliases the field we just updated. */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or when clearing) the unit ends up with no invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4945
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        /* Generates a fresh random invocation ID and installs it on the unit. Returns 0 on success,
         * negative errno (after logging) on failure. */

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
4962
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copies unit-derived settings into the execution parameters: the cgroup to run in, and whether
         * cgroup delegation is enabled for this unit. */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, UNIT_CGROUP_BOOL(u, delegate));
}
4970
int unit_fork_helper_process(Unit *u, pid_t *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup exists before we try to move the child into it. */
        (void) unit_realize_cgroup(u);

        pid = fork();
        if (pid < 0)
                return -errno;

        if (pid == 0) {
                /* Child: restore default signal handling inherited from PID 1, and re-open the log streams
                 * so the helper logs independently of the manager. */

                (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
                (void) ignore_signals(SIGPIPE, -1);

                log_close();
                log_open();

                if (u->cgroup_path) {
                        r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                        if (r < 0) {
                                log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                                _exit(EXIT_CGROUP);
                        }
                }

                *ret = getpid_cached();
                return 0;
        }

        /* Parent */
        *ret = pid;
        return 1;
}
5010
5011 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5012 assert(u);
5013 assert(d >= 0);
5014 assert(d < _UNIT_DEPENDENCY_MAX);
5015 assert(other);
5016
5017 if (di.origin_mask == 0 && di.destination_mask == 0) {
5018 /* No bit set anymore, let's drop the whole entry */
5019 assert_se(hashmap_remove(u->dependencies[d], other));
5020 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5021 } else
5022 /* Mask was reduced, let's update the entry */
5023 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5024 }
5025
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are iterating, which
                 * would invalidate the iterator. Hence: whenever we modify anything, break out and restart
                 * the iteration from scratch, until a full pass makes no changes. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries that carry none of the bits we are removing. */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become unreferenced; let the GC decide. */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5079
5080 static int unit_export_invocation_id(Unit *u) {
5081 const char *p;
5082 int r;
5083
5084 assert(u);
5085
5086 if (u->exported_invocation_id)
5087 return 0;
5088
5089 if (sd_id128_is_null(u->invocation_id))
5090 return 0;
5091
5092 p = strjoina("/run/systemd/units/invocation:", u->id);
5093 r = symlink_atomic(u->invocation_id_string, p);
5094 if (r < 0)
5095 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5096
5097 u->exported_invocation_id = true;
5098 return 0;
5099 }
5100
5101 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5102 const char *p;
5103 char buf[2];
5104 int r;
5105
5106 assert(u);
5107 assert(c);
5108
5109 if (u->exported_log_level_max)
5110 return 0;
5111
5112 if (c->log_level_max < 0)
5113 return 0;
5114
5115 assert(c->log_level_max <= 7);
5116
5117 buf[0] = '0' + c->log_level_max;
5118 buf[1] = 0;
5119
5120 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5121 r = symlink_atomic(buf, p);
5122 if (r < 0)
5123 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5124
5125 u->exported_log_level_max = true;
5126 return 0;
5127 }
5128
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        size_t i;
        int r;

        /* Serializes LogExtraFields= into /run/systemd/units/log-extra-fields:<unit> for journald, as a
         * sequence of (little-endian 64-bit length, payload) records. Written to a temp file first and
         * renamed into place, so readers never see a partial file. Done at most once per unit. */

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Two iovecs per field: one for the length prefix, one for the field data itself. */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        /* mkostemp creates the file 0600; make it world-readable for journald. */
        (void) fchmod(fd, 0644);

        /* Atomically replace any previous version. */
        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        /* Clean up the temp file; the final path was never touched on failure. */
        (void) unlink(pattern);
        return r;
}
5182
5183 void unit_export_state_files(Unit *u) {
5184 const ExecContext *c;
5185
5186 assert(u);
5187
5188 if (!u->id)
5189 return;
5190
5191 if (!MANAGER_IS_SYSTEM(u->manager))
5192 return;
5193
5194 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5195 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5196 * the IPC system itself and PID 1 also log to the journal.
5197 *
5198 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5199 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5200 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5201 * namespace at least.
5202 *
5203 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5204 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5205 * them with one. */
5206
5207 (void) unit_export_invocation_id(u);
5208
5209 c = unit_get_exec_context(u);
5210 if (c) {
5211 (void) unit_export_log_level_max(u, c);
5212 (void) unit_export_log_extra_fields(u, c);
5213 }
5214 }
5215
5216 void unit_unlink_state_files(Unit *u) {
5217 const char *p;
5218
5219 assert(u);
5220
5221 if (!u->id)
5222 return;
5223
5224 if (!MANAGER_IS_SYSTEM(u->manager))
5225 return;
5226
5227 /* Undoes the effect of unit_export_state() */
5228
5229 if (u->exported_invocation_id) {
5230 p = strjoina("/run/systemd/units/invocation:", u->id);
5231 (void) unlink(p);
5232
5233 u->exported_invocation_id = false;
5234 }
5235
5236 if (u->exported_log_level_max) {
5237 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5238 (void) unlink(p);
5239
5240 u->exported_log_level_max = false;
5241 }
5242
5243 if (u->exported_log_extra_fields) {
5244 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5245 (void) unlink(p);
5246
5247 u->exported_log_extra_fields = false;
5248 }
5249 }
5250
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit: realizes the cgroup,
         * resets accounting if requested, exports state files for journald, and sets up the exec runtime
         * and dynamic credentials. Returns 0 on success, negative errno on failure. */

        (void) unit_realize_cgroup(u);

        /* One-shot accounting reset, armed elsewhere (e.g. on restart). */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5278
/* Callback for cg_kill_recursive() (invoked with sig == 0, i.e. no actual killing): warns about each
 * process still found in the unit's cgroup when the unit is about to start. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5292
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        /* Make sure u->cgroup_path is populated before we inspect it. */
        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        /* sig == 0: kills nothing, merely walks the cgroup and calls log_leftover() for each PID found. */
        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5303
/* Human-readable names for CollectMode (GarbageCollect= setting), indexed by enum value; the lookup
 * functions are generated by the macro below. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);