1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/prctl.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "alloc-util.h"
32 #include "bus-common-errors.h"
33 #include "bus-util.h"
34 #include "cgroup-util.h"
35 #include "dbus-unit.h"
36 #include "dbus.h"
37 #include "dropin.h"
38 #include "escape.h"
39 #include "execute.h"
40 #include "fd-util.h"
41 #include "fileio-label.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "id128-util.h"
45 #include "io-util.h"
46 #include "load-dropin.h"
47 #include "load-fragment.h"
48 #include "log.h"
49 #include "macro.h"
50 #include "missing.h"
51 #include "mkdir.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "process-util.h"
55 #include "set.h"
56 #include "signal-util.h"
57 #include "sparse-endian.h"
58 #include "special.h"
59 #include "specifier.h"
60 #include "stat-util.h"
61 #include "stdio-util.h"
62 #include "string-table.h"
63 #include "string-util.h"
64 #include "strv.h"
65 #include "umask-util.h"
66 #include "unit-name.h"
67 #include "unit.h"
68 #include "user-util.h"
69 #include "virt.h"
70
71 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
72 [UNIT_SERVICE] = &service_vtable,
73 [UNIT_SOCKET] = &socket_vtable,
74 [UNIT_TARGET] = &target_vtable,
75 [UNIT_DEVICE] = &device_vtable,
76 [UNIT_MOUNT] = &mount_vtable,
77 [UNIT_AUTOMOUNT] = &automount_vtable,
78 [UNIT_SWAP] = &swap_vtable,
79 [UNIT_TIMER] = &timer_vtable,
80 [UNIT_PATH] = &path_vtable,
81 [UNIT_SLICE] = &slice_vtable,
82 [UNIT_SCOPE] = &scope_vtable,
83 };
84
85 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
86
87 Unit *unit_new(Manager *m, size_t size) {
88 Unit *u;
89
90 assert(m);
91 assert(size >= sizeof(Unit));
92
93 u = malloc0(size);
94 if (!u)
95 return NULL;
96
97 u->names = set_new(&string_hash_ops);
98 if (!u->names)
99 return mfree(u);
100
101 u->manager = m;
102 u->type = _UNIT_TYPE_INVALID;
103 u->default_dependencies = true;
104 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
105 u->unit_file_preset = -1;
106 u->on_failure_job_mode = JOB_REPLACE;
107 u->cgroup_inotify_wd = -1;
108 u->job_timeout = USEC_INFINITY;
109 u->job_running_timeout = USEC_INFINITY;
110 u->ref_uid = UID_INVALID;
111 u->ref_gid = GID_INVALID;
112 u->cpu_usage_last = NSEC_INFINITY;
113 u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
114
115 u->ip_accounting_ingress_map_fd = -1;
116 u->ip_accounting_egress_map_fd = -1;
117 u->ipv4_allow_map_fd = -1;
118 u->ipv6_allow_map_fd = -1;
119 u->ipv4_deny_map_fd = -1;
120 u->ipv6_deny_map_fd = -1;
121
122 u->last_section_private = -1;
123
124 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
125 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
126
127 return u;
128 }
129
130 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
131 Unit *u;
132 int r;
133
134 u = unit_new(m, size);
135 if (!u)
136 return -ENOMEM;
137
138 r = unit_add_name(u, name);
139 if (r < 0) {
140 unit_free(u);
141 return r;
142 }
143
144 *ret = u;
145 return r;
146 }
147
148 bool unit_has_name(Unit *u, const char *name) {
149 assert(u);
150 assert(name);
151
152 return set_contains(u->names, (char*) name);
153 }
154
155 static void unit_init(Unit *u) {
156 CGroupContext *cc;
157 ExecContext *ec;
158 KillContext *kc;
159
160 assert(u);
161 assert(u->manager);
162 assert(u->type >= 0);
163
164 cc = unit_get_cgroup_context(u);
165 if (cc) {
166 cgroup_context_init(cc);
167
168 /* Copy in the manager defaults into the cgroup
169 * context, _before_ the rest of the settings have
170 * been initialized */
171
172 cc->cpu_accounting = u->manager->default_cpu_accounting;
173 cc->io_accounting = u->manager->default_io_accounting;
174 cc->ip_accounting = u->manager->default_ip_accounting;
175 cc->blockio_accounting = u->manager->default_blockio_accounting;
176 cc->memory_accounting = u->manager->default_memory_accounting;
177 cc->tasks_accounting = u->manager->default_tasks_accounting;
179
180 if (u->type != UNIT_SLICE)
181 cc->tasks_max = u->manager->default_tasks_max;
182 }
183
184 ec = unit_get_exec_context(u);
185 if (ec) {
186 exec_context_init(ec);
187
188 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
189 EXEC_KEYRING_PRIVATE : EXEC_KEYRING_INHERIT;
190 }
191
192 kc = unit_get_kill_context(u);
193 if (kc)
194 kill_context_init(kc);
195
196 if (UNIT_VTABLE(u)->init)
197 UNIT_VTABLE(u)->init(u);
198 }
199
200 int unit_add_name(Unit *u, const char *text) {
201 _cleanup_free_ char *s = NULL, *i = NULL;
202 UnitType t;
203 int r;
204
205 assert(u);
206 assert(text);
207
208 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
209
210 if (!u->instance)
211 return -EINVAL;
212
213 r = unit_name_replace_instance(text, u->instance, &s);
214 if (r < 0)
215 return r;
216 } else {
217 s = strdup(text);
218 if (!s)
219 return -ENOMEM;
220 }
221
222 if (set_contains(u->names, s))
223 return 0;
224 if (hashmap_contains(u->manager->units, s))
225 return -EEXIST;
226
227 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
228 return -EINVAL;
229
230 t = unit_name_to_type(s);
231 if (t < 0)
232 return -EINVAL;
233
234 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
235 return -EINVAL;
236
237 r = unit_name_to_instance(s, &i);
238 if (r < 0)
239 return r;
240
241 if (i && !unit_type_may_template(t))
242 return -EINVAL;
243
244 /* Ensure that this unit is either instanced or not instanced,
245 * but not both. Note that we do allow names with different
246 * instance names however! */
247 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
248 return -EINVAL;
249
250 if (!unit_type_may_alias(t) && !set_isempty(u->names))
251 return -EEXIST;
252
253 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
254 return -E2BIG;
255
256 r = set_put(u->names, s);
257 if (r < 0)
258 return r;
259 assert(r > 0);
260
261 r = hashmap_put(u->manager->units, s, u);
262 if (r < 0) {
263 (void) set_remove(u->names, s);
264 return r;
265 }
266
267 if (u->type == _UNIT_TYPE_INVALID) {
268 u->type = t;
269 u->id = s;
270 u->instance = i;
271
272 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
273
274 unit_init(u);
275
276 i = NULL;
277 }
278
279 s = NULL;
280
281 unit_add_to_dbus_queue(u);
282 return 0;
283 }
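
/* A minimal sketch of how the name bookkeeping above plays out for a
 * hypothetical instanced service; all names are made up for illustration:
 *
 *     unit_add_name(u, "foo@eth0.service");  // first name: sets u->type, u->id and
 *                                            // u->instance ("eth0"), runs unit_init()
 *     unit_add_name(u, "bar@.service");      // template: the instance is filled in,
 *                                            // adds the alias "bar@eth0.service"
 *     unit_add_name(u, "bar.service");       // uninstanced name on an instanced
 *                                            // unit -> -EINVAL
 *
 * Every accepted name is registered in u->manager->units, so lookups by any
 * alias find the same Unit object. */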
284
285 int unit_choose_id(Unit *u, const char *name) {
286 _cleanup_free_ char *t = NULL;
287 char *s, *i;
288 int r;
289
290 assert(u);
291 assert(name);
292
293 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
294
295 if (!u->instance)
296 return -EINVAL;
297
298 r = unit_name_replace_instance(name, u->instance, &t);
299 if (r < 0)
300 return r;
301
302 name = t;
303 }
304
305 /* Selects one of the names of this unit as the id */
306 s = set_get(u->names, (char*) name);
307 if (!s)
308 return -ENOENT;
309
310 /* Determine the new instance from the new id */
311 r = unit_name_to_instance(s, &i);
312 if (r < 0)
313 return r;
314
315 u->id = s;
316
317 free(u->instance);
318 u->instance = i;
319
320 unit_add_to_dbus_queue(u);
321
322 return 0;
323 }
324
325 int unit_set_description(Unit *u, const char *description) {
326 int r;
327
328 assert(u);
329
330 r = free_and_strdup(&u->description, empty_to_null(description));
331 if (r < 0)
332 return r;
333 if (r > 0)
334 unit_add_to_dbus_queue(u);
335
336 return 0;
337 }
338
339 bool unit_check_gc(Unit *u) {
340 UnitActiveState state;
341 int r;
342
343 assert(u);
344
345 /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the unit shall
346 * stay around, false if there's no reason to keep it loaded. */
347
348 if (u->job)
349 return true;
350
351 if (u->nop_job)
352 return true;
353
354 state = unit_active_state(u);
355
356 /* If the unit is inactive or failed and no job is queued for it, then release its runtime resources */
357 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
358 UNIT_VTABLE(u)->release_resources)
359 UNIT_VTABLE(u)->release_resources(u);
360
361 if (u->perpetual)
362 return true;
363
364 if (u->refs)
365 return true;
366
367 if (sd_bus_track_count(u->bus_track) > 0)
368 return true;
369
370 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
371 switch (u->collect_mode) {
372
373 case COLLECT_INACTIVE:
374 if (state != UNIT_INACTIVE)
375 return true;
376
377 break;
378
379 case COLLECT_INACTIVE_OR_FAILED:
380 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
381 return true;
382
383 break;
384
385 default:
386 assert_not_reached("Unknown garbage collection mode");
387 }
388
389 if (u->cgroup_path) {
390 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
391 * around. Units with active processes should never be collected. */
392
393 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
394 if (r < 0)
395 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
396 if (r <= 0)
397 return true;
398 }
399
400 if (UNIT_VTABLE(u)->check_gc)
401 if (UNIT_VTABLE(u)->check_gc(u))
402 return true;
403
404 return false;
405 }
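
/* A rough sketch of how CollectMode= interacts with the check above, for a
 * hypothetical transient unit:
 *
 *     collect_mode == COLLECT_INACTIVE:           once the unit fails it keeps
 *         returning true here, i.e. it stays loaded so the failed state remains
 *         visible until it is reset explicitly.
 *
 *     collect_mode == COLLECT_INACTIVE_OR_FAILED: as soon as the unit is inactive
 *         or failed (and has no job, no refs, no processes in its cgroup, ...),
 *         this returns false and the GC queue may unload it. */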
406
407 void unit_add_to_load_queue(Unit *u) {
408 assert(u);
409 assert(u->type != _UNIT_TYPE_INVALID);
410
411 if (u->load_state != UNIT_STUB || u->in_load_queue)
412 return;
413
414 LIST_PREPEND(load_queue, u->manager->load_queue, u);
415 u->in_load_queue = true;
416 }
417
418 void unit_add_to_cleanup_queue(Unit *u) {
419 assert(u);
420
421 if (u->in_cleanup_queue)
422 return;
423
424 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
425 u->in_cleanup_queue = true;
426 }
427
428 void unit_add_to_gc_queue(Unit *u) {
429 assert(u);
430
431 if (u->in_gc_queue || u->in_cleanup_queue)
432 return;
433
434 if (unit_check_gc(u))
435 return;
436
437 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
438 u->in_gc_queue = true;
439 }
440
441 void unit_add_to_dbus_queue(Unit *u) {
442 assert(u);
443 assert(u->type != _UNIT_TYPE_INVALID);
444
445 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
446 return;
447
448 /* Shortcut things if nobody cares */
449 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
450 sd_bus_track_count(u->bus_track) <= 0 &&
451 set_isempty(u->manager->private_buses)) {
452 u->sent_dbus_new_signal = true;
453 return;
454 }
455
456 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
457 u->in_dbus_queue = true;
458 }
459
460 static void bidi_set_free(Unit *u, Hashmap *h) {
461 Unit *other;
462 Iterator i;
463 void *v;
464
465 assert(u);
466
467 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
468
469 HASHMAP_FOREACH_KEY(v, other, h, i) {
470 UnitDependency d;
471
472 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
473 hashmap_remove(other->dependencies[d], u);
474
475 unit_add_to_gc_queue(other);
476 }
477
478 hashmap_free(h);
479 }
480
481 static void unit_remove_transient(Unit *u) {
482 char **i;
483
484 assert(u);
485
486 if (!u->transient)
487 return;
488
489 if (u->fragment_path)
490 (void) unlink(u->fragment_path);
491
492 STRV_FOREACH(i, u->dropin_paths) {
493 _cleanup_free_ char *p = NULL, *pp = NULL;
494
495 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
496 if (!p)
497 continue;
498
499 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
500 if (!pp)
501 continue;
502
503 /* Only drop transient drop-ins */
504 if (!path_equal(u->manager->lookup_paths.transient, pp))
505 continue;
506
507 (void) unlink(*i);
508 (void) rmdir(p);
509 }
510 }
511
512 static void unit_free_requires_mounts_for(Unit *u) {
513 assert(u);
514
515 for (;;) {
516 _cleanup_free_ char *path;
517
518 path = hashmap_steal_first_key(u->requires_mounts_for);
519 if (!path)
520 break;
521 else {
522 char s[strlen(path) + 1];
523
524 PATH_FOREACH_PREFIX_MORE(s, path) {
525 char *y;
526 Set *x;
527
528 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
529 if (!x)
530 continue;
531
532 (void) set_remove(x, u);
533
534 if (set_isempty(x)) {
535 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
536 free(y);
537 set_free(x);
538 }
539 }
540 }
541 }
542
543 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
544 }
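
/* The manager keeps a reverse map for RequiresMountsFor=: every path prefix is a
 * key in m->units_requiring_mounts_for pointing to the Set of units interested in
 * it. A sketch of what unit_require_mounts_for() records for one made-up path
 * (the exact handling of the root prefix is up to PATH_FOREACH_PREFIX_MORE() in
 * path-util.h):
 *
 *     RequiresMountsFor=/var/lib/machines
 *         "/var/lib/machines" -> { u }
 *         "/var/lib"          -> { u }
 *         "/var"              -> { u }
 *
 * The function above walks the same prefixes again when the unit goes away,
 * removing u from each set and freeing sets (and their keys) that become empty. */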
545
546 static void unit_done(Unit *u) {
547 ExecContext *ec;
548 CGroupContext *cc;
549
550 assert(u);
551
552 if (u->type < 0)
553 return;
554
555 if (UNIT_VTABLE(u)->done)
556 UNIT_VTABLE(u)->done(u);
557
558 ec = unit_get_exec_context(u);
559 if (ec)
560 exec_context_done(ec);
561
562 cc = unit_get_cgroup_context(u);
563 if (cc)
564 cgroup_context_done(cc);
565 }
566
567 void unit_free(Unit *u) {
568 UnitDependency d;
569 Iterator i;
570 char *t;
571
572 if (!u)
573 return;
574
575 u->transient_file = safe_fclose(u->transient_file);
576
577 if (!MANAGER_IS_RELOADING(u->manager))
578 unit_remove_transient(u);
579
580 bus_unit_send_removed_signal(u);
581
582 unit_done(u);
583
584 sd_bus_slot_unref(u->match_bus_slot);
585
586 sd_bus_track_unref(u->bus_track);
587 u->deserialized_refs = strv_free(u->deserialized_refs);
588
589 unit_free_requires_mounts_for(u);
590
591 SET_FOREACH(t, u->names, i)
592 hashmap_remove_value(u->manager->units, t, u);
593
594 if (!sd_id128_is_null(u->invocation_id))
595 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
596
597 if (u->job) {
598 Job *j = u->job;
599 job_uninstall(j);
600 job_free(j);
601 }
602
603 if (u->nop_job) {
604 Job *j = u->nop_job;
605 job_uninstall(j);
606 job_free(j);
607 }
608
609 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
610 bidi_set_free(u, u->dependencies[d]);
611
612 if (u->type != _UNIT_TYPE_INVALID)
613 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
614
615 if (u->in_load_queue)
616 LIST_REMOVE(load_queue, u->manager->load_queue, u);
617
618 if (u->in_dbus_queue)
619 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
620
621 if (u->in_cleanup_queue)
622 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
623
624 if (u->in_gc_queue)
625 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
626
627 if (u->in_cgroup_realize_queue)
628 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
629
630 if (u->in_cgroup_empty_queue)
631 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
632
633 if (u->on_console)
634 manager_unref_console(u->manager);
635
636 unit_release_cgroup(u);
637
638 if (!MANAGER_IS_RELOADING(u->manager))
639 unit_unlink_state_files(u);
640
641 unit_unref_uid_gid(u, false);
642
643 (void) manager_update_failed_units(u->manager, u, false);
644 set_remove(u->manager->startup_units, u);
645
646 free(u->description);
647 strv_free(u->documentation);
648 free(u->fragment_path);
649 free(u->source_path);
650 strv_free(u->dropin_paths);
651 free(u->instance);
652
653 free(u->job_timeout_reboot_arg);
654
655 set_free_free(u->names);
656
657 unit_unwatch_all_pids(u);
658
659 condition_free_list(u->conditions);
660 condition_free_list(u->asserts);
661
662 free(u->reboot_arg);
663
664 unit_ref_unset(&u->slice);
665
666 while (u->refs)
667 unit_ref_unset(u->refs);
668
669 safe_close(u->ip_accounting_ingress_map_fd);
670 safe_close(u->ip_accounting_egress_map_fd);
671
672 safe_close(u->ipv4_allow_map_fd);
673 safe_close(u->ipv6_allow_map_fd);
674 safe_close(u->ipv4_deny_map_fd);
675 safe_close(u->ipv6_deny_map_fd);
676
677 bpf_program_unref(u->ip_bpf_ingress);
678 bpf_program_unref(u->ip_bpf_egress);
679
680 free(u);
681 }
682
683 UnitActiveState unit_active_state(Unit *u) {
684 assert(u);
685
686 if (u->load_state == UNIT_MERGED)
687 return unit_active_state(unit_follow_merge(u));
688
689 /* After a reload it might happen that a unit is not correctly
690 * loaded but still has a process around. That's why we won't
691 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
692
693 return UNIT_VTABLE(u)->active_state(u);
694 }
695
696 const char* unit_sub_state_to_string(Unit *u) {
697 assert(u);
698
699 return UNIT_VTABLE(u)->sub_state_to_string(u);
700 }
701
702 static int set_complete_move(Set **s, Set **other) {
703 assert(s);
704 assert(other);
705
706 if (!*other)
707 return 0;
708
709 if (*s)
710 return set_move(*s, *other);
711 else {
712 *s = *other;
713 *other = NULL;
714 }
715
716 return 0;
717 }
718
719 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
720 assert(s);
721 assert(other);
722
723 if (!*other)
724 return 0;
725
726 if (*s)
727 return hashmap_move(*s, *other);
728 else {
729 *s = *other;
730 *other = NULL;
731 }
732
733 return 0;
734 }
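
/* Both helpers implement "move or steal" semantics, which unit_merge() below
 * relies on. A small sketch, assuming two hashmaps a and b:
 *
 *     Hashmap *a = NULL, *b = ...;         // b has entries, a doesn't exist yet
 *     hashmap_complete_move(&a, &b);       // a was NULL: b is stolen wholesale,
 *                                          // afterwards a == old b and b == NULL
 *
 * If a had already been allocated, the entries of b would instead be moved into
 * it one by one via hashmap_move(), which may fail with -ENOMEM unless enough
 * space was reserved beforehand (see reserve_dependencies() below). */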
735
736 static int merge_names(Unit *u, Unit *other) {
737 char *t;
738 Iterator i;
739 int r;
740
741 assert(u);
742 assert(other);
743
744 r = set_complete_move(&u->names, &other->names);
745 if (r < 0)
746 return r;
747
748 set_free_free(other->names);
749 other->names = NULL;
750 other->id = NULL;
751
752 SET_FOREACH(t, u->names, i)
753 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
754
755 return 0;
756 }
757
758 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
759 unsigned n_reserve;
760
761 assert(u);
762 assert(other);
763 assert(d < _UNIT_DEPENDENCY_MAX);
764
765 /*
766 * If u does not have this dependency set allocated, there is no need
767 * to reserve anything. In that case other's set will be transferred
768 * as a whole to u by complete_move().
769 */
770 if (!u->dependencies[d])
771 return 0;
772
773 /* merge_dependencies() will skip a u-on-u dependency */
774 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
775
776 return hashmap_reserve(u->dependencies[d], n_reserve);
777 }
778
779 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
780 Iterator i;
781 Unit *back;
782 void *v;
783 int r;
784
785 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
786
787 assert(u);
788 assert(other);
789 assert(d < _UNIT_DEPENDENCY_MAX);
790
791 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
792 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
793 UnitDependency k;
794
795 /* Let's now iterate through the dependencies of those dependent units, looking for pointers
796 * back to 'other', and fix them up to point to 'u' instead. */
797
798 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
799 if (back == u) {
800 /* Do not add dependencies between u and itself. */
801 if (hashmap_remove(back->dependencies[k], other))
802 maybe_warn_about_dependency(u, other_id, k);
803 } else {
804 UnitDependencyInfo di_u, di_other, di_merged;
805
806 /* Let's drop this dependency between "back" and "other", and let's create it between
807 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
808 * and any such dependency which might already exist */
809
810 di_other.data = hashmap_get(back->dependencies[k], other);
811 if (!di_other.data)
812 continue; /* dependency isn't set, let's try the next one */
813
814 di_u.data = hashmap_get(back->dependencies[k], u);
815
816 di_merged = (UnitDependencyInfo) {
817 .origin_mask = di_u.origin_mask | di_other.origin_mask,
818 .destination_mask = di_u.destination_mask | di_other.destination_mask,
819 };
820
821 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
822 if (r < 0)
823 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
824 assert(r >= 0);
825
826 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
827 }
828 }
829
830 }
831
832 /* Also do not move dependencies on u to itself */
833 back = hashmap_remove(other->dependencies[d], u);
834 if (back)
835 maybe_warn_about_dependency(u, other_id, d);
836
837 /* The move cannot fail. The caller must have performed a reservation. */
838 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
839
840 other->dependencies[d] = hashmap_free(other->dependencies[d]);
841 }
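
/* The dependency hashmaps store a UnitDependencyInfo as their value: a union
 * (defined in unit.h) that overlays a void* with the origin/destination bit
 * masks, so no per-edge allocation is needed. A minimal sketch of the encoding
 * manipulated above:
 *
 *     UnitDependencyInfo di = {
 *             .origin_mask = UNIT_DEPENDENCY_FILE | UNIT_DEPENDENCY_UDEV,
 *     };
 *     hashmap_put(u->dependencies[UNIT_WANTS], other, di.data);
 *     ...
 *     di.data = hashmap_get(u->dependencies[UNIT_WANTS], other);
 *     // di.origin_mask now tells us where this Wants= edge came from
 */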
842
843 int unit_merge(Unit *u, Unit *other) {
844 UnitDependency d;
845 const char *other_id = NULL;
846 int r;
847
848 assert(u);
849 assert(other);
850 assert(u->manager == other->manager);
851 assert(u->type != _UNIT_TYPE_INVALID);
852
853 other = unit_follow_merge(other);
854
855 if (other == u)
856 return 0;
857
858 if (u->type != other->type)
859 return -EINVAL;
860
861 if (!u->instance != !other->instance)
862 return -EINVAL;
863
864 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
865 return -EEXIST;
866
867 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
868 return -EEXIST;
869
870 if (other->job)
871 return -EEXIST;
872
873 if (other->nop_job)
874 return -EEXIST;
875
876 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
877 return -EEXIST;
878
879 if (other->id)
880 other_id = strdupa(other->id);
881
882 /* Make reservations to ensure merge_dependencies() won't fail */
883 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
884 r = reserve_dependencies(u, other, d);
885 /*
886 * We don't roll back reservations if we fail. We don't have
887 * a way to undo reservations. A reservation is not a leak.
888 */
889 if (r < 0)
890 return r;
891 }
892
893 /* Merge names */
894 r = merge_names(u, other);
895 if (r < 0)
896 return r;
897
898 /* Redirect all references */
899 while (other->refs)
900 unit_ref_set(other->refs, u);
901
902 /* Merge dependencies */
903 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
904 merge_dependencies(u, other, other_id, d);
905
906 other->load_state = UNIT_MERGED;
907 other->merged_into = u;
908
909 /* If there is still some data attached to the other node, we
910 * don't need it anymore, and can free it. */
911 if (other->load_state != UNIT_STUB)
912 if (UNIT_VTABLE(other)->done)
913 UNIT_VTABLE(other)->done(other);
914
915 unit_add_to_dbus_queue(u);
916 unit_add_to_cleanup_queue(other);
917
918 return 0;
919 }
920
921 int unit_merge_by_name(Unit *u, const char *name) {
922 _cleanup_free_ char *s = NULL;
923 Unit *other;
924 int r;
925
926 assert(u);
927 assert(name);
928
929 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
930 if (!u->instance)
931 return -EINVAL;
932
933 r = unit_name_replace_instance(name, u->instance, &s);
934 if (r < 0)
935 return r;
936
937 name = s;
938 }
939
940 other = manager_get_unit(u->manager, name);
941 if (other)
942 return unit_merge(u, other);
943
944 return unit_add_name(u, name);
945 }
946
947 Unit* unit_follow_merge(Unit *u) {
948 assert(u);
949
950 while (u->load_state == UNIT_MERGED)
951 assert_se(u = u->merged_into);
952
953 return u;
954 }
955
956 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
957 ExecDirectoryType dt;
958 char **dp;
959 int r;
960
961 assert(u);
962 assert(c);
963
964 if (c->working_directory) {
965 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
966 if (r < 0)
967 return r;
968 }
969
970 if (c->root_directory) {
971 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
972 if (r < 0)
973 return r;
974 }
975
976 if (c->root_image) {
977 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
978 if (r < 0)
979 return r;
980 }
981
982 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
983 if (!u->manager->prefix[dt])
984 continue;
985
986 STRV_FOREACH(dp, c->directories[dt].paths) {
987 _cleanup_free_ char *p;
988
989 p = strjoin(u->manager->prefix[dt], "/", *dp);
990 if (!p)
991 return -ENOMEM;
992
993 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
994 if (r < 0)
995 return r;
996 }
997 }
998
999 if (!MANAGER_IS_SYSTEM(u->manager))
1000 return 0;
1001
1002 if (c->private_tmp) {
1003 const char *p;
1004
1005 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1006 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1007 if (r < 0)
1008 return r;
1009 }
1010
1011 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
1012 if (r < 0)
1013 return r;
1014 }
1015
1016 if (!IN_SET(c->std_output,
1017 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1018 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1019 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1020 !IN_SET(c->std_error,
1021 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1022 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1023 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1024 return 0;
1025
1026 /* If syslog or kernel logging is requested, make sure our own
1027 * logging daemon is run first. */
1028
1029 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
1030 if (r < 0)
1031 return r;
1032
1033 return 0;
1034 }
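
/* For a rough idea of what the function above produces, consider a made-up
 * system service with this illustrative fragment:
 *
 *     [Service]
 *     WorkingDirectory=/srv/www
 *     PrivateTmp=yes
 *     StandardOutput=journal
 *
 * It gains RequiresMountsFor=-style dependencies for /srv/www, /tmp and
 * /var/tmp, is ordered After=systemd-tmpfiles-setup.service (because of
 * PrivateTmp=), and After=systemd-journald.socket (because its output goes to
 * the journal). */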
1035
1036 const char *unit_description(Unit *u) {
1037 assert(u);
1038
1039 if (u->description)
1040 return u->description;
1041
1042 return strna(u->id);
1043 }
1044
1045 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1046 const struct {
1047 UnitDependencyMask mask;
1048 const char *name;
1049 } table[] = {
1050 { UNIT_DEPENDENCY_FILE, "file" },
1051 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1052 { UNIT_DEPENDENCY_DEFAULT, "default" },
1053 { UNIT_DEPENDENCY_UDEV, "udev" },
1054 { UNIT_DEPENDENCY_PATH, "path" },
1055 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1056 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1057 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1058 };
1059 size_t i;
1060
1061 assert(f);
1062 assert(kind);
1063 assert(space);
1064
1065 for (i = 0; i < ELEMENTSOF(table); i++) {
1066
1067 if (mask == 0)
1068 break;
1069
1070 if ((mask & table[i].mask) == table[i].mask) {
1071 if (*space)
1072 fputc(' ', f);
1073 else
1074 *space = true;
1075
1076 fputs(kind, f);
1077 fputs("-", f);
1078 fputs(table[i].name, f);
1079
1080 mask &= ~table[i].mask;
1081 }
1082 }
1083
1084 assert(mask == 0);
1085 }
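
/* In a unit dump this renders the masks of a dependency edge as a list of
 * "<kind>-<source>" words. For example, a Wants= edge that stems both from a
 * unit file and from udev metadata would show up roughly as:
 *
 *     Wants: foo.service (origin-file origin-udev)
 *
 * ("foo.service" is just a placeholder here.) */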
1086
1087 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1088 char *t, **j;
1089 UnitDependency d;
1090 Iterator i;
1091 const char *prefix2;
1092 char
1093 timestamp0[FORMAT_TIMESTAMP_MAX],
1094 timestamp1[FORMAT_TIMESTAMP_MAX],
1095 timestamp2[FORMAT_TIMESTAMP_MAX],
1096 timestamp3[FORMAT_TIMESTAMP_MAX],
1097 timestamp4[FORMAT_TIMESTAMP_MAX],
1098 timespan[FORMAT_TIMESPAN_MAX];
1099 Unit *following;
1100 _cleanup_set_free_ Set *following_set = NULL;
1101 const char *n;
1102 CGroupMask m;
1103 int r;
1104
1105 assert(u);
1106 assert(u->type >= 0);
1107
1108 prefix = strempty(prefix);
1109 prefix2 = strjoina(prefix, "\t");
1110
1111 fprintf(f,
1112 "%s-> Unit %s:\n"
1113 "%s\tDescription: %s\n"
1114 "%s\tInstance: %s\n"
1115 "%s\tUnit Load State: %s\n"
1116 "%s\tUnit Active State: %s\n"
1117 "%s\tState Change Timestamp: %s\n"
1118 "%s\tInactive Exit Timestamp: %s\n"
1119 "%s\tActive Enter Timestamp: %s\n"
1120 "%s\tActive Exit Timestamp: %s\n"
1121 "%s\tInactive Enter Timestamp: %s\n"
1122 "%s\tGC Check Good: %s\n"
1123 "%s\tNeed Daemon Reload: %s\n"
1124 "%s\tTransient: %s\n"
1125 "%s\tPerpetual: %s\n"
1126 "%s\tGarbage Collection Mode: %s\n"
1127 "%s\tSlice: %s\n"
1128 "%s\tCGroup: %s\n"
1129 "%s\tCGroup realized: %s\n",
1130 prefix, u->id,
1131 prefix, unit_description(u),
1132 prefix, strna(u->instance),
1133 prefix, unit_load_state_to_string(u->load_state),
1134 prefix, unit_active_state_to_string(unit_active_state(u)),
1135 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1136 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1137 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1138 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1139 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1140 prefix, yes_no(unit_check_gc(u)),
1141 prefix, yes_no(unit_need_daemon_reload(u)),
1142 prefix, yes_no(u->transient),
1143 prefix, yes_no(u->perpetual),
1144 prefix, collect_mode_to_string(u->collect_mode),
1145 prefix, strna(unit_slice_name(u)),
1146 prefix, strna(u->cgroup_path),
1147 prefix, yes_no(u->cgroup_realized));
1148
1149 if (u->cgroup_realized_mask != 0) {
1150 _cleanup_free_ char *s = NULL;
1151 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1152 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1153 }
1154 if (u->cgroup_enabled_mask != 0) {
1155 _cleanup_free_ char *s = NULL;
1156 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1157 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1158 }
1159 m = unit_get_own_mask(u);
1160 if (m != 0) {
1161 _cleanup_free_ char *s = NULL;
1162 (void) cg_mask_to_string(m, &s);
1163 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1164 }
1165 m = unit_get_members_mask(u);
1166 if (m != 0) {
1167 _cleanup_free_ char *s = NULL;
1168 (void) cg_mask_to_string(m, &s);
1169 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1170 }
1171
1172 SET_FOREACH(t, u->names, i)
1173 fprintf(f, "%s\tName: %s\n", prefix, t);
1174
1175 if (!sd_id128_is_null(u->invocation_id))
1176 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1177 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1178
1179 STRV_FOREACH(j, u->documentation)
1180 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1181
1182 following = unit_following(u);
1183 if (following)
1184 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1185
1186 r = unit_following_set(u, &following_set);
1187 if (r >= 0) {
1188 Unit *other;
1189
1190 SET_FOREACH(other, following_set, i)
1191 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1192 }
1193
1194 if (u->fragment_path)
1195 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1196
1197 if (u->source_path)
1198 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1199
1200 STRV_FOREACH(j, u->dropin_paths)
1201 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1202
1203 if (u->failure_action != EMERGENCY_ACTION_NONE)
1204 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1205 if (u->success_action != EMERGENCY_ACTION_NONE)
1206 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1207
1208 if (u->job_timeout != USEC_INFINITY)
1209 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1210
1211 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1212 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1213
1214 if (u->job_timeout_reboot_arg)
1215 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1216
1217 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1218 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1219
1220 if (dual_timestamp_is_set(&u->condition_timestamp))
1221 fprintf(f,
1222 "%s\tCondition Timestamp: %s\n"
1223 "%s\tCondition Result: %s\n",
1224 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1225 prefix, yes_no(u->condition_result));
1226
1227 if (dual_timestamp_is_set(&u->assert_timestamp))
1228 fprintf(f,
1229 "%s\tAssert Timestamp: %s\n"
1230 "%s\tAssert Result: %s\n",
1231 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1232 prefix, yes_no(u->assert_result));
1233
1234 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1235 UnitDependencyInfo di;
1236 Unit *other;
1237
1238 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1239 bool space = false;
1240
1241 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1242
1243 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1244 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1245
1246 fputs(")\n", f);
1247 }
1248 }
1249
1250 if (!hashmap_isempty(u->requires_mounts_for)) {
1251 UnitDependencyInfo di;
1252 const char *path;
1253
1254 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1255 bool space = false;
1256
1257 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1258
1259 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1260 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1261
1262 fputs(")\n", f);
1263 }
1264 }
1265
1266 if (u->load_state == UNIT_LOADED) {
1267
1268 fprintf(f,
1269 "%s\tStopWhenUnneeded: %s\n"
1270 "%s\tRefuseManualStart: %s\n"
1271 "%s\tRefuseManualStop: %s\n"
1272 "%s\tDefaultDependencies: %s\n"
1273 "%s\tOnFailureJobMode: %s\n"
1274 "%s\tIgnoreOnIsolate: %s\n",
1275 prefix, yes_no(u->stop_when_unneeded),
1276 prefix, yes_no(u->refuse_manual_start),
1277 prefix, yes_no(u->refuse_manual_stop),
1278 prefix, yes_no(u->default_dependencies),
1279 prefix, job_mode_to_string(u->on_failure_job_mode),
1280 prefix, yes_no(u->ignore_on_isolate));
1281
1282 if (UNIT_VTABLE(u)->dump)
1283 UNIT_VTABLE(u)->dump(u, f, prefix2);
1284
1285 } else if (u->load_state == UNIT_MERGED)
1286 fprintf(f,
1287 "%s\tMerged into: %s\n",
1288 prefix, u->merged_into->id);
1289 else if (u->load_state == UNIT_ERROR)
1290 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1291
1292 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1293 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1294
1295 if (u->job)
1296 job_dump(u->job, f, prefix2);
1297
1298 if (u->nop_job)
1299 job_dump(u->nop_job, f, prefix2);
1300 }
1301
1302 /* Common implementation for multiple backends */
1303 int unit_load_fragment_and_dropin(Unit *u) {
1304 int r;
1305
1306 assert(u);
1307
1308 /* Load a .{service,socket,...} file */
1309 r = unit_load_fragment(u);
1310 if (r < 0)
1311 return r;
1312
1313 if (u->load_state == UNIT_STUB)
1314 return -ENOENT;
1315
1316 /* Load drop-in directory data. If u is an alias, we might be reloading the
1317 * target unit needlessly. But we cannot be sure which drop-ins have already
1318 * been loaded and which not, at least without doing complicated book-keeping,
1319 * so let's always reread all drop-ins. */
1320 return unit_load_dropin(unit_follow_merge(u));
1321 }
1322
1323 /* Common implementation for multiple backends */
1324 int unit_load_fragment_and_dropin_optional(Unit *u) {
1325 int r;
1326
1327 assert(u);
1328
1329 /* Same as unit_load_fragment_and_dropin(), but whether
1330 * something can be loaded or not doesn't matter. */
1331
1332 /* Load a .service file */
1333 r = unit_load_fragment(u);
1334 if (r < 0)
1335 return r;
1336
1337 if (u->load_state == UNIT_STUB)
1338 u->load_state = UNIT_LOADED;
1339
1340 /* Load drop-in directory data */
1341 return unit_load_dropin(unit_follow_merge(u));
1342 }
1343
1344 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1345 assert(u);
1346 assert(target);
1347
1348 if (target->type != UNIT_TARGET)
1349 return 0;
1350
1351 /* Only add the dependency if both units are loaded, so that
1352 * the loop check below is reliable */
1353 if (u->load_state != UNIT_LOADED ||
1354 target->load_state != UNIT_LOADED)
1355 return 0;
1356
1357 /* If either side wants no automatic dependencies, then let's
1358 * skip this */
1359 if (!u->default_dependencies ||
1360 !target->default_dependencies)
1361 return 0;
1362
1363 /* Don't create loops */
1364 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1365 return 0;
1366
1367 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1368 }
1369
1370 static int unit_add_target_dependencies(Unit *u) {
1371
1372 static const UnitDependency deps[] = {
1373 UNIT_REQUIRED_BY,
1374 UNIT_REQUISITE_OF,
1375 UNIT_WANTED_BY,
1376 UNIT_BOUND_BY
1377 };
1378
1379 unsigned k;
1380 int r = 0;
1381
1382 assert(u);
1383
1384 for (k = 0; k < ELEMENTSOF(deps); k++) {
1385 Unit *target;
1386 Iterator i;
1387 void *v;
1388
1389 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1390 r = unit_add_default_target_dependency(u, target);
1391 if (r < 0)
1392 return r;
1393 }
1394 }
1395
1396 return r;
1397 }
1398
1399 static int unit_add_slice_dependencies(Unit *u) {
1400 UnitDependencyMask mask;
1401 assert(u);
1402
1403 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1404 return 0;
1405
1406 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1407 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1408 relationship). */
1409 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1410
1411 if (UNIT_ISSET(u->slice))
1412 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1413
1414 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1415 return 0;
1416
1417 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1418 }
1419
1420 static int unit_add_mount_dependencies(Unit *u) {
1421 UnitDependencyInfo di;
1422 const char *path;
1423 Iterator i;
1424 int r;
1425
1426 assert(u);
1427
1428 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1429 char prefix[strlen(path) + 1];
1430
1431 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1432 _cleanup_free_ char *p = NULL;
1433 Unit *m;
1434
1435 r = unit_name_from_path(prefix, ".mount", &p);
1436 if (r < 0)
1437 return r;
1438
1439 m = manager_get_unit(u->manager, p);
1440 if (!m) {
1441 /* Make sure to load the mount unit if
1442 * it exists. If so the dependencies
1443 * on this unit will be added later
1444 * during the loading of the mount
1445 * unit. */
1446 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1447 continue;
1448 }
1449 if (m == u)
1450 continue;
1451
1452 if (m->load_state != UNIT_LOADED)
1453 continue;
1454
1455 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1456 if (r < 0)
1457 return r;
1458
1459 if (m->fragment_path) {
1460 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1461 if (r < 0)
1462 return r;
1463 }
1464 }
1465 }
1466
1467 return 0;
1468 }
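
/* A quick sketch of the effect, with an invented path: a unit carrying
 * RequiresMountsFor=/var/lib/machines walks the prefixes of that path, maps each
 * one to a mount unit name via unit_name_from_path() ("var-lib-machines.mount",
 * "var-lib.mount", "var.mount", ...) and, for every such mount unit that is
 * already loaded, adds After= plus - if the mount has a fragment file - Requires=
 * dependencies carrying the original origin mask. Mount units that are not loaded
 * yet are only queued for loading here; they add the reverse dependencies
 * themselves while being loaded. */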
1469
1470 static int unit_add_startup_units(Unit *u) {
1471 CGroupContext *c;
1472 int r;
1473
1474 c = unit_get_cgroup_context(u);
1475 if (!c)
1476 return 0;
1477
1478 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1479 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1480 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1481 return 0;
1482
1483 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1484 if (r < 0)
1485 return r;
1486
1487 return set_put(u->manager->startup_units, u);
1488 }
1489
1490 int unit_load(Unit *u) {
1491 int r;
1492
1493 assert(u);
1494
1495 if (u->in_load_queue) {
1496 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1497 u->in_load_queue = false;
1498 }
1499
1500 if (u->type == _UNIT_TYPE_INVALID)
1501 return -EINVAL;
1502
1503 if (u->load_state != UNIT_STUB)
1504 return 0;
1505
1506 if (u->transient_file) {
1507 r = fflush_and_check(u->transient_file);
1508 if (r < 0)
1509 goto fail;
1510
1511 u->transient_file = safe_fclose(u->transient_file);
1512 u->fragment_mtime = now(CLOCK_REALTIME);
1513 }
1514
1515 if (UNIT_VTABLE(u)->load) {
1516 r = UNIT_VTABLE(u)->load(u);
1517 if (r < 0)
1518 goto fail;
1519 }
1520
1521 if (u->load_state == UNIT_STUB) {
1522 r = -ENOENT;
1523 goto fail;
1524 }
1525
1526 if (u->load_state == UNIT_LOADED) {
1527
1528 r = unit_add_target_dependencies(u);
1529 if (r < 0)
1530 goto fail;
1531
1532 r = unit_add_slice_dependencies(u);
1533 if (r < 0)
1534 goto fail;
1535
1536 r = unit_add_mount_dependencies(u);
1537 if (r < 0)
1538 goto fail;
1539
1540 r = unit_add_startup_units(u);
1541 if (r < 0)
1542 goto fail;
1543
1544 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1545 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1546 r = -EINVAL;
1547 goto fail;
1548 }
1549
1550 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1551 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1552
1553 unit_update_cgroup_members_masks(u);
1554 }
1555
1556 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1557
1558 unit_add_to_dbus_queue(unit_follow_merge(u));
1559 unit_add_to_gc_queue(u);
1560
1561 return 0;
1562
1563 fail:
1564 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1565 u->load_error = r;
1566 unit_add_to_dbus_queue(u);
1567 unit_add_to_gc_queue(u);
1568
1569 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1570
1571 return r;
1572 }
1573
1574 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1575 Condition *c;
1576 int triggered = -1;
1577
1578 assert(u);
1579 assert(to_string);
1580
1581 /* If the condition list is empty, then it is true */
1582 if (!first)
1583 return true;
1584
1585 /* Otherwise, if all of the non-trigger conditions apply and
1586 * if any of the trigger conditions apply (unless there are
1587 * none), we return true. */
1588 LIST_FOREACH(conditions, c, first) {
1589 int r;
1590
1591 r = condition_test(c);
1592 if (r < 0)
1593 log_unit_warning(u,
1594 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1595 to_string(c->type),
1596 c->trigger ? "|" : "",
1597 c->negate ? "!" : "",
1598 c->parameter);
1599 else
1600 log_unit_debug(u,
1601 "%s=%s%s%s %s.",
1602 to_string(c->type),
1603 c->trigger ? "|" : "",
1604 c->negate ? "!" : "",
1605 c->parameter,
1606 condition_result_to_string(c->result));
1607
1608 if (!c->trigger && r <= 0)
1609 return false;
1610
1611 if (c->trigger && triggered <= 0)
1612 triggered = r > 0;
1613 }
1614
1615 return triggered != 0;
1616 }
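
/* A worked example of the semantics implemented above, with made-up condition
 * lines: plain conditions are ANDed, triggering ("|") conditions are ORed, and
 * the two groups are then combined:
 *
 *     ConditionPathExists=/etc/foo.conf        <- must hold
 *     ConditionVirtualization=|container       <- at least one of the "|"
 *     ConditionVirtualization=|vm              <-   conditions must hold
 *
 * An empty list is trivially true; a condition whose test fails with an error is
 * logged and counted as not met. */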
1617
1618 static bool unit_condition_test(Unit *u) {
1619 assert(u);
1620
1621 dual_timestamp_get(&u->condition_timestamp);
1622 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1623
1624 return u->condition_result;
1625 }
1626
1627 static bool unit_assert_test(Unit *u) {
1628 assert(u);
1629
1630 dual_timestamp_get(&u->assert_timestamp);
1631 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1632
1633 return u->assert_result;
1634 }
1635
1636 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1637 DISABLE_WARNING_FORMAT_NONLITERAL;
1638 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1639 REENABLE_WARNING;
1640 }
1641
1642 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1643 const char *format;
1644 const UnitStatusMessageFormats *format_table;
1645
1646 assert(u);
1647 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1648
1649 if (t != JOB_RELOAD) {
1650 format_table = &UNIT_VTABLE(u)->status_message_formats;
1651 if (format_table) {
1652 format = format_table->starting_stopping[t == JOB_STOP];
1653 if (format)
1654 return format;
1655 }
1656 }
1657
1658 /* Return generic strings */
1659 if (t == JOB_START)
1660 return "Starting %s.";
1661 else if (t == JOB_STOP)
1662 return "Stopping %s.";
1663 else
1664 return "Reloading %s.";
1665 }
1666
1667 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1668 const char *format;
1669
1670 assert(u);
1671
1672 /* Reload status messages have traditionally not been printed to console. */
1673 if (!IN_SET(t, JOB_START, JOB_STOP))
1674 return;
1675
1676 format = unit_get_status_message_format(u, t);
1677
1678 DISABLE_WARNING_FORMAT_NONLITERAL;
1679 unit_status_printf(u, "", format);
1680 REENABLE_WARNING;
1681 }
1682
1683 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1684 const char *format, *mid;
1685 char buf[LINE_MAX];
1686
1687 assert(u);
1688
1689 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1690 return;
1691
1692 if (log_on_console())
1693 return;
1694
1695 /* We log status messages for all units and all operations. */
1696
1697 format = unit_get_status_message_format(u, t);
1698
1699 DISABLE_WARNING_FORMAT_NONLITERAL;
1700 xsprintf(buf, format, unit_description(u));
1701 REENABLE_WARNING;
1702
1703 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1704 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1705 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1706
1707 /* Note that we deliberately use LOG_MESSAGE() instead of
1708 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1709 * closely what is written to screen using the status output,
1710 * which is supposed to be the highest level, friendliest output
1711 * possible, which means we should avoid the low-level unit
1712 * name. */
1713 log_struct(LOG_INFO,
1714 LOG_MESSAGE("%s", buf),
1715 LOG_UNIT_ID(u),
1716 LOG_UNIT_INVOCATION_ID(u),
1717 mid,
1718 NULL);
1719 }
1720
1721 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1722 assert(u);
1723 assert(t >= 0);
1724 assert(t < _JOB_TYPE_MAX);
1725
1726 unit_status_log_starting_stopping_reloading(u, t);
1727 unit_status_print_starting_stopping(u, t);
1728 }
1729
1730 int unit_start_limit_test(Unit *u) {
1731 assert(u);
1732
1733 if (ratelimit_test(&u->start_limit)) {
1734 u->start_limit_hit = false;
1735 return 0;
1736 }
1737
1738 log_unit_warning(u, "Start request repeated too quickly.");
1739 u->start_limit_hit = true;
1740
1741 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1742 }
1743
1744 bool unit_shall_confirm_spawn(Unit *u) {
1745 assert(u);
1746
1747 if (manager_is_confirm_spawn_disabled(u->manager))
1748 return false;
1749
1750 /* For some reason, units remaining in the same process group
1751 * as PID 1 fail to acquire the console even if it's not used
1752 * by any process. So skip the confirmation question for them. */
1753 return !unit_get_exec_context(u)->same_pgrp;
1754 }
1755
1756 static bool unit_verify_deps(Unit *u) {
1757 Unit *other;
1758 Iterator j;
1759 void *v;
1760
1761 assert(u);
1762
1763 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1764 * After=. We do not check Requires= or Requisite= here as they should only have an effect on the job
1765 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1766 * conjunction with After= as for them any such check would make things entirely racy. */
1767
1768 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1769
1770 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1771 continue;
1772
1773 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1774 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1775 return false;
1776 }
1777 }
1778
1779 return true;
1780 }
1781
1782 /* Errors:
1783 * -EBADR: This unit type does not support starting.
1784 * -EALREADY: Unit is already started.
1785 * -EAGAIN: An operation is already in progress. Retry later.
1786 * -ECANCELED: Too many requests for now.
1787 * -EPROTO: Assert failed
1788 * -EINVAL: Unit not loaded
1789 * -EOPNOTSUPP: Unit type not supported
1790 * -ENOLINK: The necessary dependencies are not fulfilled.
1791 */
1792 int unit_start(Unit *u) {
1793 UnitActiveState state;
1794 Unit *following;
1795
1796 assert(u);
1797
1798 /* If this is already started, then this will succeed. Note
1799 * that this will even succeed if this unit is not startable
1800 * by the user. This is relied on to detect when we need to
1801 * wait for units and when waiting is finished. */
1802 state = unit_active_state(u);
1803 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1804 return -EALREADY;
1805
1806 /* Units that aren't loaded cannot be started */
1807 if (u->load_state != UNIT_LOADED)
1808 return -EINVAL;
1809
1810 /* If the conditions failed, don't do anything at all. If we
1811 * already are activating this call might still be useful to
1812 * speed up activation in case there is some hold-off time,
1813 * but we don't want to recheck the condition in that case. */
1814 if (state != UNIT_ACTIVATING &&
1815 !unit_condition_test(u)) {
1816 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1817 return -EALREADY;
1818 }
1819
1820 /* If the asserts failed, fail the entire job */
1821 if (state != UNIT_ACTIVATING &&
1822 !unit_assert_test(u)) {
1823 log_unit_notice(u, "Starting requested but asserts failed.");
1824 return -EPROTO;
1825 }
1826
1827 /* Units of types that aren't supported cannot be
1828 * started. Note that we do this test only after the condition
1829 * checks, so that we return condition check errors (which are
1830 * usually not considered a true failure) rather than "not
1831 * supported" errors (which are considered a failure).
1832 */
1833 if (!unit_supported(u))
1834 return -EOPNOTSUPP;
1835
1836 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1837 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1838 * effect anymore, due to a reload or due to a failed condition. */
1839 if (!unit_verify_deps(u))
1840 return -ENOLINK;
1841
1842 /* Forward to the main object, if we aren't it. */
1843 following = unit_following(u);
1844 if (following) {
1845 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1846 return unit_start(following);
1847 }
1848
1849 /* If it is stopped, but we cannot start it, then fail */
1850 if (!UNIT_VTABLE(u)->start)
1851 return -EBADR;
1852
1853 /* We don't suppress calls to ->start() here when we are
1854 * already starting, to allow this request to be used as a
1855 * "hurry up" call, for example when the unit is in some "auto
1856 * restart" state where it waits for a holdoff timer to elapse
1857 * before it will start again. */
1858
1859 unit_add_to_dbus_queue(u);
1860
1861 return UNIT_VTABLE(u)->start(u);
1862 }
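
/* A minimal sketch of how a caller might interpret the error contract documented
 * above unit_start(); the mapping shown is roughly what the job engine in job.c
 * does and is only illustrative here:
 *
 *     r = unit_start(u);
 *     if (r == -EALREADY)
 *             ... already active, the job can complete successfully ...
 *     else if (r == -EPROTO)
 *             ... an Assert*= failed, fail the job (JOB_ASSERT) ...
 *     else if (r == -ENOLINK)
 *             ... BindsTo=+After= dependencies not up (JOB_DEPENDENCY) ...
 *     else if (r == -EOPNOTSUPP)
 *             ... unit type not supported on this system (JOB_UNSUPPORTED) ...
 *     else if (r < 0)
 *             ... generic failure ...
 */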
1863
1864 bool unit_can_start(Unit *u) {
1865 assert(u);
1866
1867 if (u->load_state != UNIT_LOADED)
1868 return false;
1869
1870 if (!unit_supported(u))
1871 return false;
1872
1873 return !!UNIT_VTABLE(u)->start;
1874 }
1875
1876 bool unit_can_isolate(Unit *u) {
1877 assert(u);
1878
1879 return unit_can_start(u) &&
1880 u->allow_isolate;
1881 }
1882
1883 /* Errors:
1884 * -EBADR: This unit type does not support stopping.
1885 * -EALREADY: Unit is already stopped.
1886 * -EAGAIN: An operation is already in progress. Retry later.
1887 */
1888 int unit_stop(Unit *u) {
1889 UnitActiveState state;
1890 Unit *following;
1891
1892 assert(u);
1893
1894 state = unit_active_state(u);
1895 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1896 return -EALREADY;
1897
1898 following = unit_following(u);
1899 if (following) {
1900 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1901 return unit_stop(following);
1902 }
1903
1904 if (!UNIT_VTABLE(u)->stop)
1905 return -EBADR;
1906
1907 unit_add_to_dbus_queue(u);
1908
1909 return UNIT_VTABLE(u)->stop(u);
1910 }
1911
1912 bool unit_can_stop(Unit *u) {
1913 assert(u);
1914
1915 if (!unit_supported(u))
1916 return false;
1917
1918 if (u->perpetual)
1919 return false;
1920
1921 return !!UNIT_VTABLE(u)->stop;
1922 }
1923
1924 /* Errors:
1925 * -EBADR: This unit type does not support reloading.
1926 * -ENOEXEC: Unit is not started.
1927 * -EAGAIN: An operation is already in progress. Retry later.
1928 */
1929 int unit_reload(Unit *u) {
1930 UnitActiveState state;
1931 Unit *following;
1932
1933 assert(u);
1934
1935 if (u->load_state != UNIT_LOADED)
1936 return -EINVAL;
1937
1938 if (!unit_can_reload(u))
1939 return -EBADR;
1940
1941 state = unit_active_state(u);
1942 if (state == UNIT_RELOADING)
1943 return -EALREADY;
1944
1945 if (state != UNIT_ACTIVE) {
1946 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1947 return -ENOEXEC;
1948 }
1949
1950 following = unit_following(u);
1951 if (following) {
1952 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1953 return unit_reload(following);
1954 }
1955
1956 unit_add_to_dbus_queue(u);
1957
1958 if (!UNIT_VTABLE(u)->reload) {
1959 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1960 unit_notify(u, unit_active_state(u), unit_active_state(u), true);
1961 return 0;
1962 }
1963
1964 return UNIT_VTABLE(u)->reload(u);
1965 }
1966
1967 bool unit_can_reload(Unit *u) {
1968 assert(u);
1969
1970 if (UNIT_VTABLE(u)->can_reload)
1971 return UNIT_VTABLE(u)->can_reload(u);
1972
1973 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1974 return true;
1975
1976 return UNIT_VTABLE(u)->reload;
1977 }
1978
1979 static void unit_check_unneeded(Unit *u) {
1980
1981 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1982
1983 static const UnitDependency needed_dependencies[] = {
1984 UNIT_REQUIRED_BY,
1985 UNIT_REQUISITE_OF,
1986 UNIT_WANTED_BY,
1987 UNIT_BOUND_BY,
1988 };
1989
1990 unsigned j;
1991 int r;
1992
1993 assert(u);
1994
1995 /* If this service shall be shut down when unneeded then do
1996 * so. */
1997
1998 if (!u->stop_when_unneeded)
1999 return;
2000
2001 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
2002 return;
2003
2004 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
2005 Unit *other;
2006 Iterator i;
2007 void *v;
2008
2009 HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
2010 if (unit_active_or_pending(other) || unit_will_restart(other))
2011 return;
2012 }
2013
2014 /* If stopping a unit fails continuously we might enter a stop
2015 * loop here, hence rate-limit this and stop acting on the
2016 * unit being unnecessary after a while. */
2017 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
2018 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
2019 return;
2020 }
2021
2022 log_unit_info(u, "Unit not needed anymore. Stopping.");
2023
2024 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
2025 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2026 if (r < 0)
2027 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2028 }
2029
2030 static void unit_check_binds_to(Unit *u) {
2031 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2032 bool stop = false;
2033 Unit *other;
2034 Iterator i;
2035 void *v;
2036 int r;
2037
2038 assert(u);
2039
2040 if (u->job)
2041 return;
2042
2043 if (unit_active_state(u) != UNIT_ACTIVE)
2044 return;
2045
2046 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2047 if (other->job)
2048 continue;
2049
2050 if (!other->coldplugged)
2051 /* We might yet create a job for the other unit… */
2052 continue;
2053
2054 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2055 continue;
2056
2057 stop = true;
2058 break;
2059 }
2060
2061 if (!stop)
2062 return;
2063
2064 /* If stopping a unit fails continuously we might enter a stop
2065 * loop here, hence rate-limit this and stop acting on the
2066 * unit being bound to an inactive unit after a while. */
2067 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
2068 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2069 return;
2070 }
2071
2072 assert(other);
2073 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2074
2075 /* A unit we need to run is gone. Sniff. Let's stop this. */
2076 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2077 if (r < 0)
2078 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2079 }
2080
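/* A unit became active without a start job. Pull in its Requires=, BindsTo= and Wants= dependencies
 * (skipping those it is ordered After=), and stop any Conflicts=/ConflictedBy= units, much like a real
 * start job would have done. */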
2081 static void retroactively_start_dependencies(Unit *u) {
2082 Iterator i;
2083 Unit *other;
2084 void *v;
2085
2086 assert(u);
2087 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2088
2089 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2090 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2091 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2092 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2093
2094 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2095 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2096 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2097 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2098
2099 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2100 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2101 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2102 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2103
2104 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2106 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2107
2108 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2109 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2110 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2111 }
2112
2113 static void retroactively_stop_dependencies(Unit *u) {
2114 Unit *other;
2115 Iterator i;
2116 void *v;
2117
2118 assert(u);
2119 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2120
2121 /* Pull down units which are bound to us recursively if enabled */
2122 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2123 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2124 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2125 }
2126
2127 static void check_unneeded_dependencies(Unit *u) {
2128 Unit *other;
2129 Iterator i;
2130 void *v;
2131
2132 assert(u);
2133 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2134
2135 /* Garbage collect services that might not be needed anymore, if enabled */
2136 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2137 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2138 unit_check_unneeded(other);
2139 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2140 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2141 unit_check_unneeded(other);
2142 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2143 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2144 unit_check_unneeded(other);
2145 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2146 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2147 unit_check_unneeded(other);
2148 }
2149
2150 void unit_start_on_failure(Unit *u) {
2151 Unit *other;
2152 Iterator i;
2153 void *v;
2154
2155 assert(u);
2156
2157 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2158 return;
2159
2160 log_unit_info(u, "Triggering OnFailure= dependencies.");
2161
2162 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2163 int r;
2164
2165 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2166 if (r < 0)
2167 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2168 }
2169 }
2170
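/* Let all units that trigger us (TriggeredBy=) know about our state change, so that e.g. a .path or
 * .socket unit can react to the state of the service it activates. */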
2171 void unit_trigger_notify(Unit *u) {
2172 Unit *other;
2173 Iterator i;
2174 void *v;
2175
2176 assert(u);
2177
2178 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2179 if (UNIT_VTABLE(other)->trigger_notify)
2180 UNIT_VTABLE(other)->trigger_notify(other, u);
2181 }
2182
2183 static int unit_log_resources(Unit *u) {
2184
2185 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2186 size_t n_message_parts = 0, n_iovec = 0;
2187 char* message_parts[3 + 1], *t;
2188 nsec_t nsec = NSEC_INFINITY;
2189 CGroupIPAccountingMetric m;
2190 size_t i;
2191 int r;
2192 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2193 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2194 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2195 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2196 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2197 };
2198
2199 assert(u);
2200
2201 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2202 * accounting was enabled for the unit. It does this in two ways: as a friendly, human-readable string with reduced
2203 * information, and as the complete data in structured fields. */
2204
2205 (void) unit_get_cpu_usage(u, &nsec);
2206 if (nsec != NSEC_INFINITY) {
2207 char buf[FORMAT_TIMESPAN_MAX] = "";
2208
2209 /* Format the CPU time for inclusion in the structured log message */
2210 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2211 r = log_oom();
2212 goto finish;
2213 }
2214 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2215
2216 /* Format the CPU time for inclusion in the human language message string */
2217 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2218 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2219 if (!t) {
2220 r = log_oom();
2221 goto finish;
2222 }
2223
2224 message_parts[n_message_parts++] = t;
2225 }
2226
2227 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2228 char buf[FORMAT_BYTES_MAX] = "";
2229 uint64_t value = UINT64_MAX;
2230
2231 assert(ip_fields[m]);
2232
2233 (void) unit_get_ip_accounting(u, m, &value);
2234 if (value == UINT64_MAX)
2235 continue;
2236
2237 /* Format IP accounting data for inclusion in the structured log message */
2238 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2239 r = log_oom();
2240 goto finish;
2241 }
2242 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2243
2244 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2245 * bytes counters (and not for the packets counters) */
2246 if (m == CGROUP_IP_INGRESS_BYTES)
2247 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2248 format_bytes(buf, sizeof(buf), value),
2249 " IP traffic");
2250 else if (m == CGROUP_IP_EGRESS_BYTES)
2251 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2252 format_bytes(buf, sizeof(buf), value),
2253 " IP traffic");
2254 else
2255 continue;
2256 if (!t) {
2257 r = log_oom();
2258 goto finish;
2259 }
2260
2261 message_parts[n_message_parts++] = t;
2262 }
2263
2264 /* Is there any accounting data available at all? */
2265 if (n_iovec == 0) {
2266 r = 0;
2267 goto finish;
2268 }
2269
2270 if (n_message_parts == 0)
2271 t = strjoina("MESSAGE=", u->id, ": Completed");
2272 else {
2273 _cleanup_free_ char *joined;
2274
2275 message_parts[n_message_parts] = NULL;
2276
2277 joined = strv_join(message_parts, ", ");
2278 if (!joined) {
2279 r = log_oom();
2280 goto finish;
2281 }
2282
2283 t = strjoina("MESSAGE=", u->id, ": ", joined);
2284 }
2285
2286 /* The following four fields are allocated on the stack or are static strings; hence we don't want to free them,
2287 * and don't increase n_iovec for them */
2288 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2289 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2290
2291 t = strjoina(u->manager->unit_log_field, u->id);
2292 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2293
2294 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2295 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2296
2297 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2298 r = 0;
2299
2300 finish:
2301 for (i = 0; i < n_message_parts; i++)
2302 free(message_parts[i]);
2303
2304 for (i = 0; i < n_iovec; i++)
2305 free(iovec[i].iov_base);
2306
2307 return r;
2308
2309 }
2310
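/* Track whether this unit currently needs the console, and update the manager's console reference
 * count whenever that changes. */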
2311 static void unit_update_on_console(Unit *u) {
2312 bool b;
2313
2314 assert(u);
2315
2316 b = unit_needs_console(u);
2317 if (u->on_console == b)
2318 return;
2319
2320 u->on_console = b;
2321 if (b)
2322 manager_ref_console(u->manager);
2323 else
2324 manager_unref_console(u->manager);
2325
2326 }
2327
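/* Invoked by the per-type implementations whenever the low-level state of a unit changes: updates
 * timestamps, completes or invalidates jobs and triggers all follow-up actions. */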
2328 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2329 Manager *m;
2330 bool unexpected;
2331
2332 assert(u);
2333 assert(os < _UNIT_ACTIVE_STATE_MAX);
2334 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2335
2336 /* Note that this is called for all low-level state changes,
2337 * even if they might map to the same high-level
2338 * UnitActiveState! That means that ns == os is an expected
2339 * behavior here. For example: if a mount point is remounted
2340 * this function will be called too! */
2341
2342 m = u->manager;
2343
2344 /* Update timestamps for state changes */
2345 if (!MANAGER_IS_RELOADING(m)) {
2346 dual_timestamp_get(&u->state_change_timestamp);
2347
2348 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2349 u->inactive_exit_timestamp = u->state_change_timestamp;
2350 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2351 u->inactive_enter_timestamp = u->state_change_timestamp;
2352
2353 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2354 u->active_enter_timestamp = u->state_change_timestamp;
2355 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2356 u->active_exit_timestamp = u->state_change_timestamp;
2357 }
2358
2359 /* Keep track of failed units */
2360 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
2361
2362 /* Make sure the cgroup and state files are always removed when we become inactive */
2363 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2364 unit_prune_cgroup(u);
2365 unit_unlink_state_files(u);
2366 }
2367
2368 unit_update_on_console(u);
2369
2370 if (u->job) {
2371 unexpected = false;
2372
2373 if (u->job->state == JOB_WAITING)
2374
2375 /* So we reached a different state for this
2376 * job. Let's see if we can run it now if it
2377 * failed previously due to EAGAIN. */
2378 job_add_to_run_queue(u->job);
2379
2380 /* Let's check whether this state change constitutes a
2381 * finished job, or maybe contradicts a running job and
2382 * hence needs to invalidate jobs. */
2383
2384 switch (u->job->type) {
2385
2386 case JOB_START:
2387 case JOB_VERIFY_ACTIVE:
2388
2389 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2390 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2391 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2392 unexpected = true;
2393
2394 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2395 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2396 }
2397
2398 break;
2399
2400 case JOB_RELOAD:
2401 case JOB_RELOAD_OR_START:
2402 case JOB_TRY_RELOAD:
2403
2404 if (u->job->state == JOB_RUNNING) {
2405 if (ns == UNIT_ACTIVE)
2406 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2407 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2408 unexpected = true;
2409
2410 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2411 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2412 }
2413 }
2414
2415 break;
2416
2417 case JOB_STOP:
2418 case JOB_RESTART:
2419 case JOB_TRY_RESTART:
2420
2421 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2422 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2423 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2424 unexpected = true;
2425 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2426 }
2427
2428 break;
2429
2430 default:
2431 assert_not_reached("Job type unknown");
2432 }
2433
2434 } else
2435 unexpected = true;
2436
2437 if (!MANAGER_IS_RELOADING(m)) {
2438
2439 /* If this state change happened without being
2440 * requested by a job, then let's retroactively start
2441 * or stop dependencies. We skip that step when
2442 * deserializing, since we don't want to create any
2443 * additional jobs just because something is already
2444 * activated. */
2445
2446 if (unexpected) {
2447 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2448 retroactively_start_dependencies(u);
2449 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2450 retroactively_stop_dependencies(u);
2451 }
2452
2453 /* Stop unneeded units regardless of whether going down was expected or not */
2454 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2455 check_unneeded_dependencies(u);
2456
2457 if (ns != os && ns == UNIT_FAILED) {
2458 log_unit_debug(u, "Unit entered failed state.");
2459 unit_start_on_failure(u);
2460 }
2461 }
2462
2463 /* Some names are special */
2464 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2465
2466 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2467 /* The bus might have just become available,
2468 * hence try to connect to it, if we aren't
2469 * yet connected. */
2470 bus_init(m, true);
2471
2472 if (u->type == UNIT_SERVICE &&
2473 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2474 !MANAGER_IS_RELOADING(m)) {
2475 /* Write audit record if we have just finished starting up */
2476 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2477 u->in_audit = true;
2478 }
2479
2480 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2481 manager_send_unit_plymouth(m, u);
2482
2483 } else {
2484 /* We don't care about D-Bus going down here, since we'll get an asynchronous notification for it
2485 * anyway. */
2486
2487 if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2488 !UNIT_IS_INACTIVE_OR_FAILED(os)
2489 && !MANAGER_IS_RELOADING(m)) {
2490
2491 /* This unit just stopped/failed. */
2492 if (u->type == UNIT_SERVICE) {
2493
2494 /* Hmm, if there was no start record written,
2495 * write it now, so that we always have a nice
2496 * pair */
2497 if (!u->in_audit) {
2498 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2499
2500 if (ns == UNIT_INACTIVE)
2501 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2502 } else
2503 /* Write audit record if we have just finished shutting down */
2504 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2505
2506 u->in_audit = false;
2507 }
2508
2509 /* Write a log message about consumed resources */
2510 unit_log_resources(u);
2511 }
2512 }
2513
2514 manager_recheck_journal(m);
2515 unit_trigger_notify(u);
2516
2517 if (!MANAGER_IS_RELOADING(u->manager)) {
2518 /* Maybe we finished startup and are now ready for
2519 * being stopped because unneeded? */
2520 unit_check_unneeded(u);
2521
2522 /* Maybe we finished startup, but something we needed
2523 * has vanished? Let's die then. (This happens when
2524 * something BindsTo= to a Type=oneshot unit, as these
2525 * units go directly from starting to inactive,
2526 * without ever entering started.) */
2527 unit_check_binds_to(u);
2528
2529 if (os != UNIT_FAILED && ns == UNIT_FAILED)
2530 (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
2531 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
2532 (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
2533 }
2534
2535 unit_add_to_dbus_queue(u);
2536 unit_add_to_gc_queue(u);
2537 }
2538
2539 int unit_watch_pid(Unit *u, pid_t pid) {
2540 int r;
2541
2542 assert(u);
2543 assert(pid_is_valid(pid));
2544
2545 /* Watch a specific PID */
2546
2547 r = set_ensure_allocated(&u->pids, NULL);
2548 if (r < 0)
2549 return r;
2550
2551 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2552 if (r < 0)
2553 return r;
2554
2555 /* First try, let's add the unit keyed by "pid". */
2556 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2557 if (r == -EEXIST) {
2558 Unit **array;
2559 bool found = false;
2560 size_t n = 0;
2561
2562 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2563 * to an array of Units rather than just a Unit) already lists us. */
2564
2565 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2566 if (array)
2567 for (; array[n]; n++)
2568 if (array[n] == u)
2569 found = true;
2570
2571 if (found) /* Found it already? If so, do nothing */
2572 r = 0;
2573 else {
2574 Unit **new_array;
2575
2576 /* Allocate a new array */
2577 new_array = new(Unit*, n + 2);
2578 if (!new_array)
2579 return -ENOMEM;
2580
2581 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2582 new_array[n] = u;
2583 new_array[n+1] = NULL;
2584
2585 /* Add or replace the old array */
2586 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2587 if (r < 0) {
2588 free(new_array);
2589 return r;
2590 }
2591
2592 free(array);
2593 }
2594 } else if (r < 0)
2595 return r;
2596
2597 r = set_put(u->pids, PID_TO_PTR(pid));
2598 if (r < 0)
2599 return r;
2600
2601 return 0;
2602 }
2603
2604 void unit_unwatch_pid(Unit *u, pid_t pid) {
2605 Unit **array;
2606
2607 assert(u);
2608 assert(pid_is_valid(pid));
2609
2610 /* First let's drop the unit in case it's keyed as "pid". */
2611 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2612
2613 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2614 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2615 if (array) {
2616 size_t n, m = 0;
2617
2618 /* Let's iterate through the array, dropping our own entry */
2619 for (n = 0; array[n]; n++)
2620 if (array[n] != u)
2621 array[m++] = array[n];
2622 array[m] = NULL;
2623
2624 if (m == 0) {
2625 /* The array is now empty, remove the entire entry */
2626 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2627 free(array);
2628 }
2629 }
2630
2631 (void) set_remove(u->pids, PID_TO_PTR(pid));
2632 }
2633
2634 void unit_unwatch_all_pids(Unit *u) {
2635 assert(u);
2636
2637 while (!set_isempty(u->pids))
2638 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2639
2640 u->pids = set_free(u->pids);
2641 }
2642
2643 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2644 Iterator i;
2645 void *e;
2646
2647 assert(u);
2648
2649 /* Cleans dead PIDs from our list */
2650
2651 SET_FOREACH(e, u->pids, i) {
2652 pid_t pid = PTR_TO_PID(e);
2653
2654 if (pid == except1 || pid == except2)
2655 continue;
2656
2657 if (!pid_is_unwaited(pid))
2658 unit_unwatch_pid(u, pid);
2659 }
2660 }
2661
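/* Returns true if it makes sense to enqueue a job of the given type for this unit. */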
2662 bool unit_job_is_applicable(Unit *u, JobType j) {
2663 assert(u);
2664 assert(j >= 0 && j < _JOB_TYPE_MAX);
2665
2666 switch (j) {
2667
2668 case JOB_VERIFY_ACTIVE:
2669 case JOB_START:
2670 case JOB_NOP:
2671 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2672 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2673 * jobs for them. */
2674 return true;
2675
2676 case JOB_STOP:
2677 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2678 * external events), hence it makes no sense to permit enqueuing such a request either. */
2679 return !u->perpetual;
2680
2681 case JOB_RESTART:
2682 case JOB_TRY_RESTART:
2683 return unit_can_stop(u) && unit_can_start(u);
2684
2685 case JOB_RELOAD:
2686 case JOB_TRY_RELOAD:
2687 return unit_can_reload(u);
2688
2689 case JOB_RELOAD_OR_START:
2690 return unit_can_reload(u) && unit_can_start(u);
2691
2692 default:
2693 assert_not_reached("Invalid job type");
2694 }
2695 }
2696
2697 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2698 assert(u);
2699
2700 /* Only warn about some unit types */
2701 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2702 return;
2703
2704 if (streq_ptr(u->id, other))
2705 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2706 else
2707 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2708 }
2709
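/* Adds or updates the entry for 'other' in the given dependency hashmap, ORing in the supplied
 * origin/destination masks. Returns 0 if nothing changed, 1 if an entry was added or updated. */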
2710 static int unit_add_dependency_hashmap(
2711 Hashmap **h,
2712 Unit *other,
2713 UnitDependencyMask origin_mask,
2714 UnitDependencyMask destination_mask) {
2715
2716 UnitDependencyInfo info;
2717 int r;
2718
2719 assert(h);
2720 assert(other);
2721 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2722 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2723 assert(origin_mask > 0 || destination_mask > 0);
2724
2725 r = hashmap_ensure_allocated(h, NULL);
2726 if (r < 0)
2727 return r;
2728
2729 assert_cc(sizeof(void*) == sizeof(info));
2730
2731 info.data = hashmap_get(*h, other);
2732 if (info.data) {
2733 /* Entry already exists. Add in our mask. */
2734
2735 if ((info.origin_mask & origin_mask) == info.origin_mask &&
2736 (info.destination_mask & destination_mask) == info.destination_mask)
2737 return 0; /* NOP */
2738
2739 info.origin_mask |= origin_mask;
2740 info.destination_mask |= destination_mask;
2741
2742 r = hashmap_update(*h, other, info.data);
2743 } else {
2744 info = (UnitDependencyInfo) {
2745 .origin_mask = origin_mask,
2746 .destination_mask = destination_mask,
2747 };
2748
2749 r = hashmap_put(*h, other, info.data);
2750 }
2751 if (r < 0)
2752 return r;
2753
2754 return 1;
2755 }
2756
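/* Registers the dependency of type 'd' from 'u' on 'other', plus the inverse dependency on 'other'
 * where one is defined, and optionally References=/ReferencedBy= entries. */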
2757 int unit_add_dependency(
2758 Unit *u,
2759 UnitDependency d,
2760 Unit *other,
2761 bool add_reference,
2762 UnitDependencyMask mask) {
2763
2764 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2765 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2766 [UNIT_WANTS] = UNIT_WANTED_BY,
2767 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2768 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2769 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2770 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2771 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2772 [UNIT_WANTED_BY] = UNIT_WANTS,
2773 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2774 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2775 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2776 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2777 [UNIT_BEFORE] = UNIT_AFTER,
2778 [UNIT_AFTER] = UNIT_BEFORE,
2779 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2780 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2781 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2782 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2783 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2784 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2785 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2786 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2787 };
2788 Unit *original_u = u, *original_other = other;
2789 int r;
2790
2791 assert(u);
2792 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2793 assert(other);
2794
2795 u = unit_follow_merge(u);
2796 other = unit_follow_merge(other);
2797
2798 /* We won't allow dependencies on ourselves. We will not
2799 * consider them an error however. */
2800 if (u == other) {
2801 maybe_warn_about_dependency(original_u, original_other->id, d);
2802 return 0;
2803 }
2804
2805 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2806 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2807 log_unit_warning(u, "Dependency %s=%s ignored (.device units cannot be delayed)", unit_dependency_to_string(d), other->id);
2808 return 0;
2809 }
2810
2811 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2812 if (r < 0)
2813 return r;
2814
2815 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2816 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2817 if (r < 0)
2818 return r;
2819 }
2820
2821 if (add_reference) {
2822 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2823 if (r < 0)
2824 return r;
2825
2826 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2827 if (r < 0)
2828 return r;
2829 }
2830
2831 unit_add_to_dbus_queue(u);
2832 return 0;
2833 }
2834
2835 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2836 int r;
2837
2838 assert(u);
2839
2840 r = unit_add_dependency(u, d, other, add_reference, mask);
2841 if (r < 0)
2842 return r;
2843
2844 return unit_add_dependency(u, e, other, add_reference, mask);
2845 }
2846
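/* If 'name' (or the basename of 'path') is a template, instantiate it using this unit's instance
 * string, or, if the unit has none, its name prefix; otherwise return the name unmodified. */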
2847 static int resolve_template(Unit *u, const char *name, const char *path, char **buf, const char **ret) {
2848 int r;
2849
2850 assert(u);
2851 assert(name || path);
2852 assert(buf);
2853 assert(ret);
2854
2855 if (!name)
2856 name = basename(path);
2857
2858 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2859 *buf = NULL;
2860 *ret = name;
2861 return 0;
2862 }
2863
2864 if (u->instance)
2865 r = unit_name_replace_instance(name, u->instance, buf);
2866 else {
2867 _cleanup_free_ char *i = NULL;
2868
2869 r = unit_name_to_prefix(u->id, &i);
2870 if (r < 0)
2871 return r;
2872
2873 r = unit_name_replace_instance(name, i, buf);
2874 }
2875 if (r < 0)
2876 return r;
2877
2878 *ret = *buf;
2879 return 0;
2880 }
2881
2882 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2883 _cleanup_free_ char *buf = NULL;
2884 Unit *other;
2885 int r;
2886
2887 assert(u);
2888 assert(name || path);
2889
2890 r = resolve_template(u, name, path, &buf, &name);
2891 if (r < 0)
2892 return r;
2893
2894 r = manager_load_unit(u->manager, name, path, NULL, &other);
2895 if (r < 0)
2896 return r;
2897
2898 return unit_add_dependency(u, d, other, add_reference, mask);
2899 }
2900
2901 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2902 _cleanup_free_ char *buf = NULL;
2903 Unit *other;
2904 int r;
2905
2906 assert(u);
2907 assert(name || path);
2908
2909 r = resolve_template(u, name, path, &buf, &name);
2910 if (r < 0)
2911 return r;
2912
2913 r = manager_load_unit(u->manager, name, path, NULL, &other);
2914 if (r < 0)
2915 return r;
2916
2917 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2918 }
2919
2920 int set_unit_path(const char *p) {
2921 /* This is mostly for debug purposes */
2922 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2923 return -errno;
2924
2925 return 0;
2926 }
2927
2928 char *unit_dbus_path(Unit *u) {
2929 assert(u);
2930
2931 if (!u->id)
2932 return NULL;
2933
2934 return unit_dbus_path_from_name(u->id);
2935 }
2936
2937 char *unit_dbus_path_invocation_id(Unit *u) {
2938 assert(u);
2939
2940 if (sd_id128_is_null(u->invocation_id))
2941 return NULL;
2942
2943 return unit_dbus_path_from_name(u->invocation_id_string);
2944 }
2945
2946 int unit_set_slice(Unit *u, Unit *slice) {
2947 assert(u);
2948 assert(slice);
2949
2950 /* Sets the unit slice if it has not been set before. Is extra
2951 * careful to only allow this for units that actually have a
2952 * cgroup context. Also, we don't allow setting this for slices
2953 * (since the parent slice is derived from the name). Make
2954 * sure the unit we set is actually a slice. */
2955
2956 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2957 return -EOPNOTSUPP;
2958
2959 if (u->type == UNIT_SLICE)
2960 return -EINVAL;
2961
2962 if (unit_active_state(u) != UNIT_INACTIVE)
2963 return -EBUSY;
2964
2965 if (slice->type != UNIT_SLICE)
2966 return -EINVAL;
2967
2968 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2969 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2970 return -EPERM;
2971
2972 if (UNIT_DEREF(u->slice) == slice)
2973 return 0;
2974
2975 /* Disallow slice changes if @u is already bound to cgroups */
2976 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2977 return -EBUSY;
2978
2979 unit_ref_unset(&u->slice);
2980 unit_ref_set(&u->slice, slice);
2981 return 1;
2982 }
2983
2984 int unit_set_default_slice(Unit *u) {
2985 _cleanup_free_ char *b = NULL;
2986 const char *slice_name;
2987 Unit *slice;
2988 int r;
2989
2990 assert(u);
2991
2992 if (UNIT_ISSET(u->slice))
2993 return 0;
2994
2995 if (u->instance) {
2996 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2997
2998 /* Implicitly place all instantiated units in their
2999 * own per-template slice */
3000
3001 r = unit_name_to_prefix(u->id, &prefix);
3002 if (r < 0)
3003 return r;
3004
3005 /* The prefix is already escaped, but it might include
3006 * "-" which has a special meaning for slice units,
3007 * hence escape it once more here. */
3008 escaped = unit_name_escape(prefix);
3009 if (!escaped)
3010 return -ENOMEM;
3011
3012 if (MANAGER_IS_SYSTEM(u->manager))
3013 b = strjoin("system-", escaped, ".slice");
3014 else
3015 b = strappend(escaped, ".slice");
3016 if (!b)
3017 return -ENOMEM;
3018
3019 slice_name = b;
3020 } else
3021 slice_name =
3022 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3023 ? SPECIAL_SYSTEM_SLICE
3024 : SPECIAL_ROOT_SLICE;
3025
3026 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3027 if (r < 0)
3028 return r;
3029
3030 return unit_set_slice(u, slice);
3031 }
3032
3033 const char *unit_slice_name(Unit *u) {
3034 assert(u);
3035
3036 if (!UNIT_ISSET(u->slice))
3037 return NULL;
3038
3039 return UNIT_DEREF(u->slice)->id;
3040 }
3041
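/* Loads the unit that has the same name as 'u' but the given type suffix, e.g. the matching ".socket"
 * unit for a ".service". */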
3042 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3043 _cleanup_free_ char *t = NULL;
3044 int r;
3045
3046 assert(u);
3047 assert(type);
3048 assert(_found);
3049
3050 r = unit_name_change_suffix(u->id, type, &t);
3051 if (r < 0)
3052 return r;
3053 if (unit_has_name(u, t))
3054 return -EINVAL;
3055
3056 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3057 assert(r < 0 || *_found != u);
3058 return r;
3059 }
3060
3061 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3062 const char *name, *old_owner, *new_owner;
3063 Unit *u = userdata;
3064 int r;
3065
3066 assert(message);
3067 assert(u);
3068
3069 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3070 if (r < 0) {
3071 bus_log_parse_error(r);
3072 return 0;
3073 }
3074
3075 old_owner = empty_to_null(old_owner);
3076 new_owner = empty_to_null(new_owner);
3077
3078 if (UNIT_VTABLE(u)->bus_name_owner_change)
3079 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3080
3081 return 0;
3082 }
3083
3084 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3085 const char *match;
3086
3087 assert(u);
3088 assert(bus);
3089 assert(name);
3090
3091 if (u->match_bus_slot)
3092 return -EBUSY;
3093
3094 match = strjoina("type='signal',"
3095 "sender='org.freedesktop.DBus',"
3096 "path='/org/freedesktop/DBus',"
3097 "interface='org.freedesktop.DBus',"
3098 "member='NameOwnerChanged',"
3099 "arg0='", name, "'");
3100
3101 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3102 }
3103
3104 int unit_watch_bus_name(Unit *u, const char *name) {
3105 int r;
3106
3107 assert(u);
3108 assert(name);
3109
3110 /* Watch a specific name on the bus. We only support one unit
3111 * watching each name for now. */
3112
3113 if (u->manager->api_bus) {
3114 /* If the bus is already available, install the match directly.
3115 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3116 r = unit_install_bus_match(u, u->manager->api_bus, name);
3117 if (r < 0)
3118 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3119 }
3120
3121 r = hashmap_put(u->manager->watch_bus, name, u);
3122 if (r < 0) {
3123 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3124 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3125 }
3126
3127 return 0;
3128 }
3129
3130 void unit_unwatch_bus_name(Unit *u, const char *name) {
3131 assert(u);
3132 assert(name);
3133
3134 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3135 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3136 }
3137
3138 bool unit_can_serialize(Unit *u) {
3139 assert(u);
3140
3141 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3142 }
3143
3144 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3145 _cleanup_free_ char *s = NULL;
3146 int r = 0;
3147
3148 assert(f);
3149 assert(key);
3150
3151 if (mask != 0) {
3152 r = cg_mask_to_string(mask, &s);
3153 if (r >= 0) {
3154 fputs(key, f);
3155 fputc('=', f);
3156 fputs(s, f);
3157 fputc('\n', f);
3158 }
3159 }
3160 return r;
3161 }
3162
3163 static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3164 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3165 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3166 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3167 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3168 };
3169
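/* Writes the unit's runtime state to 'f' (passing open fds via 'fds'), so that it can be restored by
 * unit_deserialize() after a daemon reload or re-execution. */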
3170 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3171 CGroupIPAccountingMetric m;
3172 int r;
3173
3174 assert(u);
3175 assert(f);
3176 assert(fds);
3177
3178 if (unit_can_serialize(u)) {
3179 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3180 if (r < 0)
3181 return r;
3182 }
3183
3184 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
3185
3186 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3187 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
3188 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
3189 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3190
3191 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
3192 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
3193
3194 if (dual_timestamp_is_set(&u->condition_timestamp))
3195 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
3196
3197 if (dual_timestamp_is_set(&u->assert_timestamp))
3198 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
3199
3200 unit_serialize_item(u, f, "transient", yes_no(u->transient));
3201
3202 unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
3203 unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
3204 unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));
3205
3206 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3207 if (u->cpu_usage_last != NSEC_INFINITY)
3208 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3209
3210 if (u->cgroup_path)
3211 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
3212 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
3213 (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3214 (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3215 unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);
3216
3217 if (uid_is_valid(u->ref_uid))
3218 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
3219 if (gid_is_valid(u->ref_gid))
3220 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
3221
3222 if (!sd_id128_is_null(u->invocation_id))
3223 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3224
3225 bus_track_serialize(u->bus_track, f, "ref");
3226
3227 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3228 uint64_t v;
3229
3230 r = unit_get_ip_accounting(u, m, &v);
3231 if (r >= 0)
3232 unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
3233 }
3234
3235 if (serialize_jobs) {
3236 if (u->job) {
3237 fprintf(f, "job\n");
3238 job_serialize(u->job, f);
3239 }
3240
3241 if (u->nop_job) {
3242 fprintf(f, "job\n");
3243 job_serialize(u->nop_job, f);
3244 }
3245 }
3246
3247 /* End marker */
3248 fputc('\n', f);
3249 return 0;
3250 }
3251
3252 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3253 assert(u);
3254 assert(f);
3255 assert(key);
3256
3257 if (!value)
3258 return 0;
3259
3260 fputs(key, f);
3261 fputc('=', f);
3262 fputs(value, f);
3263 fputc('\n', f);
3264
3265 return 1;
3266 }
3267
3268 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3269 _cleanup_free_ char *c = NULL;
3270
3271 assert(u);
3272 assert(f);
3273 assert(key);
3274
3275 if (!value)
3276 return 0;
3277
3278 c = cescape(value);
3279 if (!c)
3280 return -ENOMEM;
3281
3282 fputs(key, f);
3283 fputc('=', f);
3284 fputs(c, f);
3285 fputc('\n', f);
3286
3287 return 1;
3288 }
3289
3290 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3291 int copy;
3292
3293 assert(u);
3294 assert(f);
3295 assert(key);
3296
3297 if (fd < 0)
3298 return 0;
3299
3300 copy = fdset_put_dup(fds, fd);
3301 if (copy < 0)
3302 return copy;
3303
3304 fprintf(f, "%s=%i\n", key, copy);
3305 return 1;
3306 }
3307
3308 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3309 va_list ap;
3310
3311 assert(u);
3312 assert(f);
3313 assert(key);
3314 assert(format);
3315
3316 fputs(key, f);
3317 fputc('=', f);
3318
3319 va_start(ap, format);
3320 vfprintf(f, format, ap);
3321 va_end(ap);
3322
3323 fputc('\n', f);
3324 }
3325
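/* Reads back the state written by unit_serialize(), one key=value line at a time, until the empty-line
 * end marker is reached. */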
3326 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3327 int r;
3328
3329 assert(u);
3330 assert(f);
3331 assert(fds);
3332
3333 for (;;) {
3334 char line[LINE_MAX], *l, *v;
3335 CGroupIPAccountingMetric m;
3336 size_t k;
3337
3338 if (!fgets(line, sizeof(line), f)) {
3339 if (feof(f))
3340 return 0;
3341 return -errno;
3342 }
3343
3344 char_array_0(line);
3345 l = strstrip(line);
3346
3347 /* End marker */
3348 if (isempty(l))
3349 break;
3350
3351 k = strcspn(l, "=");
3352
3353 if (l[k] == '=') {
3354 l[k] = 0;
3355 v = l+k+1;
3356 } else
3357 v = l+k;
3358
3359 if (streq(l, "job")) {
3360 if (v[0] == '\0') {
3361 /* new-style serialized job */
3362 Job *j;
3363
3364 j = job_new_raw(u);
3365 if (!j)
3366 return log_oom();
3367
3368 r = job_deserialize(j, f);
3369 if (r < 0) {
3370 job_free(j);
3371 return r;
3372 }
3373
3374 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3375 if (r < 0) {
3376 job_free(j);
3377 return r;
3378 }
3379
3380 r = job_install_deserialized(j);
3381 if (r < 0) {
3382 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3383 job_free(j);
3384 return r;
3385 }
3386 } else /* legacy for pre-44 */
3387 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3388 continue;
3389 } else if (streq(l, "state-change-timestamp")) {
3390 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3391 continue;
3392 } else if (streq(l, "inactive-exit-timestamp")) {
3393 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3394 continue;
3395 } else if (streq(l, "active-enter-timestamp")) {
3396 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3397 continue;
3398 } else if (streq(l, "active-exit-timestamp")) {
3399 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3400 continue;
3401 } else if (streq(l, "inactive-enter-timestamp")) {
3402 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3403 continue;
3404 } else if (streq(l, "condition-timestamp")) {
3405 dual_timestamp_deserialize(v, &u->condition_timestamp);
3406 continue;
3407 } else if (streq(l, "assert-timestamp")) {
3408 dual_timestamp_deserialize(v, &u->assert_timestamp);
3409 continue;
3410 } else if (streq(l, "condition-result")) {
3411
3412 r = parse_boolean(v);
3413 if (r < 0)
3414 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3415 else
3416 u->condition_result = r;
3417
3418 continue;
3419
3420 } else if (streq(l, "assert-result")) {
3421
3422 r = parse_boolean(v);
3423 if (r < 0)
3424 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3425 else
3426 u->assert_result = r;
3427
3428 continue;
3429
3430 } else if (streq(l, "transient")) {
3431
3432 r = parse_boolean(v);
3433 if (r < 0)
3434 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3435 else
3436 u->transient = r;
3437
3438 continue;
3439
3440 } else if (streq(l, "exported-invocation-id")) {
3441
3442 r = parse_boolean(v);
3443 if (r < 0)
3444 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3445 else
3446 u->exported_invocation_id = r;
3447
3448 continue;
3449
3450 } else if (streq(l, "exported-log-level-max")) {
3451
3452 r = parse_boolean(v);
3453 if (r < 0)
3454 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3455 else
3456 u->exported_log_level_max = r;
3457
3458 continue;
3459
3460 } else if (streq(l, "exported-log-extra-fields")) {
3461
3462 r = parse_boolean(v);
3463 if (r < 0)
3464 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3465 else
3466 u->exported_log_extra_fields = r;
3467
3468 continue;
3469
3470 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3471
3472 r = safe_atou64(v, &u->cpu_usage_base);
3473 if (r < 0)
3474 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3475
3476 continue;
3477
3478 } else if (streq(l, "cpu-usage-last")) {
3479
3480 r = safe_atou64(v, &u->cpu_usage_last);
3481 if (r < 0)
3482 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3483
3484 continue;
3485
3486 } else if (streq(l, "cgroup")) {
3487
3488 r = unit_set_cgroup_path(u, v);
3489 if (r < 0)
3490 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3491
3492 (void) unit_watch_cgroup(u);
3493
3494 continue;
3495 } else if (streq(l, "cgroup-realized")) {
3496 int b;
3497
3498 b = parse_boolean(v);
3499 if (b < 0)
3500 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3501 else
3502 u->cgroup_realized = b;
3503
3504 continue;
3505
3506 } else if (streq(l, "cgroup-realized-mask")) {
3507
3508 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3509 if (r < 0)
3510 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3511 continue;
3512
3513 } else if (streq(l, "cgroup-enabled-mask")) {
3514
3515 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3516 if (r < 0)
3517 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3518 continue;
3519
3520 } else if (streq(l, "cgroup-bpf-realized")) {
3521 int i;
3522
3523 r = safe_atoi(v, &i);
3524 if (r < 0)
3525 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3526 else
3527 u->cgroup_bpf_state =
3528 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3529 i > 0 ? UNIT_CGROUP_BPF_ON :
3530 UNIT_CGROUP_BPF_OFF;
3531
3532 continue;
3533
3534 } else if (streq(l, "ref-uid")) {
3535 uid_t uid;
3536
3537 r = parse_uid(v, &uid);
3538 if (r < 0)
3539 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3540 else
3541 unit_ref_uid_gid(u, uid, GID_INVALID);
3542
3543 continue;
3544
3545 } else if (streq(l, "ref-gid")) {
3546 gid_t gid;
3547
3548 r = parse_gid(v, &gid);
3549 if (r < 0)
3550 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3551 else
3552 unit_ref_uid_gid(u, UID_INVALID, gid);
3553
continue;

3554 } else if (streq(l, "ref")) {
3555
3556 r = strv_extend(&u->deserialized_refs, v);
3557 if (r < 0)
3558 log_oom();
3559
3560 continue;
3561 } else if (streq(l, "invocation-id")) {
3562 sd_id128_t id;
3563
3564 r = sd_id128_from_string(v, &id);
3565 if (r < 0)
3566 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3567 else {
3568 r = unit_set_invocation_id(u, id);
3569 if (r < 0)
3570 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3571 }
3572
3573 continue;
3574 }
3575
3576 /* Check if this is an IP accounting metric serialization field */
3577 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3578 if (streq(l, ip_accounting_metric_field[m]))
3579 break;
3580 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3581 uint64_t c;
3582
3583 r = safe_atou64(v, &c);
3584 if (r < 0)
3585 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3586 else
3587 u->ip_accounting_extra[m] = c;
3588 continue;
3589 }
3590
3591 if (unit_can_serialize(u)) {
3592 r = exec_runtime_deserialize_compat(u, l, v, fds);
3593 if (r < 0) {
3594 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3595 continue;
3596 }
3597
3598 /* Returns positive if key was handled by the call */
3599 if (r > 0)
3600 continue;
3601
3602 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3603 if (r < 0)
3604 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3605 }
3606 }
3607
3608 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3609 * useful so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3610 * before 228 where the base for timeouts was not persistent across reboots. */
3611
3612 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3613 dual_timestamp_get(&u->state_change_timestamp);
3614
3615 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3616 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3617 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3618 unit_invalidate_cgroup_bpf(u);
3619
3620 return 0;
3621 }
3622
3623 void unit_deserialize_skip(FILE *f) {
3624 assert(f);
3625
3626 /* Skip serialized data for this unit. We don't know what it is. */
3627
3628 for (;;) {
3629 char line[LINE_MAX], *l;
3630
3631 if (!fgets(line, sizeof line, f))
3632 return;
3633
3634 char_array_0(line);
3635 l = strstrip(line);
3636
3637 /* End marker */
3638 if (isempty(l))
3639 return;
3640 }
3641 }
3642
3643
3644 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3645 Unit *device;
3646 _cleanup_free_ char *e = NULL;
3647 int r;
3648
3649 assert(u);
3650
3651 /* Adds in links to the device node that this unit is based on */
3652 if (isempty(what))
3653 return 0;
3654
3655 if (!is_device_path(what))
3656 return 0;
3657
3658 /* When device units aren't supported (such as in a
3659 * container), don't create dependencies on them. */
3660 if (!unit_type_supported(UNIT_DEVICE))
3661 return 0;
3662
3663 r = unit_name_from_path(what, ".device", &e);
3664 if (r < 0)
3665 return r;
3666
3667 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3668 if (r < 0)
3669 return r;
3670
3671 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3672 dep = UNIT_BINDS_TO;
3673
3674 r = unit_add_two_dependencies(u, UNIT_AFTER,
3675 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3676 device, true, mask);
3677 if (r < 0)
3678 return r;
3679
3680 if (wants) {
3681 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3682 if (r < 0)
3683 return r;
3684 }
3685
3686 return 0;
3687 }
3688
3689 int unit_coldplug(Unit *u) {
3690 int r = 0, q;
3691 char **i;
3692
3693 assert(u);
3694
3695 /* Make sure we don't enter a loop when coldplugging
3696 * recursively. */
3697 if (u->coldplugged)
3698 return 0;
3699
3700 u->coldplugged = true;
3701
3702 STRV_FOREACH(i, u->deserialized_refs) {
3703 q = bus_unit_track_add_name(u, *i);
3704 if (q < 0 && r >= 0)
3705 r = q;
3706 }
3707 u->deserialized_refs = strv_free(u->deserialized_refs);
3708
3709 if (UNIT_VTABLE(u)->coldplug) {
3710 q = UNIT_VTABLE(u)->coldplug(u);
3711 if (q < 0 && r >= 0)
3712 r = q;
3713 }
3714
3715 if (u->job) {
3716 q = job_coldplug(u->job);
3717 if (q < 0 && r >= 0)
3718 r = q;
3719 }
3720
3721 return r;
3722 }
3723
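/* Returns true if the file at 'path' was modified after 'mtime' (or can no longer be accessed), or, for
 * masked units, if the path is no longer masked. */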
3724 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3725 struct stat st;
3726
3727 if (!path)
3728 return false;
3729
3730 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3731 * are never out-of-date. */
3732 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3733 return false;
3734
3735 if (stat(path, &st) < 0)
3736 /* What, cannot access this anymore? */
3737 return true;
3738
3739 if (path_masked)
3740 /* For masked files, check whether they are still masked */
3741 return !null_or_empty(&st);
3742 else
3743 /* For non-empty files check the mtime */
3744 return timespec_load(&st.st_mtim) > mtime;
3747 }
3748
3749 bool unit_need_daemon_reload(Unit *u) {
3750 _cleanup_strv_free_ char **t = NULL;
3751 char **path;
3752
3753 assert(u);
3754
3755 /* For unit files, we allow masking… */
3756 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3757 u->load_state == UNIT_MASKED))
3758 return true;
3759
3760 /* Source paths should not be masked… */
3761 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3762 return true;
3763
3764 if (u->load_state == UNIT_LOADED)
3765 (void) unit_find_dropin_paths(u, &t);
3766 if (!strv_equal(u->dropin_paths, t))
3767 return true;
3768
3769 /* … any drop-ins that are masked are simply omitted from the list. */
3770 STRV_FOREACH(path, u->dropin_paths)
3771 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3772 return true;
3773
3774 return false;
3775 }
3776
3777 void unit_reset_failed(Unit *u) {
3778 assert(u);
3779
3780 if (UNIT_VTABLE(u)->reset_failed)
3781 UNIT_VTABLE(u)->reset_failed(u);
3782
3783 RATELIMIT_RESET(u->start_limit);
3784 u->start_limit_hit = false;
3785 }
3786
3787 Unit *unit_following(Unit *u) {
3788 assert(u);
3789
3790 if (UNIT_VTABLE(u)->following)
3791 return UNIT_VTABLE(u)->following(u);
3792
3793 return NULL;
3794 }
3795
3796 bool unit_stop_pending(Unit *u) {
3797 assert(u);
3798
3799 /* This call does check the current state of the unit. It's
3800 * hence useful to be called from state change calls of the
3801 * unit itself, where the state isn't updated yet. This is
3802 * different from unit_inactive_or_pending() which checks both
3803 * the current state and for a queued job. */
3804
3805 return u->job && u->job->type == JOB_STOP;
3806 }
3807
3808 bool unit_inactive_or_pending(Unit *u) {
3809 assert(u);
3810
3811 /* Returns true if the unit is inactive or going down */
3812
3813 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3814 return true;
3815
3816 if (unit_stop_pending(u))
3817 return true;
3818
3819 return false;
3820 }
3821
3822 bool unit_active_or_pending(Unit *u) {
3823 assert(u);
3824
3825 /* Returns true if the unit is active or going up */
3826
3827 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3828 return true;
3829
3830 if (u->job &&
3831 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3832 return true;
3833
3834 return false;
3835 }
3836
3837 bool unit_will_restart(Unit *u) {
3838 assert(u);
3839
3840 if (!UNIT_VTABLE(u)->will_restart)
3841 return false;
3842
3843 return UNIT_VTABLE(u)->will_restart(u);
3844 }
3845
3846 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3847 assert(u);
3848 assert(w >= 0 && w < _KILL_WHO_MAX);
3849 assert(SIGNAL_VALID(signo));
3850
3851 if (!UNIT_VTABLE(u)->kill)
3852 return -EOPNOTSUPP;
3853
3854 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3855 }
3856
3857 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3858 Set *pid_set;
3859 int r;
3860
3861 pid_set = set_new(NULL);
3862 if (!pid_set)
3863 return NULL;
3864
3865 /* Exclude the main/control pids from being killed via the cgroup */
3866 if (main_pid > 0) {
3867 r = set_put(pid_set, PID_TO_PTR(main_pid));
3868 if (r < 0)
3869 goto fail;
3870 }
3871
3872 if (control_pid > 0) {
3873 r = set_put(pid_set, PID_TO_PTR(control_pid));
3874 if (r < 0)
3875 goto fail;
3876 }
3877
3878 return pid_set;
3879
3880 fail:
3881 set_free(pid_set);
3882 return NULL;
3883 }
3884
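/* Common implementation of the Kill() operation: sends 'signo' to the main and/or control process and,
 * for the KILL_ALL* modes, to all remaining processes in the unit's cgroup. */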
3885 int unit_kill_common(
3886 Unit *u,
3887 KillWho who,
3888 int signo,
3889 pid_t main_pid,
3890 pid_t control_pid,
3891 sd_bus_error *error) {
3892
3893 int r = 0;
3894 bool killed = false;
3895
3896 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3897 if (main_pid < 0)
3898 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3899 else if (main_pid == 0)
3900 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3901 }
3902
3903 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3904 if (control_pid < 0)
3905 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3906 else if (control_pid == 0)
3907 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3908 }
3909
3910 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3911 if (control_pid > 0) {
3912 if (kill(control_pid, signo) < 0)
3913 r = -errno;
3914 else
3915 killed = true;
3916 }
3917
3918 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3919 if (main_pid > 0) {
3920 if (kill(main_pid, signo) < 0)
3921 r = -errno;
3922 else
3923 killed = true;
3924 }
3925
3926 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3927 _cleanup_set_free_ Set *pid_set = NULL;
3928 int q;
3929
3930 /* Exclude the main/control pids from being killed via the cgroup */
3931 pid_set = unit_pid_set(main_pid, control_pid);
3932 if (!pid_set)
3933 return -ENOMEM;
3934
3935 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3936 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3937 r = q;
3938 else
3939 killed = true;
3940 }
3941
3942 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3943 return -ESRCH;
3944
3945 return r;
3946 }
3947
3948 int unit_following_set(Unit *u, Set **s) {
3949 assert(u);
3950 assert(s);
3951
3952 if (UNIT_VTABLE(u)->following_set)
3953 return UNIT_VTABLE(u)->following_set(u, s);
3954
3955 *s = NULL;
3956 return 0;
3957 }
3958
3959 UnitFileState unit_get_unit_file_state(Unit *u) {
3960 int r;
3961
3962 assert(u);
3963
3964 if (u->unit_file_state < 0 && u->fragment_path) {
3965 r = unit_file_get_state(
3966 u->manager->unit_file_scope,
3967 NULL,
3968 u->id,
3969 &u->unit_file_state);
3970 if (r < 0)
3971 u->unit_file_state = UNIT_FILE_BAD;
3972 }
3973
3974 return u->unit_file_state;
3975 }
3976
3977 int unit_get_unit_file_preset(Unit *u) {
3978 assert(u);
3979
3980 if (u->unit_file_preset < 0 && u->fragment_path)
3981 u->unit_file_preset = unit_file_query_preset(
3982 u->manager->unit_file_scope,
3983 NULL,
3984 basename(u->fragment_path));
3985
3986 return u->unit_file_preset;
3987 }
3988
3989 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3990 assert(ref);
3991 assert(u);
3992
3993 if (ref->unit)
3994 unit_ref_unset(ref);
3995
3996 ref->unit = u;
3997 LIST_PREPEND(refs, u->refs, ref);
3998 return u;
3999 }
4000
4001 void unit_ref_unset(UnitRef *ref) {
4002 assert(ref);
4003
4004 if (!ref->unit)
4005 return;
4006
4007 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4008 * be unreferenced now. */
4009 unit_add_to_gc_queue(ref->unit);
4010
4011 LIST_REMOVE(refs, ref->unit->refs, ref);
4012 ref->unit = NULL;
4013 }
4014
4015 static int user_from_unit_name(Unit *u, char **ret) {
4016
4017 static const uint8_t hash_key[] = {
4018 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4019 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4020 };
4021
4022 _cleanup_free_ char *n = NULL;
4023 int r;
4024
4025 r = unit_name_to_prefix(u->id, &n);
4026 if (r < 0)
4027 return r;
4028
4029 if (valid_user_group_name(n)) {
4030 *ret = n;
4031 n = NULL;
4032 return 0;
4033 }
4034
4035 /* If we can't use the unit name as a user name, then let's hash it and use that */
4036 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4037 return -ENOMEM;
4038
4039 return 0;
4040 }
4041
4042 int unit_patch_contexts(Unit *u) {
4043 CGroupContext *cc;
4044 ExecContext *ec;
4045 unsigned i;
4046 int r;
4047
4048 assert(u);
4049
4050 /* Patch the manager defaults into the exec and cgroup
4051 * contexts, _after_ the rest of the settings have been
4052 * initialized */
4053
4054 ec = unit_get_exec_context(u);
4055 if (ec) {
4056 /* This only copies in the ones that need memory */
4057 for (i = 0; i < _RLIMIT_MAX; i++)
4058 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4059 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4060 if (!ec->rlimit[i])
4061 return -ENOMEM;
4062 }
4063
4064 if (MANAGER_IS_USER(u->manager) &&
4065 !ec->working_directory) {
4066
4067 r = get_home_dir(&ec->working_directory);
4068 if (r < 0)
4069 return r;
4070
4071 /* Allow user services to run, even if the
4072 * home directory is missing */
4073 ec->working_directory_missing_ok = true;
4074 }
4075
4076 if (ec->private_devices)
4077 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4078
4079 if (ec->protect_kernel_modules)
4080 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4081
4082 if (ec->dynamic_user) {
4083 if (!ec->user) {
4084 r = user_from_unit_name(u, &ec->user);
4085 if (r < 0)
4086 return r;
4087 }
4088
4089 if (!ec->group) {
4090 ec->group = strdup(ec->user);
4091 if (!ec->group)
4092 return -ENOMEM;
4093 }
4094
4095 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4096 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4097
4098 ec->private_tmp = true;
4099 ec->remove_ipc = true;
4100 ec->protect_system = PROTECT_SYSTEM_STRICT;
4101 if (ec->protect_home == PROTECT_HOME_NO)
4102 ec->protect_home = PROTECT_HOME_READ_ONLY;
4103 }
4104 }
4105
4106 cc = unit_get_cgroup_context(u);
4107 if (cc) {
4108
4109 if (ec &&
4110 ec->private_devices &&
4111 cc->device_policy == CGROUP_AUTO)
4112 cc->device_policy = CGROUP_CLOSED;
4113 }
4114
4115 return 0;
4116 }
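/* Illustrative summary (assumed wording, derived from the code above): for a unit with
 * DynamicUser=yes the patching above effectively behaves as if the unit file had also set
 * PrivateTmp=yes, RemoveIPC=yes and ProtectSystem=strict, upgrades ProtectHome= from "no" to
 * "read-only" unless something stronger was configured, and fills in User=/Group= derived from
 * the unit name if they were left unset. */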
4117
4118 ExecContext *unit_get_exec_context(Unit *u) {
4119 size_t offset;
4120 assert(u);
4121
4122 if (u->type < 0)
4123 return NULL;
4124
4125 offset = UNIT_VTABLE(u)->exec_context_offset;
4126 if (offset <= 0)
4127 return NULL;
4128
4129 return (ExecContext*) ((uint8_t*) u + offset);
4130 }
4131
4132 KillContext *unit_get_kill_context(Unit *u) {
4133 size_t offset;
4134 assert(u);
4135
4136 if (u->type < 0)
4137 return NULL;
4138
4139 offset = UNIT_VTABLE(u)->kill_context_offset;
4140 if (offset <= 0)
4141 return NULL;
4142
4143 return (KillContext*) ((uint8_t*) u + offset);
4144 }
4145
4146 CGroupContext *unit_get_cgroup_context(Unit *u) {
4147 size_t offset;
4148
4149 if (u->type < 0)
4150 return NULL;
4151
4152 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4153 if (offset <= 0)
4154 return NULL;
4155
4156 return (CGroupContext*) ((uint8_t*) u + offset);
4157 }
4158
4159 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4160 size_t offset;
4161
4162 if (u->type < 0)
4163 return NULL;
4164
4165 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4166 if (offset <= 0)
4167 return NULL;
4168
4169 return *(ExecRuntime**) ((uint8_t*) u + offset);
4170 }
4171
4172 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4173 assert(u);
4174
4175 if (UNIT_WRITE_FLAGS_NOOP(flags))
4176 return NULL;
4177
4178 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4179 return u->manager->lookup_paths.transient;
4180
4181 if (flags & UNIT_PERSISTENT)
4182 return u->manager->lookup_paths.persistent_control;
4183
4184 if (flags & UNIT_RUNTIME)
4185 return u->manager->lookup_paths.runtime_control;
4186
4187 return NULL;
4188 }
4189
4190 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4191 char *ret = NULL;
4192
4193 if (!s)
4194 return NULL;
4195
4196 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4197 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4198 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4199 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4200 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4201 * allocations. */
4202
4203 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4204 ret = specifier_escape(s);
4205 if (!ret)
4206 return NULL;
4207
4208 s = ret;
4209 }
4210
4211 if (flags & UNIT_ESCAPE_C) {
4212 char *a;
4213
4214 a = cescape(s);
4215 free(ret);
4216 if (!a)
4217 return NULL;
4218
4219 ret = a;
4220 }
4221
4222 if (buf) {
4223 *buf = ret;
4224 return ret ?: (char*) s;
4225 }
4226
4227 return ret ?: strdup(s);
4228 }
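/* Illustrative usage sketch (assumed, not taken from this file), showing the memory contract
 * documented above: with a 'buf' argument only *buf (if non-NULL) has to be freed and the return
 * value may alias the input; without 'buf' the return value is always a fresh allocation. The
 * string literal is just an example. */
#if 0 /* example only */
        _cleanup_free_ char *buf = NULL;
        const char *escaped;

        escaped = unit_escape_setting("echo 100%", UNIT_ESCAPE_SPECIFIERS, &buf);
        if (!escaped)
                return -ENOMEM;

        /* 'escaped' now reads "echo 100%%"; 'buf' owns that allocation, or is NULL if no escaping
         * was necessary and 'escaped' equals the input pointer. */
#endif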
4229
4230 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4231 _cleanup_free_ char *result = NULL;
4232 size_t n = 0, allocated = 0;
4233 char **i, *ret;
4234
4235 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4236 * way suitable for ExecStart= stanzas */
4237
4238 STRV_FOREACH(i, l) {
4239 _cleanup_free_ char *buf = NULL;
4240 const char *p;
4241 size_t a;
4242 char *q;
4243
4244 p = unit_escape_setting(*i, flags, &buf);
4245 if (!p)
4246 return NULL;
4247
4248 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4249 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4250 return NULL;
4251
4252 q = result + n;
4253 if (n > 0)
4254 *(q++) = ' ';
4255
4256 *(q++) = '"';
4257 q = stpcpy(q, p);
4258 *(q++) = '"';
4259
4260 n += a;
4261 }
4262
4263 if (!GREEDY_REALLOC(result, allocated, n + 1))
4264 return NULL;
4265
4266 result[n] = 0;
4267
4268 ret = result;
4269 result = NULL;
4270
4271 return ret;
4272 }
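/* Illustrative sketch (assumed, not taken from this file) of the output format: every entry is
 * escaped as requested, double-quoted and joined with single spaces, e.g. for serializing an
 * ExecStart= line of a transient unit. */
#if 0 /* example only */
        _cleanup_free_ char *line = NULL;

        line = unit_concat_strv(STRV_MAKE("/usr/bin/echo", "hello world", "100%"),
                                UNIT_ESCAPE_SPECIFIERS);
        /* 'line' now reads: "/usr/bin/echo" "hello world" "100%%" */
#endif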
4273
4274 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4275 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4276 const char *dir, *wrapped;
4277 int r;
4278
4279 assert(u);
4280 assert(name);
4281 assert(data);
4282
4283 if (UNIT_WRITE_FLAGS_NOOP(flags))
4284 return 0;
4285
4286 data = unit_escape_setting(data, flags, &escaped);
4287 if (!data)
4288 return -ENOMEM;
4289
4290 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4291 * previous section header is the same */
4292
4293 if (flags & UNIT_PRIVATE) {
4294 if (!UNIT_VTABLE(u)->private_section)
4295 return -EINVAL;
4296
4297 if (!u->transient_file || u->last_section_private < 0)
4298 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4299 else if (u->last_section_private == 0)
4300 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4301 } else {
4302 if (!u->transient_file || u->last_section_private < 0)
4303 data = strjoina("[Unit]\n", data);
4304 else if (u->last_section_private > 0)
4305 data = strjoina("\n[Unit]\n", data);
4306 }
4307
4308 if (u->transient_file) {
4309 /* When this is a transient unit file still being created, then let's not create a new drop-in but instead
4310 * write to the transient unit file. */
4311 fputs(data, u->transient_file);
4312
4313 if (!endswith(data, "\n"))
4314 fputc('\n', u->transient_file);
4315
4316 /* Remember which section we wrote this entry to */
4317 u->last_section_private = !!(flags & UNIT_PRIVATE);
4318 return 0;
4319 }
4320
4321 dir = unit_drop_in_dir(u, flags);
4322 if (!dir)
4323 return -EINVAL;
4324
4325 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4326 "# or an equivalent operation. Do not edit.\n",
4327 data,
4328 "\n");
4329
4330 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4331 if (r < 0)
4332 return r;
4333
4334 (void) mkdir_p_label(p, 0755);
4335 r = write_string_file_atomic_label(q, wrapped);
4336 if (r < 0)
4337 return r;
4338
4339 r = strv_push(&u->dropin_paths, q);
4340 if (r < 0)
4341 return r;
4342 q = NULL;
4343
4344 strv_uniq(u->dropin_paths);
4345
4346 u->dropin_mtime = now(CLOCK_REALTIME);
4347
4348 return 0;
4349 }
4350
4351 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4352 _cleanup_free_ char *p = NULL;
4353 va_list ap;
4354 int r;
4355
4356 assert(u);
4357 assert(name);
4358 assert(format);
4359
4360 if (UNIT_WRITE_FLAGS_NOOP(flags))
4361 return 0;
4362
4363 va_start(ap, format);
4364 r = vasprintf(&p, format, ap);
4365 va_end(ap);
4366
4367 if (r < 0)
4368 return -ENOMEM;
4369
4370 return unit_write_setting(u, flags, name, p);
4371 }
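/* Minimal usage sketch (assumed, not taken from this file): this is the typical way a property
 * change is persisted, either as a drop-in or, for a transient unit still being created, by
 * appending to the open transient file. 'flags' (UNIT_RUNTIME and/or UNIT_PERSISTENT) and 'd'
 * are the caller's; the property name is an example only. */
#if 0 /* example only */
        r = unit_write_settingf(u, flags | UNIT_ESCAPE_SPECIFIERS, "Description",
                                "Description=%s", d);
        if (r < 0)
                return r;
#endif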
4372
4373 int unit_make_transient(Unit *u) {
4374 _cleanup_free_ char *path = NULL;
4375 FILE *f;
4376
4377 assert(u);
4378
4379 if (!UNIT_VTABLE(u)->can_transient)
4380 return -EOPNOTSUPP;
4381
4382 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4383
4384 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
4385 if (!path)
4386 return -ENOMEM;
4387
4388 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4389 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4390
4391 RUN_WITH_UMASK(0022) {
4392 f = fopen(path, "we");
4393 if (!f)
4394 return -errno;
4395 }
4396
4397 safe_fclose(u->transient_file);
4398 u->transient_file = f;
4399
4400 free_and_replace(u->fragment_path, path);
4401
4402 u->source_path = mfree(u->source_path);
4403 u->dropin_paths = strv_free(u->dropin_paths);
4404 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4405
4406 u->load_state = UNIT_STUB;
4407 u->load_error = 0;
4408 u->transient = true;
4409
4410 unit_add_to_dbus_queue(u);
4411 unit_add_to_gc_queue(u);
4412
4413 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4414 u->transient_file);
4415
4416 return 0;
4417 }
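/* Illustrative sketch (assumed, not taken from this file) of the transient-unit flow: after the
 * unit is switched into transient mode, subsequent unit_write_setting*() calls land in the
 * still-open transient file rather than in a drop-in. The description value is a stand-in. */
#if 0 /* example only */
        r = unit_make_transient(u);
        if (r < 0)
                return r;

        r = unit_write_settingf(u, UNIT_RUNTIME, "Description", "Description=%s", "example");
        if (r < 0)
                return r;
#endif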
4418
4419 static void log_kill(pid_t pid, int sig, void *userdata) {
4420 _cleanup_free_ char *comm = NULL;
4421
4422 (void) get_process_comm(pid, &comm);
4423
4424 /* Don't log about processes marked with brackets, under the assumption that these are only temporary
4425 processes, such as systemd's own PAM stub process. */
4426 if (comm && comm[0] == '(')
4427 return;
4428
4429 log_unit_notice(userdata,
4430 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4431 pid,
4432 strna(comm),
4433 signal_to_string(sig));
4434 }
4435
4436 static int operation_to_signal(KillContext *c, KillOperation k) {
4437 assert(c);
4438
4439 switch (k) {
4440
4441 case KILL_TERMINATE:
4442 case KILL_TERMINATE_AND_LOG:
4443 return c->kill_signal;
4444
4445 case KILL_KILL:
4446 return SIGKILL;
4447
4448 case KILL_ABORT:
4449 return SIGABRT;
4450
4451 default:
4452 assert_not_reached("KillOperation unknown");
4453 }
4454 }
4455
4456 int unit_kill_context(
4457 Unit *u,
4458 KillContext *c,
4459 KillOperation k,
4460 pid_t main_pid,
4461 pid_t control_pid,
4462 bool main_pid_alien) {
4463
4464 bool wait_for_exit = false, send_sighup;
4465 cg_kill_log_func_t log_func = NULL;
4466 int sig, r;
4467
4468 assert(u);
4469 assert(c);
4470
4471 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4472 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4473
4474 if (c->kill_mode == KILL_NONE)
4475 return 0;
4476
4477 sig = operation_to_signal(c, k);
4478
4479 send_sighup =
4480 c->send_sighup &&
4481 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4482 sig != SIGHUP;
4483
4484 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4485 log_func = log_kill;
4486
4487 if (main_pid > 0) {
4488 if (log_func)
4489 log_func(main_pid, sig, u);
4490
4491 r = kill_and_sigcont(main_pid, sig);
4492 if (r < 0 && r != -ESRCH) {
4493 _cleanup_free_ char *comm = NULL;
4494 (void) get_process_comm(main_pid, &comm);
4495
4496 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4497 } else {
4498 if (!main_pid_alien)
4499 wait_for_exit = true;
4500
4501 if (r != -ESRCH && send_sighup)
4502 (void) kill(main_pid, SIGHUP);
4503 }
4504 }
4505
4506 if (control_pid > 0) {
4507 if (log_func)
4508 log_func(control_pid, sig, u);
4509
4510 r = kill_and_sigcont(control_pid, sig);
4511 if (r < 0 && r != -ESRCH) {
4512 _cleanup_free_ char *comm = NULL;
4513 (void) get_process_comm(control_pid, &comm);
4514
4515 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4516 } else {
4517 wait_for_exit = true;
4518
4519 if (r != -ESRCH && send_sighup)
4520 (void) kill(control_pid, SIGHUP);
4521 }
4522 }
4523
4524 if (u->cgroup_path &&
4525 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4526 _cleanup_set_free_ Set *pid_set = NULL;
4527
4528 /* Exclude the main/control pids from being killed via the cgroup */
4529 pid_set = unit_pid_set(main_pid, control_pid);
4530 if (!pid_set)
4531 return -ENOMEM;
4532
4533 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4534 sig,
4535 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4536 pid_set,
4537 log_func, u);
4538 if (r < 0) {
4539 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4540 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4541
4542 } else if (r > 0) {
4543
4544 /* FIXME: For now, on the legacy hierarchy, we
4545 * will not wait for the cgroup members to die
4546 * if we are running in a container or if this
4547 * is a delegation unit, simply because cgroup
4548 * notification is unreliable in these
4549 * cases. It doesn't work at all in
4550 * containers, and outside of containers it
4551 * can be confused easily by left-over
4552 * directories in the cgroup — which however
4553 * should not exist in non-delegated units. On
4554 * the unified hierarchy that's different,
4555 * there we get proper events. Hence rely on
4556 * them. */
4557
4558 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4559 (detect_container() == 0 && !UNIT_CGROUP_BOOL(u, delegate)))
4560 wait_for_exit = true;
4561
4562 if (send_sighup) {
4563 set_free(pid_set);
4564
4565 pid_set = unit_pid_set(main_pid, control_pid);
4566 if (!pid_set)
4567 return -ENOMEM;
4568
4569 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4570 SIGHUP,
4571 CGROUP_IGNORE_SELF,
4572 pid_set,
4573 NULL, NULL);
4574 }
4575 }
4576 }
4577
4578 return wait_for_exit;
4579 }
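/* Minimal caller sketch (assumed, not taken from this file), following the return convention
 * documented above; 'c', 'main_pid', 'control_pid' and the alien flag are whatever state the
 * concrete unit type keeps around. */
#if 0 /* example only */
        r = unit_kill_context(u, c, KILL_TERMINATE, main_pid, control_pid, false);
        if (r < 0)
                return r;
        if (r > 0) {
                /* We signalled something worth waiting for: arm the stop timeout and wait for
                 * SIGCHLD respectively the cgroup-empty notification. */
        }
#endif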
4580
4581 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4582 char prefix[strlen(path) + 1], *p;
4583 UnitDependencyInfo di;
4584 int r;
4585
4586 assert(u);
4587 assert(path);
4588
4589 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4590 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4591 * be). However, we build a prefix table for all possible prefixes so that newly appearing mount units can easily
4592 * determine which units to make themselves a dependency of. */
4593
4594 if (!path_is_absolute(path))
4595 return -EINVAL;
4596
4597 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4598 if (r < 0)
4599 return r;
4600
4601 p = strdup(path);
4602 if (!p)
4603 return -ENOMEM;
4604
4605 path_kill_slashes(p);
4606
4607 if (!path_is_normalized(p)) {
4608 free(p);
4609 return -EPERM;
4610 }
4611
4612 if (hashmap_contains(u->requires_mounts_for, p)) {
4613 free(p);
4614 return 0;
4615 }
4616
4617 di = (UnitDependencyInfo) {
4618 .origin_mask = mask
4619 };
4620
4621 r = hashmap_put(u->requires_mounts_for, p, di.data);
4622 if (r < 0) {
4623 free(p);
4624 return r;
4625 }
4626
4627 PATH_FOREACH_PREFIX_MORE(prefix, p) {
4628 Set *x;
4629
4630 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4631 if (!x) {
4632 char *q;
4633
4634 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4635 if (r < 0)
4636 return r;
4637
4638 q = strdup(prefix);
4639 if (!q)
4640 return -ENOMEM;
4641
4642 x = set_new(NULL);
4643 if (!x) {
4644 free(q);
4645 return -ENOMEM;
4646 }
4647
4648 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4649 if (r < 0) {
4650 free(q);
4651 set_free(x);
4652 return r;
4653 }
4654 }
4655
4656 r = set_put(x, u);
4657 if (r < 0)
4658 return r;
4659 }
4660
4661 return 0;
4662 }
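/* Illustrative sketch (assumed path and lookup, not taken from this file): registering e.g.
 * "/var/lib/foo" above records the path itself plus each ancestor prefix in the manager-wide
 * table, so a mount unit that shows up later (say, for /var) can find all interested units with a
 * single lookup: */
#if 0 /* example only */
        Set *s;

        s = hashmap_get(u->manager->units_requiring_mounts_for, "/var");
        /* every Unit in 's' (if any) asked for RequiresMountsFor= on a path at or below /var */
#endif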
4663
4664 int unit_setup_exec_runtime(Unit *u) {
4665 ExecRuntime **rt;
4666 size_t offset;
4667 Unit *other;
4668 Iterator i;
4669 void *v;
4670 int r;
4671
4672 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4673 assert(offset > 0);
4674
4675 /* Check if there already is an ExecRuntime for this unit */
4676 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4677 if (*rt)
4678 return 0;
4679
4680 /* Try to get it from somebody else */
4681 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4682 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4683 if (r == 1)
4684 return 1;
4685 }
4686
4687 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4688 }
4689
4690 int unit_setup_dynamic_creds(Unit *u) {
4691 ExecContext *ec;
4692 DynamicCreds *dcreds;
4693 size_t offset;
4694
4695 assert(u);
4696
4697 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4698 assert(offset > 0);
4699 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4700
4701 ec = unit_get_exec_context(u);
4702 assert(ec);
4703
4704 if (!ec->dynamic_user)
4705 return 0;
4706
4707 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4708 }
4709
4710 bool unit_type_supported(UnitType t) {
4711 if (_unlikely_(t < 0))
4712 return false;
4713 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4714 return false;
4715
4716 if (!unit_vtable[t]->supported)
4717 return true;
4718
4719 return unit_vtable[t]->supported();
4720 }
4721
4722 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4723 int r;
4724
4725 assert(u);
4726 assert(where);
4727
4728 r = dir_is_empty(where);
4729 if (r > 0 || r == -ENOTDIR)
4730 return;
4731 if (r < 0) {
4732 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4733 return;
4734 }
4735
4736 log_struct(LOG_NOTICE,
4737 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4738 LOG_UNIT_ID(u),
4739 LOG_UNIT_INVOCATION_ID(u),
4740 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4741 "WHERE=%s", where,
4742 NULL);
4743 }
4744
4745 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4746 _cleanup_free_ char *canonical_where = NULL;
4747 int r;
4748
4749 assert(u);
4750 assert(where);
4751
4752 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4753 if (r < 0) {
4754 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4755 return 0;
4756 }
4757
4758 /* We will happily ignore a trailing slash (or any redundant slashes) */
4759 if (path_equal(where, canonical_where))
4760 return 0;
4761
4762 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4763 log_struct(LOG_ERR,
4764 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4765 LOG_UNIT_ID(u),
4766 LOG_UNIT_INVOCATION_ID(u),
4767 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4768 "WHERE=%s", where,
4769 NULL);
4770
4771 return -ELOOP;
4772 }
4773
4774 bool unit_is_pristine(Unit *u) {
4775 assert(u);
4776
4777 /* Check if the unit already exists or is already around,
4778 * in a number of different ways. Note that to cater for unit
4779 * types such as slice, we are generally fine with units that
4780 * are marked UNIT_LOADED even though nothing was
4781 * actually loaded, as those unit types don't require a file
4782 * on disk to validly load. */
4783
4784 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4785 u->fragment_path ||
4786 u->source_path ||
4787 !strv_isempty(u->dropin_paths) ||
4788 u->job ||
4789 u->merged_into);
4790 }
4791
4792 pid_t unit_control_pid(Unit *u) {
4793 assert(u);
4794
4795 if (UNIT_VTABLE(u)->control_pid)
4796 return UNIT_VTABLE(u)->control_pid(u);
4797
4798 return 0;
4799 }
4800
4801 pid_t unit_main_pid(Unit *u) {
4802 assert(u);
4803
4804 if (UNIT_VTABLE(u)->main_pid)
4805 return UNIT_VTABLE(u)->main_pid(u);
4806
4807 return 0;
4808 }
4809
4810 static void unit_unref_uid_internal(
4811 Unit *u,
4812 uid_t *ref_uid,
4813 bool destroy_now,
4814 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4815
4816 assert(u);
4817 assert(ref_uid);
4818 assert(_manager_unref_uid);
4819
4820 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4821 * gid_t are actually the same type, with the same validity rules.
4822 *
4823 * Drops a reference to UID/GID from a unit. */
4824
4825 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4826 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4827
4828 if (!uid_is_valid(*ref_uid))
4829 return;
4830
4831 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4832 *ref_uid = UID_INVALID;
4833 }
4834
4835 void unit_unref_uid(Unit *u, bool destroy_now) {
4836 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4837 }
4838
4839 void unit_unref_gid(Unit *u, bool destroy_now) {
4840 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4841 }
4842
4843 static int unit_ref_uid_internal(
4844 Unit *u,
4845 uid_t *ref_uid,
4846 uid_t uid,
4847 bool clean_ipc,
4848 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4849
4850 int r;
4851
4852 assert(u);
4853 assert(ref_uid);
4854 assert(uid_is_valid(uid));
4855 assert(_manager_ref_uid);
4856
4857 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4858 * are actually the same type, and have the same validity rules.
4859 *
4860 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4861 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4862 * drops to zero. */
4863
4864 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4865 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4866
4867 if (*ref_uid == uid)
4868 return 0;
4869
4870 if (uid_is_valid(*ref_uid)) /* Already set? */
4871 return -EBUSY;
4872
4873 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4874 if (r < 0)
4875 return r;
4876
4877 *ref_uid = uid;
4878 return 1;
4879 }
4880
4881 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4882 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4883 }
4884
4885 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4886 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4887 }
4888
4889 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4890 int r = 0, q = 0;
4891
4892 assert(u);
4893
4894 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4895
4896 if (uid_is_valid(uid)) {
4897 r = unit_ref_uid(u, uid, clean_ipc);
4898 if (r < 0)
4899 return r;
4900 }
4901
4902 if (gid_is_valid(gid)) {
4903 q = unit_ref_gid(u, gid, clean_ipc);
4904 if (q < 0) {
4905 if (r > 0)
4906 unit_unref_uid(u, false);
4907
4908 return q;
4909 }
4910 }
4911
4912 return r > 0 || q > 0;
4913 }
4914
4915 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4916 ExecContext *c;
4917 int r;
4918
4919 assert(u);
4920
4921 c = unit_get_exec_context(u);
4922
4923 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4924 if (r < 0)
4925 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4926
4927 return r;
4928 }
4929
4930 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4931 assert(u);
4932
4933 unit_unref_uid(u, destroy_now);
4934 unit_unref_gid(u, destroy_now);
4935 }
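/* Illustrative lifecycle sketch (assumed, not taken from this file): a reference is typically
 * taken when a forked-off child reports the resolved User=/Group= (see unit_notify_user_lookup()
 * below) and dropped again when the unit no longer needs the UID/GID; passing destroy_now=true
 * additionally requests that a dynamic UID/GID's leftover IPC objects be cleaned up right away. */
#if 0 /* example only */
        r = unit_ref_uid_gid(u, uid, gid);      /* take the reference, e.g. on user lookup */
        if (r < 0)
                return r;

        unit_unref_uid_gid(u, true);            /* drop it and destroy leftover IPC objects */
#endif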
4936
4937 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4938 int r;
4939
4940 assert(u);
4941
4942 /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group names
4943 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4944 * objects when no service references the UID/GID anymore. */
4945
4946 r = unit_ref_uid_gid(u, uid, gid);
4947 if (r > 0)
4948 bus_unit_send_change_signal(u);
4949 }
4950
4951 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4952 int r;
4953
4954 assert(u);
4955
4956 /* Set the invocation ID for this unit. If this fails, we don't roll back to the previous ID but reset it entirely. */
4957
4958 if (sd_id128_equal(u->invocation_id, id))
4959 return 0;
4960
4961 if (!sd_id128_is_null(u->invocation_id))
4962 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4963
4964 if (sd_id128_is_null(id)) {
4965 r = 0;
4966 goto reset;
4967 }
4968
4969 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4970 if (r < 0)
4971 goto reset;
4972
4973 u->invocation_id = id;
4974 sd_id128_to_string(id, u->invocation_id_string);
4975
4976 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4977 if (r < 0)
4978 goto reset;
4979
4980 return 0;
4981
4982 reset:
4983 u->invocation_id = SD_ID128_NULL;
4984 u->invocation_id_string[0] = 0;
4985 return r;
4986 }
4987
4988 int unit_acquire_invocation_id(Unit *u) {
4989 sd_id128_t id;
4990 int r;
4991
4992 assert(u);
4993
4994 r = sd_id128_randomize(&id);
4995 if (r < 0)
4996 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4997
4998 r = unit_set_invocation_id(u, id);
4999 if (r < 0)
5000 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5001
5002 return 0;
5003 }
5004
5005 void unit_set_exec_params(Unit *u, ExecParameters *p) {
5006 assert(u);
5007 assert(p);
5008
5009 p->cgroup_path = u->cgroup_path;
5010 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, UNIT_CGROUP_BOOL(u, delegate));
5011 }
5012
5013 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5014 int r;
5015
5016 assert(u);
5017 assert(ret);
5018
5019 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5020 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5021
5022 (void) unit_realize_cgroup(u);
5023
5024 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5025 if (r != 0)
5026 return r;
5027
5028 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5029 (void) ignore_signals(SIGPIPE, -1);
5030
5031 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5032
5033 if (u->cgroup_path) {
5034 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5035 if (r < 0) {
5036 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5037 _exit(EXIT_CGROUP);
5038 }
5039 }
5040
5041 return 0;
5042 }
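/* Minimal usage sketch (assumed, not taken from this file), following the "0 in the child, > 0 in
 * the parent" convention documented above. The process name and the child's work are placeholders. */
#if 0 /* example only */
        pid_t pid;

        r = unit_fork_helper_process(u, "(sd-example)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* Child: already moved into the unit's cgroup; do the work, then exit. */
                _exit(EXIT_SUCCESS);
        }

        /* Parent: 'pid' can now be supervised, e.g. via unit_watch_pid(). */
#endif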
5043
5044 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5045 assert(u);
5046 assert(d >= 0);
5047 assert(d < _UNIT_DEPENDENCY_MAX);
5048 assert(other);
5049
5050 if (di.origin_mask == 0 && di.destination_mask == 0) {
5051 /* No bit set anymore, let's drop the whole entry */
5052 assert_se(hashmap_remove(u->dependencies[d], other));
5053 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5054 } else
5055 /* Mask was reduced, let's update the entry */
5056 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5057 }
5058
5059 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5060 UnitDependency d;
5061
5062 assert(u);
5063
5064 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5065
5066 if (mask == 0)
5067 return;
5068
5069 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5070 bool done;
5071
5072 do {
5073 UnitDependencyInfo di;
5074 Unit *other;
5075 Iterator i;
5076
5077 done = true;
5078
5079 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5080 UnitDependency q;
5081
5082 if ((di.origin_mask & ~mask) == di.origin_mask)
5083 continue;
5084 di.origin_mask &= ~mask;
5085 unit_update_dependency_mask(u, d, other, di);
5086
5087 /* We updated the dependency from our unit to the other unit now. But most dependencies
5088 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5089 * all dependency types on the other unit and delete all those which point to us and
5090 * have the right mask set. */
5091
5092 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5093 UnitDependencyInfo dj;
5094
5095 dj.data = hashmap_get(other->dependencies[q], u);
5096 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5097 continue;
5098 dj.destination_mask &= ~mask;
5099
5100 unit_update_dependency_mask(other, q, u, dj);
5101 }
5102
5103 unit_add_to_gc_queue(other);
5104
5105 done = false;
5106 break;
5107 }
5108
5109 } while (!done);
5110 }
5111 }
5112
5113 static int unit_export_invocation_id(Unit *u) {
5114 const char *p;
5115 int r;
5116
5117 assert(u);
5118
5119 if (u->exported_invocation_id)
5120 return 0;
5121
5122 if (sd_id128_is_null(u->invocation_id))
5123 return 0;
5124
5125 p = strjoina("/run/systemd/units/invocation:", u->id);
5126 r = symlink_atomic(u->invocation_id_string, p);
5127 if (r < 0)
5128 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5129
5130 u->exported_invocation_id = true;
5131 return 0;
5132 }
5133
5134 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5135 const char *p;
5136 char buf[2];
5137 int r;
5138
5139 assert(u);
5140 assert(c);
5141
5142 if (u->exported_log_level_max)
5143 return 0;
5144
5145 if (c->log_level_max < 0)
5146 return 0;
5147
5148 assert(c->log_level_max <= 7);
5149
5150 buf[0] = '0' + c->log_level_max;
5151 buf[1] = 0;
5152
5153 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5154 r = symlink_atomic(buf, p);
5155 if (r < 0)
5156 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5157
5158 u->exported_log_level_max = true;
5159 return 0;
5160 }
5161
5162 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5163 _cleanup_close_ int fd = -1;
5164 struct iovec *iovec;
5165 const char *p;
5166 char *pattern;
5167 le64_t *sizes;
5168 ssize_t n;
5169 size_t i;
5170 int r;
5171
5172 if (u->exported_log_extra_fields)
5173 return 0;
5174
5175 if (c->n_log_extra_fields <= 0)
5176 return 0;
5177
5178 sizes = newa(le64_t, c->n_log_extra_fields);
5179 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5180
5181 for (i = 0; i < c->n_log_extra_fields; i++) {
5182 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5183
5184 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5185 iovec[i*2+1] = c->log_extra_fields[i];
5186 }
5187
5188 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5189 pattern = strjoina(p, ".XXXXXX");
5190
5191 fd = mkostemp_safe(pattern);
5192 if (fd < 0)
5193 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5194
5195 n = writev(fd, iovec, c->n_log_extra_fields*2);
5196 if (n < 0) {
5197 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5198 goto fail;
5199 }
5200
5201 (void) fchmod(fd, 0644);
5202
5203 if (rename(pattern, p) < 0) {
5204 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5205 goto fail;
5206 }
5207
5208 u->exported_log_extra_fields = true;
5209 return 0;
5210
5211 fail:
5212 (void) unlink(pattern);
5213 return r;
5214 }
5215
5216 void unit_export_state_files(Unit *u) {
5217 const ExecContext *c;
5218
5219 assert(u);
5220
5221 if (!u->id)
5222 return;
5223
5224 if (!MANAGER_IS_SYSTEM(u->manager))
5225 return;
5226
5227 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5228 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5229 * the IPC system itself and PID 1 also log to the journal.
5230 *
5231 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
5232 * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5233 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5234 * namespace at least.
5235 *
5236 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5237 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5238 * them with one. */
5239
5240 (void) unit_export_invocation_id(u);
5241
5242 c = unit_get_exec_context(u);
5243 if (c) {
5244 (void) unit_export_log_level_max(u, c);
5245 (void) unit_export_log_extra_fields(u, c);
5246 }
5247 }
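/* Illustrative consumer sketch (assumed, not taken from this file): since the exported "files" are
 * symlinks whose target is the payload, a reader (journald in practice) only needs a single
 * readlink() to fetch e.g. the invocation ID. The unit name below is a placeholder. */
#if 0 /* example only */
        _cleanup_free_ char *id = NULL;

        r = readlink_malloc("/run/systemd/units/invocation:example.service", &id);
        if (r >= 0) {
                /* 'id' now holds the 128-bit invocation ID formatted as 32 hex characters */
        }
#endif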
5248
5249 void unit_unlink_state_files(Unit *u) {
5250 const char *p;
5251
5252 assert(u);
5253
5254 if (!u->id)
5255 return;
5256
5257 if (!MANAGER_IS_SYSTEM(u->manager))
5258 return;
5259
5260 /* Undoes the effect of unit_export_state_files() */
5261
5262 if (u->exported_invocation_id) {
5263 p = strjoina("/run/systemd/units/invocation:", u->id);
5264 (void) unlink(p);
5265
5266 u->exported_invocation_id = false;
5267 }
5268
5269 if (u->exported_log_level_max) {
5270 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5271 (void) unlink(p);
5272
5273 u->exported_log_level_max = false;
5274 }
5275
5276 if (u->exported_log_extra_fields) {
5277 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5278 (void) unlink(p);
5279
5280 u->exported_log_extra_fields = false;
5281 }
5282 }
5283
5284 int unit_prepare_exec(Unit *u) {
5285 int r;
5286
5287 assert(u);
5288
5289 /* Prepares everything so that we can fork off a process for this unit */
5290
5291 (void) unit_realize_cgroup(u);
5292
5293 if (u->reset_accounting) {
5294 (void) unit_reset_cpu_accounting(u);
5295 (void) unit_reset_ip_accounting(u);
5296 u->reset_accounting = false;
5297 }
5298
5299 unit_export_state_files(u);
5300
5301 r = unit_setup_exec_runtime(u);
5302 if (r < 0)
5303 return r;
5304
5305 r = unit_setup_dynamic_creds(u);
5306 if (r < 0)
5307 return r;
5308
5309 return 0;
5310 }
5311
5312 static void log_leftover(pid_t pid, int sig, void *userdata) {
5313 _cleanup_free_ char *comm = NULL;
5314
5315 (void) get_process_comm(pid, &comm);
5316
5317 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5318 return;
5319
5320 log_unit_warning(userdata,
5321 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5322 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5323 pid, strna(comm));
5324 }
5325
5326 void unit_warn_leftover_processes(Unit *u) {
5327 assert(u);
5328
5329 (void) unit_pick_cgroup_path(u);
5330
5331 if (!u->cgroup_path)
5332 return;
5333
5334 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5335 }
5336
5337 bool unit_needs_console(Unit *u) {
5338 ExecContext *ec;
5339 UnitActiveState state;
5340
5341 assert(u);
5342
5343 state = unit_active_state(u);
5344
5345 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5346 return false;
5347
5348 if (UNIT_VTABLE(u)->needs_console)
5349 return UNIT_VTABLE(u)->needs_console(u);
5350
5351 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5352 ec = unit_get_exec_context(u);
5353 if (!ec)
5354 return false;
5355
5356 return exec_context_may_touch_console(ec);
5357 }
5358
5359 const char *unit_label_path(Unit *u) {
5360 const char *p;
5361
5362 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5363 * when validating access checks. */
5364
5365 p = u->source_path ?: u->fragment_path;
5366 if (!p)
5367 return NULL;
5368
5369 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5370 if (path_equal(p, "/dev/null"))
5371 return NULL;
5372
5373 return p;
5374 }
5375
5376 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5377 [COLLECT_INACTIVE] = "inactive",
5378 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5379 };
5380
5381 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);