]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
macro: introduce TAKE_PTR() macro
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/prctl.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "alloc-util.h"
32 #include "bus-common-errors.h"
33 #include "bus-util.h"
34 #include "cgroup-util.h"
35 #include "dbus-unit.h"
36 #include "dbus.h"
37 #include "dropin.h"
38 #include "escape.h"
39 #include "execute.h"
40 #include "fd-util.h"
41 #include "fileio-label.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "id128-util.h"
45 #include "io-util.h"
46 #include "load-dropin.h"
47 #include "load-fragment.h"
48 #include "log.h"
49 #include "macro.h"
50 #include "missing.h"
51 #include "mkdir.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "process-util.h"
55 #include "set.h"
56 #include "signal-util.h"
57 #include "sparse-endian.h"
58 #include "special.h"
59 #include "specifier.h"
60 #include "stat-util.h"
61 #include "stdio-util.h"
62 #include "string-table.h"
63 #include "string-util.h"
64 #include "strv.h"
65 #include "umask-util.h"
66 #include "unit-name.h"
67 #include "unit.h"
68 #include "user-util.h"
69 #include "virt.h"
70
/* Table of per-type operation vtables, indexed by UnitType. Entries not listed here
 * (if any) stay NULL. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
84
85 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
86
/* Allocates and zero-initializes a new unit object of 'size' bytes ('size' may be larger
 * than sizeof(Unit) since the per-type structs embed Unit as their first member), and
 * presets all fields whose "unset" value is not zero. Returns NULL on OOM. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID; /* type is only assigned once the first name is added */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* All fds start out closed (-1) */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
129
130 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
131 _cleanup_(unit_freep) Unit *u = NULL;
132 int r;
133
134 u = unit_new(m, size);
135 if (!u)
136 return -ENOMEM;
137
138 r = unit_add_name(u, name);
139 if (r < 0)
140 return r;
141
142 *ret = u;
143 u = NULL;
144 return r;
145 }
146
147 bool unit_has_name(Unit *u, const char *name) {
148 assert(u);
149 assert(name);
150
151 return set_contains(u->names, (char*) name);
152 }
153
154 static void unit_init(Unit *u) {
155 CGroupContext *cc;
156 ExecContext *ec;
157 KillContext *kc;
158
159 assert(u);
160 assert(u->manager);
161 assert(u->type >= 0);
162
163 cc = unit_get_cgroup_context(u);
164 if (cc) {
165 cgroup_context_init(cc);
166
167 /* Copy in the manager defaults into the cgroup
168 * context, _before_ the rest of the settings have
169 * been initialized */
170
171 cc->cpu_accounting = u->manager->default_cpu_accounting;
172 cc->io_accounting = u->manager->default_io_accounting;
173 cc->ip_accounting = u->manager->default_ip_accounting;
174 cc->blockio_accounting = u->manager->default_blockio_accounting;
175 cc->memory_accounting = u->manager->default_memory_accounting;
176 cc->tasks_accounting = u->manager->default_tasks_accounting;
177 cc->ip_accounting = u->manager->default_ip_accounting;
178
179 if (u->type != UNIT_SLICE)
180 cc->tasks_max = u->manager->default_tasks_max;
181 }
182
183 ec = unit_get_exec_context(u);
184 if (ec) {
185 exec_context_init(ec);
186
187 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
188 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
189 }
190
191 kc = unit_get_kill_context(u);
192 if (kc)
193 kill_context_init(kc);
194
195 if (UNIT_VTABLE(u)->init)
196 UNIT_VTABLE(u)->init(u);
197 }
198
/* Registers an additional name for the unit 'u'. 'text' may be a template name, in which
 * case the unit's instance string is spliced in. On the very first name the unit also
 * acquires its type, id and instance. Returns 0 on success (or if the name was already
 * present), negative errno otherwise. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be resolved if we know our instance */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. Taken by some other unit? Refuse. */
        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of a unit must agree on the unit type */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both containers stay consistent */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name ever added: it determines the unit's type, id and instance */
                u->type = t;
                u->id = s;
                u->instance = i;

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);

                /* Ownership of 'i' moved to u->instance, don't free it on return */
                i = NULL;
        }

        /* Ownership of 's' moved into u->names (and possibly u->id), don't free it */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
283
/* Selects one of the unit's existing names (given by 'name', possibly a template to be
 * instantiated) as the unit's primary id, and updates u->instance accordingly.
 * Returns -ENOENT if 'name' is not among the unit's names. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id; 'i' is newly allocated */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* 's' is owned by u->names, so u->id merely borrows it */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
323
324 int unit_set_description(Unit *u, const char *description) {
325 int r;
326
327 assert(u);
328
329 r = free_and_strdup(&u->description, empty_to_null(description));
330 if (r < 0)
331 return r;
332 if (r > 0)
333 unit_add_to_dbus_queue(u);
334
335 return 0;
336 }
337
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending jobs pin the unit */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected */
        if (u->perpetual)
                return false;

        /* A bus client still tracks this unit */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type a chance to veto collection */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
406
407 void unit_add_to_load_queue(Unit *u) {
408 assert(u);
409 assert(u->type != _UNIT_TYPE_INVALID);
410
411 if (u->load_state != UNIT_STUB || u->in_load_queue)
412 return;
413
414 LIST_PREPEND(load_queue, u->manager->load_queue, u);
415 u->in_load_queue = true;
416 }
417
418 void unit_add_to_cleanup_queue(Unit *u) {
419 assert(u);
420
421 if (u->in_cleanup_queue)
422 return;
423
424 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
425 u->in_cleanup_queue = true;
426 }
427
428 void unit_add_to_gc_queue(Unit *u) {
429 assert(u);
430
431 if (u->in_gc_queue || u->in_cleanup_queue)
432 return;
433
434 if (!unit_may_gc(u))
435 return;
436
437 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
438 u->in_gc_queue = true;
439 }
440
441 void unit_add_to_dbus_queue(Unit *u) {
442 assert(u);
443 assert(u->type != _UNIT_TYPE_INVALID);
444
445 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
446 return;
447
448 /* Shortcut things if nobody cares */
449 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
450 sd_bus_track_count(u->bus_track) <= 0 &&
451 set_isempty(u->manager->private_buses)) {
452 u->sent_dbus_new_signal = true;
453 return;
454 }
455
456 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
457 u->in_dbus_queue = true;
458 }
459
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Drop 'u' from every dependency set of the peer, across all dependency types,
                 * since the peer may reference us under a different type than 'h' represents */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                /* Losing a reference may make the peer collectable */
                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
480
/* Removes the on-disk unit file and drop-ins of a transient unit. No-op for
 * non-transient units. Best-effort: unlink/rmdir failures are ignored. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* only succeeds once the directory is empty */
        }
}
511
/* Releases the unit's RequiresMountsFor= bookkeeping: removes the unit from the manager's
 * reverse map (path prefix → set of units) for every prefix of every registered path, and
 * frees the unit's own path hashmap. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Takes ownership of one key at a time until the map is drained */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk all path prefixes of 'path' ("/", "/foo", "/foo/bar", …) */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit for this prefix gone: drop the entry entirely,
                                 * including the key string 'y' the manager owns */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
545
546 static void unit_done(Unit *u) {
547 ExecContext *ec;
548 CGroupContext *cc;
549
550 assert(u);
551
552 if (u->type < 0)
553 return;
554
555 if (UNIT_VTABLE(u)->done)
556 UNIT_VTABLE(u)->done(u);
557
558 ec = unit_get_exec_context(u);
559 if (ec)
560 exec_context_done(ec);
561
562 cc = unit_get_cgroup_context(u);
563 if (cc)
564 cgroup_context_done(cc);
565 }
566
/* Completely destroys a unit object: detaches it from all manager data structures and
 * run queues, releases its jobs, dependencies, cgroup/BPF state and file descriptors,
 * and finally frees the memory. NULL-safe. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* Only remove the transient unit file when not merely reloading */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all our names from the manager's name → unit map */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Cancel and release any pending jobs */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Drop dependency links in both directions */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Drop references we hold on other units */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove ourselves from every queue / list we might be on */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        /* Close the BPF accounting/firewall map fds */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id points into this set, so it is freed here too */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
683
684 UnitActiveState unit_active_state(Unit *u) {
685 assert(u);
686
687 if (u->load_state == UNIT_MERGED)
688 return unit_active_state(unit_follow_merge(u));
689
690 /* After a reload it might happen that a unit is not correctly
691 * loaded but still has a process around. That's why we won't
692 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
693
694 return UNIT_VTABLE(u)->active_state(u);
695 }
696
/* Returns the unit type's low-level sub-state as a string, as provided by the type's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
702
703 static int set_complete_move(Set **s, Set **other) {
704 assert(s);
705 assert(other);
706
707 if (!other)
708 return 0;
709
710 if (*s)
711 return set_move(*s, *other);
712 else
713 *s = TAKE_PTR(*other);
714
715 return 0;
716 }
717
718 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
719 assert(s);
720 assert(other);
721
722 if (!*other)
723 return 0;
724
725 if (*s)
726 return hashmap_move(*s, *other);
727 else
728 *s = TAKE_PTR(*other);
729
730 return 0;
731 }
732
/* Transfers all names of 'other' over to 'u' and repoints the manager's name → unit map
 * accordingly. After this, 'other' has no names and no id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* If the names were merged entry-by-entry (rather than the set being taken over
         * wholesale), free the now-empty leftover set; NULL-safe either way. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Every name now maps to 'u' in the manager's units hashmap */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
754
/* Pre-allocates room in u's dependency hashmap of type 'd' so that the later
 * merge_dependencies() cannot fail on OOM mid-way. */
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}
775
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                /* may be NULL data (all-zero masks) if no such dependency exists yet */
                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        /* log first, so the context survives into the logs before the assert aborts */
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
839
/* Merges unit 'other' into 'u': names, references and dependencies all move over to 'u',
 * and 'other' is left as an empty shell in UNIT_MERGED state pointing at 'u', queued for
 * cleanup. Fails if the two units are incompatible or 'other' is already in use. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both units must either be instanced or not */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Copy the id to the stack: merge_names() below clears other->id */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was just set to UNIT_MERGED above, so this condition is
         * always true — presumably the pre-merge state was meant; confirm intent. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
917
918 int unit_merge_by_name(Unit *u, const char *name) {
919 _cleanup_free_ char *s = NULL;
920 Unit *other;
921 int r;
922
923 assert(u);
924 assert(name);
925
926 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
927 if (!u->instance)
928 return -EINVAL;
929
930 r = unit_name_replace_instance(name, u->instance, &s);
931 if (r < 0)
932 return r;
933
934 name = s;
935 }
936
937 other = manager_get_unit(u->manager, name);
938 if (other)
939 return unit_merge(u, other);
940
941 return unit_add_name(u, name);
942 }
943
944 Unit* unit_follow_merge(Unit *u) {
945 assert(u);
946
947 while (u->load_state == UNIT_MERGED)
948 assert_se(u = u->merged_into);
949
950 return u;
951 }
952
/* Adds the implicit dependencies that an exec context requires: mount dependencies for
 * the various directories it uses, plus (on the system instance only) ordering against
 * tmpfiles setup and the journal socket when needed. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* RuntimeDirectory=, StateDirectory= and friends: mount deps on each configured path */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only make sense for the system instance */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Nothing more to do unless stdout or stderr is wired to the journal/kmsg/syslog */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1032
1033 const char *unit_description(Unit *u) {
1034 assert(u);
1035
1036 if (u->description)
1037 return u->description;
1038
1039 return strna(u->id);
1040 }
1041
1042 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1043 const struct {
1044 UnitDependencyMask mask;
1045 const char *name;
1046 } table[] = {
1047 { UNIT_DEPENDENCY_FILE, "file" },
1048 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1049 { UNIT_DEPENDENCY_DEFAULT, "default" },
1050 { UNIT_DEPENDENCY_UDEV, "udev" },
1051 { UNIT_DEPENDENCY_PATH, "path" },
1052 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1053 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1054 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1055 };
1056 size_t i;
1057
1058 assert(f);
1059 assert(kind);
1060 assert(space);
1061
1062 for (i = 0; i < ELEMENTSOF(table); i++) {
1063
1064 if (mask == 0)
1065 break;
1066
1067 if ((mask & table[i].mask) == table[i].mask) {
1068 if (*space)
1069 fputc(' ', f);
1070 else
1071 *space = true;
1072
1073 fputs(kind, f);
1074 fputs("-", f);
1075 fputs(table[i].name, f);
1076
1077 mask &= ~table[i].mask;
1078 }
1079 }
1080
1081 assert(mask == 0);
1082 }
1083
1084 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1085 char *t, **j;
1086 UnitDependency d;
1087 Iterator i;
1088 const char *prefix2;
1089 char
1090 timestamp0[FORMAT_TIMESTAMP_MAX],
1091 timestamp1[FORMAT_TIMESTAMP_MAX],
1092 timestamp2[FORMAT_TIMESTAMP_MAX],
1093 timestamp3[FORMAT_TIMESTAMP_MAX],
1094 timestamp4[FORMAT_TIMESTAMP_MAX],
1095 timespan[FORMAT_TIMESPAN_MAX];
1096 Unit *following;
1097 _cleanup_set_free_ Set *following_set = NULL;
1098 const char *n;
1099 CGroupMask m;
1100 int r;
1101
1102 assert(u);
1103 assert(u->type >= 0);
1104
1105 prefix = strempty(prefix);
1106 prefix2 = strjoina(prefix, "\t");
1107
1108 fprintf(f,
1109 "%s-> Unit %s:\n"
1110 "%s\tDescription: %s\n"
1111 "%s\tInstance: %s\n"
1112 "%s\tUnit Load State: %s\n"
1113 "%s\tUnit Active State: %s\n"
1114 "%s\tState Change Timestamp: %s\n"
1115 "%s\tInactive Exit Timestamp: %s\n"
1116 "%s\tActive Enter Timestamp: %s\n"
1117 "%s\tActive Exit Timestamp: %s\n"
1118 "%s\tInactive Enter Timestamp: %s\n"
1119 "%s\tMay GC: %s\n"
1120 "%s\tNeed Daemon Reload: %s\n"
1121 "%s\tTransient: %s\n"
1122 "%s\tPerpetual: %s\n"
1123 "%s\tGarbage Collection Mode: %s\n"
1124 "%s\tSlice: %s\n"
1125 "%s\tCGroup: %s\n"
1126 "%s\tCGroup realized: %s\n",
1127 prefix, u->id,
1128 prefix, unit_description(u),
1129 prefix, strna(u->instance),
1130 prefix, unit_load_state_to_string(u->load_state),
1131 prefix, unit_active_state_to_string(unit_active_state(u)),
1132 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1133 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1134 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1135 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1136 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1137 prefix, yes_no(unit_may_gc(u)),
1138 prefix, yes_no(unit_need_daemon_reload(u)),
1139 prefix, yes_no(u->transient),
1140 prefix, yes_no(u->perpetual),
1141 prefix, collect_mode_to_string(u->collect_mode),
1142 prefix, strna(unit_slice_name(u)),
1143 prefix, strna(u->cgroup_path),
1144 prefix, yes_no(u->cgroup_realized));
1145
1146 if (u->cgroup_realized_mask != 0) {
1147 _cleanup_free_ char *s = NULL;
1148 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1149 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1150 }
1151 if (u->cgroup_enabled_mask != 0) {
1152 _cleanup_free_ char *s = NULL;
1153 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1154 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1155 }
1156 m = unit_get_own_mask(u);
1157 if (m != 0) {
1158 _cleanup_free_ char *s = NULL;
1159 (void) cg_mask_to_string(m, &s);
1160 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1161 }
1162 m = unit_get_members_mask(u);
1163 if (m != 0) {
1164 _cleanup_free_ char *s = NULL;
1165 (void) cg_mask_to_string(m, &s);
1166 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1167 }
1168
1169 SET_FOREACH(t, u->names, i)
1170 fprintf(f, "%s\tName: %s\n", prefix, t);
1171
1172 if (!sd_id128_is_null(u->invocation_id))
1173 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1174 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1175
1176 STRV_FOREACH(j, u->documentation)
1177 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1178
1179 following = unit_following(u);
1180 if (following)
1181 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1182
1183 r = unit_following_set(u, &following_set);
1184 if (r >= 0) {
1185 Unit *other;
1186
1187 SET_FOREACH(other, following_set, i)
1188 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1189 }
1190
1191 if (u->fragment_path)
1192 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1193
1194 if (u->source_path)
1195 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1196
1197 STRV_FOREACH(j, u->dropin_paths)
1198 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1199
1200 if (u->failure_action != EMERGENCY_ACTION_NONE)
1201 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1202 if (u->success_action != EMERGENCY_ACTION_NONE)
1203 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1204
1205 if (u->job_timeout != USEC_INFINITY)
1206 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1207
1208 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1209 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1210
1211 if (u->job_timeout_reboot_arg)
1212 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1213
1214 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1215 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1216
1217 if (dual_timestamp_is_set(&u->condition_timestamp))
1218 fprintf(f,
1219 "%s\tCondition Timestamp: %s\n"
1220 "%s\tCondition Result: %s\n",
1221 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1222 prefix, yes_no(u->condition_result));
1223
1224 if (dual_timestamp_is_set(&u->assert_timestamp))
1225 fprintf(f,
1226 "%s\tAssert Timestamp: %s\n"
1227 "%s\tAssert Result: %s\n",
1228 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1229 prefix, yes_no(u->assert_result));
1230
1231 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1232 UnitDependencyInfo di;
1233 Unit *other;
1234
1235 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1236 bool space = false;
1237
1238 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1239
1240 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1241 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1242
1243 fputs(")\n", f);
1244 }
1245 }
1246
1247 if (!hashmap_isempty(u->requires_mounts_for)) {
1248 UnitDependencyInfo di;
1249 const char *path;
1250
1251 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1252 bool space = false;
1253
1254 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1255
1256 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1257 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1258
1259 fputs(")\n", f);
1260 }
1261 }
1262
1263 if (u->load_state == UNIT_LOADED) {
1264
1265 fprintf(f,
1266 "%s\tStopWhenUnneeded: %s\n"
1267 "%s\tRefuseManualStart: %s\n"
1268 "%s\tRefuseManualStop: %s\n"
1269 "%s\tDefaultDependencies: %s\n"
1270 "%s\tOnFailureJobMode: %s\n"
1271 "%s\tIgnoreOnIsolate: %s\n",
1272 prefix, yes_no(u->stop_when_unneeded),
1273 prefix, yes_no(u->refuse_manual_start),
1274 prefix, yes_no(u->refuse_manual_stop),
1275 prefix, yes_no(u->default_dependencies),
1276 prefix, job_mode_to_string(u->on_failure_job_mode),
1277 prefix, yes_no(u->ignore_on_isolate));
1278
1279 if (UNIT_VTABLE(u)->dump)
1280 UNIT_VTABLE(u)->dump(u, f, prefix2);
1281
1282 } else if (u->load_state == UNIT_MERGED)
1283 fprintf(f,
1284 "%s\tMerged into: %s\n",
1285 prefix, u->merged_into->id);
1286 else if (u->load_state == UNIT_ERROR)
1287 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1288
1289 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1290 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1291
1292 if (u->job)
1293 job_dump(u->job, f, prefix2);
1294
1295 if (u->nop_job)
1296 job_dump(u->nop_job, f, prefix2);
1297 }
1298
1299 /* Common implementation for multiple backends */
1300 int unit_load_fragment_and_dropin(Unit *u) {
1301 int r;
1302
1303 assert(u);
1304
1305 /* Load a .{service,socket,...} file */
1306 r = unit_load_fragment(u);
1307 if (r < 0)
1308 return r;
1309
1310 if (u->load_state == UNIT_STUB)
1311 return -ENOENT;
1312
1313 /* Load drop-in directory data. If u is an alias, we might be reloading the
1314 * target unit needlessly. But we cannot be sure which drops-ins have already
1315 * been loaded and which not, at least without doing complicated book-keeping,
1316 * so let's always reread all drop-ins. */
1317 return unit_load_dropin(unit_follow_merge(u));
1318 }
1319
1320 /* Common implementation for multiple backends */
1321 int unit_load_fragment_and_dropin_optional(Unit *u) {
1322 int r;
1323
1324 assert(u);
1325
1326 /* Same as unit_load_fragment_and_dropin(), but whether
1327 * something can be loaded or not doesn't matter. */
1328
1329 /* Load a .service file */
1330 r = unit_load_fragment(u);
1331 if (r < 0)
1332 return r;
1333
1334 if (u->load_state == UNIT_STUB)
1335 u->load_state = UNIT_LOADED;
1336
1337 /* Load drop-in directory data */
1338 return unit_load_dropin(unit_follow_merge(u));
1339 }
1340
1341 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1342 assert(u);
1343 assert(target);
1344
1345 if (target->type != UNIT_TARGET)
1346 return 0;
1347
1348 /* Only add the dependency if both units are loaded, so that
1349 * that loop check below is reliable */
1350 if (u->load_state != UNIT_LOADED ||
1351 target->load_state != UNIT_LOADED)
1352 return 0;
1353
1354 /* If either side wants no automatic dependencies, then let's
1355 * skip this */
1356 if (!u->default_dependencies ||
1357 !target->default_dependencies)
1358 return 0;
1359
1360 /* Don't create loops */
1361 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1362 return 0;
1363
1364 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1365 }
1366
1367 static int unit_add_target_dependencies(Unit *u) {
1368
1369 static const UnitDependency deps[] = {
1370 UNIT_REQUIRED_BY,
1371 UNIT_REQUISITE_OF,
1372 UNIT_WANTED_BY,
1373 UNIT_BOUND_BY
1374 };
1375
1376 unsigned k;
1377 int r = 0;
1378
1379 assert(u);
1380
1381 for (k = 0; k < ELEMENTSOF(deps); k++) {
1382 Unit *target;
1383 Iterator i;
1384 void *v;
1385
1386 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1387 r = unit_add_default_target_dependency(u, target);
1388 if (r < 0)
1389 return r;
1390 }
1391 }
1392
1393 return r;
1394 }
1395
/* Adds ordering and requirement dependencies between a unit and the slice it
 * lives in (or the root slice as fallback). No-op for unit types without a
 * cgroup context. Returns 0 on success, negative errno on failure. */
static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        /* The root slice is the top of the hierarchy: nothing to depend on. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
}
1416
/* For every path in RequiresMountsFor=, adds After= (and, when a fragment
 * backs the mount unit, Requires=) dependencies on the .mount units covering
 * the path and each of its prefix directories. Returns 0 on success,
 * negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA sized to hold the path and hence every prefix of it */
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* A unit cannot depend on itself. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Carry over the origin mask of the RequiresMountsFor= entry. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only add a hard requirement when a fragment backs the mount. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1466
1467 static int unit_add_startup_units(Unit *u) {
1468 CGroupContext *c;
1469 int r;
1470
1471 c = unit_get_cgroup_context(u);
1472 if (!c)
1473 return 0;
1474
1475 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1476 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1477 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1478 return 0;
1479
1480 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1481 if (r < 0)
1482 return r;
1483
1484 return set_put(u->manager->startup_units, u);
1485 }
1486
/* Loads a unit's configuration: flushes a pending transient file if any,
 * invokes the type-specific load hook, and sets up implicit dependencies
 * (targets, slice, mounts, startup set). On failure the load state is set to
 * UNIT_NOT_FOUND or UNIT_ERROR and the error stored in u->load_error.
 * Returns 0 on success, negative errno on failure. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are being loaded right now, drop us from the load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than stub means loading already happened. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient unit: flush the properties written so far to disk, so the
         * regular fragment loading below picks them up. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after the load hook means no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {

                r = unit_add_target_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* Isolating on failure only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* merged_into must be set if and only if we were merged. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* Still a stub means the configuration was never found; anything else
         * is a genuine load error. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1570
1571 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1572 Condition *c;
1573 int triggered = -1;
1574
1575 assert(u);
1576 assert(to_string);
1577
1578 /* If the condition list is empty, then it is true */
1579 if (!first)
1580 return true;
1581
1582 /* Otherwise, if all of the non-trigger conditions apply and
1583 * if any of the trigger conditions apply (unless there are
1584 * none) we return true */
1585 LIST_FOREACH(conditions, c, first) {
1586 int r;
1587
1588 r = condition_test(c);
1589 if (r < 0)
1590 log_unit_warning(u,
1591 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1592 to_string(c->type),
1593 c->trigger ? "|" : "",
1594 c->negate ? "!" : "",
1595 c->parameter);
1596 else
1597 log_unit_debug(u,
1598 "%s=%s%s%s %s.",
1599 to_string(c->type),
1600 c->trigger ? "|" : "",
1601 c->negate ? "!" : "",
1602 c->parameter,
1603 condition_result_to_string(c->result));
1604
1605 if (!c->trigger && r <= 0)
1606 return false;
1607
1608 if (c->trigger && triggered <= 0)
1609 triggered = r > 0;
1610 }
1611
1612 return triggered != 0;
1613 }
1614
1615 static bool unit_condition_test(Unit *u) {
1616 assert(u);
1617
1618 dual_timestamp_get(&u->condition_timestamp);
1619 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1620
1621 return u->condition_result;
1622 }
1623
1624 static bool unit_assert_test(Unit *u) {
1625 assert(u);
1626
1627 dual_timestamp_get(&u->assert_timestamp);
1628 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1629
1630 return u->assert_result;
1631 }
1632
/* Prints a status line for the unit to the console, formatting the unit's
 * description into the given message format. The format string comes from a
 * table rather than a literal, hence the warning suppression. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1638
1639 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1640 const char *format;
1641 const UnitStatusMessageFormats *format_table;
1642
1643 assert(u);
1644 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1645
1646 if (t != JOB_RELOAD) {
1647 format_table = &UNIT_VTABLE(u)->status_message_formats;
1648 if (format_table) {
1649 format = format_table->starting_stopping[t == JOB_STOP];
1650 if (format)
1651 return format;
1652 }
1653 }
1654
1655 /* Return generic strings */
1656 if (t == JOB_START)
1657 return "Starting %s.";
1658 else if (t == JOB_STOP)
1659 return "Stopping %s.";
1660 else
1661 return "Reloading %s.";
1662 }
1663
/* Prints the "Starting …"/"Stopping …" console status message for a unit. */
static void unit_status_print_starting_stopping(Unit *u, JobType t) {
        const char *format;

        assert(u);

        /* Reload status messages have traditionally not been printed to console. */
        if (!IN_SET(t, JOB_START, JOB_STOP))
                return;

        format = unit_get_status_message_format(u, t);

        /* The format comes from a table, not a literal, hence the suppression. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, "", format);
        REENABLE_WARNING;
}
1679
/* Logs a structured journal message announcing that a unit is starting,
 * stopping or reloading, unless logging goes to the console anyway (the
 * status output would duplicate it there). */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* Render the message; format is table-driven, hence the suppression. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the journal message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
                              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid,
                   NULL);
}
1717
/* Emits both the journal entry and the console status message for a unit
 * that is starting, stopping or reloading. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        unit_status_log_starting_stopping_reloading(u, t);
        unit_status_print_starting_stopping(u, t);
}
1726
1727 int unit_start_limit_test(Unit *u) {
1728 assert(u);
1729
1730 if (ratelimit_test(&u->start_limit)) {
1731 u->start_limit_hit = false;
1732 return 0;
1733 }
1734
1735 log_unit_warning(u, "Start request repeated too quickly.");
1736 u->start_limit_hit = true;
1737
1738 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1739 }
1740
1741 bool unit_shall_confirm_spawn(Unit *u) {
1742 assert(u);
1743
1744 if (manager_is_confirm_spawn_disabled(u->manager))
1745 return false;
1746
1747 /* For some reasons units remaining in the same process group
1748 * as PID 1 fail to acquire the console even if it's not used
1749 * by any process. So skip the confirmation question for them. */
1750 return !unit_get_exec_context(u)->same_pgrp;
1751 }
1752
/* Verifies that every BindsTo= dependency that is also ordered via After= is
 * currently active; returns false (and logs) when one is not. */
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Skip BindsTo= partners that are not also ordered with After=. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1778
1779 /* Errors:
1780 * -EBADR: This unit type does not support starting.
1781 * -EALREADY: Unit is already started.
1782 * -EAGAIN: An operation is already in progress. Retry later.
1783 * -ECANCELED: Too many requests for now.
1784 * -EPROTO: Assert failed
1785 * -EINVAL: Unit not loaded
1786 * -EOPNOTSUPP: Unit type not supported
1787 * -ENOLINK: The necessary dependencies are not fulfilled.
1788 */
/* Requests that the unit be started; see the error list in the comment above
 * for the returned codes. Note that a failed condition also yields -EALREADY,
 * as it makes the start a deliberate no-op rather than a failure. */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1860
1861 bool unit_can_start(Unit *u) {
1862 assert(u);
1863
1864 if (u->load_state != UNIT_LOADED)
1865 return false;
1866
1867 if (!unit_supported(u))
1868 return false;
1869
1870 return !!UNIT_VTABLE(u)->start;
1871 }
1872
1873 bool unit_can_isolate(Unit *u) {
1874 assert(u);
1875
1876 return unit_can_start(u) &&
1877 u->allow_isolate;
1878 }
1879
1880 /* Errors:
1881 * -EBADR: This unit type does not support stopping.
1882 * -EALREADY: Unit is already stopped.
1883 * -EAGAIN: An operation is already in progress. Retry later.
1884 */
1885 int unit_stop(Unit *u) {
1886 UnitActiveState state;
1887 Unit *following;
1888
1889 assert(u);
1890
1891 state = unit_active_state(u);
1892 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1893 return -EALREADY;
1894
1895 following = unit_following(u);
1896 if (following) {
1897 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1898 return unit_stop(following);
1899 }
1900
1901 if (!UNIT_VTABLE(u)->stop)
1902 return -EBADR;
1903
1904 unit_add_to_dbus_queue(u);
1905
1906 return UNIT_VTABLE(u)->stop(u);
1907 }
1908
1909 bool unit_can_stop(Unit *u) {
1910 assert(u);
1911
1912 if (!unit_supported(u))
1913 return false;
1914
1915 if (u->perpetual)
1916 return false;
1917
1918 return !!UNIT_VTABLE(u)->stop;
1919 }
1920
1921 /* Errors:
1922 * -EBADR: This unit type does not support reloading.
1923 * -ENOEXEC: Unit is not started.
1924 * -EAGAIN: An operation is already in progress. Retry later.
1925 */
/* Requests that the unit be reloaded; see the error list in the comment
 * above for the returned codes. */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only an active unit can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1963
1964 bool unit_can_reload(Unit *u) {
1965 assert(u);
1966
1967 if (UNIT_VTABLE(u)->can_reload)
1968 return UNIT_VTABLE(u)->can_reload(u);
1969
1970 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1971 return true;
1972
1973 return UNIT_VTABLE(u)->reload;
1974 }
1975
/* Enqueues a stop job for the unit if it has StopWhenUnneeded= set and no
 * active (or pending/restarting) unit requires, wants or binds to it
 * anymore. Rate limited to avoid stop loops. */
static void unit_check_unneeded(Unit *u) {

        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        /* Reverse dependency types that count as "somebody still needs us". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* Still needed by an active or soon-to-be-active unit? Then stay up. */
                HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other) || unit_will_restart(other))
                                return;
        }

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2026
/* Enqueues a stop job for an active unit when one of its BindsTo= partners
 * has gone inactive (and was fully coldplugged, with no pending job). Rate
 * limited to avoid stop loops. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A queued job will handle the state change by itself. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* Found an inactive partner; 'other' keeps pointing at it
                 * after the break, which the log messages below rely on. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2077
2078 static void retroactively_start_dependencies(Unit *u) {
2079 Iterator i;
2080 Unit *other;
2081 void *v;
2082
2083 assert(u);
2084 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2085
2086 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2087 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2088 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2090
2091 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2092 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2093 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2094 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2095
2096 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2097 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2098 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2099 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2100
2101 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2102 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2104
2105 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2106 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2107 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2108 }
2109
2110 static void retroactively_stop_dependencies(Unit *u) {
2111 Unit *other;
2112 Iterator i;
2113 void *v;
2114
2115 assert(u);
2116 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2117
2118 /* Pull down units which are bound to us recursively if enabled */
2119 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2120 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2121 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2122 }
2123
2124 static void check_unneeded_dependencies(Unit *u) {
2125 Unit *other;
2126 Iterator i;
2127 void *v;
2128
2129 assert(u);
2130 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2131
2132 /* Garbage collect services that might not be needed anymore, if enabled */
2133 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2134 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2135 unit_check_unneeded(other);
2136 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2137 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2138 unit_check_unneeded(other);
2139 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2140 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2141 unit_check_unneeded(other);
2142 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2143 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2144 unit_check_unneeded(other);
2145 }
2146
/* Enqueues start jobs for all OnFailure= dependencies of the unit, using the
 * configured OnFailureJobMode=. Enqueue failures are logged but otherwise
 * ignored. */
void unit_start_on_failure(Unit *u) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
                return;

        log_unit_info(u, "Triggering OnFailure= dependencies.");

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
                int r;

                r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
                if (r < 0)
                        log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
        }
}
2167
/* Notifies all units that trigger this one (TriggeredBy=) about a state
 * change, via their type-specific trigger_notify hook when they have one. */
void unit_trigger_notify(Unit *u) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}
2179
2180 static int unit_log_resources(Unit *u) {
2181
2182 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2183 size_t n_message_parts = 0, n_iovec = 0;
2184 char* message_parts[3 + 1], *t;
2185 nsec_t nsec = NSEC_INFINITY;
2186 CGroupIPAccountingMetric m;
2187 size_t i;
2188 int r;
2189 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2190 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2191 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2192 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2193 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2194 };
2195
2196 assert(u);
2197
2198 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2199 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2200 * information and the complete data in structured fields. */
2201
2202 (void) unit_get_cpu_usage(u, &nsec);
2203 if (nsec != NSEC_INFINITY) {
2204 char buf[FORMAT_TIMESPAN_MAX] = "";
2205
2206 /* Format the CPU time for inclusion in the structured log message */
2207 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2208 r = log_oom();
2209 goto finish;
2210 }
2211 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2212
2213 /* Format the CPU time for inclusion in the human language message string */
2214 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2215 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2216 if (!t) {
2217 r = log_oom();
2218 goto finish;
2219 }
2220
2221 message_parts[n_message_parts++] = t;
2222 }
2223
2224 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2225 char buf[FORMAT_BYTES_MAX] = "";
2226 uint64_t value = UINT64_MAX;
2227
2228 assert(ip_fields[m]);
2229
2230 (void) unit_get_ip_accounting(u, m, &value);
2231 if (value == UINT64_MAX)
2232 continue;
2233
2234 /* Format IP accounting data for inclusion in the structured log message */
2235 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2236 r = log_oom();
2237 goto finish;
2238 }
2239 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2240
2241 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2242 * bytes counters (and not for the packets counters) */
2243 if (m == CGROUP_IP_INGRESS_BYTES)
2244 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2245 format_bytes(buf, sizeof(buf), value),
2246 " IP traffic");
2247 else if (m == CGROUP_IP_EGRESS_BYTES)
2248 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2249 format_bytes(buf, sizeof(buf), value),
2250 " IP traffic");
2251 else
2252 continue;
2253 if (!t) {
2254 r = log_oom();
2255 goto finish;
2256 }
2257
2258 message_parts[n_message_parts++] = t;
2259 }
2260
2261 /* Is there any accounting data available at all? */
2262 if (n_iovec == 0) {
2263 r = 0;
2264 goto finish;
2265 }
2266
2267 if (n_message_parts == 0)
2268 t = strjoina("MESSAGE=", u->id, ": Completed");
2269 else {
2270 _cleanup_free_ char *joined;
2271
2272 message_parts[n_message_parts] = NULL;
2273
2274 joined = strv_join(message_parts, ", ");
2275 if (!joined) {
2276 r = log_oom();
2277 goto finish;
2278 }
2279
2280 t = strjoina("MESSAGE=", u->id, ": ", joined);
2281 }
2282
2283 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2284 * and hence don't increase n_iovec for them */
2285 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2286 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2287
2288 t = strjoina(u->manager->unit_log_field, u->id);
2289 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2290
2291 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2292 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2293
2294 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2295 r = 0;
2296
2297 finish:
2298 for (i = 0; i < n_message_parts; i++)
2299 free(message_parts[i]);
2300
2301 for (i = 0; i < n_iovec; i++)
2302 free(iovec[i].iov_base);
2303
2304 return r;
2305
2306 }
2307
2308 static void unit_update_on_console(Unit *u) {
2309 bool b;
2310
2311 assert(u);
2312
2313 b = unit_needs_console(u);
2314 if (u->on_console == b)
2315 return;
2316
2317 u->on_console = b;
2318 if (b)
2319 manager_ref_console(u->manager);
2320 else
2321 manager_unref_console(u->manager);
2322
2323 }
2324
/* Central low-level state-change hook: invoked by unit type implementations whenever a unit transitions
 * from active state "os" to "ns". Updates timestamps, settles or invalidates pending jobs, retroactively
 * starts/stops dependencies, emits audit/plymouth/log records and queues follow-up work. "reload_success"
 * is only consulted when finishing reload-type jobs. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        bool unexpected;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                /* inactive/failed <-> anything else edges */
                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                /* active/reloading <-> anything else edges */
                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* Still running but no longer activating: the start job lost control */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        /* Reload finished; success/failure is reported by the caller */
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                /* A stop job that did not result in deactivation counts as failed */
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2523
/* Starts watching the given PID on behalf of this unit. A watched PID is tracked in two places: in the
 * unit's own "pids" set, and in the manager's watch_pids hashmap. The hashmap stores a single Unit under
 * the key "pid", and — when multiple units watch the same PID — a NULL-terminated Unit* array under the
 * negated key "-pid". Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;
                /* After the scan, n is the length of the existing array (0 if there was none) */

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array with room for us plus the trailing NULL */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        /* The hashmap now owns new_array; the old one is ours to free */
                        free(array);
                }
        } else if (r < 0)
                return r;

        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2588
2589 void unit_unwatch_pid(Unit *u, pid_t pid) {
2590 Unit **array;
2591
2592 assert(u);
2593 assert(pid_is_valid(pid));
2594
2595 /* First let's drop the unit in case it's keyed as "pid". */
2596 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2597
2598 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2599 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2600 if (array) {
2601 size_t n, m = 0;
2602
2603 /* Let's iterate through the array, dropping our own entry */
2604 for (n = 0; array[n]; n++)
2605 if (array[n] != u)
2606 array[m++] = array[n];
2607 array[m] = NULL;
2608
2609 if (m == 0) {
2610 /* The array is now empty, remove the entire entry */
2611 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2612 free(array);
2613 }
2614 }
2615
2616 (void) set_remove(u->pids, PID_TO_PTR(pid));
2617 }
2618
2619 void unit_unwatch_all_pids(Unit *u) {
2620 assert(u);
2621
2622 while (!set_isempty(u->pids))
2623 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2624
2625 u->pids = set_free(u->pids);
2626 }
2627
/* Drops watched PIDs that are no longer alive, skipping the two explicitly excluded PIDs. */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current entry from u->pids while we are
                 * iterating it with SET_FOREACH — assumed safe for removing the *current* element with
                 * systemd's iterators; confirm against hashmap.h. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2646
2647 bool unit_job_is_applicable(Unit *u, JobType j) {
2648 assert(u);
2649 assert(j >= 0 && j < _JOB_TYPE_MAX);
2650
2651 switch (j) {
2652
2653 case JOB_VERIFY_ACTIVE:
2654 case JOB_START:
2655 case JOB_NOP:
2656 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2657 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2658 * jobs for it. */
2659 return true;
2660
2661 case JOB_STOP:
2662 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2663 * external events), hence it makes no sense to permit enqueing such a request either. */
2664 return !u->perpetual;
2665
2666 case JOB_RESTART:
2667 case JOB_TRY_RESTART:
2668 return unit_can_stop(u) && unit_can_start(u);
2669
2670 case JOB_RELOAD:
2671 case JOB_TRY_RELOAD:
2672 return unit_can_reload(u);
2673
2674 case JOB_RELOAD_OR_START:
2675 return unit_can_reload(u) && unit_can_start(u);
2676
2677 default:
2678 assert_not_reached("Invalid job type");
2679 }
2680 }
2681
2682 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2683 assert(u);
2684
2685 /* Only warn about some unit types */
2686 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2687 return;
2688
2689 if (streq_ptr(u->id, other))
2690 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2691 else
2692 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2693 }
2694
/* Registers (or widens) a dependency entry for "other" in the dependency hashmap *h. The hashmap value is
 * a UnitDependencyInfo, which packs the origin and destination mask bitfields into a single pointer-sized
 * value (enforced by the assert_cc below), so no per-entry allocation is needed. Returns 1 if the entry
 * was added or its masks extended, 0 if the requested masks were already fully covered, negative errno on
 * error. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* The masks must fit into a void* since they are stored directly as the hashmap value */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if ((info.origin_mask & origin_mask) == info.origin_mask &&
                    (info.destination_mask & destination_mask) == info.destination_mask)
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2741
2742 int unit_add_dependency(
2743 Unit *u,
2744 UnitDependency d,
2745 Unit *other,
2746 bool add_reference,
2747 UnitDependencyMask mask) {
2748
2749 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2750 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2751 [UNIT_WANTS] = UNIT_WANTED_BY,
2752 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2753 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2754 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2755 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2756 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2757 [UNIT_WANTED_BY] = UNIT_WANTS,
2758 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2759 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2760 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2761 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2762 [UNIT_BEFORE] = UNIT_AFTER,
2763 [UNIT_AFTER] = UNIT_BEFORE,
2764 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2765 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2766 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2767 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2768 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2769 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2770 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2771 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2772 };
2773 Unit *original_u = u, *original_other = other;
2774 int r;
2775
2776 assert(u);
2777 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2778 assert(other);
2779
2780 u = unit_follow_merge(u);
2781 other = unit_follow_merge(other);
2782
2783 /* We won't allow dependencies on ourselves. We will not
2784 * consider them an error however. */
2785 if (u == other) {
2786 maybe_warn_about_dependency(original_u, original_other->id, d);
2787 return 0;
2788 }
2789
2790 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2791 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2792 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2793 return 0;
2794 }
2795
2796 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2797 if (r < 0)
2798 return r;
2799
2800 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2801 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2802 if (r < 0)
2803 return r;
2804 }
2805
2806 if (add_reference) {
2807 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2808 if (r < 0)
2809 return r;
2810
2811 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2812 if (r < 0)
2813 return r;
2814 }
2815
2816 unit_add_to_dbus_queue(u);
2817 return 0;
2818 }
2819
2820 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2821 int r;
2822
2823 assert(u);
2824
2825 r = unit_add_dependency(u, d, other, add_reference, mask);
2826 if (r < 0)
2827 return r;
2828
2829 return unit_add_dependency(u, e, other, add_reference, mask);
2830 }
2831
2832 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2833 int r;
2834
2835 assert(u);
2836 assert(name || path);
2837 assert(buf);
2838 assert(ret);
2839
2840 if (!name)
2841 name = basename(path);
2842
2843 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2844 *buf = NULL;
2845 *ret = name;
2846 return 0;
2847 }
2848
2849 if (u->instance)
2850 r = unit_name_replace_instance(name, u->instance, buf);
2851 else {
2852 _cleanup_free_ char *i = NULL;
2853
2854 r = unit_name_to_prefix(u->id, &i);
2855 if (r < 0)
2856 return r;
2857
2858 r = unit_name_replace_instance(name, i, buf);
2859 }
2860 if (r < 0)
2861 return r;
2862
2863 *ret = *buf;
2864 return 0;
2865 }
2866
2867 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2868 _cleanup_free_ char *buf = NULL;
2869 Unit *other;
2870 int r;
2871
2872 assert(u);
2873 assert(name || path);
2874
2875 r = resolve_template(u, name, path, &buf, &name);
2876 if (r < 0)
2877 return r;
2878
2879 r = manager_load_unit(u->manager, name, path, NULL, &other);
2880 if (r < 0)
2881 return r;
2882
2883 return unit_add_dependency(u, d, other, add_reference, mask);
2884 }
2885
2886 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2887 _cleanup_free_ char *buf = NULL;
2888 Unit *other;
2889 int r;
2890
2891 assert(u);
2892 assert(name || path);
2893
2894 r = resolve_template(u, name, path, &buf, &name);
2895 if (r < 0)
2896 return r;
2897
2898 r = manager_load_unit(u->manager, name, path, NULL, &other);
2899 if (r < 0)
2900 return r;
2901
2902 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2903 }
2904
/* Debug helper: points the manager at an alternative unit search path via the environment. Returns 0 on
 * success, negative errno on failure. */
int set_unit_path(const char *p) {
        int r;

        r = setenv("SYSTEMD_UNIT_PATH", p, 1);
        if (r < 0)
                return -errno;

        return 0;
}
2912
2913 char *unit_dbus_path(Unit *u) {
2914 assert(u);
2915
2916 if (!u->id)
2917 return NULL;
2918
2919 return unit_dbus_path_from_name(u->id);
2920 }
2921
2922 char *unit_dbus_path_invocation_id(Unit *u) {
2923 assert(u);
2924
2925 if (sd_id128_is_null(u->invocation_id))
2926 return NULL;
2927
2928 return unit_dbus_path_from_name(u->invocation_id_string);
2929 }
2930
/* Assigns "slice" as the unit's slice. Returns 1 if the slice was set, 0 if it was already set to this
 * slice, -EOPNOTSUPP/-EINVAL/-EBUSY/-EPERM when the assignment is not permitted. Note the guard ordering
 * below determines which error callers see first. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Only inactive units may be moved between slices */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is pinned to the root slice */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
2967
2968 int unit_set_default_slice(Unit *u) {
2969 _cleanup_free_ char *b = NULL;
2970 const char *slice_name;
2971 Unit *slice;
2972 int r;
2973
2974 assert(u);
2975
2976 if (UNIT_ISSET(u->slice))
2977 return 0;
2978
2979 if (u->instance) {
2980 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2981
2982 /* Implicitly place all instantiated units in their
2983 * own per-template slice */
2984
2985 r = unit_name_to_prefix(u->id, &prefix);
2986 if (r < 0)
2987 return r;
2988
2989 /* The prefix is already escaped, but it might include
2990 * "-" which has a special meaning for slice units,
2991 * hence escape it here extra. */
2992 escaped = unit_name_escape(prefix);
2993 if (!escaped)
2994 return -ENOMEM;
2995
2996 if (MANAGER_IS_SYSTEM(u->manager))
2997 b = strjoin("system-", escaped, ".slice");
2998 else
2999 b = strappend(escaped, ".slice");
3000 if (!b)
3001 return -ENOMEM;
3002
3003 slice_name = b;
3004 } else
3005 slice_name =
3006 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3007 ? SPECIAL_SYSTEM_SLICE
3008 : SPECIAL_ROOT_SLICE;
3009
3010 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3011 if (r < 0)
3012 return r;
3013
3014 return unit_set_slice(u, slice);
3015 }
3016
3017 const char *unit_slice_name(Unit *u) {
3018 assert(u);
3019
3020 if (!UNIT_ISSET(u->slice))
3021 return NULL;
3022
3023 return UNIT_DEREF(u->slice)->id;
3024 }
3025
3026 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3027 _cleanup_free_ char *t = NULL;
3028 int r;
3029
3030 assert(u);
3031 assert(type);
3032 assert(_found);
3033
3034 r = unit_name_change_suffix(u->id, type, &t);
3035 if (r < 0)
3036 return r;
3037 if (unit_has_name(u, t))
3038 return -EINVAL;
3039
3040 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3041 assert(r < 0 || *_found != u);
3042 return r;
3043 }
3044
3045 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3046 const char *name, *old_owner, *new_owner;
3047 Unit *u = userdata;
3048 int r;
3049
3050 assert(message);
3051 assert(u);
3052
3053 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3054 if (r < 0) {
3055 bus_log_parse_error(r);
3056 return 0;
3057 }
3058
3059 old_owner = empty_to_null(old_owner);
3060 new_owner = empty_to_null(new_owner);
3061
3062 if (UNIT_VTABLE(u)->bus_name_owner_change)
3063 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3064
3065 return 0;
3066 }
3067
3068 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3069 const char *match;
3070
3071 assert(u);
3072 assert(bus);
3073 assert(name);
3074
3075 if (u->match_bus_slot)
3076 return -EBUSY;
3077
3078 match = strjoina("type='signal',"
3079 "sender='org.freedesktop.DBus',"
3080 "path='/org/freedesktop/DBus',"
3081 "interface='org.freedesktop.DBus',"
3082 "member='NameOwnerChanged',"
3083 "arg0='", name, "'");
3084
3085 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3086 }
3087
3088 int unit_watch_bus_name(Unit *u, const char *name) {
3089 int r;
3090
3091 assert(u);
3092 assert(name);
3093
3094 /* Watch a specific name on the bus. We only support one unit
3095 * watching each name for now. */
3096
3097 if (u->manager->api_bus) {
3098 /* If the bus is already available, install the match directly.
3099 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3100 r = unit_install_bus_match(u, u->manager->api_bus, name);
3101 if (r < 0)
3102 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3103 }
3104
3105 r = hashmap_put(u->manager->watch_bus, name, u);
3106 if (r < 0) {
3107 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3108 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3109 }
3110
3111 return 0;
3112 }
3113
3114 void unit_unwatch_bus_name(Unit *u, const char *name) {
3115 assert(u);
3116 assert(name);
3117
3118 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3119 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3120 }
3121
3122 bool unit_can_serialize(Unit *u) {
3123 assert(u);
3124
3125 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3126 }
3127
3128 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3129 _cleanup_free_ char *s = NULL;
3130 int r = 0;
3131
3132 assert(f);
3133 assert(key);
3134
3135 if (mask != 0) {
3136 r = cg_mask_to_string(mask, &s);
3137 if (r >= 0) {
3138 fputs(key, f);
3139 fputc('=', f);
3140 fputs(s, f);
3141 fputc('\n', f);
3142 }
3143 }
3144 return r;
3145 }
3146
/* Serialization keys for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric. Used
 * by unit_serialize() below (and by the matching deserialization path). */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3153
/* Serializes the unit's runtime state as "key=value" lines to f, handing off file descriptors to "fds"
 * so they survive daemon re-execution. Type-specific state is emitted first via the vtable, followed by
 * generic unit state and (optionally) the unit's jobs. The record is terminated by an empty line. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Unit-type specific state first, if the type supports serialization at all */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful if they were ever evaluated */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        /* cgroup state, so we can reattach after re-exec */
        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* Emit one line per IP accounting metric that is actually available */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                /* The nop job is serialized under the same "job" marker as the main job */
                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3235
3236 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3237 assert(u);
3238 assert(f);
3239 assert(key);
3240
3241 if (!value)
3242 return 0;
3243
3244 fputs(key, f);
3245 fputc('=', f);
3246 fputs(value, f);
3247 fputc('\n', f);
3248
3249 return 1;
3250 }
3251
3252 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3253 _cleanup_free_ char *c = NULL;
3254
3255 assert(u);
3256 assert(f);
3257 assert(key);
3258
3259 if (!value)
3260 return 0;
3261
3262 c = cescape(value);
3263 if (!c)
3264 return -ENOMEM;
3265
3266 fputs(key, f);
3267 fputc('=', f);
3268 fputs(c, f);
3269 fputc('\n', f);
3270
3271 return 1;
3272 }
3273
3274 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3275 int copy;
3276
3277 assert(u);
3278 assert(f);
3279 assert(key);
3280
3281 if (fd < 0)
3282 return 0;
3283
3284 copy = fdset_put_dup(fds, fd);
3285 if (copy < 0)
3286 return copy;
3287
3288 fprintf(f, "%s=%i\n", key, copy);
3289 return 1;
3290 }
3291
3292 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3293 va_list ap;
3294
3295 assert(u);
3296 assert(f);
3297 assert(key);
3298 assert(format);
3299
3300 fputs(key, f);
3301 fputc('=', f);
3302
3303 va_start(ap, format);
3304 vfprintf(f, format, ap);
3305 va_end(ap);
3306
3307 fputc('\n', f);
3308 }
3309
3310 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3311 int r;
3312
3313 assert(u);
3314 assert(f);
3315 assert(fds);
3316
3317 for (;;) {
3318 char line[LINE_MAX], *l, *v;
3319 CGroupIPAccountingMetric m;
3320 size_t k;
3321
3322 if (!fgets(line, sizeof(line), f)) {
3323 if (feof(f))
3324 return 0;
3325 return -errno;
3326 }
3327
3328 char_array_0(line);
3329 l = strstrip(line);
3330
3331 /* End marker */
3332 if (isempty(l))
3333 break;
3334
3335 k = strcspn(l, "=");
3336
3337 if (l[k] == '=') {
3338 l[k] = 0;
3339 v = l+k+1;
3340 } else
3341 v = l+k;
3342
3343 if (streq(l, "job")) {
3344 if (v[0] == '\0') {
3345 /* new-style serialized job */
3346 Job *j;
3347
3348 j = job_new_raw(u);
3349 if (!j)
3350 return log_oom();
3351
3352 r = job_deserialize(j, f);
3353 if (r < 0) {
3354 job_free(j);
3355 return r;
3356 }
3357
3358 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3359 if (r < 0) {
3360 job_free(j);
3361 return r;
3362 }
3363
3364 r = job_install_deserialized(j);
3365 if (r < 0) {
3366 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3367 job_free(j);
3368 return r;
3369 }
3370 } else /* legacy for pre-44 */
3371 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3372 continue;
3373 } else if (streq(l, "state-change-timestamp")) {
3374 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3375 continue;
3376 } else if (streq(l, "inactive-exit-timestamp")) {
3377 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3378 continue;
3379 } else if (streq(l, "active-enter-timestamp")) {
3380 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3381 continue;
3382 } else if (streq(l, "active-exit-timestamp")) {
3383 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3384 continue;
3385 } else if (streq(l, "inactive-enter-timestamp")) {
3386 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3387 continue;
3388 } else if (streq(l, "condition-timestamp")) {
3389 dual_timestamp_deserialize(v, &u->condition_timestamp);
3390 continue;
3391 } else if (streq(l, "assert-timestamp")) {
3392 dual_timestamp_deserialize(v, &u->assert_timestamp);
3393 continue;
3394 } else if (streq(l, "condition-result")) {
3395
3396 r = parse_boolean(v);
3397 if (r < 0)
3398 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3399 else
3400 u->condition_result = r;
3401
3402 continue;
3403
3404 } else if (streq(l, "assert-result")) {
3405
3406 r = parse_boolean(v);
3407 if (r < 0)
3408 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3409 else
3410 u->assert_result = r;
3411
3412 continue;
3413
3414 } else if (streq(l, "transient")) {
3415
3416 r = parse_boolean(v);
3417 if (r < 0)
3418 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3419 else
3420 u->transient = r;
3421
3422 continue;
3423
3424 } else if (streq(l, "exported-invocation-id")) {
3425
3426 r = parse_boolean(v);
3427 if (r < 0)
3428 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3429 else
3430 u->exported_invocation_id = r;
3431
3432 continue;
3433
3434 } else if (streq(l, "exported-log-level-max")) {
3435
3436 r = parse_boolean(v);
3437 if (r < 0)
3438 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3439 else
3440 u->exported_log_level_max = r;
3441
3442 continue;
3443
3444 } else if (streq(l, "exported-log-extra-fields")) {
3445
3446 r = parse_boolean(v);
3447 if (r < 0)
3448 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3449 else
3450 u->exported_log_extra_fields = r;
3451
3452 continue;
3453
3454 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3455
3456 r = safe_atou64(v, &u->cpu_usage_base);
3457 if (r < 0)
3458 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3459
3460 continue;
3461
3462 } else if (streq(l, "cpu-usage-last")) {
3463
3464 r = safe_atou64(v, &u->cpu_usage_last);
3465 if (r < 0)
3466 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3467
3468 continue;
3469
3470 } else if (streq(l, "cgroup")) {
3471
3472 r = unit_set_cgroup_path(u, v);
3473 if (r < 0)
3474 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3475
3476 (void) unit_watch_cgroup(u);
3477
3478 continue;
3479 } else if (streq(l, "cgroup-realized")) {
3480 int b;
3481
3482 b = parse_boolean(v);
3483 if (b < 0)
3484 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3485 else
3486 u->cgroup_realized = b;
3487
3488 continue;
3489
3490 } else if (streq(l, "cgroup-realized-mask")) {
3491
3492 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3493 if (r < 0)
3494 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3495 continue;
3496
3497 } else if (streq(l, "cgroup-enabled-mask")) {
3498
3499 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3500 if (r < 0)
3501 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3502 continue;
3503
3504 } else if (streq(l, "cgroup-bpf-realized")) {
3505 int i;
3506
3507 r = safe_atoi(v, &i);
3508 if (r < 0)
3509 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3510 else
3511 u->cgroup_bpf_state =
3512 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3513 i > 0 ? UNIT_CGROUP_BPF_ON :
3514 UNIT_CGROUP_BPF_OFF;
3515
3516 continue;
3517
3518 } else if (streq(l, "ref-uid")) {
3519 uid_t uid;
3520
3521 r = parse_uid(v, &uid);
3522 if (r < 0)
3523 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3524 else
3525 unit_ref_uid_gid(u, uid, GID_INVALID);
3526
3527 continue;
3528
3529 } else if (streq(l, "ref-gid")) {
3530 gid_t gid;
3531
3532 r = parse_gid(v, &gid);
3533 if (r < 0)
3534 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3535 else
3536 unit_ref_uid_gid(u, UID_INVALID, gid);
3537
3538 } else if (streq(l, "ref")) {
3539
3540 r = strv_extend(&u->deserialized_refs, v);
3541 if (r < 0)
3542 log_oom();
3543
3544 continue;
3545 } else if (streq(l, "invocation-id")) {
3546 sd_id128_t id;
3547
3548 r = sd_id128_from_string(v, &id);
3549 if (r < 0)
3550 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3551 else {
3552 r = unit_set_invocation_id(u, id);
3553 if (r < 0)
3554 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3555 }
3556
3557 continue;
3558 }
3559
3560 /* Check if this is an IP accounting metric serialization field */
3561 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3562 if (streq(l, ip_accounting_metric_field[m]))
3563 break;
3564 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3565 uint64_t c;
3566
3567 r = safe_atou64(v, &c);
3568 if (r < 0)
3569 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3570 else
3571 u->ip_accounting_extra[m] = c;
3572 continue;
3573 }
3574
3575 if (unit_can_serialize(u)) {
3576 r = exec_runtime_deserialize_compat(u, l, v, fds);
3577 if (r < 0) {
3578 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3579 continue;
3580 }
3581
3582 /* Returns positive if key was handled by the call */
3583 if (r > 0)
3584 continue;
3585
3586 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3587 if (r < 0)
3588 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3589 }
3590 }
3591
3592 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3593 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3594 * before 228 where the base for timeouts was not persistent across reboots. */
3595
3596 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3597 dual_timestamp_get(&u->state_change_timestamp);
3598
3599 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3600 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3601 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3602 unit_invalidate_cgroup_bpf(u);
3603
3604 return 0;
3605 }
3606
3607 void unit_deserialize_skip(FILE *f) {
3608 assert(f);
3609
3610 /* Skip serialized data for this unit. We don't know what it is. */
3611
3612 for (;;) {
3613 char line[LINE_MAX], *l;
3614
3615 if (!fgets(line, sizeof line, f))
3616 return;
3617
3618 char_array_0(line);
3619 l = strstrip(line);
3620
3621 /* End marker */
3622 if (isempty(l))
3623 return;
3624 }
3625 }
3626
3627
3628 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3629 Unit *device;
3630 _cleanup_free_ char *e = NULL;
3631 int r;
3632
3633 assert(u);
3634
3635 /* Adds in links to the device node that this unit is based on */
3636 if (isempty(what))
3637 return 0;
3638
3639 if (!is_device_path(what))
3640 return 0;
3641
3642 /* When device units aren't supported (such as in a
3643 * container), don't create dependencies on them. */
3644 if (!unit_type_supported(UNIT_DEVICE))
3645 return 0;
3646
3647 r = unit_name_from_path(what, ".device", &e);
3648 if (r < 0)
3649 return r;
3650
3651 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3652 if (r < 0)
3653 return r;
3654
3655 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3656 dep = UNIT_BINDS_TO;
3657
3658 r = unit_add_two_dependencies(u, UNIT_AFTER,
3659 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3660 device, true, mask);
3661 if (r < 0)
3662 return r;
3663
3664 if (wants) {
3665 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3666 if (r < 0)
3667 return r;
3668 }
3669
3670 return 0;
3671 }
3672
3673 int unit_coldplug(Unit *u) {
3674 int r = 0, q;
3675 char **i;
3676
3677 assert(u);
3678
3679 /* Make sure we don't enter a loop, when coldplugging
3680 * recursively. */
3681 if (u->coldplugged)
3682 return 0;
3683
3684 u->coldplugged = true;
3685
3686 STRV_FOREACH(i, u->deserialized_refs) {
3687 q = bus_unit_track_add_name(u, *i);
3688 if (q < 0 && r >= 0)
3689 r = q;
3690 }
3691 u->deserialized_refs = strv_free(u->deserialized_refs);
3692
3693 if (UNIT_VTABLE(u)->coldplug) {
3694 q = UNIT_VTABLE(u)->coldplug(u);
3695 if (q < 0 && r >= 0)
3696 r = q;
3697 }
3698
3699 if (u->job) {
3700 q = job_coldplug(u->job);
3701 if (q < 0 && r >= 0)
3702 r = q;
3703 }
3704
3705 return r;
3706 }
3707
3708 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3709 struct stat st;
3710
3711 if (!path)
3712 return false;
3713
3714 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3715 * are never out-of-date. */
3716 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3717 return false;
3718
3719 if (stat(path, &st) < 0)
3720 /* What, cannot access this anymore? */
3721 return true;
3722
3723 if (path_masked)
3724 /* For masked files check if they are still so */
3725 return !null_or_empty(&st);
3726 else
3727 /* For non-empty files check the mtime */
3728 return timespec_load(&st.st_mtim) > mtime;
3729
3730 return false;
3731 }
3732
3733 bool unit_need_daemon_reload(Unit *u) {
3734 _cleanup_strv_free_ char **t = NULL;
3735 char **path;
3736
3737 assert(u);
3738
3739 /* For unit files, we allow masking… */
3740 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3741 u->load_state == UNIT_MASKED))
3742 return true;
3743
3744 /* Source paths should not be masked… */
3745 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3746 return true;
3747
3748 if (u->load_state == UNIT_LOADED)
3749 (void) unit_find_dropin_paths(u, &t);
3750 if (!strv_equal(u->dropin_paths, t))
3751 return true;
3752
3753 /* … any drop-ins that are masked are simply omitted from the list. */
3754 STRV_FOREACH(path, u->dropin_paths)
3755 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3756 return true;
3757
3758 return false;
3759 }
3760
3761 void unit_reset_failed(Unit *u) {
3762 assert(u);
3763
3764 if (UNIT_VTABLE(u)->reset_failed)
3765 UNIT_VTABLE(u)->reset_failed(u);
3766
3767 RATELIMIT_RESET(u->start_limit);
3768 u->start_limit_hit = false;
3769 }
3770
3771 Unit *unit_following(Unit *u) {
3772 assert(u);
3773
3774 if (UNIT_VTABLE(u)->following)
3775 return UNIT_VTABLE(u)->following(u);
3776
3777 return NULL;
3778 }
3779
3780 bool unit_stop_pending(Unit *u) {
3781 assert(u);
3782
3783 /* This call does check the current state of the unit. It's
3784 * hence useful to be called from state change calls of the
3785 * unit itself, where the state isn't updated yet. This is
3786 * different from unit_inactive_or_pending() which checks both
3787 * the current state and for a queued job. */
3788
3789 return u->job && u->job->type == JOB_STOP;
3790 }
3791
3792 bool unit_inactive_or_pending(Unit *u) {
3793 assert(u);
3794
3795 /* Returns true if the unit is inactive or going down */
3796
3797 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3798 return true;
3799
3800 if (unit_stop_pending(u))
3801 return true;
3802
3803 return false;
3804 }
3805
3806 bool unit_active_or_pending(Unit *u) {
3807 assert(u);
3808
3809 /* Returns true if the unit is active or going up */
3810
3811 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3812 return true;
3813
3814 if (u->job &&
3815 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3816 return true;
3817
3818 return false;
3819 }
3820
3821 bool unit_will_restart(Unit *u) {
3822 assert(u);
3823
3824 if (!UNIT_VTABLE(u)->will_restart)
3825 return false;
3826
3827 return UNIT_VTABLE(u)->will_restart(u);
3828 }
3829
3830 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3831 assert(u);
3832 assert(w >= 0 && w < _KILL_WHO_MAX);
3833 assert(SIGNAL_VALID(signo));
3834
3835 if (!UNIT_VTABLE(u)->kill)
3836 return -EOPNOTSUPP;
3837
3838 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3839 }
3840
3841 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3842 Set *pid_set;
3843 int r;
3844
3845 pid_set = set_new(NULL);
3846 if (!pid_set)
3847 return NULL;
3848
3849 /* Exclude the main/control pids from being killed via the cgroup */
3850 if (main_pid > 0) {
3851 r = set_put(pid_set, PID_TO_PTR(main_pid));
3852 if (r < 0)
3853 goto fail;
3854 }
3855
3856 if (control_pid > 0) {
3857 r = set_put(pid_set, PID_TO_PTR(control_pid));
3858 if (r < 0)
3859 goto fail;
3860 }
3861
3862 return pid_set;
3863
3864 fail:
3865 set_free(pid_set);
3866 return NULL;
3867 }
3868
3869 int unit_kill_common(
3870 Unit *u,
3871 KillWho who,
3872 int signo,
3873 pid_t main_pid,
3874 pid_t control_pid,
3875 sd_bus_error *error) {
3876
3877 int r = 0;
3878 bool killed = false;
3879
3880 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3881 if (main_pid < 0)
3882 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3883 else if (main_pid == 0)
3884 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3885 }
3886
3887 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3888 if (control_pid < 0)
3889 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3890 else if (control_pid == 0)
3891 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3892 }
3893
3894 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3895 if (control_pid > 0) {
3896 if (kill(control_pid, signo) < 0)
3897 r = -errno;
3898 else
3899 killed = true;
3900 }
3901
3902 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3903 if (main_pid > 0) {
3904 if (kill(main_pid, signo) < 0)
3905 r = -errno;
3906 else
3907 killed = true;
3908 }
3909
3910 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3911 _cleanup_set_free_ Set *pid_set = NULL;
3912 int q;
3913
3914 /* Exclude the main/control pids from being killed via the cgroup */
3915 pid_set = unit_pid_set(main_pid, control_pid);
3916 if (!pid_set)
3917 return -ENOMEM;
3918
3919 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3920 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3921 r = q;
3922 else
3923 killed = true;
3924 }
3925
3926 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3927 return -ESRCH;
3928
3929 return r;
3930 }
3931
3932 int unit_following_set(Unit *u, Set **s) {
3933 assert(u);
3934 assert(s);
3935
3936 if (UNIT_VTABLE(u)->following_set)
3937 return UNIT_VTABLE(u)->following_set(u, s);
3938
3939 *s = NULL;
3940 return 0;
3941 }
3942
3943 UnitFileState unit_get_unit_file_state(Unit *u) {
3944 int r;
3945
3946 assert(u);
3947
3948 if (u->unit_file_state < 0 && u->fragment_path) {
3949 r = unit_file_get_state(
3950 u->manager->unit_file_scope,
3951 NULL,
3952 u->id,
3953 &u->unit_file_state);
3954 if (r < 0)
3955 u->unit_file_state = UNIT_FILE_BAD;
3956 }
3957
3958 return u->unit_file_state;
3959 }
3960
3961 int unit_get_unit_file_preset(Unit *u) {
3962 assert(u);
3963
3964 if (u->unit_file_preset < 0 && u->fragment_path)
3965 u->unit_file_preset = unit_file_query_preset(
3966 u->manager->unit_file_scope,
3967 NULL,
3968 basename(u->fragment_path));
3969
3970 return u->unit_file_preset;
3971 }
3972
3973 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3974 assert(ref);
3975 assert(source);
3976 assert(target);
3977
3978 if (ref->target)
3979 unit_ref_unset(ref);
3980
3981 ref->source = source;
3982 ref->target = target;
3983 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3984 return target;
3985 }
3986
3987 void unit_ref_unset(UnitRef *ref) {
3988 assert(ref);
3989
3990 if (!ref->target)
3991 return;
3992
3993 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3994 * be unreferenced now. */
3995 unit_add_to_gc_queue(ref->target);
3996
3997 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3998 ref->source = ref->target = NULL;
3999 }
4000
/* Derives a user name for DynamicUser= from the unit name: the unit's prefix if it is
 * a valid user/group name, otherwise "_du" followed by a 16-hex-digit siphash of the
 * prefix. On success stores a newly allocated string in *ret (caller frees) and
 * returns 0; returns negative errno on failure. */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed siphash key, so the derived "_du…" name is stable across restarts. */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n)) {
                /* Transfer ownership of n to the caller; n is NULL afterwards so the
                 * cleanup handler won't free it. */
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
4026
/* Patches manager-level defaults and derived settings into the unit's exec and cgroup
 * contexts. Must run _after_ all unit settings have been parsed, since it fills in
 * only what the unit itself left unset. Returns 0 on success, negative errno on OOM
 * or lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User-instance services default to running in the user's home directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping device-related capabilities. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Derive a user (and matching group) name from the unit name if
                         * none was configured explicitly. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* PrivateDevices= also tightens the default device policy. */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4102
4103 ExecContext *unit_get_exec_context(Unit *u) {
4104 size_t offset;
4105 assert(u);
4106
4107 if (u->type < 0)
4108 return NULL;
4109
4110 offset = UNIT_VTABLE(u)->exec_context_offset;
4111 if (offset <= 0)
4112 return NULL;
4113
4114 return (ExecContext*) ((uint8_t*) u + offset);
4115 }
4116
4117 KillContext *unit_get_kill_context(Unit *u) {
4118 size_t offset;
4119 assert(u);
4120
4121 if (u->type < 0)
4122 return NULL;
4123
4124 offset = UNIT_VTABLE(u)->kill_context_offset;
4125 if (offset <= 0)
4126 return NULL;
4127
4128 return (KillContext*) ((uint8_t*) u + offset);
4129 }
4130
4131 CGroupContext *unit_get_cgroup_context(Unit *u) {
4132 size_t offset;
4133
4134 if (u->type < 0)
4135 return NULL;
4136
4137 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4138 if (offset <= 0)
4139 return NULL;
4140
4141 return (CGroupContext*) ((uint8_t*) u + offset);
4142 }
4143
4144 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4145 size_t offset;
4146
4147 if (u->type < 0)
4148 return NULL;
4149
4150 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4151 if (offset <= 0)
4152 return NULL;
4153
4154 return *(ExecRuntime**) ((uint8_t*) u + offset);
4155 }
4156
4157 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4158 assert(u);
4159
4160 if (UNIT_WRITE_FLAGS_NOOP(flags))
4161 return NULL;
4162
4163 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4164 return u->manager->lookup_paths.transient;
4165
4166 if (flags & UNIT_PERSISTENT)
4167 return u->manager->lookup_paths.persistent_control;
4168
4169 if (flags & UNIT_RUNTIME)
4170 return u->manager->lookup_paths.runtime_control;
4171
4172 return NULL;
4173 }
4174
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                /* Continue from the specifier-escaped copy; ret tracks the allocation. */
                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* C-escape on top of the (possibly specifier-escaped) string; the
                 * intermediate allocation, if any, is freed either way. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* ret is NULL here iff no escaping was requested/needed, in which case
                 * the caller gets the input pointer back and *buf stays NULL. */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No buf: always hand back an allocation the caller must free. */
        return ret ?: strdup(s);
}
4214
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is escaped per 'flags' and
         * wrapped in double quotes; entries are separated by single spaces. Returns a
         * newly allocated string (caller frees), or NULL on OOM/escape failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                /* p is either *i itself or the freshly allocated 'buf' — see
                 * unit_escape_setting() for the ownership rules. */
                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append at the current end; n tracks the used length (without NUL). */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure room for, and write, the terminating NUL (also for the empty list). */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4255
/* Persists a single unit setting: while a transient unit is being created it is
 * appended to the open transient unit file; otherwise it is written as a 50-<name>
 * drop-in file in the directory selected by 'flags'. 'data' is escaped per 'flags'
 * and prefixed with the appropriate section header. Returns 0 on success, negative
 * errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        /* NOTE: 'data' may end up pointing at 'escaped' or at strjoina()-allocated
         * stack memory below — valid only within this function. */
        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private: -1 = nothing written yet, 0 = last was [Unit],
                 * 1 = last was the private section. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p = drop-in directory path, q = full drop-in file path. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Ownership of q moves into the strv on success; clear it so the cleanup
         * handler doesn't free it. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        /* Bump the drop-in mtime so unit_need_daemon_reload() doesn't fire for our own write. */
        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4332
4333 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4334 _cleanup_free_ char *p = NULL;
4335 va_list ap;
4336 int r;
4337
4338 assert(u);
4339 assert(name);
4340 assert(format);
4341
4342 if (UNIT_WRITE_FLAGS_NOOP(flags))
4343 return 0;
4344
4345 va_start(ap, format);
4346 r = vasprintf(&p, format, ap);
4347 va_end(ap);
4348
4349 if (r < 0)
4350 return -ENOMEM;
4351
4352 return unit_write_setting(u, flags, name, p);
4353 }
4354
/* Turns the unit into a transient one: opens a fresh transient unit file for it,
 * drops all previously loaded configuration state, and resets the load state so that
 * subsequent settings are written into that file. Returns 0 on success,
 * -EOPNOTSUPP if the unit type can't be transient, negative errno otherwise. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previous transient file handle. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* path's ownership moves into u->fragment_path; the old value is freed. */
        free_and_replace(u->fragment_path, path);

        /* Drop all stale configuration sources and timestamps. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Back to stub state so the transient file is (re)loaded later. */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4400
4401 static void log_kill(pid_t pid, int sig, void *userdata) {
4402 _cleanup_free_ char *comm = NULL;
4403
4404 (void) get_process_comm(pid, &comm);
4405
4406 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4407 only, like for example systemd's own PAM stub process. */
4408 if (comm && comm[0] == '(')
4409 return;
4410
4411 log_unit_notice(userdata,
4412 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4413 pid,
4414 strna(comm),
4415 signal_to_string(sig));
4416 }
4417
4418 static int operation_to_signal(KillContext *c, KillOperation k) {
4419 assert(c);
4420
4421 switch (k) {
4422
4423 case KILL_TERMINATE:
4424 case KILL_TERMINATE_AND_LOG:
4425 return c->kill_signal;
4426
4427 case KILL_KILL:
4428 return SIGKILL;
4429
4430 case KILL_ABORT:
4431 return SIGABRT;
4432
4433 default:
4434 assert_not_reached("KillOperation unknown");
4435 }
4436 }
4437
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. Pass
         * main_pid/control_pid <= 0 when not applicable; main_pid_alien indicates the
         * main process was not forked off by us (so we don't wait for it). */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* Follow up with SIGHUP if configured, but only for plain termination and only
         * if SIGHUP isn't already the termination signal itself. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log the kills except for a plain, quiet SIGTERM. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked off ourselves. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the rest of the cgroup: always in control-group mode, and in mixed mode
         * only for the final SIGKILL stage. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set for the SIGHUP pass (the
                                 * first set was consumed by the kill above). */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4555
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a normalized private copy of the path. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_kill_slashes(p);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        /* Ownership of p (== path) moved into the hashmap; clear it so the cleanup
         * handler doesn't free it. */
        p = NULL;

        /* Register this unit under every prefix of the path in the manager-wide prefix
         * table, so that newly appearing mount units can find it. */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        /* q's ownership moved into the hashmap as well. */
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4633
4634 int unit_setup_exec_runtime(Unit *u) {
4635 ExecRuntime **rt;
4636 size_t offset;
4637 Unit *other;
4638 Iterator i;
4639 void *v;
4640 int r;
4641
4642 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4643 assert(offset > 0);
4644
4645 /* Check if there already is an ExecRuntime for this unit? */
4646 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4647 if (*rt)
4648 return 0;
4649
4650 /* Try to get it from somebody else */
4651 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4652 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4653 if (r == 1)
4654 return 1;
4655 }
4656
4657 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4658 }
4659
4660 int unit_setup_dynamic_creds(Unit *u) {
4661 ExecContext *ec;
4662 DynamicCreds *dcreds;
4663 size_t offset;
4664
4665 assert(u);
4666
4667 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4668 assert(offset > 0);
4669 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4670
4671 ec = unit_get_exec_context(u);
4672 assert(ec);
4673
4674 if (!ec->dynamic_user)
4675 return 0;
4676
4677 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4678 }
4679
4680 bool unit_type_supported(UnitType t) {
4681 if (_unlikely_(t < 0))
4682 return false;
4683 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4684 return false;
4685
4686 if (!unit_vtable[t]->supported)
4687 return true;
4688
4689 return unit_vtable[t]->supported();
4690 }
4691
4692 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4693 int r;
4694
4695 assert(u);
4696 assert(where);
4697
4698 r = dir_is_empty(where);
4699 if (r > 0 || r == -ENOTDIR)
4700 return;
4701 if (r < 0) {
4702 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4703 return;
4704 }
4705
4706 log_struct(LOG_NOTICE,
4707 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4708 LOG_UNIT_ID(u),
4709 LOG_UNIT_INVOCATION_ID(u),
4710 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4711 "WHERE=%s", where,
4712 NULL);
4713 }
4714
4715 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4716 _cleanup_free_ char *canonical_where;
4717 int r;
4718
4719 assert(u);
4720 assert(where);
4721
4722 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4723 if (r < 0) {
4724 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4725 return 0;
4726 }
4727
4728 /* We will happily ignore a trailing slash (or any redundant slashes) */
4729 if (path_equal(where, canonical_where))
4730 return 0;
4731
4732 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4733 log_struct(LOG_ERR,
4734 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4735 LOG_UNIT_ID(u),
4736 LOG_UNIT_INVOCATION_ID(u),
4737 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4738 "WHERE=%s", where,
4739 NULL);
4740
4741 return -ELOOP;
4742 }
4743
4744 bool unit_is_pristine(Unit *u) {
4745 assert(u);
4746
4747 /* Check if the unit already exists or is already around,
4748 * in a number of different ways. Note that to cater for unit
4749 * types such as slice, we are generally fine with units that
4750 * are marked UNIT_LOADED even though nothing was
4751 * actually loaded, as those unit types don't require a file
4752 * on disk to validly load. */
4753
4754 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4755 u->fragment_path ||
4756 u->source_path ||
4757 !strv_isempty(u->dropin_paths) ||
4758 u->job ||
4759 u->merged_into);
4760 }
4761
4762 pid_t unit_control_pid(Unit *u) {
4763 assert(u);
4764
4765 if (UNIT_VTABLE(u)->control_pid)
4766 return UNIT_VTABLE(u)->control_pid(u);
4767
4768 return 0;
4769 }
4770
4771 pid_t unit_main_pid(Unit *u) {
4772 assert(u);
4773
4774 if (UNIT_VTABLE(u)->main_pid)
4775 return UNIT_VTABLE(u)->main_pid(u);
4776
4777 return 0;
4778 }
4779
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit and invalidates the stored reference. 'destroy_now' is
         * forwarded to the manager — presumably requesting immediate destruction of the UID/GID's IPC
         * resources; see manager_unref_uid()/manager_unref_gid() for the exact semantics. */

        /* Compile-time checks for the uid_t/gid_t equivalence this relies on */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return; /* No reference held, nothing to drop */

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4804
/* Drops this unit's UID reference, if any (see unit_unref_uid_internal()). */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4808
/* Drops this unit's GID reference, if any. The uid_t* cast is safe because the shared implementation
 * statically asserts that uid_t and gid_t have identical size and invalid value. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4812
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero.
         *
         * Returns 0 if the same UID was already referenced, 1 if a new reference was taken, -EBUSY if a
         * different UID is already referenced, or a negative error from the manager call. */

        /* Compile-time checks for the uid_t/gid_t equivalence this relies on */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4850
/* Takes a reference on the given UID for this unit (see unit_ref_uid_internal() for return values). */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4854
/* Takes a reference on the given GID for this unit. The uid_t casts are safe because the shared
 * implementation statically asserts uid_t/gid_t equivalence. */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4858
4859 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4860 int r = 0, q = 0;
4861
4862 assert(u);
4863
4864 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4865
4866 if (uid_is_valid(uid)) {
4867 r = unit_ref_uid(u, uid, clean_ipc);
4868 if (r < 0)
4869 return r;
4870 }
4871
4872 if (gid_is_valid(gid)) {
4873 q = unit_ref_gid(u, gid, clean_ipc);
4874 if (q < 0) {
4875 if (r > 0)
4876 unit_unref_uid(u, false);
4877
4878 return q;
4879 }
4880 }
4881
4882 return r > 0 || q > 0;
4883 }
4884
4885 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4886 ExecContext *c;
4887 int r;
4888
4889 assert(u);
4890
4891 c = unit_get_exec_context(u);
4892
4893 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4894 if (r < 0)
4895 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4896
4897 return r;
4898 }
4899
/* Drops both the UID and the GID reference of the unit, if held. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4906
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                /* A new reference was taken — announce the changed unit state on the bus */
                bus_unit_send_change_signal(u);
}
4920
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0; /* Unchanged, nothing to do */

        /* Drop the old ID from the manager's lookup table before installing the new one */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                /* Setting a null ID means clearing — reuse the reset path with success status */
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Register the unit under its new ID. Note the key points into the unit itself. */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit clearing) leave the unit with no invocation ID at all */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4957
4958 int unit_acquire_invocation_id(Unit *u) {
4959 sd_id128_t id;
4960 int r;
4961
4962 assert(u);
4963
4964 r = sd_id128_randomize(&id);
4965 if (r < 0)
4966 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4967
4968 r = unit_set_invocation_id(u, id);
4969 if (r < 0)
4970 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4971
4972 return 0;
4973 }
4974
/* Populates an ExecParameters structure with settings taken from the manager and from this unit,
 * in preparation for spawning one of the unit's processes. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copy parameters from manager */
        p->environment = u->manager->environment;
        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        /* These flags only apply when running as the system instance */
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
}
4990
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup exists before we try to join it below */
        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                /* Parent (r > 0) or fork error (r < 0) */
                return r;

        /* Child only from here on: reset signal dispositions inherited from the manager */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Die if the manager goes away, so helpers can't outlive PID 1 */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5021
5022 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5023 assert(u);
5024 assert(d >= 0);
5025 assert(d < _UNIT_DEPENDENCY_MAX);
5026 assert(other);
5027
5028 if (di.origin_mask == 0 && di.destination_mask == 0) {
5029 /* No bit set anymore, let's drop the whole entry */
5030 assert_se(hashmap_remove(u->dependencies[d], other));
5031 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5032 } else
5033 /* Mask was reduced, let's update the entry */
5034 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5035 }
5036
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are iterating, which
                 * invalidates the iterator — hence restart the scan from the top after each modification,
                 * until a full pass makes no change. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries that carry none of the bits we are removing */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become unreferenced — let the GC decide */
                                unit_add_to_gc_queue(other);

                                /* We mutated the hashmap — restart iteration */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5090
5091 static int unit_export_invocation_id(Unit *u) {
5092 const char *p;
5093 int r;
5094
5095 assert(u);
5096
5097 if (u->exported_invocation_id)
5098 return 0;
5099
5100 if (sd_id128_is_null(u->invocation_id))
5101 return 0;
5102
5103 p = strjoina("/run/systemd/units/invocation:", u->id);
5104 r = symlink_atomic(u->invocation_id_string, p);
5105 if (r < 0)
5106 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5107
5108 u->exported_invocation_id = true;
5109 return 0;
5110 }
5111
5112 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5113 const char *p;
5114 char buf[2];
5115 int r;
5116
5117 assert(u);
5118 assert(c);
5119
5120 if (u->exported_log_level_max)
5121 return 0;
5122
5123 if (c->log_level_max < 0)
5124 return 0;
5125
5126 assert(c->log_level_max <= 7);
5127
5128 buf[0] = '0' + c->log_level_max;
5129 buf[1] = 0;
5130
5131 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5132 r = symlink_atomic(buf, p);
5133 if (r < 0)
5134 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5135
5136 u->exported_log_level_max = true;
5137 return 0;
5138 }
5139
5140 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5141 _cleanup_close_ int fd = -1;
5142 struct iovec *iovec;
5143 const char *p;
5144 char *pattern;
5145 le64_t *sizes;
5146 ssize_t n;
5147 size_t i;
5148 int r;
5149
5150 if (u->exported_log_extra_fields)
5151 return 0;
5152
5153 if (c->n_log_extra_fields <= 0)
5154 return 0;
5155
5156 sizes = newa(le64_t, c->n_log_extra_fields);
5157 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5158
5159 for (i = 0; i < c->n_log_extra_fields; i++) {
5160 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5161
5162 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5163 iovec[i*2+1] = c->log_extra_fields[i];
5164 }
5165
5166 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5167 pattern = strjoina(p, ".XXXXXX");
5168
5169 fd = mkostemp_safe(pattern);
5170 if (fd < 0)
5171 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5172
5173 n = writev(fd, iovec, c->n_log_extra_fields*2);
5174 if (n < 0) {
5175 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5176 goto fail;
5177 }
5178
5179 (void) fchmod(fd, 0644);
5180
5181 if (rename(pattern, p) < 0) {
5182 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5183 goto fail;
5184 }
5185
5186 u->exported_log_extra_fields = true;
5187 return 0;
5188
5189 fail:
5190 (void) unlink(pattern);
5191 return r;
5192 }
5193
5194 void unit_export_state_files(Unit *u) {
5195 const ExecContext *c;
5196
5197 assert(u);
5198
5199 if (!u->id)
5200 return;
5201
5202 if (!MANAGER_IS_SYSTEM(u->manager))
5203 return;
5204
5205 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5206 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5207 * the IPC system itself and PID 1 also log to the journal.
5208 *
5209 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5210 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5211 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5212 * namespace at least.
5213 *
5214 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5215 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5216 * them with one. */
5217
5218 (void) unit_export_invocation_id(u);
5219
5220 c = unit_get_exec_context(u);
5221 if (c) {
5222 (void) unit_export_log_level_max(u, c);
5223 (void) unit_export_log_extra_fields(u, c);
5224 }
5225 }
5226
5227 void unit_unlink_state_files(Unit *u) {
5228 const char *p;
5229
5230 assert(u);
5231
5232 if (!u->id)
5233 return;
5234
5235 if (!MANAGER_IS_SYSTEM(u->manager))
5236 return;
5237
5238 /* Undoes the effect of unit_export_state() */
5239
5240 if (u->exported_invocation_id) {
5241 p = strjoina("/run/systemd/units/invocation:", u->id);
5242 (void) unlink(p);
5243
5244 u->exported_invocation_id = false;
5245 }
5246
5247 if (u->exported_log_level_max) {
5248 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5249 (void) unlink(p);
5250
5251 u->exported_log_level_max = false;
5252 }
5253
5254 if (u->exported_log_extra_fields) {
5255 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5256 (void) unlink(p);
5257
5258 u->exported_log_extra_fields = false;
5259 }
5260 }
5261
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit: realizes the cgroup, resets
         * accounting if requested, exports state files and sets up the exec runtime and dynamic credentials. */

        (void) unit_realize_cgroup(u);

        if (u->reset_accounting) {
                /* One-shot flag: clear accounting once, then drop the request */
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5289
5290 static void log_leftover(pid_t pid, int sig, void *userdata) {
5291 _cleanup_free_ char *comm = NULL;
5292
5293 (void) get_process_comm(pid, &comm);
5294
5295 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5296 return;
5297
5298 log_unit_warning(userdata,
5299 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5300 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5301 pid, strna(comm));
5302 }
5303
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        /* With sig == 0 no signal is actually sent; the recursive walk only invokes log_leftover() for each
         * process found in the cgroup, producing a warning per left-over process. */
        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5314
5315 bool unit_needs_console(Unit *u) {
5316 ExecContext *ec;
5317 UnitActiveState state;
5318
5319 assert(u);
5320
5321 state = unit_active_state(u);
5322
5323 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5324 return false;
5325
5326 if (UNIT_VTABLE(u)->needs_console)
5327 return UNIT_VTABLE(u)->needs_console(u);
5328
5329 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5330 ec = unit_get_exec_context(u);
5331 if (!ec)
5332 return false;
5333
5334 return exec_context_may_touch_console(ec);
5335 }
5336
5337 const char *unit_label_path(Unit *u) {
5338 const char *p;
5339
5340 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5341 * when validating access checks. */
5342
5343 p = u->source_path ?: u->fragment_path;
5344 if (!p)
5345 return NULL;
5346
5347 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5348 if (path_equal(p, "/dev/null"))
5349 return NULL;
5350
5351 return p;
5352 }
5353
5354 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5355 int r;
5356
5357 assert(u);
5358
5359 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5360 * and not a kernel thread either */
5361
5362 /* First, a simple range check */
5363 if (!pid_is_valid(pid))
5364 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5365
5366 /* Some extra safety check */
5367 if (pid == 1 || pid == getpid_cached())
5368 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager processs, refusing.", pid);
5369
5370 /* Don't even begin to bother with kernel threads */
5371 r = is_kernel_thread(pid);
5372 if (r == -ESRCH)
5373 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5374 if (r < 0)
5375 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5376 if (r > 0)
5377 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5378
5379 return 0;
5380 }
5381
/* Maps CollectMode enum values to their configuration-file string representation (CollectMode=) */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

/* Generates collect_mode_to_string() and collect_mode_from_string() from the table above */
DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);