]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
72f475ab140b3c1228bd83b95ca3dcc86134313e
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/prctl.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "alloc-util.h"
32 #include "bus-common-errors.h"
33 #include "bus-util.h"
34 #include "cgroup-util.h"
35 #include "dbus-unit.h"
36 #include "dbus.h"
37 #include "dropin.h"
38 #include "escape.h"
39 #include "execute.h"
40 #include "fd-util.h"
41 #include "fileio-label.h"
42 #include "format-util.h"
43 #include "fs-util.h"
44 #include "id128-util.h"
45 #include "io-util.h"
46 #include "load-dropin.h"
47 #include "load-fragment.h"
48 #include "log.h"
49 #include "macro.h"
50 #include "missing.h"
51 #include "mkdir.h"
52 #include "parse-util.h"
53 #include "path-util.h"
54 #include "process-util.h"
55 #include "set.h"
56 #include "signal-util.h"
57 #include "sparse-endian.h"
58 #include "special.h"
59 #include "specifier.h"
60 #include "stat-util.h"
61 #include "stdio-util.h"
62 #include "string-table.h"
63 #include "string-util.h"
64 #include "strv.h"
65 #include "umask-util.h"
66 #include "unit-name.h"
67 #include "unit.h"
68 #include "user-util.h"
69 #include "virt.h"
70
/* Per-type virtual method tables, indexed by unit type. The type-specific
 * hooks (init, load, start, stop, ...) are dispatched through these entries
 * via the UNIT_VTABLE() macro. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
84
85 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
86
/* Allocates a new Unit object of 'size' bytes (the type-specific object
 * size, at least sizeof(Unit)) owned by manager 'm', and initializes all
 * generic fields to their "unset" values. The unit type remains
 * _UNIT_TYPE_INVALID until the first name is added with unit_add_name().
 * Returns NULL on allocation failure. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* All fds start out as -1 so that unit_free() may close them unconditionally */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limiting uses the manager-wide defaults; the auto-stop rate limit is fixed */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
129
130 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
131 _cleanup_(unit_freep) Unit *u = NULL;
132 int r;
133
134 u = unit_new(m, size);
135 if (!u)
136 return -ENOMEM;
137
138 r = unit_add_name(u, name);
139 if (r < 0)
140 return r;
141
142 *ret = u;
143 u = NULL;
144 return r;
145 }
146
/* Returns true if 'name' is one of this unit's registered names (primary id or alias). */
bool unit_has_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}
153
154 static void unit_init(Unit *u) {
155 CGroupContext *cc;
156 ExecContext *ec;
157 KillContext *kc;
158
159 assert(u);
160 assert(u->manager);
161 assert(u->type >= 0);
162
163 cc = unit_get_cgroup_context(u);
164 if (cc) {
165 cgroup_context_init(cc);
166
167 /* Copy in the manager defaults into the cgroup
168 * context, _before_ the rest of the settings have
169 * been initialized */
170
171 cc->cpu_accounting = u->manager->default_cpu_accounting;
172 cc->io_accounting = u->manager->default_io_accounting;
173 cc->ip_accounting = u->manager->default_ip_accounting;
174 cc->blockio_accounting = u->manager->default_blockio_accounting;
175 cc->memory_accounting = u->manager->default_memory_accounting;
176 cc->tasks_accounting = u->manager->default_tasks_accounting;
177 cc->ip_accounting = u->manager->default_ip_accounting;
178
179 if (u->type != UNIT_SLICE)
180 cc->tasks_max = u->manager->default_tasks_max;
181 }
182
183 ec = unit_get_exec_context(u);
184 if (ec) {
185 exec_context_init(ec);
186
187 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
188 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
189 }
190
191 kc = unit_get_kill_context(u);
192 if (kc)
193 kill_context_init(kc);
194
195 if (UNIT_VTABLE(u)->init)
196 UNIT_VTABLE(u)->init(u);
197 }
198
/* Registers an additional name for this unit. If it is the unit's first
 * name it also fixes the unit's type, primary id and instance string, and
 * triggers unit_init(). Returns 0 on success (including when the name was
 * already registered for this unit), negative errno on error. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be resolved if we already carry an instance string */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Then we are done. Taken by another unit? Refuse. */
        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of a unit must agree on the unit type */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* Instantiated names are only acceptable for types supporting templates */
        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        /* A second name is an alias, which not every type supports */
        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both tables stay consistent */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name registered: this determines type, id and instance */
                u->type = t;
                u->id = s;
                u->instance = i;

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);

                i = NULL; /* ownership transferred to u->instance */
        }

        s = NULL; /* ownership transferred to u->names (and possibly u->id) */

        unit_add_to_dbus_queue(u);
        return 0;
}
283
/* Makes 'name' — which must already be one of the unit's registered names
 * (templates are resolved against the current instance first) — the unit's
 * primary id, and re-derives the instance string from it. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                /* A template can only be resolved if we carry an instance */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* u->id points into u->names, hence no free here */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
323
324 int unit_set_description(Unit *u, const char *description) {
325 int r;
326
327 assert(u);
328
329 r = free_and_strdup(&u->description, empty_to_null(description));
330 if (r < 0)
331 return r;
332 if (r > 0)
333 unit_add_to_dbus_queue(u);
334
335 return 0;
336 }
337
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending jobs pin the unit */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected */
        if (u->perpetual)
                return false;

        /* Neither are units some bus client still tracks */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type a chance to veto collection */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
406
407 void unit_add_to_load_queue(Unit *u) {
408 assert(u);
409 assert(u->type != _UNIT_TYPE_INVALID);
410
411 if (u->load_state != UNIT_STUB || u->in_load_queue)
412 return;
413
414 LIST_PREPEND(load_queue, u->manager->load_queue, u);
415 u->in_load_queue = true;
416 }
417
418 void unit_add_to_cleanup_queue(Unit *u) {
419 assert(u);
420
421 if (u->in_cleanup_queue)
422 return;
423
424 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
425 u->in_cleanup_queue = true;
426 }
427
428 void unit_add_to_gc_queue(Unit *u) {
429 assert(u);
430
431 if (u->in_gc_queue || u->in_cleanup_queue)
432 return;
433
434 if (!unit_may_gc(u))
435 return;
436
437 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
438 u->in_gc_queue = true;
439 }
440
441 void unit_add_to_dbus_queue(Unit *u) {
442 assert(u);
443 assert(u->type != _UNIT_TYPE_INVALID);
444
445 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
446 return;
447
448 /* Shortcut things if nobody cares */
449 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
450 sd_bus_track_count(u->bus_track) <= 0 &&
451 set_isempty(u->manager->private_buses)) {
452 u->sent_dbus_new_signal = true;
453 return;
454 }
455
456 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
457 u->in_dbus_queue = true;
458 }
459
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Remove u from every dependency set of the peer unit */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                /* The peer may have become eligible for collection now */
                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
480
/* Deletes the on-disk configuration of a transient unit: its fragment file
 * and any drop-ins that live below the transient lookup path. No-op for
 * non-transient units. All unlink/rmdir failures are deliberately ignored
 * (best effort). */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* only succeeds once the directory is empty */
        }
}
511
/* Detaches the unit from the manager's units_requiring_mounts_for index:
 * for every registered path, the unit is removed from the sets keyed by
 * each of the path's prefixes; sets that become empty are freed together
 * with their (heap-allocated) keys. Finally frees the per-unit map. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Steal one path at a time; we own the key now and free it via cleanup */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk all prefixes of the path ("/", "/foo", "/foo/bar", ...) */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y receives the map's own key pointer, needed for freeing below */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix? Drop the entry entirely. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
545
546 static void unit_done(Unit *u) {
547 ExecContext *ec;
548 CGroupContext *cc;
549
550 assert(u);
551
552 if (u->type < 0)
553 return;
554
555 if (UNIT_VTABLE(u)->done)
556 UNIT_VTABLE(u)->done(u);
557
558 ec = unit_get_exec_context(u);
559 if (ec)
560 exec_context_done(ec);
561
562 cc = unit_get_cgroup_context(u);
563 if (cc)
564 cgroup_context_done(cc);
565 }
566
/* Releases everything the unit owns and detaches it from all manager data
 * structures and queues. Safe to call with NULL. The teardown order below
 * is deliberate; be careful when changing it. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* Keep the transient unit files on disk across daemon reloads */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's global name table */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any jobs still attached to us */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Free the dependency sets, fixing up the inverse pointers in peer units */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Drop references held by us, and references others hold on us */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove us from every manager queue we might sit in */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        /* Close the IP accounting/filtering fds; these were initialized to -1 in unit_new() */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id points into this set, so this frees the id as well */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
686
687 UnitActiveState unit_active_state(Unit *u) {
688 assert(u);
689
690 if (u->load_state == UNIT_MERGED)
691 return unit_active_state(unit_follow_merge(u));
692
693 /* After a reload it might happen that a unit is not correctly
694 * loaded but still has a process around. That's why we won't
695 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
696
697 return UNIT_VTABLE(u)->active_state(u);
698 }
699
/* Returns the type-specific, low-level sub-state string of the unit. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
705
706 static int set_complete_move(Set **s, Set **other) {
707 assert(s);
708 assert(other);
709
710 if (!other)
711 return 0;
712
713 if (*s)
714 return set_move(*s, *other);
715 else
716 *s = TAKE_PTR(*other);
717
718 return 0;
719 }
720
721 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
722 assert(s);
723 assert(other);
724
725 if (!*other)
726 return 0;
727
728 if (*s)
729 return hashmap_move(*s, *other);
730 else
731 *s = TAKE_PTR(*other);
732
733 return 0;
734 }
735
/* Transfers all of other's names to u and repoints the manager's global
 * name table at u. Afterwards 'other' carries no names and no id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* If both sets existed, other->names still holds the (now merged-away) strings */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Every name — old and newly inherited — must now resolve to u */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
757
/* Pre-allocates room in u's dependency hashmap of type 'd' so that the
 * subsequent merge_dependencies() cannot fail with OOM mid-way. */
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}
778
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u'.
         * 'other_id' is a stable copy of other's original id, used only for log messages. */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                /* Union of the origin/destination masks of both dependencies */
                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Cannot fail with OOM: at worst it replaces an existing entry */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
842
/* Merges unit 'other' into 'u': all of other's names and dependencies are
 * transferred to u, other is marked UNIT_MERGED (pointing at u) and queued
 * for cleanup. Only allowed while 'other' is still a stub (or not found)
 * and otherwise idle. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        /* Both units must agree on type and on whether they are instanced */
        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        /* Refuse to merge a unit that is already loaded or has work pending */
        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of the id for logging; merge_names() clears other->id */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
920
921 int unit_merge_by_name(Unit *u, const char *name) {
922 _cleanup_free_ char *s = NULL;
923 Unit *other;
924 int r;
925
926 assert(u);
927 assert(name);
928
929 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
930 if (!u->instance)
931 return -EINVAL;
932
933 r = unit_name_replace_instance(name, u->instance, &s);
934 if (r < 0)
935 return r;
936
937 name = s;
938 }
939
940 other = manager_get_unit(u->manager, name);
941 if (other)
942 return unit_merge(u, other);
943
944 return unit_add_name(u, name);
945 }
946
/* Chases the merged_into chain until reaching a unit that was not itself
 * merged away; returns u unchanged if it was never merged. */
Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}
955
/* Adds the implicit dependencies that follow from an exec context:
 * RequiresMountsFor= on the working/root directories and all configured
 * runtime/state/... directories, and — for the system manager only —
 * ordering against tmpfiles-setup (for PrivateTmp=) and journald (when
 * logging to syslog/kmsg/journal). Returns 0 on success, negative errno
 * on failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require mounts for each configured directory, resolved against the manager's per-type prefix */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system instance */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1035
1036 const char *unit_description(Unit *u) {
1037 assert(u);
1038
1039 if (u->description)
1040 return u->description;
1041
1042 return strna(u->id);
1043 }
1044
/* Writes each flag set in 'mask' to f as "<kind>-<flagname>", separated by
 * spaces. *space tracks whether a separator is needed and is shared across
 * successive calls (e.g. origin then destination masks on one line). Every
 * bit of the mask must be covered by the table, otherwise the final assert
 * trips. */
static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                /* All bits accounted for already? Then we are done early. */
                if (mask == 0)
                        break;

                if ((mask & table[i].mask) == table[i].mask) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        /* Clear the bits we just printed */
                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}
1086
1087 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1088 char *t, **j;
1089 UnitDependency d;
1090 Iterator i;
1091 const char *prefix2;
1092 char
1093 timestamp0[FORMAT_TIMESTAMP_MAX],
1094 timestamp1[FORMAT_TIMESTAMP_MAX],
1095 timestamp2[FORMAT_TIMESTAMP_MAX],
1096 timestamp3[FORMAT_TIMESTAMP_MAX],
1097 timestamp4[FORMAT_TIMESTAMP_MAX],
1098 timespan[FORMAT_TIMESPAN_MAX];
1099 Unit *following;
1100 _cleanup_set_free_ Set *following_set = NULL;
1101 const char *n;
1102 CGroupMask m;
1103 int r;
1104
1105 assert(u);
1106 assert(u->type >= 0);
1107
1108 prefix = strempty(prefix);
1109 prefix2 = strjoina(prefix, "\t");
1110
1111 fprintf(f,
1112 "%s-> Unit %s:\n"
1113 "%s\tDescription: %s\n"
1114 "%s\tInstance: %s\n"
1115 "%s\tUnit Load State: %s\n"
1116 "%s\tUnit Active State: %s\n"
1117 "%s\tState Change Timestamp: %s\n"
1118 "%s\tInactive Exit Timestamp: %s\n"
1119 "%s\tActive Enter Timestamp: %s\n"
1120 "%s\tActive Exit Timestamp: %s\n"
1121 "%s\tInactive Enter Timestamp: %s\n"
1122 "%s\tMay GC: %s\n"
1123 "%s\tNeed Daemon Reload: %s\n"
1124 "%s\tTransient: %s\n"
1125 "%s\tPerpetual: %s\n"
1126 "%s\tGarbage Collection Mode: %s\n"
1127 "%s\tSlice: %s\n"
1128 "%s\tCGroup: %s\n"
1129 "%s\tCGroup realized: %s\n",
1130 prefix, u->id,
1131 prefix, unit_description(u),
1132 prefix, strna(u->instance),
1133 prefix, unit_load_state_to_string(u->load_state),
1134 prefix, unit_active_state_to_string(unit_active_state(u)),
1135 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1136 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1137 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1138 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1139 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1140 prefix, yes_no(unit_may_gc(u)),
1141 prefix, yes_no(unit_need_daemon_reload(u)),
1142 prefix, yes_no(u->transient),
1143 prefix, yes_no(u->perpetual),
1144 prefix, collect_mode_to_string(u->collect_mode),
1145 prefix, strna(unit_slice_name(u)),
1146 prefix, strna(u->cgroup_path),
1147 prefix, yes_no(u->cgroup_realized));
1148
1149 if (u->cgroup_realized_mask != 0) {
1150 _cleanup_free_ char *s = NULL;
1151 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1152 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1153 }
1154 if (u->cgroup_enabled_mask != 0) {
1155 _cleanup_free_ char *s = NULL;
1156 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1157 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1158 }
1159 m = unit_get_own_mask(u);
1160 if (m != 0) {
1161 _cleanup_free_ char *s = NULL;
1162 (void) cg_mask_to_string(m, &s);
1163 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1164 }
1165 m = unit_get_members_mask(u);
1166 if (m != 0) {
1167 _cleanup_free_ char *s = NULL;
1168 (void) cg_mask_to_string(m, &s);
1169 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1170 }
1171
1172 SET_FOREACH(t, u->names, i)
1173 fprintf(f, "%s\tName: %s\n", prefix, t);
1174
1175 if (!sd_id128_is_null(u->invocation_id))
1176 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1177 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1178
1179 STRV_FOREACH(j, u->documentation)
1180 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1181
1182 following = unit_following(u);
1183 if (following)
1184 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1185
1186 r = unit_following_set(u, &following_set);
1187 if (r >= 0) {
1188 Unit *other;
1189
1190 SET_FOREACH(other, following_set, i)
1191 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1192 }
1193
1194 if (u->fragment_path)
1195 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1196
1197 if (u->source_path)
1198 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1199
1200 STRV_FOREACH(j, u->dropin_paths)
1201 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1202
1203 if (u->failure_action != EMERGENCY_ACTION_NONE)
1204 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1205 if (u->success_action != EMERGENCY_ACTION_NONE)
1206 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1207
1208 if (u->job_timeout != USEC_INFINITY)
1209 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1210
1211 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1212 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1213
1214 if (u->job_timeout_reboot_arg)
1215 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1216
1217 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1218 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1219
1220 if (dual_timestamp_is_set(&u->condition_timestamp))
1221 fprintf(f,
1222 "%s\tCondition Timestamp: %s\n"
1223 "%s\tCondition Result: %s\n",
1224 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1225 prefix, yes_no(u->condition_result));
1226
1227 if (dual_timestamp_is_set(&u->assert_timestamp))
1228 fprintf(f,
1229 "%s\tAssert Timestamp: %s\n"
1230 "%s\tAssert Result: %s\n",
1231 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1232 prefix, yes_no(u->assert_result));
1233
1234 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1235 UnitDependencyInfo di;
1236 Unit *other;
1237
1238 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1239 bool space = false;
1240
1241 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1242
1243 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1244 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1245
1246 fputs(")\n", f);
1247 }
1248 }
1249
1250 if (!hashmap_isempty(u->requires_mounts_for)) {
1251 UnitDependencyInfo di;
1252 const char *path;
1253
1254 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1255 bool space = false;
1256
1257 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1258
1259 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1260 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1261
1262 fputs(")\n", f);
1263 }
1264 }
1265
1266 if (u->load_state == UNIT_LOADED) {
1267
1268 fprintf(f,
1269 "%s\tStopWhenUnneeded: %s\n"
1270 "%s\tRefuseManualStart: %s\n"
1271 "%s\tRefuseManualStop: %s\n"
1272 "%s\tDefaultDependencies: %s\n"
1273 "%s\tOnFailureJobMode: %s\n"
1274 "%s\tIgnoreOnIsolate: %s\n",
1275 prefix, yes_no(u->stop_when_unneeded),
1276 prefix, yes_no(u->refuse_manual_start),
1277 prefix, yes_no(u->refuse_manual_stop),
1278 prefix, yes_no(u->default_dependencies),
1279 prefix, job_mode_to_string(u->on_failure_job_mode),
1280 prefix, yes_no(u->ignore_on_isolate));
1281
1282 if (UNIT_VTABLE(u)->dump)
1283 UNIT_VTABLE(u)->dump(u, f, prefix2);
1284
1285 } else if (u->load_state == UNIT_MERGED)
1286 fprintf(f,
1287 "%s\tMerged into: %s\n",
1288 prefix, u->merged_into->id);
1289 else if (u->load_state == UNIT_ERROR)
1290 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1291
1292 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1293 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1294
1295 if (u->job)
1296 job_dump(u->job, f, prefix2);
1297
1298 if (u->nop_job)
1299 job_dump(u->nop_job, f, prefix2);
1300 }
1301
1302 /* Common implementation for multiple backends */
1303 int unit_load_fragment_and_dropin(Unit *u) {
1304 int r;
1305
1306 assert(u);
1307
1308 /* Load a .{service,socket,...} file */
1309 r = unit_load_fragment(u);
1310 if (r < 0)
1311 return r;
1312
1313 if (u->load_state == UNIT_STUB)
1314 return -ENOENT;
1315
1316 /* Load drop-in directory data. If u is an alias, we might be reloading the
1317 * target unit needlessly. But we cannot be sure which drops-ins have already
1318 * been loaded and which not, at least without doing complicated book-keeping,
1319 * so let's always reread all drop-ins. */
1320 return unit_load_dropin(unit_follow_merge(u));
1321 }
1322
1323 /* Common implementation for multiple backends */
1324 int unit_load_fragment_and_dropin_optional(Unit *u) {
1325 int r;
1326
1327 assert(u);
1328
1329 /* Same as unit_load_fragment_and_dropin(), but whether
1330 * something can be loaded or not doesn't matter. */
1331
1332 /* Load a .service file */
1333 r = unit_load_fragment(u);
1334 if (r < 0)
1335 return r;
1336
1337 if (u->load_state == UNIT_STUB)
1338 u->load_state = UNIT_LOADED;
1339
1340 /* Load drop-in directory data */
1341 return unit_load_dropin(unit_follow_merge(u));
1342 }
1343
1344 void unit_add_to_target_deps_queue(Unit *u) {
1345 Manager *m = u->manager;
1346
1347 assert(u);
1348
1349 if (u->in_target_deps_queue)
1350 return;
1351
1352 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1353 u->in_target_deps_queue = true;
1354 }
1355
1356 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1357 assert(u);
1358 assert(target);
1359
1360 if (target->type != UNIT_TARGET)
1361 return 0;
1362
1363 /* Only add the dependency if both units are loaded, so that
1364 * that loop check below is reliable */
1365 if (u->load_state != UNIT_LOADED ||
1366 target->load_state != UNIT_LOADED)
1367 return 0;
1368
1369 /* If either side wants no automatic dependencies, then let's
1370 * skip this */
1371 if (!u->default_dependencies ||
1372 !target->default_dependencies)
1373 return 0;
1374
1375 /* Don't create loops */
1376 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1377 return 0;
1378
1379 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1380 }
1381
1382 static int unit_add_slice_dependencies(Unit *u) {
1383 UnitDependencyMask mask;
1384 assert(u);
1385
1386 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1387 return 0;
1388
1389 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1390 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1391 relationship). */
1392 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1393
1394 if (UNIT_ISSET(u->slice))
1395 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1396
1397 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1398 return 0;
1399
1400 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1401 }
1402
/* For every path listed in RequiresMountsFor=, adds After= (and, for units backed by a
 * fragment, Requires=) dependencies on the .mount units covering the path and each of
 * its parent directories. Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer, large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk the path and all its ancestors ("/a/b/c", "/a/b", "/a", "/"). */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Always order after the mount unit... */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* ...but only require it if the mount has its own unit file
                         * (i.e. is not merely synthesized from /proc/self/mountinfo). */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1452
1453 static int unit_add_startup_units(Unit *u) {
1454 CGroupContext *c;
1455 int r;
1456
1457 c = unit_get_cgroup_context(u);
1458 if (!c)
1459 return 0;
1460
1461 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1462 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1463 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1464 return 0;
1465
1466 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1467 if (r < 0)
1468 return r;
1469
1470 return set_put(u->manager->startup_units, u);
1471 }
1472
/* Loads the unit's configuration (fragment, drop-ins, transient data) and sets up the
 * implicit dependencies that follow from it. Returns 0 on success; on failure the unit
 * is marked UNIT_NOT_FOUND or UNIT_ERROR and the negative errno is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading it right now, so take it off the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged, or failed earlier) — nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* For transient units the configuration was written into u->transient_file;
         * flush and close it so the type-specific load below can read it back. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Hand off to the unit type's own load routine. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after loading means no configuration was found anywhere. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with exactly one OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set if and only if the state is UNIT_MERGED. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* A stub that failed to load is "not found"; anything else is a hard load error. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1553
/* Evaluates a list of Condition (or Assert) entries. Non-trigger entries are AND-ed;
 * trigger entries ('|') are OR-ed among themselves. Returns true if the whole list
 * passes. 'to_string' renders the condition type for log output. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        /* Tri-state: -1 = no trigger condition seen yet, 0 = all triggers failed so far,
         * 1 = at least one trigger succeeded. */
        int triggered = -1;

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                /* r > 0: holds; r == 0: fails; r < 0: the test itself errored,
                 * which is treated like a failure below. */
                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failing non-trigger condition is fatal for the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Record trigger results, but never downgrade a success (triggered == 1). */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and every one of them failed. */
        return triggered != 0;
}
1597
1598 static bool unit_condition_test(Unit *u) {
1599 assert(u);
1600
1601 dual_timestamp_get(&u->condition_timestamp);
1602 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1603
1604 return u->condition_result;
1605 }
1606
1607 static bool unit_assert_test(Unit *u) {
1608 assert(u);
1609
1610 dual_timestamp_get(&u->assert_timestamp);
1611 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1612
1613 return u->assert_result;
1614 }
1615
/* Prints a console status line for the unit. 'unit_status_msg_format' must contain
 * exactly one %s, which is substituted with the unit's description — hence the
 * non-literal-format warning is suppressed around the call. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1621
1622 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1623 const char *format;
1624 const UnitStatusMessageFormats *format_table;
1625
1626 assert(u);
1627 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1628
1629 if (t != JOB_RELOAD) {
1630 format_table = &UNIT_VTABLE(u)->status_message_formats;
1631 if (format_table) {
1632 format = format_table->starting_stopping[t == JOB_STOP];
1633 if (format)
1634 return format;
1635 }
1636 }
1637
1638 /* Return generic strings */
1639 if (t == JOB_START)
1640 return "Starting %s.";
1641 else if (t == JOB_STOP)
1642 return "Stopping %s.";
1643 else
1644 return "Reloading %s.";
1645 }
1646
1647 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1648 const char *format;
1649
1650 assert(u);
1651
1652 /* Reload status messages have traditionally not been printed to console. */
1653 if (!IN_SET(t, JOB_START, JOB_STOP))
1654 return;
1655
1656 format = unit_get_status_message_format(u, t);
1657
1658 DISABLE_WARNING_FORMAT_NONLITERAL;
1659 unit_status_printf(u, "", format);
1660 REENABLE_WARNING;
1661 }
1662
/* Writes the "Starting/Stopping/Reloading …" status message to the journal, tagged
 * with the matching MESSAGE_ID so tools can filter on it. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* If we already log to the console, don't duplicate the message in the journal. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* 'format' is a runtime-selected printf format with a single %s — suppress the
         * non-literal-format warning around the snprintf. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the journal message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid,
                   NULL);
}
1700
/* Announces a start/stop/reload operation both in the journal and, for start/stop,
 * on the console. The journal entry is written first, then the console line. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        unit_status_log_starting_stopping_reloading(u, t);
        unit_status_print_starting_stopping(u, t);
}
1709
1710 int unit_start_limit_test(Unit *u) {
1711 assert(u);
1712
1713 if (ratelimit_test(&u->start_limit)) {
1714 u->start_limit_hit = false;
1715 return 0;
1716 }
1717
1718 log_unit_warning(u, "Start request repeated too quickly.");
1719 u->start_limit_hit = true;
1720
1721 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1722 }
1723
1724 bool unit_shall_confirm_spawn(Unit *u) {
1725 assert(u);
1726
1727 if (manager_is_confirm_spawn_disabled(u->manager))
1728 return false;
1729
1730 /* For some reasons units remaining in the same process group
1731 * as PID 1 fail to acquire the console even if it's not used
1732 * by any process. So skip the confirmation question for them. */
1733 return !unit_get_exec_context(u)->same_pgrp;
1734 }
1735
1736 static bool unit_verify_deps(Unit *u) {
1737 Unit *other;
1738 Iterator j;
1739 void *v;
1740
1741 assert(u);
1742
1743 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1744 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1745 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1746 * conjunction with After= as for them any such check would make things entirely racy. */
1747
1748 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1749
1750 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1751 continue;
1752
1753 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1754 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1755 return false;
1756 }
1757 }
1758
1759 return true;
1760 }
1761
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                /* A failed condition is not treated as an error by callers, hence -EALREADY. */
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1843
1844 bool unit_can_start(Unit *u) {
1845 assert(u);
1846
1847 if (u->load_state != UNIT_LOADED)
1848 return false;
1849
1850 if (!unit_supported(u))
1851 return false;
1852
1853 return !!UNIT_VTABLE(u)->start;
1854 }
1855
1856 bool unit_can_isolate(Unit *u) {
1857 assert(u);
1858
1859 return unit_can_start(u) &&
1860 u->allow_isolate;
1861 }
1862
1863 /* Errors:
1864 * -EBADR: This unit type does not support stopping.
1865 * -EALREADY: Unit is already stopped.
1866 * -EAGAIN: An operation is already in progress. Retry later.
1867 */
1868 int unit_stop(Unit *u) {
1869 UnitActiveState state;
1870 Unit *following;
1871
1872 assert(u);
1873
1874 state = unit_active_state(u);
1875 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1876 return -EALREADY;
1877
1878 following = unit_following(u);
1879 if (following) {
1880 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1881 return unit_stop(following);
1882 }
1883
1884 if (!UNIT_VTABLE(u)->stop)
1885 return -EBADR;
1886
1887 unit_add_to_dbus_queue(u);
1888
1889 return UNIT_VTABLE(u)->stop(u);
1890 }
1891
1892 bool unit_can_stop(Unit *u) {
1893 assert(u);
1894
1895 if (!unit_supported(u))
1896 return false;
1897
1898 if (u->perpetual)
1899 return false;
1900
1901 return !!UNIT_VTABLE(u)->stop;
1902 }
1903
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only active units can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1946
1947 bool unit_can_reload(Unit *u) {
1948 assert(u);
1949
1950 if (UNIT_VTABLE(u)->can_reload)
1951 return UNIT_VTABLE(u)->can_reload(u);
1952
1953 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1954 return true;
1955
1956 return UNIT_VTABLE(u)->reload;
1957 }
1958
/* If the unit has StopWhenUnneeded=yes and no remaining dependent keeps it alive,
 * enqueues a stop job for it (rate-limited to avoid stop loops). */
static void unit_check_unneeded(Unit *u) {

        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        /* The reverse dependency types that make a unit "needed". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* Any dependent that is active, has a pending job, or will restart keeps us up. */
                HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other) || unit_will_restart(other))
                                return;
        }

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2009
/* If any BindsTo= dependency of this active unit has gone down (and no job is pending
 * that might bring it back), enqueues a stop job for this unit (rate-limited). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;          /* after the loop: the dead unit we are bound to, if stop is set */
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A pending job on ourselves will sort this out anyway. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                /* A queued job on the other unit may still start it — don't act yet. */
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        /* stop == true implies the loop broke with 'other' pointing at the dead unit. */
        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2060
/* Invoked when a unit turns active outside of job processing: starts the units it
 * requires/wants and stops its conflicts, mimicking what a start job would have done.
 * Job enqueue errors are deliberately ignored (best effort). */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= deps are started with JOB_REPLACE; units already ordered After= us
         * are skipped, since their start is driven by the ordering anyway. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= deps use JOB_FAIL so they don't displace existing jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units are stopped in both directions. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2092
2093 static void retroactively_stop_dependencies(Unit *u) {
2094 Unit *other;
2095 Iterator i;
2096 void *v;
2097
2098 assert(u);
2099 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2100
2101 /* Pull down units which are bound to us recursively if enabled */
2102 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2103 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2104 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2105 }
2106
2107 static void check_unneeded_dependencies(Unit *u) {
2108 Unit *other;
2109 Iterator i;
2110 void *v;
2111
2112 assert(u);
2113 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2114
2115 /* Garbage collect services that might not be needed anymore, if enabled */
2116 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2117 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2118 unit_check_unneeded(other);
2119 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2120 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2121 unit_check_unneeded(other);
2122 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2123 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2124 unit_check_unneeded(other);
2125 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2126 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2127 unit_check_unneeded(other);
2128 }
2129
2130 void unit_start_on_failure(Unit *u) {
2131 Unit *other;
2132 Iterator i;
2133 void *v;
2134
2135 assert(u);
2136
2137 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2138 return;
2139
2140 log_unit_info(u, "Triggering OnFailure= dependencies.");
2141
2142 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2143 int r;
2144
2145 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2146 if (r < 0)
2147 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2148 }
2149 }
2150
2151 void unit_trigger_notify(Unit *u) {
2152 Unit *other;
2153 Iterator i;
2154 void *v;
2155
2156 assert(u);
2157
2158 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2159 if (UNIT_VTABLE(other)->trigger_notify)
2160 UNIT_VTABLE(other)->trigger_notify(other, u);
2161 }
2162
/* Logs the resources (CPU time, IP traffic) a unit consumed, both as a human-readable
 * MESSAGE and as structured journal fields. Returns 0 on success or if no accounting
 * data is available, negative errno on OOM. */
static int unit_log_resources(Unit *u) {

        /* Room for the per-metric fields plus MESSAGE, MESSAGE_ID, UNIT and INVOCATION_ID. */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        size_t n_message_parts = 0, n_iovec = 0;
        char* message_parts[3 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES]    = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS]  = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "received " : "Received ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else if (m == CGROUP_IP_EGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else
                        continue;
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        /* Assemble the human-readable MESSAGE from the collected parts. Note that
         * strjoina() allocates on the stack, so 't' must not be freed below. */
        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed");
        else {
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                t = strjoina("MESSAGE=", u->id, ": ", joined);
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Only the heap-allocated entries (counted in n_message_parts/n_iovec) are freed;
         * the stack-allocated strjoina() strings above were deliberately not counted. */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2290
2291 static void unit_update_on_console(Unit *u) {
2292 bool b;
2293
2294 assert(u);
2295
2296 b = unit_needs_console(u);
2297 if (u->on_console == b)
2298 return;
2299
2300 u->on_console = b;
2301 if (b)
2302 manager_ref_console(u->manager);
2303 else
2304 manager_unref_console(u->manager);
2305
2306 }
2307
/* Central state-change hook, invoked by the unit-type implementations for every low-level state
 * transition (os = old state, ns = new state). Responsibilities, in order: update the state
 * timestamps, track failed units, prune cgroup/state files on deactivation, finish or invalidate
 * the pending job (if any), retroactively start/stop dependencies when the change was not
 * job-driven, emit audit/plymouth/resource-log records, and queue follow-up checks. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        bool unexpected;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                unexpected = true;

                                /* Start job is running but the unit left the activating path:
                                 * success or failure is decided by whether it ended up failed. */
                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2506
/* Registers this unit as interested in SIGCHLD/exit events for the given PID.
 *
 * The manager-wide watch_pids hashmap uses a two-tier scheme: the positive key "pid" maps to the
 * single primary watching Unit*, while the negated key "-pid" maps to a NULL-terminated Unit**
 * array holding any additional units watching the same PID. Returns 0 on success, negative errno
 * on allocation failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array (n entries plus ourselves plus the NULL terminator). */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        /* The hashmap now owns new_array; the superseded array can go. */
                        free(array);
                }
        } else if (r < 0)
                return r;

        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2571
2572 void unit_unwatch_pid(Unit *u, pid_t pid) {
2573 Unit **array;
2574
2575 assert(u);
2576 assert(pid_is_valid(pid));
2577
2578 /* First let's drop the unit in case it's keyed as "pid". */
2579 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2580
2581 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2582 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2583 if (array) {
2584 size_t n, m = 0;
2585
2586 /* Let's iterate through the array, dropping our own entry */
2587 for (n = 0; array[n]; n++)
2588 if (array[n] != u)
2589 array[m++] = array[n];
2590 array[m] = NULL;
2591
2592 if (m == 0) {
2593 /* The array is now empty, remove the entire entry */
2594 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2595 free(array);
2596 }
2597 }
2598
2599 (void) set_remove(u->pids, PID_TO_PTR(pid));
2600 }
2601
2602 void unit_unwatch_all_pids(Unit *u) {
2603 assert(u);
2604
2605 while (!set_isempty(u->pids))
2606 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2607
2608 u->pids = set_free(u->pids);
2609 }
2610
/* Removes watches for PIDs that no longer exist, sparing except1/except2 (callers pass the
 * unit's main/control PIDs here; -1 or 0 can be used as "no exception"). */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): unit_unwatch_pid() removes the current entry from u->pids while
                 * we iterate over it — this relies on the set implementation permitting removal
                 * of the current element during SET_FOREACH; confirm against hashmap/set docs. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2629
2630 bool unit_job_is_applicable(Unit *u, JobType j) {
2631 assert(u);
2632 assert(j >= 0 && j < _JOB_TYPE_MAX);
2633
2634 switch (j) {
2635
2636 case JOB_VERIFY_ACTIVE:
2637 case JOB_START:
2638 case JOB_NOP:
2639 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2640 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2641 * jobs for it. */
2642 return true;
2643
2644 case JOB_STOP:
2645 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2646 * external events), hence it makes no sense to permit enqueing such a request either. */
2647 return !u->perpetual;
2648
2649 case JOB_RESTART:
2650 case JOB_TRY_RESTART:
2651 return unit_can_stop(u) && unit_can_start(u);
2652
2653 case JOB_RELOAD:
2654 case JOB_TRY_RELOAD:
2655 return unit_can_reload(u);
2656
2657 case JOB_RELOAD_OR_START:
2658 return unit_can_reload(u) && unit_can_start(u);
2659
2660 default:
2661 assert_not_reached("Invalid job type");
2662 }
2663 }
2664
/* Logs a warning when a dependency collapses onto the unit itself (typically after two units
 * were merged). Only the dependency types where such a drop is likely surprising are reported;
 * the harmless ones (Wants=, Requires=, ...) are silently ignored. */
static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
        assert(u);

        /* Only warn about some unit types */
        if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
                return;

        if (streq_ptr(u->id, other))
                log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
        else
                log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
}
2677
/* Inserts (or widens) a dependency entry for `other` in the hashmap *h, merging the given
 * origin/destination masks into any pre-existing entry.
 *
 * The masks are packed into a UnitDependencyInfo whose bit pattern is stored directly as the
 * hashmap value pointer (the assert_cc below guarantees the struct fits in a void*).
 * Returns 1 if the entry was added or changed, 0 if it already covered both masks, negative
 * errno on allocation failure. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* The info struct is type-punned through the hashmap's void* value slot. */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if ((info.origin_mask & origin_mask) == info.origin_mask &&
                    (info.destination_mask & destination_mask) == info.destination_mask)
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2724
2725 int unit_add_dependency(
2726 Unit *u,
2727 UnitDependency d,
2728 Unit *other,
2729 bool add_reference,
2730 UnitDependencyMask mask) {
2731
2732 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2733 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2734 [UNIT_WANTS] = UNIT_WANTED_BY,
2735 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2736 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2737 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2738 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2739 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2740 [UNIT_WANTED_BY] = UNIT_WANTS,
2741 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2742 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2743 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2744 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2745 [UNIT_BEFORE] = UNIT_AFTER,
2746 [UNIT_AFTER] = UNIT_BEFORE,
2747 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2748 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2749 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2750 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2751 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2752 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2753 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2754 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2755 };
2756 Unit *original_u = u, *original_other = other;
2757 int r;
2758
2759 assert(u);
2760 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2761 assert(other);
2762
2763 u = unit_follow_merge(u);
2764 other = unit_follow_merge(other);
2765
2766 /* We won't allow dependencies on ourselves. We will not
2767 * consider them an error however. */
2768 if (u == other) {
2769 maybe_warn_about_dependency(original_u, original_other->id, d);
2770 return 0;
2771 }
2772
2773 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2774 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2775 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2776 return 0;
2777 }
2778
2779 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2780 if (r < 0)
2781 return r;
2782
2783 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2784 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2785 if (r < 0)
2786 return r;
2787 }
2788
2789 if (add_reference) {
2790 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2791 if (r < 0)
2792 return r;
2793
2794 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2795 if (r < 0)
2796 return r;
2797 }
2798
2799 unit_add_to_dbus_queue(u);
2800 return 0;
2801 }
2802
2803 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2804 int r;
2805
2806 assert(u);
2807
2808 r = unit_add_dependency(u, d, other, add_reference, mask);
2809 if (r < 0)
2810 return r;
2811
2812 return unit_add_dependency(u, e, other, add_reference, mask);
2813 }
2814
/* Resolves a possibly-templated unit name against u's own instance.
 *
 * If `name` (or the basename of `path` when name is NULL) is a template ("foo@.service"), it is
 * instantiated with u's instance string, or with u's name prefix when u itself has no instance.
 * Ownership: on success *ret always points to the name to use; *buf holds a newly allocated
 * string the caller must free iff instantiation happened, otherwise *buf is NULL and *ret
 * aliases the caller-supplied input. */
static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name || path);
        assert(buf);
        assert(ret);

        if (!name)
                name = basename(path);

        /* Non-template names pass through unchanged, nothing allocated. */
        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                /* No instance of our own: fall back to our name prefix as the instance. */
                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}
2849
/* Like unit_add_dependency(), but takes the target by name and/or path instead of a Unit*:
 * template names are instantiated against u first, then the target unit is loaded (created if
 * necessary) before the dependency is added. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name || path);

        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, path, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}
2868
/* Like unit_add_two_dependencies(), but takes the target by name and/or path: resolves
 * template names against u, loads the target unit, then registers both dependency types. */
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name || path);

        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, path, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}
2887
/* Debugging helper: points the manager at an alternative unit search path by exporting
 * SYSTEMD_UNIT_PATH. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) >= 0)
                return 0;

        return -errno;
}
2895
2896 char *unit_dbus_path(Unit *u) {
2897 assert(u);
2898
2899 if (!u->id)
2900 return NULL;
2901
2902 return unit_dbus_path_from_name(u->id);
2903 }
2904
2905 char *unit_dbus_path_invocation_id(Unit *u) {
2906 assert(u);
2907
2908 if (sd_id128_is_null(u->invocation_id))
2909 return NULL;
2910
2911 return unit_dbus_path_from_name(u->invocation_id_string);
2912 }
2913
/* Attaches the unit to the given slice. Returns 1 when the slice was set, 0 when it was already
 * this slice, and a negative error when the combination is not permitted. The order of the
 * checks determines which error callers see, so it must not be reshuffled. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Re-parenting an already-running unit would confuse cgroup tracking. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope must stay directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
2950
/* Picks and sets a default slice for the unit if none is configured: instantiated units get a
 * per-template "system-<prefix>.slice" (or "<prefix>.slice" for user managers), everything else
 * lands in system.slice (system manager) or the root slice. No-op (returns 0) when a slice is
 * already set. */
int unit_set_default_slice(Unit *u) {
        _cleanup_free_ char *b = NULL;
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        b = strjoin("system-", escaped, ".slice");
                else
                        b = strappend(escaped, ".slice");
                if (!b)
                        return -ENOMEM;

                slice_name = b;
        } else
                /* init.scope is special and stays in the root slice. */
                slice_name =
                        MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
2999
3000 const char *unit_slice_name(Unit *u) {
3001 assert(u);
3002
3003 if (!UNIT_ISSET(u->slice))
3004 return NULL;
3005
3006 return UNIT_DEREF(u->slice)->id;
3007 }
3008
/* Loads the unit whose name is u's name with the suffix swapped for `type` (e.g. "foo.service"
 * with type ".socket" -> "foo.socket") and returns it in *_found. Returns -EINVAL if the
 * derived name is one of u's own names. */
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        /* manager_load_unit() can only hand back a different unit, never u itself. */
        assert(r < 0 || *_found != u);
        return r;
}
3027
/* sd-bus callback for the NameOwnerChanged signal installed by unit_install_bus_match():
 * forwards the (name, old owner, new owner) triple to the unit type's handler. Parse failures
 * are logged and swallowed (returning 0 keeps the match slot alive). */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *name, *old_owner, *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        /* The bus encodes "no owner" as the empty string; normalize to NULL for the vtable. */
        old_owner = empty_to_null(old_owner);
        new_owner = empty_to_null(new_owner);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);

        return 0;
}
3050
/* Asynchronously installs a NameOwnerChanged match for `name` on the given bus, storing the
 * match slot in u->match_bus_slot. Returns -EBUSY if a match is already installed for this
 * unit. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* arg0 restricts the match to ownership changes of exactly this bus name. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
}
3070
/* Starts watching ownership of a bus name on behalf of this unit. At most one unit may watch a
 * given name (enforced by the watch_bus hashmap). If the API bus is not up yet, only the
 * hashmap entry is created and bus_setup_api() installs the match later. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match slot so a later retry starts from a clean state. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3096
/* Stops watching a bus name: drops the watch_bus entry (only if it still points at this unit)
 * and releases the NameOwnerChanged match slot. Safe to call if nothing was watched. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
}
3104
3105 bool unit_can_serialize(Unit *u) {
3106 assert(u);
3107
3108 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3109 }
3110
3111 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3112 _cleanup_free_ char *s = NULL;
3113 int r = 0;
3114
3115 assert(f);
3116 assert(key);
3117
3118 if (mask != 0) {
3119 r = cg_mask_to_string(mask, &s);
3120 if (r >= 0) {
3121 fputs(key, f);
3122 fputc('=', f);
3123 fputs(s, f);
3124 fputc('\n', f);
3125 }
3126 }
3127 return r;
3128 }
3129
/* Serialization keys for the per-unit IP accounting counters, indexed by
 * CGroupIPAccountingMetric; used by unit_serialize() and unit_deserialize(). */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3136
/* Writes the unit's runtime state as "key=value" lines to f (used across daemon re-exec), with
 * fds collecting any file descriptors that must survive. The emitted keys form a wire format
 * that unit_deserialize() must understand — do not rename or reorder them casually. A blank
 * line terminates the record. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Unit-type specific state first, if the type supports serialization at all. */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful once the checks actually ran. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* Persist the IP accounting counters so they survive the re-exec. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3218
3219 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3220 assert(u);
3221 assert(f);
3222 assert(key);
3223
3224 if (!value)
3225 return 0;
3226
3227 fputs(key, f);
3228 fputc('=', f);
3229 fputs(value, f);
3230 fputc('\n', f);
3231
3232 return 1;
3233 }
3234
3235 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3236 _cleanup_free_ char *c = NULL;
3237
3238 assert(u);
3239 assert(f);
3240 assert(key);
3241
3242 if (!value)
3243 return 0;
3244
3245 c = cescape(value);
3246 if (!c)
3247 return -ENOMEM;
3248
3249 fputs(key, f);
3250 fputc('=', f);
3251 fputs(c, f);
3252 fputc('\n', f);
3253
3254 return 1;
3255 }
3256
/* Serializes a file descriptor: duplicates it into the fd set (so it survives the re-exec) and
 * writes the duplicate's number as "key=N". Negative fds mean "not set" and are skipped.
 * Returns 1 when written, 0 when skipped, negative errno on failure. */
int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
        int copy;

        assert(u);
        assert(f);
        assert(key);

        if (fd < 0)
                return 0;

        copy = fdset_put_dup(fds, fd);
        if (copy < 0)
                return copy;

        fprintf(f, "%s=%i\n", key, copy);
        return 1;
}
3274
/* printf-style variant of unit_serialize_item(): writes "key=" followed by the formatted
 * value and a newline. Unlike the other helpers this always writes and returns nothing. */
void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
        va_list ap;

        assert(u);
        assert(f);
        assert(key);
        assert(format);

        fputs(key, f);
        fputc('=', f);

        va_start(ap, format);
        vfprintf(f, format, ap);
        va_end(ap);

        fputc('\n', f);
}
3292
3293 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3294 int r;
3295
3296 assert(u);
3297 assert(f);
3298 assert(fds);
3299
3300 for (;;) {
3301 char line[LINE_MAX], *l, *v;
3302 CGroupIPAccountingMetric m;
3303 size_t k;
3304
3305 if (!fgets(line, sizeof(line), f)) {
3306 if (feof(f))
3307 return 0;
3308 return -errno;
3309 }
3310
3311 char_array_0(line);
3312 l = strstrip(line);
3313
3314 /* End marker */
3315 if (isempty(l))
3316 break;
3317
3318 k = strcspn(l, "=");
3319
3320 if (l[k] == '=') {
3321 l[k] = 0;
3322 v = l+k+1;
3323 } else
3324 v = l+k;
3325
3326 if (streq(l, "job")) {
3327 if (v[0] == '\0') {
3328 /* new-style serialized job */
3329 Job *j;
3330
3331 j = job_new_raw(u);
3332 if (!j)
3333 return log_oom();
3334
3335 r = job_deserialize(j, f);
3336 if (r < 0) {
3337 job_free(j);
3338 return r;
3339 }
3340
3341 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3342 if (r < 0) {
3343 job_free(j);
3344 return r;
3345 }
3346
3347 r = job_install_deserialized(j);
3348 if (r < 0) {
3349 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3350 job_free(j);
3351 return r;
3352 }
3353 } else /* legacy for pre-44 */
3354 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3355 continue;
3356 } else if (streq(l, "state-change-timestamp")) {
3357 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3358 continue;
3359 } else if (streq(l, "inactive-exit-timestamp")) {
3360 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3361 continue;
3362 } else if (streq(l, "active-enter-timestamp")) {
3363 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3364 continue;
3365 } else if (streq(l, "active-exit-timestamp")) {
3366 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3367 continue;
3368 } else if (streq(l, "inactive-enter-timestamp")) {
3369 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3370 continue;
3371 } else if (streq(l, "condition-timestamp")) {
3372 dual_timestamp_deserialize(v, &u->condition_timestamp);
3373 continue;
3374 } else if (streq(l, "assert-timestamp")) {
3375 dual_timestamp_deserialize(v, &u->assert_timestamp);
3376 continue;
3377 } else if (streq(l, "condition-result")) {
3378
3379 r = parse_boolean(v);
3380 if (r < 0)
3381 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3382 else
3383 u->condition_result = r;
3384
3385 continue;
3386
3387 } else if (streq(l, "assert-result")) {
3388
3389 r = parse_boolean(v);
3390 if (r < 0)
3391 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3392 else
3393 u->assert_result = r;
3394
3395 continue;
3396
3397 } else if (streq(l, "transient")) {
3398
3399 r = parse_boolean(v);
3400 if (r < 0)
3401 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3402 else
3403 u->transient = r;
3404
3405 continue;
3406
3407 } else if (streq(l, "exported-invocation-id")) {
3408
3409 r = parse_boolean(v);
3410 if (r < 0)
3411 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3412 else
3413 u->exported_invocation_id = r;
3414
3415 continue;
3416
3417 } else if (streq(l, "exported-log-level-max")) {
3418
3419 r = parse_boolean(v);
3420 if (r < 0)
3421 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3422 else
3423 u->exported_log_level_max = r;
3424
3425 continue;
3426
3427 } else if (streq(l, "exported-log-extra-fields")) {
3428
3429 r = parse_boolean(v);
3430 if (r < 0)
3431 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3432 else
3433 u->exported_log_extra_fields = r;
3434
3435 continue;
3436
3437 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3438
3439 r = safe_atou64(v, &u->cpu_usage_base);
3440 if (r < 0)
3441 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3442
3443 continue;
3444
3445 } else if (streq(l, "cpu-usage-last")) {
3446
3447 r = safe_atou64(v, &u->cpu_usage_last);
3448 if (r < 0)
3449 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3450
3451 continue;
3452
3453 } else if (streq(l, "cgroup")) {
3454
3455 r = unit_set_cgroup_path(u, v);
3456 if (r < 0)
3457 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3458
3459 (void) unit_watch_cgroup(u);
3460
3461 continue;
3462 } else if (streq(l, "cgroup-realized")) {
3463 int b;
3464
3465 b = parse_boolean(v);
3466 if (b < 0)
3467 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3468 else
3469 u->cgroup_realized = b;
3470
3471 continue;
3472
3473 } else if (streq(l, "cgroup-realized-mask")) {
3474
3475 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3476 if (r < 0)
3477 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3478 continue;
3479
3480 } else if (streq(l, "cgroup-enabled-mask")) {
3481
3482 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3483 if (r < 0)
3484 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3485 continue;
3486
3487 } else if (streq(l, "cgroup-bpf-realized")) {
3488 int i;
3489
3490 r = safe_atoi(v, &i);
3491 if (r < 0)
3492 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3493 else
3494 u->cgroup_bpf_state =
3495 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3496 i > 0 ? UNIT_CGROUP_BPF_ON :
3497 UNIT_CGROUP_BPF_OFF;
3498
3499 continue;
3500
3501 } else if (streq(l, "ref-uid")) {
3502 uid_t uid;
3503
3504 r = parse_uid(v, &uid);
3505 if (r < 0)
3506 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3507 else
3508 unit_ref_uid_gid(u, uid, GID_INVALID);
3509
3510 continue;
3511
3512 } else if (streq(l, "ref-gid")) {
3513 gid_t gid;
3514
3515 r = parse_gid(v, &gid);
3516 if (r < 0)
3517 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3518 else
3519 unit_ref_uid_gid(u, UID_INVALID, gid);
3520
3521 } else if (streq(l, "ref")) {
3522
3523 r = strv_extend(&u->deserialized_refs, v);
3524 if (r < 0)
3525 log_oom();
3526
3527 continue;
3528 } else if (streq(l, "invocation-id")) {
3529 sd_id128_t id;
3530
3531 r = sd_id128_from_string(v, &id);
3532 if (r < 0)
3533 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3534 else {
3535 r = unit_set_invocation_id(u, id);
3536 if (r < 0)
3537 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3538 }
3539
3540 continue;
3541 }
3542
3543 /* Check if this is an IP accounting metric serialization field */
3544 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3545 if (streq(l, ip_accounting_metric_field[m]))
3546 break;
3547 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3548 uint64_t c;
3549
3550 r = safe_atou64(v, &c);
3551 if (r < 0)
3552 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3553 else
3554 u->ip_accounting_extra[m] = c;
3555 continue;
3556 }
3557
3558 if (unit_can_serialize(u)) {
3559 r = exec_runtime_deserialize_compat(u, l, v, fds);
3560 if (r < 0) {
3561 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3562 continue;
3563 }
3564
3565 /* Returns positive if key was handled by the call */
3566 if (r > 0)
3567 continue;
3568
3569 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3570 if (r < 0)
3571 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3572 }
3573 }
3574
3575 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3576 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3577 * before 228 where the base for timeouts was not persistent across reboots. */
3578
3579 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3580 dual_timestamp_get(&u->state_change_timestamp);
3581
3582 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3583 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3584 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3585 unit_invalidate_cgroup_bpf(u);
3586
3587 return 0;
3588 }
3589
3590 void unit_deserialize_skip(FILE *f) {
3591 assert(f);
3592
3593 /* Skip serialized data for this unit. We don't know what it is. */
3594
3595 for (;;) {
3596 char line[LINE_MAX], *l;
3597
3598 if (!fgets(line, sizeof line, f))
3599 return;
3600
3601 char_array_0(line);
3602 l = strstrip(line);
3603
3604 /* End marker */
3605 if (isempty(l))
3606 return;
3607 }
3608 }
3609
3610
3611 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3612 Unit *device;
3613 _cleanup_free_ char *e = NULL;
3614 int r;
3615
3616 assert(u);
3617
3618 /* Adds in links to the device node that this unit is based on */
3619 if (isempty(what))
3620 return 0;
3621
3622 if (!is_device_path(what))
3623 return 0;
3624
3625 /* When device units aren't supported (such as in a
3626 * container), don't create dependencies on them. */
3627 if (!unit_type_supported(UNIT_DEVICE))
3628 return 0;
3629
3630 r = unit_name_from_path(what, ".device", &e);
3631 if (r < 0)
3632 return r;
3633
3634 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3635 if (r < 0)
3636 return r;
3637
3638 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3639 dep = UNIT_BINDS_TO;
3640
3641 r = unit_add_two_dependencies(u, UNIT_AFTER,
3642 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3643 device, true, mask);
3644 if (r < 0)
3645 return r;
3646
3647 if (wants) {
3648 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3649 if (r < 0)
3650 return r;
3651 }
3652
3653 return 0;
3654 }
3655
3656 int unit_coldplug(Unit *u) {
3657 int r = 0, q;
3658 char **i;
3659
3660 assert(u);
3661
3662 /* Make sure we don't enter a loop, when coldplugging
3663 * recursively. */
3664 if (u->coldplugged)
3665 return 0;
3666
3667 u->coldplugged = true;
3668
3669 STRV_FOREACH(i, u->deserialized_refs) {
3670 q = bus_unit_track_add_name(u, *i);
3671 if (q < 0 && r >= 0)
3672 r = q;
3673 }
3674 u->deserialized_refs = strv_free(u->deserialized_refs);
3675
3676 if (UNIT_VTABLE(u)->coldplug) {
3677 q = UNIT_VTABLE(u)->coldplug(u);
3678 if (q < 0 && r >= 0)
3679 r = q;
3680 }
3681
3682 if (u->job) {
3683 q = job_coldplug(u->job);
3684 if (q < 0 && r >= 0)
3685 r = q;
3686 }
3687
3688 return r;
3689 }
3690
3691 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3692 struct stat st;
3693
3694 if (!path)
3695 return false;
3696
3697 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3698 * are never out-of-date. */
3699 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3700 return false;
3701
3702 if (stat(path, &st) < 0)
3703 /* What, cannot access this anymore? */
3704 return true;
3705
3706 if (path_masked)
3707 /* For masked files check if they are still so */
3708 return !null_or_empty(&st);
3709 else
3710 /* For non-empty files check the mtime */
3711 return timespec_load(&st.st_mtim) > mtime;
3712
3713 return false;
3714 }
3715
3716 bool unit_need_daemon_reload(Unit *u) {
3717 _cleanup_strv_free_ char **t = NULL;
3718 char **path;
3719
3720 assert(u);
3721
3722 /* For unit files, we allow masking… */
3723 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3724 u->load_state == UNIT_MASKED))
3725 return true;
3726
3727 /* Source paths should not be masked… */
3728 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3729 return true;
3730
3731 if (u->load_state == UNIT_LOADED)
3732 (void) unit_find_dropin_paths(u, &t);
3733 if (!strv_equal(u->dropin_paths, t))
3734 return true;
3735
3736 /* … any drop-ins that are masked are simply omitted from the list. */
3737 STRV_FOREACH(path, u->dropin_paths)
3738 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3739 return true;
3740
3741 return false;
3742 }
3743
3744 void unit_reset_failed(Unit *u) {
3745 assert(u);
3746
3747 if (UNIT_VTABLE(u)->reset_failed)
3748 UNIT_VTABLE(u)->reset_failed(u);
3749
3750 RATELIMIT_RESET(u->start_limit);
3751 u->start_limit_hit = false;
3752 }
3753
3754 Unit *unit_following(Unit *u) {
3755 assert(u);
3756
3757 if (UNIT_VTABLE(u)->following)
3758 return UNIT_VTABLE(u)->following(u);
3759
3760 return NULL;
3761 }
3762
3763 bool unit_stop_pending(Unit *u) {
3764 assert(u);
3765
3766 /* This call does check the current state of the unit. It's
3767 * hence useful to be called from state change calls of the
3768 * unit itself, where the state isn't updated yet. This is
3769 * different from unit_inactive_or_pending() which checks both
3770 * the current state and for a queued job. */
3771
3772 return u->job && u->job->type == JOB_STOP;
3773 }
3774
3775 bool unit_inactive_or_pending(Unit *u) {
3776 assert(u);
3777
3778 /* Returns true if the unit is inactive or going down */
3779
3780 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3781 return true;
3782
3783 if (unit_stop_pending(u))
3784 return true;
3785
3786 return false;
3787 }
3788
3789 bool unit_active_or_pending(Unit *u) {
3790 assert(u);
3791
3792 /* Returns true if the unit is active or going up */
3793
3794 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3795 return true;
3796
3797 if (u->job &&
3798 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3799 return true;
3800
3801 return false;
3802 }
3803
3804 bool unit_will_restart(Unit *u) {
3805 assert(u);
3806
3807 if (!UNIT_VTABLE(u)->will_restart)
3808 return false;
3809
3810 return UNIT_VTABLE(u)->will_restart(u);
3811 }
3812
3813 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3814 assert(u);
3815 assert(w >= 0 && w < _KILL_WHO_MAX);
3816 assert(SIGNAL_VALID(signo));
3817
3818 if (!UNIT_VTABLE(u)->kill)
3819 return -EOPNOTSUPP;
3820
3821 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3822 }
3823
3824 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3825 Set *pid_set;
3826 int r;
3827
3828 pid_set = set_new(NULL);
3829 if (!pid_set)
3830 return NULL;
3831
3832 /* Exclude the main/control pids from being killed via the cgroup */
3833 if (main_pid > 0) {
3834 r = set_put(pid_set, PID_TO_PTR(main_pid));
3835 if (r < 0)
3836 goto fail;
3837 }
3838
3839 if (control_pid > 0) {
3840 r = set_put(pid_set, PID_TO_PTR(control_pid));
3841 if (r < 0)
3842 goto fail;
3843 }
3844
3845 return pid_set;
3846
3847 fail:
3848 set_free(pid_set);
3849 return NULL;
3850 }
3851
3852 int unit_kill_common(
3853 Unit *u,
3854 KillWho who,
3855 int signo,
3856 pid_t main_pid,
3857 pid_t control_pid,
3858 sd_bus_error *error) {
3859
3860 int r = 0;
3861 bool killed = false;
3862
3863 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3864 if (main_pid < 0)
3865 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3866 else if (main_pid == 0)
3867 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3868 }
3869
3870 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3871 if (control_pid < 0)
3872 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3873 else if (control_pid == 0)
3874 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3875 }
3876
3877 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3878 if (control_pid > 0) {
3879 if (kill(control_pid, signo) < 0)
3880 r = -errno;
3881 else
3882 killed = true;
3883 }
3884
3885 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3886 if (main_pid > 0) {
3887 if (kill(main_pid, signo) < 0)
3888 r = -errno;
3889 else
3890 killed = true;
3891 }
3892
3893 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3894 _cleanup_set_free_ Set *pid_set = NULL;
3895 int q;
3896
3897 /* Exclude the main/control pids from being killed via the cgroup */
3898 pid_set = unit_pid_set(main_pid, control_pid);
3899 if (!pid_set)
3900 return -ENOMEM;
3901
3902 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3903 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3904 r = q;
3905 else
3906 killed = true;
3907 }
3908
3909 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3910 return -ESRCH;
3911
3912 return r;
3913 }
3914
3915 int unit_following_set(Unit *u, Set **s) {
3916 assert(u);
3917 assert(s);
3918
3919 if (UNIT_VTABLE(u)->following_set)
3920 return UNIT_VTABLE(u)->following_set(u, s);
3921
3922 *s = NULL;
3923 return 0;
3924 }
3925
3926 UnitFileState unit_get_unit_file_state(Unit *u) {
3927 int r;
3928
3929 assert(u);
3930
3931 if (u->unit_file_state < 0 && u->fragment_path) {
3932 r = unit_file_get_state(
3933 u->manager->unit_file_scope,
3934 NULL,
3935 u->id,
3936 &u->unit_file_state);
3937 if (r < 0)
3938 u->unit_file_state = UNIT_FILE_BAD;
3939 }
3940
3941 return u->unit_file_state;
3942 }
3943
3944 int unit_get_unit_file_preset(Unit *u) {
3945 assert(u);
3946
3947 if (u->unit_file_preset < 0 && u->fragment_path)
3948 u->unit_file_preset = unit_file_query_preset(
3949 u->manager->unit_file_scope,
3950 NULL,
3951 basename(u->fragment_path));
3952
3953 return u->unit_file_preset;
3954 }
3955
3956 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3957 assert(ref);
3958 assert(source);
3959 assert(target);
3960
3961 if (ref->target)
3962 unit_ref_unset(ref);
3963
3964 ref->source = source;
3965 ref->target = target;
3966 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3967 return target;
3968 }
3969
3970 void unit_ref_unset(UnitRef *ref) {
3971 assert(ref);
3972
3973 if (!ref->target)
3974 return;
3975
3976 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3977 * be unreferenced now. */
3978 unit_add_to_gc_queue(ref->target);
3979
3980 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3981 ref->source = ref->target = NULL;
3982 }
3983
3984 static int user_from_unit_name(Unit *u, char **ret) {
3985
3986 static const uint8_t hash_key[] = {
3987 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3988 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3989 };
3990
3991 _cleanup_free_ char *n = NULL;
3992 int r;
3993
3994 r = unit_name_to_prefix(u->id, &n);
3995 if (r < 0)
3996 return r;
3997
3998 if (valid_user_group_name(n)) {
3999 *ret = TAKE_PTR(n);
4000 return 0;
4001 }
4002
4003 /* If we can't use the unit name as a user name, then let's hash it and use that */
4004 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4005 return -ENOMEM;
4006
4007 return 0;
4008 }
4009
/* Applies manager-level defaults and cross-setting implications to the unit's exec and cgroup contexts.
 * Must run after all explicit settings have been loaded, since it only fills in what is still unset.
 * Returns 0 on success, -ENOMEM/negative errno on allocation or lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* Per-user managers default the working directory to the user's home. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping device-creation/raw-IO capabilities. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Pick a user name derived from the unit name if none was configured. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        /* Group defaults to the same name as the user. */
                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* PrivateDevices= tightens the default device policy for the cgroup as well. */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4085
4086 ExecContext *unit_get_exec_context(Unit *u) {
4087 size_t offset;
4088 assert(u);
4089
4090 if (u->type < 0)
4091 return NULL;
4092
4093 offset = UNIT_VTABLE(u)->exec_context_offset;
4094 if (offset <= 0)
4095 return NULL;
4096
4097 return (ExecContext*) ((uint8_t*) u + offset);
4098 }
4099
4100 KillContext *unit_get_kill_context(Unit *u) {
4101 size_t offset;
4102 assert(u);
4103
4104 if (u->type < 0)
4105 return NULL;
4106
4107 offset = UNIT_VTABLE(u)->kill_context_offset;
4108 if (offset <= 0)
4109 return NULL;
4110
4111 return (KillContext*) ((uint8_t*) u + offset);
4112 }
4113
4114 CGroupContext *unit_get_cgroup_context(Unit *u) {
4115 size_t offset;
4116
4117 if (u->type < 0)
4118 return NULL;
4119
4120 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4121 if (offset <= 0)
4122 return NULL;
4123
4124 return (CGroupContext*) ((uint8_t*) u + offset);
4125 }
4126
4127 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4128 size_t offset;
4129
4130 if (u->type < 0)
4131 return NULL;
4132
4133 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4134 if (offset <= 0)
4135 return NULL;
4136
4137 return *(ExecRuntime**) ((uint8_t*) u + offset);
4138 }
4139
4140 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4141 assert(u);
4142
4143 if (UNIT_WRITE_FLAGS_NOOP(flags))
4144 return NULL;
4145
4146 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4147 return u->manager->lookup_paths.transient;
4148
4149 if (flags & UNIT_PERSISTENT)
4150 return u->manager->lookup_paths.persistent_control;
4151
4152 if (flags & UNIT_RUNTIME)
4153 return u->manager->lookup_paths.runtime_control;
4154
4155 return NULL;
4156 }
4157
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret; /* continue from the specifier-escaped copy */
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                free(ret); /* drop the intermediate specifier-escaped copy, if any */
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s; /* fall back to the unmodified input if nothing was escaped */
        }

        return ret ?: strdup(s); /* no 'buf': always hand out an allocation the caller must free */
}
4197
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is double-quoted and entries are separated by a
         * single space. Returns the allocated string, or NULL on allocation/escape failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                /* 'p' may alias *i directly when no escaping was necessary (then 'buf' stays NULL). */
                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure we return an allocated (possibly empty) string, never NULL for an empty list. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4238
/* Persists one unit-file style setting string 'data' for unit 'u': while a transient unit file is being
 * generated it is appended there, otherwise a drop-in file derived from 'name' is written. Returns 0 on
 * success (or as a no-op), negative errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        /* NB: strjoina() allocates on the stack; 'data' may end up pointing into that storage, which is
         * fine since it is only used within this function. */
        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Record the new drop-in path on the unit; ownership of 'q' moves to the strv. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4315
4316 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4317 _cleanup_free_ char *p = NULL;
4318 va_list ap;
4319 int r;
4320
4321 assert(u);
4322 assert(name);
4323 assert(format);
4324
4325 if (UNIT_WRITE_FLAGS_NOOP(flags))
4326 return 0;
4327
4328 va_start(ap, format);
4329 r = vasprintf(&p, format, ap);
4330 va_end(ap);
4331
4332 if (r < 0)
4333 return -ENOMEM;
4334
4335 return unit_write_setting(u, flags, name, p);
4336 }
4337
/* Converts this unit into a transient one: creates its unit file in the transient lookup directory and
 * keeps it open in u->transient_file so subsequent unit_write_setting() calls append to it. Returns 0 on
 * success, -EOPNOTSUPP if the unit type cannot be transient, -ENOMEM/-errno on failure. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previously opened transient file handle. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget all prior on-disk configuration; the transient file is now the sole source. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Reset load state so the unit is reloaded from the transient file later. */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4383
4384 static void log_kill(pid_t pid, int sig, void *userdata) {
4385 _cleanup_free_ char *comm = NULL;
4386
4387 (void) get_process_comm(pid, &comm);
4388
4389 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4390 only, like for example systemd's own PAM stub process. */
4391 if (comm && comm[0] == '(')
4392 return;
4393
4394 log_unit_notice(userdata,
4395 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4396 pid,
4397 strna(comm),
4398 signal_to_string(sig));
4399 }
4400
4401 static int operation_to_signal(KillContext *c, KillOperation k) {
4402 assert(c);
4403
4404 switch (k) {
4405
4406 case KILL_TERMINATE:
4407 case KILL_TERMINATE_AND_LOG:
4408 return c->kill_signal;
4409
4410 case KILL_KILL:
4411 return SIGKILL;
4412
4413 case KILL_ABORT:
4414 return SIGABRT;
4415
4416 default:
4417 assert_not_reached("KillOperation unknown");
4418 }
4419 }
4420
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= applies only to graceful termination, and is pointless if SIGHUP is the kill
         * signal itself. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each kill unless this is a plain, quiet SIGTERM. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves; alien main PIDs are not our
                         * children and we get no SIGCHLD for them. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the rest of the cgroup, if the kill mode asks for it. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set and do a second, best-effort SIGHUP pass. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4538
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private, simplified copy of the path. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_kill_slashes(p);

        /* Paths with "." or ".." components are rejected outright. */
        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL; /* ownership of the path string moved into the hashmap */

        /* Also register this unit under every prefix of the path in the manager-wide table, so that newly
         * appearing mount units can find the units that depend on them. */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership moved into the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4616
4617 int unit_setup_exec_runtime(Unit *u) {
4618 ExecRuntime **rt;
4619 size_t offset;
4620 Unit *other;
4621 Iterator i;
4622 void *v;
4623 int r;
4624
4625 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4626 assert(offset > 0);
4627
4628 /* Check if there already is an ExecRuntime for this unit? */
4629 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4630 if (*rt)
4631 return 0;
4632
4633 /* Try to get it from somebody else */
4634 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4635 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4636 if (r == 1)
4637 return 1;
4638 }
4639
4640 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4641 }
4642
4643 int unit_setup_dynamic_creds(Unit *u) {
4644 ExecContext *ec;
4645 DynamicCreds *dcreds;
4646 size_t offset;
4647
4648 assert(u);
4649
4650 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4651 assert(offset > 0);
4652 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4653
4654 ec = unit_get_exec_context(u);
4655 assert(ec);
4656
4657 if (!ec->dynamic_user)
4658 return 0;
4659
4660 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4661 }
4662
4663 bool unit_type_supported(UnitType t) {
4664 if (_unlikely_(t < 0))
4665 return false;
4666 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4667 return false;
4668
4669 if (!unit_vtable[t]->supported)
4670 return true;
4671
4672 return unit_vtable[t]->supported();
4673 }
4674
/* Logs a notice if the directory 'where' (which is about to be mounted over) is not empty,
 * since mounting over it would hide its contents. Non-directories and unreadable paths are
 * silently tolerated. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = dir_is_empty(where);
        if (r > 0 || r == -ENOTDIR)
                return; /* Empty, or not a directory at all — nothing to warn about */
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where,
                   NULL);
}
4697
4698 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4699 _cleanup_free_ char *canonical_where;
4700 int r;
4701
4702 assert(u);
4703 assert(where);
4704
4705 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4706 if (r < 0) {
4707 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4708 return 0;
4709 }
4710
4711 /* We will happily ignore a trailing slash (or any redundant slashes) */
4712 if (path_equal(where, canonical_where))
4713 return 0;
4714
4715 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4716 log_struct(LOG_ERR,
4717 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4718 LOG_UNIT_ID(u),
4719 LOG_UNIT_INVOCATION_ID(u),
4720 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4721 "WHERE=%s", where,
4722 NULL);
4723
4724 return -ELOOP;
4725 }
4726
4727 bool unit_is_pristine(Unit *u) {
4728 assert(u);
4729
4730 /* Check if the unit already exists or is already around,
4731 * in a number of different ways. Note that to cater for unit
4732 * types such as slice, we are generally fine with units that
4733 * are marked UNIT_LOADED even though nothing was
4734 * actually loaded, as those unit types don't require a file
4735 * on disk to validly load. */
4736
4737 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4738 u->fragment_path ||
4739 u->source_path ||
4740 !strv_isempty(u->dropin_paths) ||
4741 u->job ||
4742 u->merged_into);
4743 }
4744
4745 pid_t unit_control_pid(Unit *u) {
4746 assert(u);
4747
4748 if (UNIT_VTABLE(u)->control_pid)
4749 return UNIT_VTABLE(u)->control_pid(u);
4750
4751 return 0;
4752 }
4753
4754 pid_t unit_main_pid(Unit *u) {
4755 assert(u);
4756
4757 if (UNIT_VTABLE(u)->main_pid)
4758 return UNIT_VTABLE(u)->main_pid(u);
4759
4760 return 0;
4761 }
4762
/* Shared implementation behind unit_unref_uid() and unit_unref_gid(): drops the unit's
 * reference on *ref_uid (if valid) via the given manager callback and invalidates it. */
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time proof that treating gid_t storage as uid_t is safe */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return; /* No reference held, nothing to drop */

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4787
/* Drops this unit's reference on its UID, if any; see unit_unref_uid_internal(). */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4791
/* Drops this unit's reference on its GID, if any. The uid_t cast is backed by the
 * assert_cc() size/validity checks in unit_unref_uid_internal(). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4795
/* Shared implementation behind unit_ref_uid() and unit_ref_gid(): records a reference on
 * 'uid' in *ref_uid via the given manager callback.
 *
 * Returns 0 if the reference was already held, 1 if newly taken, -EBUSY if a different
 * UID/GID is already referenced, or an error from the manager callback. */
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        /* Compile-time proof that treating gid_t storage as uid_t is safe */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0; /* Already referencing exactly this UID/GID */

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4833
/* Takes a reference on 'uid' for this unit; see unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4837
/* Takes a reference on 'gid' for this unit. The uid_t casts are backed by the assert_cc()
 * checks in unit_ref_uid_internal(). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4841
4842 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4843 int r = 0, q = 0;
4844
4845 assert(u);
4846
4847 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4848
4849 if (uid_is_valid(uid)) {
4850 r = unit_ref_uid(u, uid, clean_ipc);
4851 if (r < 0)
4852 return r;
4853 }
4854
4855 if (gid_is_valid(gid)) {
4856 q = unit_ref_gid(u, gid, clean_ipc);
4857 if (q < 0) {
4858 if (r > 0)
4859 unit_unref_uid(u, false);
4860
4861 return q;
4862 }
4863 }
4864
4865 return r > 0 || q > 0;
4866 }
4867
/* Takes references on a UID/GID pair for this unit, honoring the unit's RemoveIPC= setting
 * (if it has an exec context). Failures are logged and returned, but are considered
 * non-fatal by callers. */
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        return r;
}
4882
/* Drops both the UID and GID references of this unit, if held. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4889
/* Called when a forked-off child reports the UID/GID its configured user/group names
 * resolved to; records the reference and announces the change on the bus. */
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4903
/* Sets (or clears, if 'id' is the null ID) the unit's invocation ID and keeps the
 * manager's units_by_invocation_id index in sync.
 *
 * Returns 0 on success. On failure the invocation ID is reset to null rather than
 * rolled back to its previous value. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old index entry first, so the map never points at a stale ID */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Note: the hashmap key aliases u->invocation_id itself, hence it must be set before this put */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4940
/* Generates a fresh random invocation ID and installs it on the unit.
 * Returns 0 on success, a negative errno (already logged) on failure. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
4957
/* Fills in the ExecParameters structure 'p' with settings derived from the manager and
 * from this unit, in preparation for spawning a process for it. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copy parameters from manager */
        p->environment = u->manager->environment;
        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
}
4973
/* Forks off a helper process and makes sure it is a member of the unit's cgroup.
 *
 * Returns 0 in the child and > 0 in the parent; *ret is always filled in with the child's
 * PID. On cgroup attach failure the child exits with EXIT_CGROUP. */
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r; /* Parent (r > 0) or fork error (r < 0) */

        /* Child: reset signal dispositions inherited from PID 1 and die with the manager */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5004
/* Applies an updated dependency-mask entry for the dependency of type 'd' from 'u' on
 * 'other': drops the hashmap entry entirely when both masks became zero, otherwise stores
 * the reduced mask. The entry is expected to exist (asserted). */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
5019
/* Removes from 'u' all dependencies whose ownership bits are covered by 'mask', together
 * with the matching reverse dependencies on the other units involved. */
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are
                 * iterating, which invalidates the iterator — hence restart the iteration
                 * from scratch after every modification, until a full pass changes nothing. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries that carry no bits from 'mask' */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become unreferenced, let GC decide */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5073
/* Exports the unit's invocation ID as a symlink under /run/systemd/units/, so that
 * journald can read it cheaply. No-op if already exported or if no ID is set. */
static int unit_export_invocation_id(Unit *u) {
        const char *p;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        /* The symlink target carries the data: its "destination" is the ID string itself */
        p = strjoina("/run/systemd/units/invocation:", u->id);
        r = symlink_atomic(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5094
/* Exports the unit's LogLevelMax= setting (a single digit 0-7) as a symlink under
 * /run/systemd/units/. No-op if already exported or the setting is unset (< 0). */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_level_max)
                return 0;

        if (c->log_level_max < 0)
                return 0;

        /* syslog levels are 0..7, hence a single ASCII digit suffices */
        assert(c->log_level_max <= 7);

        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5122
5123 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5124 _cleanup_close_ int fd = -1;
5125 struct iovec *iovec;
5126 const char *p;
5127 char *pattern;
5128 le64_t *sizes;
5129 ssize_t n;
5130 size_t i;
5131 int r;
5132
5133 if (u->exported_log_extra_fields)
5134 return 0;
5135
5136 if (c->n_log_extra_fields <= 0)
5137 return 0;
5138
5139 sizes = newa(le64_t, c->n_log_extra_fields);
5140 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5141
5142 for (i = 0; i < c->n_log_extra_fields; i++) {
5143 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5144
5145 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5146 iovec[i*2+1] = c->log_extra_fields[i];
5147 }
5148
5149 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5150 pattern = strjoina(p, ".XXXXXX");
5151
5152 fd = mkostemp_safe(pattern);
5153 if (fd < 0)
5154 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5155
5156 n = writev(fd, iovec, c->n_log_extra_fields*2);
5157 if (n < 0) {
5158 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5159 goto fail;
5160 }
5161
5162 (void) fchmod(fd, 0644);
5163
5164 if (rename(pattern, p) < 0) {
5165 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5166 goto fail;
5167 }
5168
5169 u->exported_log_extra_fields = true;
5170 return 0;
5171
5172 fail:
5173 (void) unlink(pattern);
5174 return r;
5175 }
5176
/* Exports this unit's journald-relevant properties (invocation ID, max log level, extra
 * log fields) to /run/systemd/units/. Only done for the system instance; all failures are
 * best-effort and ignored. */
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5209
5210 void unit_unlink_state_files(Unit *u) {
5211 const char *p;
5212
5213 assert(u);
5214
5215 if (!u->id)
5216 return;
5217
5218 if (!MANAGER_IS_SYSTEM(u->manager))
5219 return;
5220
5221 /* Undoes the effect of unit_export_state() */
5222
5223 if (u->exported_invocation_id) {
5224 p = strjoina("/run/systemd/units/invocation:", u->id);
5225 (void) unlink(p);
5226
5227 u->exported_invocation_id = false;
5228 }
5229
5230 if (u->exported_log_level_max) {
5231 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5232 (void) unlink(p);
5233
5234 u->exported_log_level_max = false;
5235 }
5236
5237 if (u->exported_log_extra_fields) {
5238 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5239 (void) unlink(p);
5240
5241 u->exported_log_extra_fields = false;
5242 }
5243 }
5244
/* Prepares everything needed before a process can be forked off for this unit: realizes
 * the cgroup, resets accounting if requested, exports the journald state files, and sets
 * up the exec runtime and dynamic credentials. Returns 0 on success, negative errno on
 * failure. */
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5272
/* cg_kill_recursive() callback: warns about a process found lingering in the unit's cgroup
 * at startup. 'userdata' is the Unit*. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5286
/* Logs a warning for every stray process found in the unit's cgroup. Calling
 * cg_kill_recursive() with signal 0 only enumerates the processes (via log_leftover),
 * it does not kill anything. */
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5297
5298 bool unit_needs_console(Unit *u) {
5299 ExecContext *ec;
5300 UnitActiveState state;
5301
5302 assert(u);
5303
5304 state = unit_active_state(u);
5305
5306 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5307 return false;
5308
5309 if (UNIT_VTABLE(u)->needs_console)
5310 return UNIT_VTABLE(u)->needs_console(u);
5311
5312 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5313 ec = unit_get_exec_context(u);
5314 if (!ec)
5315 return false;
5316
5317 return exec_context_may_touch_console(ec);
5318 }
5319
/* Returns the file system path to use for MAC access decisions, i.e. the file to read the
 * SELinux label off when validating access checks, or NULL if there is none. */
const char *unit_label_path(Unit *u) {
        const char *p;

        /* Prefer the source path (for generated units) over the fragment path */
        p = u->source_path ?: u->fragment_path;
        if (!p)
                return NULL;

        /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
        if (path_equal(p, "/dev/null"))
                return NULL;

        return p;
}
5336
5337 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5338 int r;
5339
5340 assert(u);
5341
5342 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5343 * and not a kernel thread either */
5344
5345 /* First, a simple range check */
5346 if (!pid_is_valid(pid))
5347 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5348
5349 /* Some extra safety check */
5350 if (pid == 1 || pid == getpid_cached())
5351 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager processs, refusing.", pid);
5352
5353 /* Don't even begin to bother with kernel threads */
5354 r = is_kernel_thread(pid);
5355 if (r == -ESRCH)
5356 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5357 if (r < 0)
5358 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5359 if (r > 0)
5360 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5361
5362 return 0;
5363 }
5364
/* Mapping between CollectMode enum values and their configuration-file string names,
 * used by the generated collect_mode_to_string()/collect_mode_from_string() helpers. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);