]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
core: include unit name in emergency_action() reason message
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "id128-util.h"
28 #include "io-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
31 #include "log.h"
32 #include "macro.h"
33 #include "missing.h"
34 #include "mkdir.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
39 #include "set.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
42 #include "special.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
48 #include "strv.h"
49 #include "umask-util.h"
50 #include "unit-name.h"
51 #include "unit.h"
52 #include "user-util.h"
53 #include "virt.h"
54
/* Per-type implementation table, indexed by UnitType. Each concrete unit type
 * (service, socket, ...) contributes its vtable here; code in this file
 * dispatches through it via UNIT_VTABLE(). */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
68
69 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
70
/* Allocates and zero-initializes a new Unit object of 'size' bytes. 'size' must
 * be at least sizeof(Unit), so that type-specific unit objects can embed Unit
 * as their first member. Returns NULL on allocation failure; on success the
 * caller owns the returned object. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        /* The set of all names (aliases) this unit is known under. */
        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID; /* Fixed later, when the first name is added. */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;

        /* -1 marks the BPF accounting/filter map fds as "not set up". */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start-limit follows the manager's defaults; auto-stop is fixed at 16 per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
113
114 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
115 _cleanup_(unit_freep) Unit *u = NULL;
116 int r;
117
118 u = unit_new(m, size);
119 if (!u)
120 return -ENOMEM;
121
122 r = unit_add_name(u, name);
123 if (r < 0)
124 return r;
125
126 *ret = TAKE_PTR(u);
127
128 return r;
129 }
130
131 bool unit_has_name(Unit *u, const char *name) {
132 assert(u);
133 assert(name);
134
135 return set_contains(u->names, (char*) name);
136 }
137
138 static void unit_init(Unit *u) {
139 CGroupContext *cc;
140 ExecContext *ec;
141 KillContext *kc;
142
143 assert(u);
144 assert(u->manager);
145 assert(u->type >= 0);
146
147 cc = unit_get_cgroup_context(u);
148 if (cc) {
149 cgroup_context_init(cc);
150
151 /* Copy in the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
154
155 cc->cpu_accounting = u->manager->default_cpu_accounting;
156 cc->io_accounting = u->manager->default_io_accounting;
157 cc->ip_accounting = u->manager->default_ip_accounting;
158 cc->blockio_accounting = u->manager->default_blockio_accounting;
159 cc->memory_accounting = u->manager->default_memory_accounting;
160 cc->tasks_accounting = u->manager->default_tasks_accounting;
161 cc->ip_accounting = u->manager->default_ip_accounting;
162
163 if (u->type != UNIT_SLICE)
164 cc->tasks_max = u->manager->default_tasks_max;
165 }
166
167 ec = unit_get_exec_context(u);
168 if (ec) {
169 exec_context_init(ec);
170
171 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
172 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
173 }
174
175 kc = unit_get_kill_context(u);
176 if (kc)
177 kill_context_init(kc);
178
179 if (UNIT_VTABLE(u)->init)
180 UNIT_VTABLE(u)->init(u);
181 }
182
/* Registers an additional name (alias) for the unit and enters it into the
 * manager's unit table. A template name is instantiated with the unit's
 * instance string first. The first name successfully added fixes the unit's
 * type, id and instance, and triggers type-specific initialization. Returns 0
 * on success (also when the name was already set on this unit), negative errno
 * otherwise. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template can only be instantiated if our instance string is known. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Name already registered on this very unit? Nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Name taken by some other unit? */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* Instantiated names are only allowed for types that support templating. */
        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        /* A second name is only permitted for types that support aliases. */
        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both tables stay consistent. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* This is the very first name: it determines type, id and instance. */
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of the name string was handed to u->names/the units table. */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
265
/* Makes 'name' — which must already be one of the unit's registered names —
 * the unit's primary id, and re-derives the instance string from it. A
 * template name is instantiated with the current instance string first.
 * Returns -ENOENT if the name is not registered on this unit. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* Note: u->id points into the names set, no ownership transfer here. */
        u->id = s;

        /* The instance string, by contrast, is owned by the unit. */
        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
305
306 int unit_set_description(Unit *u, const char *description) {
307 int r;
308
309 assert(u);
310
311 r = free_and_strdup(&u->description, empty_to_null(description));
312 if (r < 0)
313 return r;
314 if (r > 0)
315 unit_add_to_dbus_queue(u);
316
317 return 0;
318 }
319
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending jobs always pin the unit. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected. */
        if (u->perpetual)
                return false;

        /* Keep the unit as long as D-Bus clients track it. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* Note: on error (r < 0) we err on the safe side and keep the unit. */
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type implementation a veto. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
388
389 void unit_add_to_load_queue(Unit *u) {
390 assert(u);
391 assert(u->type != _UNIT_TYPE_INVALID);
392
393 if (u->load_state != UNIT_STUB || u->in_load_queue)
394 return;
395
396 LIST_PREPEND(load_queue, u->manager->load_queue, u);
397 u->in_load_queue = true;
398 }
399
400 void unit_add_to_cleanup_queue(Unit *u) {
401 assert(u);
402
403 if (u->in_cleanup_queue)
404 return;
405
406 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
407 u->in_cleanup_queue = true;
408 }
409
410 void unit_add_to_gc_queue(Unit *u) {
411 assert(u);
412
413 if (u->in_gc_queue || u->in_cleanup_queue)
414 return;
415
416 if (!unit_may_gc(u))
417 return;
418
419 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
420 u->in_gc_queue = true;
421 }
422
423 void unit_add_to_dbus_queue(Unit *u) {
424 assert(u);
425 assert(u->type != _UNIT_TYPE_INVALID);
426
427 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
428 return;
429
430 /* Shortcut things if nobody cares */
431 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
432 sd_bus_track_count(u->bus_track) <= 0 &&
433 set_isempty(u->manager->private_buses)) {
434 u->sent_dbus_new_signal = true;
435 return;
436 }
437
438 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
439 u->in_dbus_queue = true;
440 }
441
442 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
443 assert(u);
444
445 if (u->in_stop_when_unneeded_queue)
446 return;
447
448 if (!u->stop_when_unneeded)
449 return;
450
451 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
452 return;
453
454 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
455 u->in_stop_when_unneeded_queue = true;
456 }
457
458 static void bidi_set_free(Unit *u, Hashmap *h) {
459 Unit *other;
460 Iterator i;
461 void *v;
462
463 assert(u);
464
465 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
466
467 HASHMAP_FOREACH_KEY(v, other, h, i) {
468 UnitDependency d;
469
470 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
471 hashmap_remove(other->dependencies[d], u);
472
473 unit_add_to_gc_queue(other);
474 }
475
476 hashmap_free(h);
477 }
478
/* Deletes the on-disk configuration of a transient unit: its fragment file,
 * plus any drop-in files that live below the transient lookup path (and their
 * per-unit directories). Non-transient units are left untouched; all removals
 * are best-effort. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* Best effort; fails harmlessly if not yet empty. */
        }
}
509
/* Removes this unit from the manager's units_requiring_mounts_for index and
 * releases the unit's own requires_mounts_for table. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* The manager's index is keyed by every prefix of the
                         * path, hence walk all prefixes and drop us from each
                         * bucket we appear in. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Drop the bucket (and its owned key string) once it became empty. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
543
544 static void unit_done(Unit *u) {
545 ExecContext *ec;
546 CGroupContext *cc;
547
548 assert(u);
549
550 if (u->type < 0)
551 return;
552
553 if (UNIT_VTABLE(u)->done)
554 UNIT_VTABLE(u)->done(u);
555
556 ec = unit_get_exec_context(u);
557 if (ec)
558 exec_context_done(ec);
559
560 cc = unit_get_cgroup_context(u);
561 if (cc)
562 cgroup_context_done(cc);
563 }
564
/* Frees a unit object: detaches it from all manager tables, queues and jobs,
 * severs dependency links in both directions, releases cgroup/BPF resources
 * and — unless the manager is reloading — removes transient and state files.
 * Safe to call with NULL. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* While reloading, keep the transient unit files around; only delete
         * them on a real removal. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's unit table. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any jobs still attached to this unit. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Release the dependency maps, fixing up the inverse links too. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Sever references to and from other units. */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Remove us from every manager queue we may still be sitting on. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close the BPF map fds; these start out as -1 in unit_new(). */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id points into this set, so this also releases the id string. */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
690
691 UnitActiveState unit_active_state(Unit *u) {
692 assert(u);
693
694 if (u->load_state == UNIT_MERGED)
695 return unit_active_state(unit_follow_merge(u));
696
697 /* After a reload it might happen that a unit is not correctly
698 * loaded but still has a process around. That's why we won't
699 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
700
701 return UNIT_VTABLE(u)->active_state(u);
702 }
703
/* Returns the low-level, type-specific sub-state string of the unit, via the
 * type's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
709
710 static int set_complete_move(Set **s, Set **other) {
711 assert(s);
712 assert(other);
713
714 if (!other)
715 return 0;
716
717 if (*s)
718 return set_move(*s, *other);
719 else
720 *s = TAKE_PTR(*other);
721
722 return 0;
723 }
724
725 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
726 assert(s);
727 assert(other);
728
729 if (!*other)
730 return 0;
731
732 if (*s)
733 return hashmap_move(*s, *other);
734 else
735 *s = TAKE_PTR(*other);
736
737 return 0;
738 }
739
/* Moves all names of 'other' over to 'u' and repoints the manager's unit table
 * entries for those names at 'u'. After this 'other' has no names and no id
 * anymore. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Free whatever remained behind in other's set (presumably names u
         * already had). The id string was owned by that set, too. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Make every one of our names map to u in the manager's unit table. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
761
762 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
763 unsigned n_reserve;
764
765 assert(u);
766 assert(other);
767 assert(d < _UNIT_DEPENDENCY_MAX);
768
769 /*
770 * If u does not have this dependency set allocated, there is no need
771 * to reserve anything. In that case other's set will be transferred
772 * as a whole to u by complete_move().
773 */
774 if (!u->dependencies[d])
775 return 0;
776
777 /* merge_dependencies() will skip a u-on-u dependency */
778 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
779
780 return hashmap_reserve(u->dependencies[d], n_reserve);
781 }
782
/* Moves all dependencies of type 'd' from 'other' into 'u', fixing up the
 * inverse pointers of the affected peer units so they point at 'u' instead of
 * 'other'. 'other_id' is the (copied) old id of 'other', used for logging. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of those dependent units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* The reservation done by the caller guarantees this cannot fail. */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
846
/* Merges the unit 'other' into 'u': all of other's names and dependencies are
 * transferred over, and 'other' is marked UNIT_MERGED, pointing at 'u'. Only
 * units of the same type that are not yet loaded, have no jobs and are
 * inactive/failed may be merged away. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both units must agree on whether they are instantiated. */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        /* Units with pending jobs or that are still active cannot be merged away. */
        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of the old id for logging: merge_names() clears it. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was just set to UNIT_MERGED above, so this
         * condition is always true — likely meant to test the state from
         * before the assignment. Harmless either way; confirm intent. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
924
925 int unit_merge_by_name(Unit *u, const char *name) {
926 _cleanup_free_ char *s = NULL;
927 Unit *other;
928 int r;
929
930 assert(u);
931 assert(name);
932
933 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
934 if (!u->instance)
935 return -EINVAL;
936
937 r = unit_name_replace_instance(name, u->instance, &s);
938 if (r < 0)
939 return r;
940
941 name = s;
942 }
943
944 other = manager_get_unit(u->manager, name);
945 if (other)
946 return unit_merge(u, other);
947
948 return unit_add_name(u, name);
949 }
950
951 Unit* unit_follow_merge(Unit *u) {
952 assert(u);
953
954 while (u->load_state == UNIT_MERGED)
955 assert_se(u = u->merged_into);
956
957 return u;
958 }
959
/* Adds the implicit dependencies that follow from an ExecContext 'c': mount
 * dependencies for the working/root directories and the configured exec
 * directories, plus (system manager only) ordering against tmpfiles-setup for
 * PrivateTmp= and against journald when logging goes through it. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Each configured exec directory lives below the manager's prefix for
         * its type; require the mounts backing those paths. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The dependencies below only apply to the system instance. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1039
1040 const char *unit_description(Unit *u) {
1041 assert(u);
1042
1043 if (u->description)
1044 return u->description;
1045
1046 return strna(u->id);
1047 }
1048
1049 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1050 const struct {
1051 UnitDependencyMask mask;
1052 const char *name;
1053 } table[] = {
1054 { UNIT_DEPENDENCY_FILE, "file" },
1055 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1056 { UNIT_DEPENDENCY_DEFAULT, "default" },
1057 { UNIT_DEPENDENCY_UDEV, "udev" },
1058 { UNIT_DEPENDENCY_PATH, "path" },
1059 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1060 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1061 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1062 };
1063 size_t i;
1064
1065 assert(f);
1066 assert(kind);
1067 assert(space);
1068
1069 for (i = 0; i < ELEMENTSOF(table); i++) {
1070
1071 if (mask == 0)
1072 break;
1073
1074 if (FLAGS_SET(mask, table[i].mask)) {
1075 if (*space)
1076 fputc(' ', f);
1077 else
1078 *space = true;
1079
1080 fputs(kind, f);
1081 fputs("-", f);
1082 fputs(table[i].name, f);
1083
1084 mask &= ~table[i].mask;
1085 }
1086 }
1087
1088 assert(mask == 0);
1089 }
1090
1091 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1092 char *t, **j;
1093 UnitDependency d;
1094 Iterator i;
1095 const char *prefix2;
1096 char
1097 timestamp0[FORMAT_TIMESTAMP_MAX],
1098 timestamp1[FORMAT_TIMESTAMP_MAX],
1099 timestamp2[FORMAT_TIMESTAMP_MAX],
1100 timestamp3[FORMAT_TIMESTAMP_MAX],
1101 timestamp4[FORMAT_TIMESTAMP_MAX],
1102 timespan[FORMAT_TIMESPAN_MAX];
1103 Unit *following;
1104 _cleanup_set_free_ Set *following_set = NULL;
1105 const char *n;
1106 CGroupMask m;
1107 int r;
1108
1109 assert(u);
1110 assert(u->type >= 0);
1111
1112 prefix = strempty(prefix);
1113 prefix2 = strjoina(prefix, "\t");
1114
1115 fprintf(f,
1116 "%s-> Unit %s:\n"
1117 "%s\tDescription: %s\n"
1118 "%s\tInstance: %s\n"
1119 "%s\tUnit Load State: %s\n"
1120 "%s\tUnit Active State: %s\n"
1121 "%s\tState Change Timestamp: %s\n"
1122 "%s\tInactive Exit Timestamp: %s\n"
1123 "%s\tActive Enter Timestamp: %s\n"
1124 "%s\tActive Exit Timestamp: %s\n"
1125 "%s\tInactive Enter Timestamp: %s\n"
1126 "%s\tMay GC: %s\n"
1127 "%s\tNeed Daemon Reload: %s\n"
1128 "%s\tTransient: %s\n"
1129 "%s\tPerpetual: %s\n"
1130 "%s\tGarbage Collection Mode: %s\n"
1131 "%s\tSlice: %s\n"
1132 "%s\tCGroup: %s\n"
1133 "%s\tCGroup realized: %s\n",
1134 prefix, u->id,
1135 prefix, unit_description(u),
1136 prefix, strna(u->instance),
1137 prefix, unit_load_state_to_string(u->load_state),
1138 prefix, unit_active_state_to_string(unit_active_state(u)),
1139 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1140 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1141 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1142 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1143 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1144 prefix, yes_no(unit_may_gc(u)),
1145 prefix, yes_no(unit_need_daemon_reload(u)),
1146 prefix, yes_no(u->transient),
1147 prefix, yes_no(u->perpetual),
1148 prefix, collect_mode_to_string(u->collect_mode),
1149 prefix, strna(unit_slice_name(u)),
1150 prefix, strna(u->cgroup_path),
1151 prefix, yes_no(u->cgroup_realized));
1152
1153 if (u->cgroup_realized_mask != 0) {
1154 _cleanup_free_ char *s = NULL;
1155 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1156 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1157 }
1158 if (u->cgroup_enabled_mask != 0) {
1159 _cleanup_free_ char *s = NULL;
1160 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1161 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1162 }
1163 m = unit_get_own_mask(u);
1164 if (m != 0) {
1165 _cleanup_free_ char *s = NULL;
1166 (void) cg_mask_to_string(m, &s);
1167 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1168 }
1169 m = unit_get_members_mask(u);
1170 if (m != 0) {
1171 _cleanup_free_ char *s = NULL;
1172 (void) cg_mask_to_string(m, &s);
1173 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1174 }
1175
1176 SET_FOREACH(t, u->names, i)
1177 fprintf(f, "%s\tName: %s\n", prefix, t);
1178
1179 if (!sd_id128_is_null(u->invocation_id))
1180 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1181 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1182
1183 STRV_FOREACH(j, u->documentation)
1184 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1185
1186 following = unit_following(u);
1187 if (following)
1188 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1189
1190 r = unit_following_set(u, &following_set);
1191 if (r >= 0) {
1192 Unit *other;
1193
1194 SET_FOREACH(other, following_set, i)
1195 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1196 }
1197
1198 if (u->fragment_path)
1199 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1200
1201 if (u->source_path)
1202 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1203
1204 STRV_FOREACH(j, u->dropin_paths)
1205 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1206
1207 if (u->failure_action != EMERGENCY_ACTION_NONE)
1208 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1209 if (u->success_action != EMERGENCY_ACTION_NONE)
1210 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1211
1212 if (u->job_timeout != USEC_INFINITY)
1213 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1214
1215 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1216 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1217
1218 if (u->job_timeout_reboot_arg)
1219 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1220
1221 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1222 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1223
1224 if (dual_timestamp_is_set(&u->condition_timestamp))
1225 fprintf(f,
1226 "%s\tCondition Timestamp: %s\n"
1227 "%s\tCondition Result: %s\n",
1228 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1229 prefix, yes_no(u->condition_result));
1230
1231 if (dual_timestamp_is_set(&u->assert_timestamp))
1232 fprintf(f,
1233 "%s\tAssert Timestamp: %s\n"
1234 "%s\tAssert Result: %s\n",
1235 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1236 prefix, yes_no(u->assert_result));
1237
1238 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1239 UnitDependencyInfo di;
1240 Unit *other;
1241
1242 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1243 bool space = false;
1244
1245 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1246
1247 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1248 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1249
1250 fputs(")\n", f);
1251 }
1252 }
1253
1254 if (!hashmap_isempty(u->requires_mounts_for)) {
1255 UnitDependencyInfo di;
1256 const char *path;
1257
1258 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1259 bool space = false;
1260
1261 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1262
1263 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1264 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1265
1266 fputs(")\n", f);
1267 }
1268 }
1269
1270 if (u->load_state == UNIT_LOADED) {
1271
1272 fprintf(f,
1273 "%s\tStopWhenUnneeded: %s\n"
1274 "%s\tRefuseManualStart: %s\n"
1275 "%s\tRefuseManualStop: %s\n"
1276 "%s\tDefaultDependencies: %s\n"
1277 "%s\tOnFailureJobMode: %s\n"
1278 "%s\tIgnoreOnIsolate: %s\n",
1279 prefix, yes_no(u->stop_when_unneeded),
1280 prefix, yes_no(u->refuse_manual_start),
1281 prefix, yes_no(u->refuse_manual_stop),
1282 prefix, yes_no(u->default_dependencies),
1283 prefix, job_mode_to_string(u->on_failure_job_mode),
1284 prefix, yes_no(u->ignore_on_isolate));
1285
1286 if (UNIT_VTABLE(u)->dump)
1287 UNIT_VTABLE(u)->dump(u, f, prefix2);
1288
1289 } else if (u->load_state == UNIT_MERGED)
1290 fprintf(f,
1291 "%s\tMerged into: %s\n",
1292 prefix, u->merged_into->id);
1293 else if (u->load_state == UNIT_ERROR)
1294 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1295
1296 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1297 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1298
1299 if (u->job)
1300 job_dump(u->job, f, prefix2);
1301
1302 if (u->nop_job)
1303 job_dump(u->nop_job, f, prefix2);
1304 }
1305
/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        /* The fragment is mandatory here: if loading left the unit in stub state, no unit file was found. */
        if (u->load_state == UNIT_STUB)
                return -ENOENT;

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drops-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}
1326
/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin_optional(Unit *u) {
        int r;

        assert(u);

        /* Same as unit_load_fragment_and_dropin(), but whether
         * something can be loaded or not doesn't matter. */

        /* Load a .service/.socket/.slice/… file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        /* No fragment found? That's fine here — treat the unit as loaded anyway (fragment is optional). */
        if (u->load_state == UNIT_STUB)
                u->load_state = UNIT_LOADED;

        /* Load drop-in directory data */
        return unit_load_dropin(unit_follow_merge(u));
}
1347
1348 void unit_add_to_target_deps_queue(Unit *u) {
1349 Manager *m = u->manager;
1350
1351 assert(u);
1352
1353 if (u->in_target_deps_queue)
1354 return;
1355
1356 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1357 u->in_target_deps_queue = true;
1358 }
1359
1360 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1361 assert(u);
1362 assert(target);
1363
1364 if (target->type != UNIT_TARGET)
1365 return 0;
1366
1367 /* Only add the dependency if both units are loaded, so that
1368 * that loop check below is reliable */
1369 if (u->load_state != UNIT_LOADED ||
1370 target->load_state != UNIT_LOADED)
1371 return 0;
1372
1373 /* If either side wants no automatic dependencies, then let's
1374 * skip this */
1375 if (!u->default_dependencies ||
1376 !target->default_dependencies)
1377 return 0;
1378
1379 /* Don't create loops */
1380 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1381 return 0;
1382
1383 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1384 }
1385
/* Add the After=/Requires= dependencies that tie a cgroup-managing unit to its parent slice. */
static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        /* Only units that carry a cgroup context can sit in a slice at all. */
        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        /* The root slice has no parent — nothing to hook up. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        /* No explicit slice configured: fall back to attaching below the root slice. */
        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}
1406
/* For every path in RequiresMountsFor=, add After= (and, for units backed by a unit file, Requires=)
 * dependencies on the .mount units covering the path and all of its parent directories. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk all prefixes of the path ("/", "/a", "/a/b", …, and the path itself). */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only add a hard requirement when the mount has an actual unit file behind it —
                         * mounts synthesized from /proc/self/mountinfo only get the ordering. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1456
1457 static int unit_add_startup_units(Unit *u) {
1458 CGroupContext *c;
1459 int r;
1460
1461 c = unit_get_cgroup_context(u);
1462 if (!c)
1463 return 0;
1464
1465 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1466 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1467 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1468 return 0;
1469
1470 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1471 if (r < 0)
1472 return r;
1473
1474 return set_put(u->manager->startup_units, u);
1475 }
1476
/* Fully load a unit: flush transient definitions, invoke the type-specific loader, and wire up the
 * implicit dependencies (slice, mounts, startup set). On failure the unit is moved into
 * UNIT_NOT_FOUND/UNIT_BAD_SETTING/UNIT_ERROR state and the error is stored in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading it now; drop it from the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or failed/merged)? Then there is nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* For transient units: close out the unit file we generated, so the loader below reads it. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after the loader ran? Then no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* isolate-mode OnFailure= only makes sense with a single target. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set exactly when the load state is UNIT_MERGED. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1561
1562 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1563 Condition *c;
1564 int triggered = -1;
1565
1566 assert(u);
1567 assert(to_string);
1568
1569 /* If the condition list is empty, then it is true */
1570 if (!first)
1571 return true;
1572
1573 /* Otherwise, if all of the non-trigger conditions apply and
1574 * if any of the trigger conditions apply (unless there are
1575 * none) we return true */
1576 LIST_FOREACH(conditions, c, first) {
1577 int r;
1578
1579 r = condition_test(c);
1580 if (r < 0)
1581 log_unit_warning(u,
1582 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1583 to_string(c->type),
1584 c->trigger ? "|" : "",
1585 c->negate ? "!" : "",
1586 c->parameter);
1587 else
1588 log_unit_debug(u,
1589 "%s=%s%s%s %s.",
1590 to_string(c->type),
1591 c->trigger ? "|" : "",
1592 c->negate ? "!" : "",
1593 c->parameter,
1594 condition_result_to_string(c->result));
1595
1596 if (!c->trigger && r <= 0)
1597 return false;
1598
1599 if (c->trigger && triggered <= 0)
1600 triggered = r > 0;
1601 }
1602
1603 return triggered != 0;
1604 }
1605
1606 static bool unit_condition_test(Unit *u) {
1607 assert(u);
1608
1609 dual_timestamp_get(&u->condition_timestamp);
1610 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1611
1612 return u->condition_result;
1613 }
1614
1615 static bool unit_assert_test(Unit *u) {
1616 assert(u);
1617
1618 dual_timestamp_get(&u->assert_timestamp);
1619 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1620
1621 return u->assert_result;
1622 }
1623
/* Emit a console status line for this unit; the table-provided (hence non-literal) format string
 * gets the unit's description substituted in. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1629
1630 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1631 const char *format;
1632 const UnitStatusMessageFormats *format_table;
1633
1634 assert(u);
1635 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1636
1637 if (t != JOB_RELOAD) {
1638 format_table = &UNIT_VTABLE(u)->status_message_formats;
1639 if (format_table) {
1640 format = format_table->starting_stopping[t == JOB_STOP];
1641 if (format)
1642 return format;
1643 }
1644 }
1645
1646 /* Return generic strings */
1647 if (t == JOB_START)
1648 return "Starting %s.";
1649 else if (t == JOB_STOP)
1650 return "Stopping %s.";
1651 else
1652 return "Reloading %s.";
1653 }
1654
/* Print a "Starting …"/"Stopping …" status line to the console for the given job type. */
static void unit_status_print_starting_stopping(Unit *u, JobType t) {
        const char *format;

        assert(u);

        /* Reload status messages have traditionally not been printed to console. */
        if (!IN_SET(t, JOB_START, JOB_STOP))
                return;

        format = unit_get_status_message_format(u, t);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, "", format);
        REENABLE_WARNING;
}
1670
/* Log a starting/stopping/reloading message for the unit to the journal (with the matching
 * message ID), unless logging already goes to the console where the status output shows it. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* If log output goes to the console anyway, don't duplicate the status line there. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the catalog message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid);
}
1707
/* Emit the starting/stopping/reloading status for the unit both to the journal and to the console. */
void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        unit_status_log_starting_stopping_reloading(u, t);
        unit_status_print_starting_stopping(u, t);
}
1716
1717 int unit_start_limit_test(Unit *u) {
1718 const char *reason;
1719
1720 assert(u);
1721
1722 if (ratelimit_below(&u->start_limit)) {
1723 u->start_limit_hit = false;
1724 return 0;
1725 }
1726
1727 log_unit_warning(u, "Start request repeated too quickly.");
1728 u->start_limit_hit = true;
1729
1730 reason = strjoina("unit ", u->id, " failed");
1731
1732 return emergency_action(u->manager, u->start_limit_action,
1733 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1734 u->reboot_arg, reason);
1735 }
1736
1737 bool unit_shall_confirm_spawn(Unit *u) {
1738 assert(u);
1739
1740 if (manager_is_confirm_spawn_disabled(u->manager))
1741 return false;
1742
1743 /* For some reasons units remaining in the same process group
1744 * as PID 1 fail to acquire the console even if it's not used
1745 * by any process. So skip the confirmation question for them. */
1746 return !unit_get_exec_context(u)->same_pgrp;
1747 }
1748
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Only enforce BindsTo= when it is paired with an After= ordering on the same unit. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1774
/* Start the unit (or the unit it redirects to). Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started (or its conditions failed, see below).
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1861
1862 bool unit_can_start(Unit *u) {
1863 assert(u);
1864
1865 if (u->load_state != UNIT_LOADED)
1866 return false;
1867
1868 if (!unit_supported(u))
1869 return false;
1870
1871 /* Scope units may be started only once */
1872 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1873 return false;
1874
1875 return !!UNIT_VTABLE(u)->start;
1876 }
1877
1878 bool unit_can_isolate(Unit *u) {
1879 assert(u);
1880
1881 return unit_can_start(u) &&
1882 u->allow_isolate;
1883 }
1884
/* Errors:
 * -EBADR: This unit type does not support stopping.
 * -EALREADY: Unit is already stopped.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        /* Forward the request to the main object if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}
1913
1914 bool unit_can_stop(Unit *u) {
1915 assert(u);
1916
1917 if (!unit_supported(u))
1918 return false;
1919
1920 if (u->perpetual)
1921 return false;
1922
1923 return !!UNIT_VTABLE(u)->stop;
1924 }
1925
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EALREADY;

        /* Only an active unit can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward the request to the main object if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1968
1969 bool unit_can_reload(Unit *u) {
1970 assert(u);
1971
1972 if (UNIT_VTABLE(u)->can_reload)
1973 return UNIT_VTABLE(u)->can_reload(u);
1974
1975 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1976 return true;
1977
1978 return UNIT_VTABLE(u)->reload;
1979 }
1980
/* Whether a StopWhenUnneeded= unit currently has no active reverse dependency keeping it alive. */
bool unit_is_unneeded(Unit *u) {
        /* The reverse dependency types whose presence counts as "still needed". */
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}
2023
static void check_unneeded_dependencies(Unit *u) {

        /* The forward dependency types pointing at units that might have become unneeded. */
        static const UnitDependency deps[] = {
                UNIT_REQUIRES,
                UNIT_REQUISITE,
                UNIT_WANTS,
                UNIT_BINDS_TO,
        };
        size_t j;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
                        unit_submit_to_stop_when_unneeded_queue(other);
        }
}
2047
/* If any BindsTo= dependency of an active unit has gone down, enqueue a stop job for the unit
 * (rate-limited to avoid stop loops). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A pending job will sort this out by itself. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* Found a bound-to unit that is down; 'other' keeps pointing at it below. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2098
/* A unit became active outside of a job (e.g. discovered that way); best-effort enqueue start jobs
 * for its requirement dependencies and stop jobs for its conflicts. Job enqueue errors are
 * deliberately ignored here. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= and BindsTo= pull in hard start jobs (JOB_REPLACE), but only for units not
         * ordered After= us — those would have been started by the job engine already. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= only gets a weak start job (JOB_FAIL). */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units are stopped in both directions. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2130
/* A unit became inactive outside of a job; best-effort pull down units bound to it. */
static void retroactively_stop_dependencies(Unit *u) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2144
/* Enqueue start jobs for all OnFailure= dependencies of the unit, using the configured
 * OnFailureJobMode=. Enqueue failures are logged and otherwise ignored. */
void unit_start_on_failure(Unit *u) {
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
                return;

        log_unit_info(u, "Triggering OnFailure= dependencies.");

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
        }
}
2166
/* Notify all units triggered by this one (TriggeredBy=) that our state changed, via their
 * type-specific trigger_notify() hook if they have one. */
void unit_trigger_notify(Unit *u) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}
2178
2179 static int unit_log_resources(Unit *u) {
2180 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2181 _cleanup_free_ char *igress = NULL, *egress = NULL;
2182 size_t n_message_parts = 0, n_iovec = 0;
2183 char* message_parts[3 + 1], *t;
2184 nsec_t nsec = NSEC_INFINITY;
2185 CGroupIPAccountingMetric m;
2186 bool any_traffic = false;
2187 size_t i;
2188 int r;
2189 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2190 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2191 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2192 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2193 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2194 };
2195
2196 assert(u);
2197
2198 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2199 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2200 * information and the complete data in structured fields. */
2201
2202 (void) unit_get_cpu_usage(u, &nsec);
2203 if (nsec != NSEC_INFINITY) {
2204 char buf[FORMAT_TIMESPAN_MAX] = "";
2205
2206 /* Format the CPU time for inclusion in the structured log message */
2207 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2208 r = log_oom();
2209 goto finish;
2210 }
2211 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2212
2213 /* Format the CPU time for inclusion in the human language message string */
2214 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2215 t = strjoin("consumed ", buf, " CPU time");
2216 if (!t) {
2217 r = log_oom();
2218 goto finish;
2219 }
2220
2221 message_parts[n_message_parts++] = t;
2222 }
2223
2224 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2225 char buf[FORMAT_BYTES_MAX] = "";
2226 uint64_t value = UINT64_MAX;
2227
2228 assert(ip_fields[m]);
2229
2230 (void) unit_get_ip_accounting(u, m, &value);
2231 if (value == UINT64_MAX)
2232 continue;
2233 if (value > 0)
2234 any_traffic = true;
2235
2236 /* Format IP accounting data for inclusion in the structured log message */
2237 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2238 r = log_oom();
2239 goto finish;
2240 }
2241 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2242
2243 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2244 * bytes counters (and not for the packets counters) */
2245 if (m == CGROUP_IP_INGRESS_BYTES) {
2246 assert(!igress);
2247 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2248 if (!igress) {
2249 r = log_oom();
2250 goto finish;
2251 }
2252 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2253 assert(!egress);
2254 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2255 if (!egress) {
2256 r = log_oom();
2257 goto finish;
2258 }
2259 }
2260 }
2261
2262 if (any_traffic) {
2263 if (igress)
2264 message_parts[n_message_parts++] = TAKE_PTR(igress);
2265 if (egress)
2266 message_parts[n_message_parts++] = TAKE_PTR(egress);
2267 } else {
2268 char *k;
2269
2270 k = strdup("no IP traffic");
2271 if (!k) {
2272 r = log_oom();
2273 goto finish;
2274 }
2275
2276 message_parts[n_message_parts++] = k;
2277 }
2278
2279 /* Is there any accounting data available at all? */
2280 if (n_iovec == 0) {
2281 r = 0;
2282 goto finish;
2283 }
2284
2285 if (n_message_parts == 0)
2286 t = strjoina("MESSAGE=", u->id, ": Completed.");
2287 else {
2288 _cleanup_free_ char *joined;
2289
2290 message_parts[n_message_parts] = NULL;
2291
2292 joined = strv_join(message_parts, ", ");
2293 if (!joined) {
2294 r = log_oom();
2295 goto finish;
2296 }
2297
2298 joined[0] = ascii_toupper(joined[0]);
2299 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2300 }
2301
2302 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2303 * and hence don't increase n_iovec for them */
2304 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2305 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2306
2307 t = strjoina(u->manager->unit_log_field, u->id);
2308 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2309
2310 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2311 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2312
2313 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2314 r = 0;
2315
2316 finish:
2317 for (i = 0; i < n_message_parts; i++)
2318 free(message_parts[i]);
2319
2320 for (i = 0; i < n_iovec; i++)
2321 free(iovec[i].iov_base);
2322
2323 return r;
2324
2325 }
2326
2327 static void unit_update_on_console(Unit *u) {
2328 bool b;
2329
2330 assert(u);
2331
2332 b = unit_needs_console(u);
2333 if (u->on_console == b)
2334 return;
2335
2336 u->on_console = b;
2337 if (b)
2338 manager_ref_console(u->manager);
2339 else
2340 manager_unref_console(u->manager);
2341 }
2342
/* Called by the per-type unit implementations whenever a unit changes (or re-enters) a low-level state.
 * Updates state-change timestamps and failed-unit tracking, completes or invalidates any pending job,
 * retroactively starts/stops dependencies, emits audit/plymouth/log records, and possibly triggers the
 * unit's configured success/failure emergency action.
 *
 * Note that this is called for all low-level state changes, even if they might map to the same high-level
 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
 * remounted this function will be called too! */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        m = u->manager;

        /* Update timestamps for state changes — skipped while reloading/deserializing, so that
         * deserialized state doesn't clobber the timestamps we carried over */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* The unit moved somewhere other than "activating" while a start job
                                 * ran — this was not the job's doing */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job was pending, so any state change at all is externally caused */
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* Units that are about to be auto-restarted handle OnFailure= later themselves */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        if (u->type == UNIT_SERVICE) {
                                /* Write audit record if we have just finished starting up */
                                manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                                u->in_audit = true;
                        }

                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        if (u->type == UNIT_SERVICE) {

                                if (u->in_audit) {
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
                                        u->in_audit = false;
                                } else {
                                        /* Hmm, if there was no start record written write it now, so that we always
                                         * have a nice pair */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                }
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        (void) emergency_action(u->manager, u->failure_action, 0,
                                                u->reboot_arg, reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        (void) emergency_action(u->manager, u->success_action, 0,
                                                u->reboot_arg, reason);
                }
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2542
/* Registers "pid" so that process events for it are dispatched to this unit. The manager keeps two
 * indexes: watch_pids maps pid → the single primary watching unit, while the same hashmap keyed by the
 * negated pid maps -pid → a NULL-terminated array of any additional units watching the same pid. The pid
 * is also recorded in the unit's own pid set so unit_unwatch_all_pids() can enumerate it.
 * Returns 0 on success, negative errno (typically -ENOMEM) on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array: n existing entries + ourselves + NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* n == 0 implies array may be NULL; memcpy_safe tolerates that */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Finally, remember the pid in our own set as well */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2607
/* Stops dispatching events for "pid" to this unit: removes us from the primary "pid" key and from the
 * secondary "-pid" array in the manager's watch_pids hashmap (freeing the array if it becomes empty),
 * and drops the pid from the unit's own pid set. Inverse of unit_watch_pid(). */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                size_t n, m = 0;

                /* Let's iterate through the array, dropping our own entry (in-place compaction) */
                for (n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2637
2638 void unit_unwatch_all_pids(Unit *u) {
2639 assert(u);
2640
2641 while (!set_isempty(u->pids))
2642 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2643
2644 u->pids = set_free(u->pids);
2645 }
2646
2647 static void unit_tidy_watch_pids(Unit *u) {
2648 pid_t except1, except2;
2649 Iterator i;
2650 void *e;
2651
2652 assert(u);
2653
2654 /* Cleans dead PIDs from our list */
2655
2656 except1 = unit_main_pid(u);
2657 except2 = unit_control_pid(u);
2658
2659 SET_FOREACH(e, u->pids, i) {
2660 pid_t pid = PTR_TO_PID(e);
2661
2662 if (pid == except1 || pid == except2)
2663 continue;
2664
2665 if (!pid_is_unwaited(pid))
2666 unit_unwatch_pid(u, pid);
2667 }
2668 }
2669
2670 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2671 Unit *u = userdata;
2672
2673 assert(s);
2674 assert(u);
2675
2676 unit_tidy_watch_pids(u);
2677 unit_watch_all_pids(u);
2678
2679 /* If the PID set is empty now, then let's finish this off. */
2680 unit_synthesize_cgroup_empty_event(u);
2681
2682 return 0;
2683 }
2684
2685 int unit_enqueue_rewatch_pids(Unit *u) {
2686 int r;
2687
2688 assert(u);
2689
2690 if (!u->cgroup_path)
2691 return -ENOENT;
2692
2693 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2694 if (r < 0)
2695 return r;
2696 if (r > 0) /* On unified we can use proper notifications */
2697 return 0;
2698
2699 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2700 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2701 * involves issuing kill(pid, 0) on all processes we watch. */
2702
2703 if (!u->rewatch_pids_event_source) {
2704 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2705
2706 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2707 if (r < 0)
2708 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2709
2710 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2711 if (r < 0)
2712 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2713
2714 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2715
2716 u->rewatch_pids_event_source = TAKE_PTR(s);
2717 }
2718
2719 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2720 if (r < 0)
2721 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2722
2723 return 0;
2724 }
2725
2726 void unit_dequeue_rewatch_pids(Unit *u) {
2727 int r;
2728 assert(u);
2729
2730 if (!u->rewatch_pids_event_source)
2731 return;
2732
2733 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2734 if (r < 0)
2735 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2736
2737 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2738 }
2739
2740 bool unit_job_is_applicable(Unit *u, JobType j) {
2741 assert(u);
2742 assert(j >= 0 && j < _JOB_TYPE_MAX);
2743
2744 switch (j) {
2745
2746 case JOB_VERIFY_ACTIVE:
2747 case JOB_START:
2748 case JOB_NOP:
2749 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2750 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2751 * jobs for it. */
2752 return true;
2753
2754 case JOB_STOP:
2755 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2756 * external events), hence it makes no sense to permit enqueing such a request either. */
2757 return !u->perpetual;
2758
2759 case JOB_RESTART:
2760 case JOB_TRY_RESTART:
2761 return unit_can_stop(u) && unit_can_start(u);
2762
2763 case JOB_RELOAD:
2764 case JOB_TRY_RELOAD:
2765 return unit_can_reload(u);
2766
2767 case JOB_RELOAD_OR_START:
2768 return unit_can_reload(u) && unit_can_start(u);
2769
2770 default:
2771 assert_not_reached("Invalid job type");
2772 }
2773 }
2774
2775 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2776 assert(u);
2777
2778 /* Only warn about some unit types */
2779 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2780 return;
2781
2782 if (streq_ptr(u->id, other))
2783 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2784 else
2785 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2786 }
2787
2788 static int unit_add_dependency_hashmap(
2789 Hashmap **h,
2790 Unit *other,
2791 UnitDependencyMask origin_mask,
2792 UnitDependencyMask destination_mask) {
2793
2794 UnitDependencyInfo info;
2795 int r;
2796
2797 assert(h);
2798 assert(other);
2799 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2800 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2801 assert(origin_mask > 0 || destination_mask > 0);
2802
2803 r = hashmap_ensure_allocated(h, NULL);
2804 if (r < 0)
2805 return r;
2806
2807 assert_cc(sizeof(void*) == sizeof(info));
2808
2809 info.data = hashmap_get(*h, other);
2810 if (info.data) {
2811 /* Entry already exists. Add in our mask. */
2812
2813 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2814 FLAGS_SET(destination_mask, info.destination_mask))
2815 return 0; /* NOP */
2816
2817 info.origin_mask |= origin_mask;
2818 info.destination_mask |= destination_mask;
2819
2820 r = hashmap_update(*h, other, info.data);
2821 } else {
2822 info = (UnitDependencyInfo) {
2823 .origin_mask = origin_mask,
2824 .destination_mask = destination_mask,
2825 };
2826
2827 r = hashmap_put(*h, other, info.data);
2828 }
2829 if (r < 0)
2830 return r;
2831
2832 return 1;
2833 }
2834
2835 int unit_add_dependency(
2836 Unit *u,
2837 UnitDependency d,
2838 Unit *other,
2839 bool add_reference,
2840 UnitDependencyMask mask) {
2841
2842 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2843 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2844 [UNIT_WANTS] = UNIT_WANTED_BY,
2845 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2846 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2847 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2848 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2849 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2850 [UNIT_WANTED_BY] = UNIT_WANTS,
2851 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2852 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2853 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2854 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2855 [UNIT_BEFORE] = UNIT_AFTER,
2856 [UNIT_AFTER] = UNIT_BEFORE,
2857 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2858 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2859 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2860 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2861 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2862 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2863 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2864 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2865 };
2866 Unit *original_u = u, *original_other = other;
2867 int r;
2868
2869 assert(u);
2870 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2871 assert(other);
2872
2873 u = unit_follow_merge(u);
2874 other = unit_follow_merge(other);
2875
2876 /* We won't allow dependencies on ourselves. We will not
2877 * consider them an error however. */
2878 if (u == other) {
2879 maybe_warn_about_dependency(original_u, original_other->id, d);
2880 return 0;
2881 }
2882
2883 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2884 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2885 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2886 return 0;
2887 }
2888
2889 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2890 if (r < 0)
2891 return r;
2892
2893 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2894 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2895 if (r < 0)
2896 return r;
2897 }
2898
2899 if (add_reference) {
2900 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2901 if (r < 0)
2902 return r;
2903
2904 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2905 if (r < 0)
2906 return r;
2907 }
2908
2909 unit_add_to_dbus_queue(u);
2910 return 0;
2911 }
2912
2913 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2914 int r;
2915
2916 assert(u);
2917
2918 r = unit_add_dependency(u, d, other, add_reference, mask);
2919 if (r < 0)
2920 return r;
2921
2922 return unit_add_dependency(u, e, other, add_reference, mask);
2923 }
2924
2925 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2926 int r;
2927
2928 assert(u);
2929 assert(name);
2930 assert(buf);
2931 assert(ret);
2932
2933 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2934 *buf = NULL;
2935 *ret = name;
2936 return 0;
2937 }
2938
2939 if (u->instance)
2940 r = unit_name_replace_instance(name, u->instance, buf);
2941 else {
2942 _cleanup_free_ char *i = NULL;
2943
2944 r = unit_name_to_prefix(u->id, &i);
2945 if (r < 0)
2946 return r;
2947
2948 r = unit_name_replace_instance(name, i, buf);
2949 }
2950 if (r < 0)
2951 return r;
2952
2953 *ret = *buf;
2954 return 0;
2955 }
2956
2957 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2958 _cleanup_free_ char *buf = NULL;
2959 Unit *other;
2960 int r;
2961
2962 assert(u);
2963 assert(name);
2964
2965 r = resolve_template(u, name, &buf, &name);
2966 if (r < 0)
2967 return r;
2968
2969 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2970 if (r < 0)
2971 return r;
2972
2973 return unit_add_dependency(u, d, other, add_reference, mask);
2974 }
2975
2976 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2977 _cleanup_free_ char *buf = NULL;
2978 Unit *other;
2979 int r;
2980
2981 assert(u);
2982 assert(name);
2983
2984 r = resolve_template(u, name, &buf, &name);
2985 if (r < 0)
2986 return r;
2987
2988 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2989 if (r < 0)
2990 return r;
2991
2992 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2993 }
2994
/* Redirects unit file lookup to the given path via the SYSTEMD_UNIT_PATH environment variable.
 * This is mostly for debug purposes. Returns 0 on success, -errno on setenv() failure. */
int set_unit_path(const char *p) {
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
3002
3003 char *unit_dbus_path(Unit *u) {
3004 assert(u);
3005
3006 if (!u->id)
3007 return NULL;
3008
3009 return unit_dbus_path_from_name(u->id);
3010 }
3011
3012 char *unit_dbus_path_invocation_id(Unit *u) {
3013 assert(u);
3014
3015 if (sd_id128_is_null(u->invocation_id))
3016 return NULL;
3017
3018 return unit_dbus_path_from_name(u->invocation_id_string);
3019 }
3020
/* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
 * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
 * slice is derived from the name). Make sure the unit we set is actually a slice.
 *
 * Returns 1 if the slice was (re)set, 0 if it was already set to this very slice, and negative errno
 * on refusal: -EOPNOTSUPP (no cgroup context), -EINVAL (unit is itself a slice, or "slice" is not one),
 * -EBUSY (unit not inactive, or already realized in the cgroup tree), -EPERM (init.scope may only live
 * in the root slice). */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Re-parenting a unit that has already been started is not supported */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
3057
/* Picks and loads a default slice for the unit, unless one is set already. Instantiated units get an
 * implicit per-template slice (e.g. "system-getty.slice" for "getty@tty1.service"); everything else
 * lands in system.slice on the system manager, or the root slice otherwise (and init.scope always).
 * Returns what unit_set_slice() returns, or negative errno if generating or loading the slice name
 * fails. */
int unit_set_default_slice(Unit *u) {
        _cleanup_free_ char *b = NULL;
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        b = strjoin("system-", escaped, ".slice");
                else
                        b = strappend(escaped, ".slice");
                if (!b)
                        return -ENOMEM;

                slice_name = b;
        } else
                slice_name =
                        MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3106
3107 const char *unit_slice_name(Unit *u) {
3108 assert(u);
3109
3110 if (!UNIT_ISSET(u->slice))
3111 return NULL;
3112
3113 return UNIT_DEREF(u->slice)->id;
3114 }
3115
3116 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3117 _cleanup_free_ char *t = NULL;
3118 int r;
3119
3120 assert(u);
3121 assert(type);
3122 assert(_found);
3123
3124 r = unit_name_change_suffix(u->id, type, &t);
3125 if (r < 0)
3126 return r;
3127 if (unit_has_name(u, t))
3128 return -EINVAL;
3129
3130 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3131 assert(r < 0 || *_found != u);
3132 return r;
3133 }
3134
3135 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3136 const char *name, *old_owner, *new_owner;
3137 Unit *u = userdata;
3138 int r;
3139
3140 assert(message);
3141 assert(u);
3142
3143 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3144 if (r < 0) {
3145 bus_log_parse_error(r);
3146 return 0;
3147 }
3148
3149 old_owner = empty_to_null(old_owner);
3150 new_owner = empty_to_null(new_owner);
3151
3152 if (UNIT_VTABLE(u)->bus_name_owner_change)
3153 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3154
3155 return 0;
3156 }
3157
3158 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3159 const char *match;
3160
3161 assert(u);
3162 assert(bus);
3163 assert(name);
3164
3165 if (u->match_bus_slot)
3166 return -EBUSY;
3167
3168 match = strjoina("type='signal',"
3169 "sender='org.freedesktop.DBus',"
3170 "path='/org/freedesktop/DBus',"
3171 "interface='org.freedesktop.DBus',"
3172 "member='NameOwnerChanged',"
3173 "arg0='", name, "'");
3174
3175 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3176 }
3177
/* Watch a specific name on the bus. We only support one unit watching each name for now.
 * Installs the bus match immediately when the API bus is up, and always registers the name in the
 * manager's watch_bus hashmap. Returns 0 on success, negative errno on failure (e.g. -EEXIST if
 * another unit already watches this name). */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match slot we may have just installed */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3203
3204 void unit_unwatch_bus_name(Unit *u, const char *name) {
3205 assert(u);
3206 assert(name);
3207
3208 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3209 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3210 }
3211
3212 bool unit_can_serialize(Unit *u) {
3213 assert(u);
3214
3215 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3216 }
3217
3218 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3219 _cleanup_free_ char *s = NULL;
3220 int r;
3221
3222 assert(f);
3223 assert(key);
3224
3225 if (mask == 0)
3226 return 0;
3227
3228 r = cg_mask_to_string(mask, &s);
3229 if (r < 0)
3230 return log_error_errno(r, "Failed to format cgroup mask: %m");
3231
3232 return serialize_item(f, key, s);
3233 }
3234
/* Serialization keys for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric;
 * used when writing the unit's IP accounting data in unit_serialize(). */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3241
/* Writes the unit's runtime state to "f" (with any fds deposited in "fds" by the unit-type-specific
 * part), so state survives daemon reload/re-execution. The format is "key=value" lines terminated by
 * an empty line; pending jobs are serialized inline after a bare "job" marker line when
 * serialize_jobs is set. Returns 0 on success, negative errno on failure. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Unit-type-specific state first, if the type implements serialization */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);

        (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
        (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
        (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
        (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful once the checks actually ran */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                (void) serialize_bool(f, "condition-result", u->condition_result);

        if (dual_timestamp_is_set(&u->assert_timestamp))
                (void) serialize_bool(f, "assert-result", u->assert_result);

        (void) serialize_bool(f, "transient", u->transient);
        (void) serialize_bool(f, "in-audit", u->in_audit);

        (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
        (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
        (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
        (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
        (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                (void) serialize_item(f, "cgroup", u->cgroup_path);

        (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);

        if (uid_is_valid(u->ref_uid))
                (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                /* Only written when IP accounting data is actually available for the metric */
                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fputs("job\n", f);
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        /* NOTE(review): the nop job is written under the same bare "job" marker as the
                         * installed job above — presumably the deserializer sorts out which slot each
                         * belongs in; confirm against unit_deserialize()/job_install_deserialized(). */
                        fputs("job\n", f);
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3327
/* Restores a unit's runtime state from the serialization stream written by unit_serialize(),
 * consuming key=value lines until the empty-line end marker. Unknown keys are passed on to the
 * unit type's deserialize_item() hook; parse failures of individual values are logged and
 * ignored so that a partially-unreadable serialization doesn't abort the daemon re-exec.
 * Returns 0 on success, a negative errno on fatal errors (I/O, OOM, job installation). */
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                CGroupIPAccountingMetric m;
                char *l, *v;
                size_t k;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0) /* eof */
                        break;

                l = strstrip(line);
                if (isempty(l)) /* End marker */
                        break;

                /* Split "key=value"; a line without '=' yields an empty value string */
                k = strcspn(l, "=");

                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;

                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* new-style serialized job */
                                Job *j;

                                j = job_new_raw(u);
                                if (!j)
                                        return log_oom();

                                r = job_deserialize(j, f);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = job_install_deserialized(j);
                                if (r < 0) {
                                        /* Undo the hashmap_put() above so the job isn't leaked in the map */
                                        hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
                                        job_free(j);
                                        return r;
                                }
                        } else  /* legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "state-change-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;

                        continue;

                } else if (streq(l, "assert-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;

                        continue;

                } else if (streq(l, "transient")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;

                        continue;

                } else if (streq(l, "in-audit")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
                        else
                                u->in_audit = r;

                        continue;

                } else if (streq(l, "exported-invocation-id")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
                        else
                                u->exported_invocation_id = r;

                        continue;

                } else if (streq(l, "exported-log-level-max")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
                        else
                                u->exported_log_level_max = r;

                        continue;

                } else if (streq(l, "exported-log-extra-fields")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
                        else
                                u->exported_log_extra_fields = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-interval")) {

                        /* This is a bool flag ("was the setting exported to the journal?"),
                         * not the interval value itself — hence parse_boolean(). */
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_interval = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-burst")) {

                        /* Likewise a bool export flag, not the burst count */
                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_burst = r;

                        continue;

                } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {

                        /* "cpuacct-usage-base" is the legacy name for the same field */
                        r = safe_atou64(v, &u->cpu_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cpu-usage-last")) {

                        r = safe_atou64(v, &u->cpu_usage_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cgroup")) {

                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);

                        /* Re-establish inotify watches on the cgroup after re-exec */
                        (void) unit_watch_cgroup(u);

                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;

                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;

                        continue;

                } else if (streq(l, "cgroup-realized-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_realized_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-enabled-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-invalidated-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "ref-uid")) {
                        uid_t uid;

                        r = parse_uid(v, &uid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, uid, GID_INVALID);

                        continue;

                } else if (streq(l, "ref-gid")) {
                        gid_t gid;

                        r = parse_gid(v, &gid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, UID_INVALID, gid);

                        continue;

                } else if (streq(l, "ref")) {

                        /* Bus track names are collected here and re-registered later in unit_coldplug() */
                        r = strv_extend(&u->deserialized_refs, v);
                        if (r < 0)
                                return log_oom();

                        continue;
                } else if (streq(l, "invocation-id")) {
                        sd_id128_t id;

                        r = sd_id128_from_string(v, &id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
                        else {
                                r = unit_set_invocation_id(u, id);
                                if (r < 0)
                                        log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
                        }

                        continue;
                }

                /* Check if this is an IP accounting metric serialization field */
                for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
                        if (streq(l, ip_accounting_metric_field[m]))
                                break;
                if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
                        else
                                u->ip_accounting_extra[m] = c;
                        continue;
                }

                /* Not a generic key — hand it to the compat exec-runtime parser, then to the
                 * unit type's own deserializer */
                if (unit_can_serialize(u)) {
                        r = exec_runtime_deserialize_compat(u, l, v, fds);
                        if (r < 0) {
                                log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                continue;
                        }

                        /* Returns positive if key was handled by the call */
                        if (r > 0)
                                continue;

                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }

        /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
         * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
         * before 228 where the base for timeouts was not persistent across reboots. */

        if (!dual_timestamp_is_set(&u->state_change_timestamp))
                dual_timestamp_get(&u->state_change_timestamp);

        /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
         * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
        unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
        unit_invalidate_cgroup_bpf(u);

        return 0;
}
3647
3648 int unit_deserialize_skip(FILE *f) {
3649 int r;
3650 assert(f);
3651
3652 /* Skip serialized data for this unit. We don't know what it is. */
3653
3654 for (;;) {
3655 _cleanup_free_ char *line = NULL;
3656 char *l;
3657
3658 r = read_line(f, LONG_LINE_MAX, &line);
3659 if (r < 0)
3660 return log_error_errno(r, "Failed to read serialization line: %m");
3661 if (r == 0)
3662 return 0;
3663
3664 l = strstrip(line);
3665
3666 /* End marker */
3667 if (isempty(l))
3668 return 1;
3669 }
3670 }
3671
3672 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3673 Unit *device;
3674 _cleanup_free_ char *e = NULL;
3675 int r;
3676
3677 assert(u);
3678
3679 /* Adds in links to the device node that this unit is based on */
3680 if (isempty(what))
3681 return 0;
3682
3683 if (!is_device_path(what))
3684 return 0;
3685
3686 /* When device units aren't supported (such as in a
3687 * container), don't create dependencies on them. */
3688 if (!unit_type_supported(UNIT_DEVICE))
3689 return 0;
3690
3691 r = unit_name_from_path(what, ".device", &e);
3692 if (r < 0)
3693 return r;
3694
3695 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3696 if (r < 0)
3697 return r;
3698
3699 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3700 dep = UNIT_BINDS_TO;
3701
3702 r = unit_add_two_dependencies(u, UNIT_AFTER,
3703 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3704 device, true, mask);
3705 if (r < 0)
3706 return r;
3707
3708 if (wants) {
3709 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3710 if (r < 0)
3711 return r;
3712 }
3713
3714 return 0;
3715 }
3716
3717 int unit_coldplug(Unit *u) {
3718 int r = 0, q;
3719 char **i;
3720
3721 assert(u);
3722
3723 /* Make sure we don't enter a loop, when coldplugging recursively. */
3724 if (u->coldplugged)
3725 return 0;
3726
3727 u->coldplugged = true;
3728
3729 STRV_FOREACH(i, u->deserialized_refs) {
3730 q = bus_unit_track_add_name(u, *i);
3731 if (q < 0 && r >= 0)
3732 r = q;
3733 }
3734 u->deserialized_refs = strv_free(u->deserialized_refs);
3735
3736 if (UNIT_VTABLE(u)->coldplug) {
3737 q = UNIT_VTABLE(u)->coldplug(u);
3738 if (q < 0 && r >= 0)
3739 r = q;
3740 }
3741
3742 if (u->job) {
3743 q = job_coldplug(u->job);
3744 if (q < 0 && r >= 0)
3745 r = q;
3746 }
3747
3748 return r;
3749 }
3750
3751 void unit_catchup(Unit *u) {
3752 assert(u);
3753
3754 if (UNIT_VTABLE(u)->catchup)
3755 UNIT_VTABLE(u)->catchup(u);
3756 }
3757
3758 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3759 struct stat st;
3760
3761 if (!path)
3762 return false;
3763
3764 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3765 * are never out-of-date. */
3766 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3767 return false;
3768
3769 if (stat(path, &st) < 0)
3770 /* What, cannot access this anymore? */
3771 return true;
3772
3773 if (path_masked)
3774 /* For masked files check if they are still so */
3775 return !null_or_empty(&st);
3776 else
3777 /* For non-empty files check the mtime */
3778 return timespec_load(&st.st_mtim) > mtime;
3779
3780 return false;
3781 }
3782
3783 bool unit_need_daemon_reload(Unit *u) {
3784 _cleanup_strv_free_ char **t = NULL;
3785 char **path;
3786
3787 assert(u);
3788
3789 /* For unit files, we allow masking… */
3790 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3791 u->load_state == UNIT_MASKED))
3792 return true;
3793
3794 /* Source paths should not be masked… */
3795 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3796 return true;
3797
3798 if (u->load_state == UNIT_LOADED)
3799 (void) unit_find_dropin_paths(u, &t);
3800 if (!strv_equal(u->dropin_paths, t))
3801 return true;
3802
3803 /* … any drop-ins that are masked are simply omitted from the list. */
3804 STRV_FOREACH(path, u->dropin_paths)
3805 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3806 return true;
3807
3808 return false;
3809 }
3810
3811 void unit_reset_failed(Unit *u) {
3812 assert(u);
3813
3814 if (UNIT_VTABLE(u)->reset_failed)
3815 UNIT_VTABLE(u)->reset_failed(u);
3816
3817 RATELIMIT_RESET(u->start_limit);
3818 u->start_limit_hit = false;
3819 }
3820
3821 Unit *unit_following(Unit *u) {
3822 assert(u);
3823
3824 if (UNIT_VTABLE(u)->following)
3825 return UNIT_VTABLE(u)->following(u);
3826
3827 return NULL;
3828 }
3829
3830 bool unit_stop_pending(Unit *u) {
3831 assert(u);
3832
3833 /* This call does check the current state of the unit. It's
3834 * hence useful to be called from state change calls of the
3835 * unit itself, where the state isn't updated yet. This is
3836 * different from unit_inactive_or_pending() which checks both
3837 * the current state and for a queued job. */
3838
3839 return u->job && u->job->type == JOB_STOP;
3840 }
3841
3842 bool unit_inactive_or_pending(Unit *u) {
3843 assert(u);
3844
3845 /* Returns true if the unit is inactive or going down */
3846
3847 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3848 return true;
3849
3850 if (unit_stop_pending(u))
3851 return true;
3852
3853 return false;
3854 }
3855
3856 bool unit_active_or_pending(Unit *u) {
3857 assert(u);
3858
3859 /* Returns true if the unit is active or going up */
3860
3861 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3862 return true;
3863
3864 if (u->job &&
3865 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3866 return true;
3867
3868 return false;
3869 }
3870
3871 bool unit_will_restart(Unit *u) {
3872 assert(u);
3873
3874 if (!UNIT_VTABLE(u)->will_restart)
3875 return false;
3876
3877 return UNIT_VTABLE(u)->will_restart(u);
3878 }
3879
3880 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3881 assert(u);
3882 assert(w >= 0 && w < _KILL_WHO_MAX);
3883 assert(SIGNAL_VALID(signo));
3884
3885 if (!UNIT_VTABLE(u)->kill)
3886 return -EOPNOTSUPP;
3887
3888 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3889 }
3890
3891 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3892 _cleanup_set_free_ Set *pid_set = NULL;
3893 int r;
3894
3895 pid_set = set_new(NULL);
3896 if (!pid_set)
3897 return NULL;
3898
3899 /* Exclude the main/control pids from being killed via the cgroup */
3900 if (main_pid > 0) {
3901 r = set_put(pid_set, PID_TO_PTR(main_pid));
3902 if (r < 0)
3903 return NULL;
3904 }
3905
3906 if (control_pid > 0) {
3907 r = set_put(pid_set, PID_TO_PTR(control_pid));
3908 if (r < 0)
3909 return NULL;
3910 }
3911
3912 return TAKE_PTR(pid_set);
3913 }
3914
3915 int unit_kill_common(
3916 Unit *u,
3917 KillWho who,
3918 int signo,
3919 pid_t main_pid,
3920 pid_t control_pid,
3921 sd_bus_error *error) {
3922
3923 int r = 0;
3924 bool killed = false;
3925
3926 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3927 if (main_pid < 0)
3928 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3929 else if (main_pid == 0)
3930 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3931 }
3932
3933 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3934 if (control_pid < 0)
3935 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3936 else if (control_pid == 0)
3937 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3938 }
3939
3940 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3941 if (control_pid > 0) {
3942 if (kill(control_pid, signo) < 0)
3943 r = -errno;
3944 else
3945 killed = true;
3946 }
3947
3948 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3949 if (main_pid > 0) {
3950 if (kill(main_pid, signo) < 0)
3951 r = -errno;
3952 else
3953 killed = true;
3954 }
3955
3956 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3957 _cleanup_set_free_ Set *pid_set = NULL;
3958 int q;
3959
3960 /* Exclude the main/control pids from being killed via the cgroup */
3961 pid_set = unit_pid_set(main_pid, control_pid);
3962 if (!pid_set)
3963 return -ENOMEM;
3964
3965 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3966 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3967 r = q;
3968 else
3969 killed = true;
3970 }
3971
3972 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3973 return -ESRCH;
3974
3975 return r;
3976 }
3977
3978 int unit_following_set(Unit *u, Set **s) {
3979 assert(u);
3980 assert(s);
3981
3982 if (UNIT_VTABLE(u)->following_set)
3983 return UNIT_VTABLE(u)->following_set(u, s);
3984
3985 *s = NULL;
3986 return 0;
3987 }
3988
3989 UnitFileState unit_get_unit_file_state(Unit *u) {
3990 int r;
3991
3992 assert(u);
3993
3994 if (u->unit_file_state < 0 && u->fragment_path) {
3995 r = unit_file_get_state(
3996 u->manager->unit_file_scope,
3997 NULL,
3998 u->id,
3999 &u->unit_file_state);
4000 if (r < 0)
4001 u->unit_file_state = UNIT_FILE_BAD;
4002 }
4003
4004 return u->unit_file_state;
4005 }
4006
4007 int unit_get_unit_file_preset(Unit *u) {
4008 assert(u);
4009
4010 if (u->unit_file_preset < 0 && u->fragment_path)
4011 u->unit_file_preset = unit_file_query_preset(
4012 u->manager->unit_file_scope,
4013 NULL,
4014 basename(u->fragment_path));
4015
4016 return u->unit_file_preset;
4017 }
4018
4019 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4020 assert(ref);
4021 assert(source);
4022 assert(target);
4023
4024 if (ref->target)
4025 unit_ref_unset(ref);
4026
4027 ref->source = source;
4028 ref->target = target;
4029 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4030 return target;
4031 }
4032
4033 void unit_ref_unset(UnitRef *ref) {
4034 assert(ref);
4035
4036 if (!ref->target)
4037 return;
4038
4039 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4040 * be unreferenced now. */
4041 unit_add_to_gc_queue(ref->target);
4042
4043 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4044 ref->source = ref->target = NULL;
4045 }
4046
4047 static int user_from_unit_name(Unit *u, char **ret) {
4048
4049 static const uint8_t hash_key[] = {
4050 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4051 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4052 };
4053
4054 _cleanup_free_ char *n = NULL;
4055 int r;
4056
4057 r = unit_name_to_prefix(u->id, &n);
4058 if (r < 0)
4059 return r;
4060
4061 if (valid_user_group_name(n)) {
4062 *ret = TAKE_PTR(n);
4063 return 0;
4064 }
4065
4066 /* If we can't use the unit name as a user name, then let's hash it and use that */
4067 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4068 return -ENOMEM;
4069
4070 return 0;
4071 }
4072
/* Patches manager-level defaults and cross-setting implications into the unit's exec and
 * cgroup contexts after all settings were parsed: copies default resource limits, fills in
 * a working directory for user managers, restricts capabilities implied by sandbox options,
 * enforces the DynamicUser= sandbox, and opens device access needed by RootImage=.
 * Returns 0 on success, negative errno on allocation or lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities needed to create or
                 * directly access device nodes */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Default the user (and group) name from the unit name if unset */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= also tightens the default device policy */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4164
4165 ExecContext *unit_get_exec_context(Unit *u) {
4166 size_t offset;
4167 assert(u);
4168
4169 if (u->type < 0)
4170 return NULL;
4171
4172 offset = UNIT_VTABLE(u)->exec_context_offset;
4173 if (offset <= 0)
4174 return NULL;
4175
4176 return (ExecContext*) ((uint8_t*) u + offset);
4177 }
4178
4179 KillContext *unit_get_kill_context(Unit *u) {
4180 size_t offset;
4181 assert(u);
4182
4183 if (u->type < 0)
4184 return NULL;
4185
4186 offset = UNIT_VTABLE(u)->kill_context_offset;
4187 if (offset <= 0)
4188 return NULL;
4189
4190 return (KillContext*) ((uint8_t*) u + offset);
4191 }
4192
4193 CGroupContext *unit_get_cgroup_context(Unit *u) {
4194 size_t offset;
4195
4196 if (u->type < 0)
4197 return NULL;
4198
4199 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4200 if (offset <= 0)
4201 return NULL;
4202
4203 return (CGroupContext*) ((uint8_t*) u + offset);
4204 }
4205
4206 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4207 size_t offset;
4208
4209 if (u->type < 0)
4210 return NULL;
4211
4212 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4213 if (offset <= 0)
4214 return NULL;
4215
4216 return *(ExecRuntime**) ((uint8_t*) u + offset);
4217 }
4218
4219 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4220 assert(u);
4221
4222 if (UNIT_WRITE_FLAGS_NOOP(flags))
4223 return NULL;
4224
4225 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4226 return u->manager->lookup_paths.transient;
4227
4228 if (flags & UNIT_PERSISTENT)
4229 return u->manager->lookup_paths.persistent_control;
4230
4231 if (flags & UNIT_RUNTIME)
4232 return u->manager->lookup_paths.runtime_control;
4233
4234 return NULL;
4235 }
4236
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        /* C escaping is applied on top of (possibly already specifier-escaped) 's';
         * the intermediate allocation from the step above is released. */
        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* *buf owns the allocation (or is NULL if none was needed) */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No 'buf': always hand back an allocation the caller must free */
        return ret ?: strdup(s);
}
4276
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;   /* n = bytes used so far, allocated = buffer capacity */
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is escaped per 'flags' and wrapped
         * in double quotes; entries are separated by single spaces. Returns NULL on OOM. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append: [space]"escaped-entry" */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure there's room for the trailing NUL, even for an empty list */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4317
/* Persists a single unit setting: while a transient unit is being created the setting is
 * appended to its transient unit file (tracking which [Section] header was last written);
 * otherwise a 50-<name>.conf drop-in is written to the directory chosen by 'flags'.
 * 'data' is escaped per 'flags' before writing. Returns 0 on success, negative errno on
 * failure; a no-op flags combination returns 0 without writing. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private: -1 = nothing written yet, 0 = [Unit] was last, 1 = private section was last */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p = drop-in directory path, q = full file path */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in so unit_need_daemon_reload() compares against it */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4394
4395 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4396 _cleanup_free_ char *p = NULL;
4397 va_list ap;
4398 int r;
4399
4400 assert(u);
4401 assert(name);
4402 assert(format);
4403
4404 if (UNIT_WRITE_FLAGS_NOOP(flags))
4405 return 0;
4406
4407 va_start(ap, format);
4408 r = vasprintf(&p, format, ap);
4409 va_end(ap);
4410
4411 if (r < 0)
4412 return -ENOMEM;
4413
4414 return unit_write_setting(u, flags, name, p);
4415 }
4416
/* Converts the unit into a transient one: opens a fresh unit file in the transient lookup
 * directory (kept open in u->transient_file so later unit_write_setting() calls append to
 * it), drops any previously loaded fragment/source/drop-in state, and resets the unit to
 * UNIT_STUB so it will be loaded from the new file. Returns 0 on success, -EOPNOTSUPP for
 * unit types that cannot be transient, or a negative errno. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* The transient file becomes the unit's fragment */
        free_and_replace(u->fragment_path, path);

        /* Forget any previously loaded configuration */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4462
4463 static void log_kill(pid_t pid, int sig, void *userdata) {
4464 _cleanup_free_ char *comm = NULL;
4465
4466 (void) get_process_comm(pid, &comm);
4467
4468 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4469 only, like for example systemd's own PAM stub process. */
4470 if (comm && comm[0] == '(')
4471 return;
4472
4473 log_unit_notice(userdata,
4474 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4475 pid,
4476 strna(comm),
4477 signal_to_string(sig));
4478 }
4479
4480 static int operation_to_signal(KillContext *c, KillOperation k) {
4481 assert(c);
4482
4483 switch (k) {
4484
4485 case KILL_TERMINATE:
4486 case KILL_TERMINATE_AND_LOG:
4487 return c->kill_signal;
4488
4489 case KILL_KILL:
4490 return c->final_kill_signal;
4491
4492 case KILL_WATCHDOG:
4493 return c->watchdog_signal;
4494
4495 default:
4496 assert_not_reached("KillOperation unknown");
4497 }
4498 }
4499
/* Kill the processes belonging to this unit, in preparation for shutting the
 * unit down. 'k' selects which signal set to use (terminate/final/watchdog),
 * 'main_pid'/'control_pid' are killed individually first, and — depending on
 * KillMode= — the whole cgroup may be swept afterwards.
 *
 * Returns > 0 if we killed something worth waiting for, 0 otherwise, negative
 * errno on hard failure (OOM). */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SIGHUP is only sent as a courtesy on regular termination, and is
         * pointless if the main signal already is SIGHUP. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed PID for the "loud" operations, and always when the
         * signal is destructive (SIGKILL/SIGABRT). */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves; alien
                         * main PIDs are not our children and won't be reaped by us. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the rest of the cgroup for KillMode=control-group, or for
         * KillMode=mixed on the final (SIGKILL) pass. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        /* -EAGAIN/-ESRCH/-ENOENT simply mean there was nothing
                         * (left) to kill — not worth a warning. */
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set: the previous kill
                                 * pass consumed/modified it. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4617
/* Register that this unit requires the mounts covering 'path' (and, via the
 * prefix table, all parent paths) to be present. Returns 0 on success (also
 * when the path was registered before), negative errno on failure. */
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private copy, so we can simplify it in place. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL; /* ownership of the string passed to the hashmap, don't free it */

        /* Add this unit to the per-manager reverse table, once per path prefix
         * (including the full path itself). */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key now owned by the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4695
4696 int unit_setup_exec_runtime(Unit *u) {
4697 ExecRuntime **rt;
4698 size_t offset;
4699 Unit *other;
4700 Iterator i;
4701 void *v;
4702 int r;
4703
4704 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4705 assert(offset > 0);
4706
4707 /* Check if there already is an ExecRuntime for this unit? */
4708 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4709 if (*rt)
4710 return 0;
4711
4712 /* Try to get it from somebody else */
4713 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4714 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4715 if (r == 1)
4716 return 1;
4717 }
4718
4719 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4720 }
4721
4722 int unit_setup_dynamic_creds(Unit *u) {
4723 ExecContext *ec;
4724 DynamicCreds *dcreds;
4725 size_t offset;
4726
4727 assert(u);
4728
4729 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4730 assert(offset > 0);
4731 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4732
4733 ec = unit_get_exec_context(u);
4734 assert(ec);
4735
4736 if (!ec->dynamic_user)
4737 return 0;
4738
4739 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4740 }
4741
4742 bool unit_type_supported(UnitType t) {
4743 if (_unlikely_(t < 0))
4744 return false;
4745 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4746 return false;
4747
4748 if (!unit_vtable[t]->supported)
4749 return true;
4750
4751 return unit_vtable[t]->supported();
4752 }
4753
/* Log a notice if the directory we are about to mount over is not empty.
 * Best-effort: a non-directory ('-ENOTDIR') or an empty directory are fine,
 * other stat errors are logged and otherwise ignored. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = dir_is_empty(where);
        /* > 0: empty; -ENOTDIR: not a directory at all — nothing to warn about */
        if (r > 0 || r == -ENOTDIR)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where);
}
4775
4776 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4777 _cleanup_free_ char *canonical_where;
4778 int r;
4779
4780 assert(u);
4781 assert(where);
4782
4783 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4784 if (r < 0) {
4785 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4786 return 0;
4787 }
4788
4789 /* We will happily ignore a trailing slash (or any redundant slashes) */
4790 if (path_equal(where, canonical_where))
4791 return 0;
4792
4793 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4794 log_struct(LOG_ERR,
4795 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4796 LOG_UNIT_ID(u),
4797 LOG_UNIT_INVOCATION_ID(u),
4798 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4799 "WHERE=%s", where);
4800
4801 return -ELOOP;
4802 }
4803
4804 bool unit_is_pristine(Unit *u) {
4805 assert(u);
4806
4807 /* Check if the unit already exists or is already around,
4808 * in a number of different ways. Note that to cater for unit
4809 * types such as slice, we are generally fine with units that
4810 * are marked UNIT_LOADED even though nothing was actually
4811 * loaded, as those unit types don't require a file on disk. */
4812
4813 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4814 u->fragment_path ||
4815 u->source_path ||
4816 !strv_isempty(u->dropin_paths) ||
4817 u->job ||
4818 u->merged_into);
4819 }
4820
4821 pid_t unit_control_pid(Unit *u) {
4822 assert(u);
4823
4824 if (UNIT_VTABLE(u)->control_pid)
4825 return UNIT_VTABLE(u)->control_pid(u);
4826
4827 return 0;
4828 }
4829
4830 pid_t unit_main_pid(Unit *u) {
4831 assert(u);
4832
4833 if (UNIT_VTABLE(u)->main_pid)
4834 return UNIT_VTABLE(u)->main_pid(u);
4835
4836 return 0;
4837 }
4838
4839 static void unit_unref_uid_internal(
4840 Unit *u,
4841 uid_t *ref_uid,
4842 bool destroy_now,
4843 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4844
4845 assert(u);
4846 assert(ref_uid);
4847 assert(_manager_unref_uid);
4848
4849 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4850 * gid_t are actually the same time, with the same validity rules.
4851 *
4852 * Drops a reference to UID/GID from a unit. */
4853
4854 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4855 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4856
4857 if (!uid_is_valid(*ref_uid))
4858 return;
4859
4860 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4861 *ref_uid = UID_INVALID;
4862 }
4863
/* Drop the unit's UID reference, optionally destroying the UID's IPC objects now. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4867
/* Drop the unit's GID reference; reuses the UID implementation, since uid_t
 * and gid_t are layout-compatible (checked via assert_cc in the helper). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4871
/* Take a reference on 'uid' for this unit, via the supplied manager hook.
 * Returns 1 if a new reference was taken, 0 if the same UID was already
 * referenced, -EBUSY if a different UID is referenced, or the manager hook's
 * error. */
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4909
/* Take a reference on the given UID for this unit. See unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4913
/* Take a reference on the given GID for this unit; reuses the UID code path
 * (uid_t/gid_t layout compatibility is asserted in the helper). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4917
/* Reference a UID and a GID atomically: either both references are taken, or
 * neither. Invalid UID/GID values are skipped. Returns > 0 if at least one
 * new reference was taken, 0 if nothing changed, negative errno on failure. */
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, to keep
                         * the all-or-nothing guarantee. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
4943
4944 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4945 ExecContext *c;
4946 int r;
4947
4948 assert(u);
4949
4950 c = unit_get_exec_context(u);
4951
4952 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4953 if (r < 0)
4954 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4955
4956 return r;
4957 }
4958
/* Drop both the UID and GID references of this unit. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4965
/* Called when a forked-off process reports back the UID/GID its configured
 * user/group names resolved to. A change signal is emitted on the bus only if
 * a new reference was actually taken. */
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4979
/* Set (or clear, with SD_ID128_NULL) the invocation ID of this unit and keep
 * the manager's by-invocation-id lookup table in sync. On any failure the
 * invocation ID is reset to null rather than rolled back. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old ID from the lookup table first. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Clearing was requested: go straight to the reset path with success. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Wipe both the binary and the string form, so the two never disagree. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
5016
/* Generate a fresh random invocation ID and install it on the unit.
 * Returns 0 on success, negative errno (logged) on failure. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
5033
/* Fill in an ExecParameters structure from manager-wide and unit-specific
 * settings, in preparation for spawning a process for this unit. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        return 0;
}
5056
/* Fork off a helper process and move it into the unit's cgroup.
 * Returns 0 in the child, > 0 in the parent, negative errno on fork failure.
 * '*ret' is always filled in with the child's PID. */
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* Child only, from here on: reset signal dispositions inherited from
         * PID 1 and make sure we die when the manager does. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5087
5088 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5089 assert(u);
5090 assert(d >= 0);
5091 assert(d < _UNIT_DEPENDENCY_MAX);
5092 assert(other);
5093
5094 if (di.origin_mask == 0 && di.destination_mask == 0) {
5095 /* No bit set anymore, let's drop the whole entry */
5096 assert_se(hashmap_remove(u->dependencies[d], other));
5097 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5098 } else
5099 /* Mask was reduced, let's update the entry */
5100 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5101 }
5102
/* Remove from this unit all dependencies whose origin is covered by 'mask',
 * together with their reverse edges on the peer units. */
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* We may delete entries while iterating, which invalidates the
                 * iterator — hence restart the hashmap walk after every
                 * modification until a full pass makes no change. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask doesn't intersect 'mask'. */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The peer may have become garbage-collectable now. */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5156
5157 static int unit_export_invocation_id(Unit *u) {
5158 const char *p;
5159 int r;
5160
5161 assert(u);
5162
5163 if (u->exported_invocation_id)
5164 return 0;
5165
5166 if (sd_id128_is_null(u->invocation_id))
5167 return 0;
5168
5169 p = strjoina("/run/systemd/units/invocation:", u->id);
5170 r = symlink_atomic(u->invocation_id_string, p);
5171 if (r < 0)
5172 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5173
5174 u->exported_invocation_id = true;
5175 return 0;
5176 }
5177
5178 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5179 const char *p;
5180 char buf[2];
5181 int r;
5182
5183 assert(u);
5184 assert(c);
5185
5186 if (u->exported_log_level_max)
5187 return 0;
5188
5189 if (c->log_level_max < 0)
5190 return 0;
5191
5192 assert(c->log_level_max <= 7);
5193
5194 buf[0] = '0' + c->log_level_max;
5195 buf[1] = 0;
5196
5197 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5198 r = symlink_atomic(buf, p);
5199 if (r < 0)
5200 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5201
5202 u->exported_log_level_max = true;
5203 return 0;
5204 }
5205
5206 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5207 _cleanup_close_ int fd = -1;
5208 struct iovec *iovec;
5209 const char *p;
5210 char *pattern;
5211 le64_t *sizes;
5212 ssize_t n;
5213 size_t i;
5214 int r;
5215
5216 if (u->exported_log_extra_fields)
5217 return 0;
5218
5219 if (c->n_log_extra_fields <= 0)
5220 return 0;
5221
5222 sizes = newa(le64_t, c->n_log_extra_fields);
5223 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5224
5225 for (i = 0; i < c->n_log_extra_fields; i++) {
5226 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5227
5228 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5229 iovec[i*2+1] = c->log_extra_fields[i];
5230 }
5231
5232 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5233 pattern = strjoina(p, ".XXXXXX");
5234
5235 fd = mkostemp_safe(pattern);
5236 if (fd < 0)
5237 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5238
5239 n = writev(fd, iovec, c->n_log_extra_fields*2);
5240 if (n < 0) {
5241 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5242 goto fail;
5243 }
5244
5245 (void) fchmod(fd, 0644);
5246
5247 if (rename(pattern, p) < 0) {
5248 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5249 goto fail;
5250 }
5251
5252 u->exported_log_extra_fields = true;
5253 return 0;
5254
5255 fail:
5256 (void) unlink(pattern);
5257 return r;
5258 }
5259
5260 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5261 _cleanup_free_ char *buf = NULL;
5262 const char *p;
5263 int r;
5264
5265 assert(u);
5266 assert(c);
5267
5268 if (u->exported_log_rate_limit_interval)
5269 return 0;
5270
5271 if (c->log_rate_limit_interval_usec == 0)
5272 return 0;
5273
5274 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5275
5276 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5277 return log_oom();
5278
5279 r = symlink_atomic(buf, p);
5280 if (r < 0)
5281 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5282
5283 u->exported_log_rate_limit_interval = true;
5284 return 0;
5285 }
5286
5287 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5288 _cleanup_free_ char *buf = NULL;
5289 const char *p;
5290 int r;
5291
5292 assert(u);
5293 assert(c);
5294
5295 if (u->exported_log_rate_limit_burst)
5296 return 0;
5297
5298 if (c->log_rate_limit_burst == 0)
5299 return 0;
5300
5301 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5302
5303 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5304 return log_oom();
5305
5306 r = symlink_atomic(buf, p);
5307 if (r < 0)
5308 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5309
5310 u->exported_log_rate_limit_burst = true;
5311 return 0;
5312 }
5313
/* Export the unit's journald-relevant properties to /run/systemd/units/.
 * Only done for the system manager, and skipped in test runs. Each exporter
 * is best-effort. */
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_rate_limit_interval(u, c);
                (void) unit_export_log_rate_limit_burst(u, c);
        }
}
5351
5352 void unit_unlink_state_files(Unit *u) {
5353 const char *p;
5354
5355 assert(u);
5356
5357 if (!u->id)
5358 return;
5359
5360 if (!MANAGER_IS_SYSTEM(u->manager))
5361 return;
5362
5363 /* Undoes the effect of unit_export_state() */
5364
5365 if (u->exported_invocation_id) {
5366 p = strjoina("/run/systemd/units/invocation:", u->id);
5367 (void) unlink(p);
5368
5369 u->exported_invocation_id = false;
5370 }
5371
5372 if (u->exported_log_level_max) {
5373 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5374 (void) unlink(p);
5375
5376 u->exported_log_level_max = false;
5377 }
5378
5379 if (u->exported_log_extra_fields) {
5380 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5381 (void) unlink(p);
5382
5383 u->exported_log_extra_fields = false;
5384 }
5385
5386 if (u->exported_log_rate_limit_interval) {
5387 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5388 (void) unlink(p);
5389
5390 u->exported_log_rate_limit_interval = false;
5391 }
5392
5393 if (u->exported_log_rate_limit_burst) {
5394 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5395 (void) unlink(p);
5396
5397 u->exported_log_rate_limit_burst = false;
5398 }
5399 }
5400
/* Prepare everything needed before forking off a process for this unit:
 * cgroup realization, accounting reset, state-file export, exec runtime and
 * dynamic credentials. Returns 0 on success, negative errno on failure. */
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork of a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset accounting lazily, on the first exec after it was requested. */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5428
/* cg_kill_log_func_t callback: warn about a process found lingering in the
 * unit's cgroup at startup. 'userdata' is the Unit. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
                return;

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));
}
5442
/* Walk the unit's cgroup and log a warning for every process already in it.
 * Uses cg_kill_recursive() with signal 0, i.e. it only enumerates, it does
 * not actually kill anything. */
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5453
5454 bool unit_needs_console(Unit *u) {
5455 ExecContext *ec;
5456 UnitActiveState state;
5457
5458 assert(u);
5459
5460 state = unit_active_state(u);
5461
5462 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5463 return false;
5464
5465 if (UNIT_VTABLE(u)->needs_console)
5466 return UNIT_VTABLE(u)->needs_console(u);
5467
5468 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5469 ec = unit_get_exec_context(u);
5470 if (!ec)
5471 return false;
5472
5473 return exec_context_may_touch_console(ec);
5474 }
5475
5476 const char *unit_label_path(Unit *u) {
5477 const char *p;
5478
5479 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5480 * when validating access checks. */
5481
5482 p = u->source_path ?: u->fragment_path;
5483 if (!p)
5484 return NULL;
5485
5486 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5487 if (path_equal(p, "/dev/null"))
5488 return NULL;
5489
5490 return p;
5491 }
5492
/* Check whether the specified PID may be attached to a unit: it must be a
 * valid PID, not PID 1 or ourselves, and not a kernel thread. Returns 0 if
 * attachable, a negative errno with 'error' filled in otherwise. */
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5520
/* String representations of CollectMode, used by the generated
 * collect_mode_to_string()/collect_mode_from_string() lookup functions. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);