]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
shared/dropin: rename function for clarity
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bpf-firewall.h"
16 #include "bus-common-errors.h"
17 #include "bus-util.h"
18 #include "cgroup-util.h"
19 #include "dbus-unit.h"
20 #include "dbus.h"
21 #include "dropin.h"
22 #include "escape.h"
23 #include "execute.h"
24 #include "fd-util.h"
25 #include "fileio-label.h"
26 #include "fileio.h"
27 #include "format-util.h"
28 #include "fs-util.h"
29 #include "id128-util.h"
30 #include "io-util.h"
31 #include "load-dropin.h"
32 #include "load-fragment.h"
33 #include "log.h"
34 #include "macro.h"
35 #include "missing.h"
36 #include "mkdir.h"
37 #include "parse-util.h"
38 #include "path-util.h"
39 #include "process-util.h"
40 #include "serialize.h"
41 #include "set.h"
42 #include "signal-util.h"
43 #include "sparse-endian.h"
44 #include "special.h"
45 #include "specifier.h"
46 #include "stat-util.h"
47 #include "stdio-util.h"
48 #include "string-table.h"
49 #include "string-util.h"
50 #include "strv.h"
51 #include "terminal-util.h"
52 #include "tmpfile-util.h"
53 #include "umask-util.h"
54 #include "unit-name.h"
55 #include "unit.h"
56 #include "user-util.h"
57 #include "virt.h"
58
59 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
60 [UNIT_SERVICE] = &service_vtable,
61 [UNIT_SOCKET] = &socket_vtable,
62 [UNIT_TARGET] = &target_vtable,
63 [UNIT_DEVICE] = &device_vtable,
64 [UNIT_MOUNT] = &mount_vtable,
65 [UNIT_AUTOMOUNT] = &automount_vtable,
66 [UNIT_SWAP] = &swap_vtable,
67 [UNIT_TIMER] = &timer_vtable,
68 [UNIT_PATH] = &path_vtable,
69 [UNIT_SLICE] = &slice_vtable,
70 [UNIT_SCOPE] = &scope_vtable,
71 };
72
73 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
74
75 Unit *unit_new(Manager *m, size_t size) {
76 Unit *u;
77
78 assert(m);
79 assert(size >= sizeof(Unit));
80
81 u = malloc0(size);
82 if (!u)
83 return NULL;
84
85 u->names = set_new(&string_hash_ops);
86 if (!u->names)
87 return mfree(u);
88
89 u->manager = m;
90 u->type = _UNIT_TYPE_INVALID;
91 u->default_dependencies = true;
92 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
93 u->unit_file_preset = -1;
94 u->on_failure_job_mode = JOB_REPLACE;
95 u->cgroup_control_inotify_wd = -1;
96 u->cgroup_memory_inotify_wd = -1;
97 u->job_timeout = USEC_INFINITY;
98 u->job_running_timeout = USEC_INFINITY;
99 u->ref_uid = UID_INVALID;
100 u->ref_gid = GID_INVALID;
101 u->cpu_usage_last = NSEC_INFINITY;
102 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
103 u->failure_action_exit_status = u->success_action_exit_status = -1;
104
105 u->ip_accounting_ingress_map_fd = -1;
106 u->ip_accounting_egress_map_fd = -1;
107 u->ipv4_allow_map_fd = -1;
108 u->ipv6_allow_map_fd = -1;
109 u->ipv4_deny_map_fd = -1;
110 u->ipv6_deny_map_fd = -1;
111
112 u->last_section_private = -1;
113
114 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
115 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
116
117 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
118 u->io_accounting_last[i] = UINT64_MAX;
119
120 return u;
121 }
122
123 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
124 _cleanup_(unit_freep) Unit *u = NULL;
125 int r;
126
127 u = unit_new(m, size);
128 if (!u)
129 return -ENOMEM;
130
131 r = unit_add_name(u, name);
132 if (r < 0)
133 return r;
134
135 *ret = TAKE_PTR(u);
136
137 return r;
138 }
139
140 bool unit_has_name(const Unit *u, const char *name) {
141 assert(u);
142 assert(name);
143
144 return set_contains(u->names, (char*) name);
145 }
146
147 static void unit_init(Unit *u) {
148 CGroupContext *cc;
149 ExecContext *ec;
150 KillContext *kc;
151
152 assert(u);
153 assert(u->manager);
154 assert(u->type >= 0);
155
156 cc = unit_get_cgroup_context(u);
157 if (cc) {
158 cgroup_context_init(cc);
159
160 /* Copy in the manager defaults into the cgroup
161 * context, _before_ the rest of the settings have
162 * been initialized */
163
164 cc->cpu_accounting = u->manager->default_cpu_accounting;
165 cc->io_accounting = u->manager->default_io_accounting;
166 cc->blockio_accounting = u->manager->default_blockio_accounting;
167 cc->memory_accounting = u->manager->default_memory_accounting;
168 cc->tasks_accounting = u->manager->default_tasks_accounting;
169 cc->ip_accounting = u->manager->default_ip_accounting;
170
171 if (u->type != UNIT_SLICE)
172 cc->tasks_max = u->manager->default_tasks_max;
173 }
174
175 ec = unit_get_exec_context(u);
176 if (ec) {
177 exec_context_init(ec);
178
179 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
180 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
181 }
182
183 kc = unit_get_kill_context(u);
184 if (kc)
185 kill_context_init(kc);
186
187 if (UNIT_VTABLE(u)->init)
188 UNIT_VTABLE(u)->init(u);
189 }
190
191 int unit_add_name(Unit *u, const char *text) {
192 _cleanup_free_ char *s = NULL, *i = NULL;
193 UnitType t;
194 int r;
195
196 assert(u);
197 assert(text);
198
199 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
200
201 if (!u->instance)
202 return -EINVAL;
203
204 r = unit_name_replace_instance(text, u->instance, &s);
205 if (r < 0)
206 return r;
207 } else {
208 s = strdup(text);
209 if (!s)
210 return -ENOMEM;
211 }
212
213 if (set_contains(u->names, s))
214 return 0;
215 if (hashmap_contains(u->manager->units, s))
216 return -EEXIST;
217
218 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
219 return -EINVAL;
220
221 t = unit_name_to_type(s);
222 if (t < 0)
223 return -EINVAL;
224
225 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
226 return -EINVAL;
227
228 r = unit_name_to_instance(s, &i);
229 if (r < 0)
230 return r;
231
232 if (i && !unit_type_may_template(t))
233 return -EINVAL;
234
235 /* Ensure that this unit is either instanced or not instanced,
236 * but not both. Note that we do allow names with different
237 * instance names however! */
238 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
239 return -EINVAL;
240
241 if (!unit_type_may_alias(t) && !set_isempty(u->names))
242 return -EEXIST;
243
244 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
245 return -E2BIG;
246
247 r = set_put(u->names, s);
248 if (r < 0)
249 return r;
250 assert(r > 0);
251
252 r = hashmap_put(u->manager->units, s, u);
253 if (r < 0) {
254 (void) set_remove(u->names, s);
255 return r;
256 }
257
258 if (u->type == _UNIT_TYPE_INVALID) {
259 u->type = t;
260 u->id = s;
261 u->instance = TAKE_PTR(i);
262
263 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
264
265 unit_init(u);
266 }
267
268 s = NULL;
269
270 unit_add_to_dbus_queue(u);
271 return 0;
272 }
273
274 int unit_choose_id(Unit *u, const char *name) {
275 _cleanup_free_ char *t = NULL;
276 char *s, *i;
277 int r;
278
279 assert(u);
280 assert(name);
281
282 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
283
284 if (!u->instance)
285 return -EINVAL;
286
287 r = unit_name_replace_instance(name, u->instance, &t);
288 if (r < 0)
289 return r;
290
291 name = t;
292 }
293
294 /* Selects one of the names of this unit as the id */
295 s = set_get(u->names, (char*) name);
296 if (!s)
297 return -ENOENT;
298
299 /* Determine the new instance from the new id */
300 r = unit_name_to_instance(s, &i);
301 if (r < 0)
302 return r;
303
304 u->id = s;
305
306 free(u->instance);
307 u->instance = i;
308
309 unit_add_to_dbus_queue(u);
310
311 return 0;
312 }
313
314 int unit_set_description(Unit *u, const char *description) {
315 int r;
316
317 assert(u);
318
319 r = free_and_strdup(&u->description, empty_to_null(description));
320 if (r < 0)
321 return r;
322 if (r > 0)
323 unit_add_to_dbus_queue(u);
324
325 return 0;
326 }
327
328 bool unit_may_gc(Unit *u) {
329 UnitActiveState state;
330 int r;
331
332 assert(u);
333
334 /* Checks whether the unit is ready to be unloaded for garbage collection.
335 * Returns true when the unit may be collected, and false if there's some
336 * reason to keep it loaded.
337 *
338 * References from other units are *not* checked here. Instead, this is done
339 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
340 */
341
342 if (u->job)
343 return false;
344
345 if (u->nop_job)
346 return false;
347
348 state = unit_active_state(u);
349
350 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
351 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
352 UNIT_VTABLE(u)->release_resources)
353 UNIT_VTABLE(u)->release_resources(u);
354
355 if (u->perpetual)
356 return false;
357
358 if (sd_bus_track_count(u->bus_track) > 0)
359 return false;
360
361 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
362 switch (u->collect_mode) {
363
364 case COLLECT_INACTIVE:
365 if (state != UNIT_INACTIVE)
366 return false;
367
368 break;
369
370 case COLLECT_INACTIVE_OR_FAILED:
371 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
372 return false;
373
374 break;
375
376 default:
377 assert_not_reached("Unknown garbage collection mode");
378 }
379
380 if (u->cgroup_path) {
381 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
382 * around. Units with active processes should never be collected. */
383
384 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
385 if (r < 0)
386 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
387 if (r <= 0)
388 return false;
389 }
390
391 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
392 return false;
393
394 return true;
395 }
396
397 void unit_add_to_load_queue(Unit *u) {
398 assert(u);
399 assert(u->type != _UNIT_TYPE_INVALID);
400
401 if (u->load_state != UNIT_STUB || u->in_load_queue)
402 return;
403
404 LIST_PREPEND(load_queue, u->manager->load_queue, u);
405 u->in_load_queue = true;
406 }
407
408 void unit_add_to_cleanup_queue(Unit *u) {
409 assert(u);
410
411 if (u->in_cleanup_queue)
412 return;
413
414 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
415 u->in_cleanup_queue = true;
416 }
417
418 void unit_add_to_gc_queue(Unit *u) {
419 assert(u);
420
421 if (u->in_gc_queue || u->in_cleanup_queue)
422 return;
423
424 if (!unit_may_gc(u))
425 return;
426
427 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
428 u->in_gc_queue = true;
429 }
430
431 void unit_add_to_dbus_queue(Unit *u) {
432 assert(u);
433 assert(u->type != _UNIT_TYPE_INVALID);
434
435 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
436 return;
437
438 /* Shortcut things if nobody cares */
439 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
440 sd_bus_track_count(u->bus_track) <= 0 &&
441 set_isempty(u->manager->private_buses)) {
442 u->sent_dbus_new_signal = true;
443 return;
444 }
445
446 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
447 u->in_dbus_queue = true;
448 }
449
450 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
451 assert(u);
452
453 if (u->in_stop_when_unneeded_queue)
454 return;
455
456 if (!u->stop_when_unneeded)
457 return;
458
459 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
460 return;
461
462 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
463 u->in_stop_when_unneeded_queue = true;
464 }
465
466 static void bidi_set_free(Unit *u, Hashmap *h) {
467 Unit *other;
468 Iterator i;
469 void *v;
470
471 assert(u);
472
473 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
474
475 HASHMAP_FOREACH_KEY(v, other, h, i) {
476 UnitDependency d;
477
478 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
479 hashmap_remove(other->dependencies[d], u);
480
481 unit_add_to_gc_queue(other);
482 }
483
484 hashmap_free(h);
485 }
486
487 static void unit_remove_transient(Unit *u) {
488 char **i;
489
490 assert(u);
491
492 if (!u->transient)
493 return;
494
495 if (u->fragment_path)
496 (void) unlink(u->fragment_path);
497
498 STRV_FOREACH(i, u->dropin_paths) {
499 _cleanup_free_ char *p = NULL, *pp = NULL;
500
501 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
502 if (!p)
503 continue;
504
505 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
506 if (!pp)
507 continue;
508
509 /* Only drop transient drop-ins */
510 if (!path_equal(u->manager->lookup_paths.transient, pp))
511 continue;
512
513 (void) unlink(*i);
514 (void) rmdir(p);
515 }
516 }
517
518 static void unit_free_requires_mounts_for(Unit *u) {
519 assert(u);
520
521 for (;;) {
522 _cleanup_free_ char *path;
523
524 path = hashmap_steal_first_key(u->requires_mounts_for);
525 if (!path)
526 break;
527 else {
528 char s[strlen(path) + 1];
529
530 PATH_FOREACH_PREFIX_MORE(s, path) {
531 char *y;
532 Set *x;
533
534 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
535 if (!x)
536 continue;
537
538 (void) set_remove(x, u);
539
540 if (set_isempty(x)) {
541 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
542 free(y);
543 set_free(x);
544 }
545 }
546 }
547 }
548
549 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
550 }
551
552 static void unit_done(Unit *u) {
553 ExecContext *ec;
554 CGroupContext *cc;
555
556 assert(u);
557
558 if (u->type < 0)
559 return;
560
561 if (UNIT_VTABLE(u)->done)
562 UNIT_VTABLE(u)->done(u);
563
564 ec = unit_get_exec_context(u);
565 if (ec)
566 exec_context_done(ec);
567
568 cc = unit_get_cgroup_context(u);
569 if (cc)
570 cgroup_context_done(cc);
571 }
572
573 void unit_free(Unit *u) {
574 UnitDependency d;
575 Iterator i;
576 char *t;
577
578 if (!u)
579 return;
580
581 if (UNIT_ISSET(u->slice)) {
582 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
583 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
584
585 /* And make sure the parent is realized again, updating cgroup memberships */
586 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
587 }
588
589 u->transient_file = safe_fclose(u->transient_file);
590
591 if (!MANAGER_IS_RELOADING(u->manager))
592 unit_remove_transient(u);
593
594 bus_unit_send_removed_signal(u);
595
596 unit_done(u);
597
598 unit_dequeue_rewatch_pids(u);
599
600 sd_bus_slot_unref(u->match_bus_slot);
601 sd_bus_track_unref(u->bus_track);
602 u->deserialized_refs = strv_free(u->deserialized_refs);
603
604 unit_free_requires_mounts_for(u);
605
606 SET_FOREACH(t, u->names, i)
607 hashmap_remove_value(u->manager->units, t, u);
608
609 if (!sd_id128_is_null(u->invocation_id))
610 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
611
612 if (u->job) {
613 Job *j = u->job;
614 job_uninstall(j);
615 job_free(j);
616 }
617
618 if (u->nop_job) {
619 Job *j = u->nop_job;
620 job_uninstall(j);
621 job_free(j);
622 }
623
624 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
625 bidi_set_free(u, u->dependencies[d]);
626
627 if (u->on_console)
628 manager_unref_console(u->manager);
629
630 unit_release_cgroup(u);
631
632 if (!MANAGER_IS_RELOADING(u->manager))
633 unit_unlink_state_files(u);
634
635 unit_unref_uid_gid(u, false);
636
637 (void) manager_update_failed_units(u->manager, u, false);
638 set_remove(u->manager->startup_units, u);
639
640 unit_unwatch_all_pids(u);
641
642 unit_ref_unset(&u->slice);
643 while (u->refs_by_target)
644 unit_ref_unset(u->refs_by_target);
645
646 if (u->type != _UNIT_TYPE_INVALID)
647 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
648
649 if (u->in_load_queue)
650 LIST_REMOVE(load_queue, u->manager->load_queue, u);
651
652 if (u->in_dbus_queue)
653 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
654
655 if (u->in_gc_queue)
656 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
657
658 if (u->in_cgroup_realize_queue)
659 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
660
661 if (u->in_cgroup_empty_queue)
662 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
663
664 if (u->in_cleanup_queue)
665 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
666
667 if (u->in_target_deps_queue)
668 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
669
670 if (u->in_stop_when_unneeded_queue)
671 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
672
673 safe_close(u->ip_accounting_ingress_map_fd);
674 safe_close(u->ip_accounting_egress_map_fd);
675
676 safe_close(u->ipv4_allow_map_fd);
677 safe_close(u->ipv6_allow_map_fd);
678 safe_close(u->ipv4_deny_map_fd);
679 safe_close(u->ipv6_deny_map_fd);
680
681 bpf_program_unref(u->ip_bpf_ingress);
682 bpf_program_unref(u->ip_bpf_ingress_installed);
683 bpf_program_unref(u->ip_bpf_egress);
684 bpf_program_unref(u->ip_bpf_egress_installed);
685
686 set_free(u->ip_bpf_custom_ingress);
687 set_free(u->ip_bpf_custom_egress);
688 set_free(u->ip_bpf_custom_ingress_installed);
689 set_free(u->ip_bpf_custom_egress_installed);
690
691 bpf_program_unref(u->bpf_device_control_installed);
692
693 condition_free_list(u->conditions);
694 condition_free_list(u->asserts);
695
696 free(u->description);
697 strv_free(u->documentation);
698 free(u->fragment_path);
699 free(u->source_path);
700 strv_free(u->dropin_paths);
701 free(u->instance);
702
703 free(u->job_timeout_reboot_arg);
704
705 set_free_free(u->names);
706
707 free(u->reboot_arg);
708
709 free(u);
710 }
711
712 UnitActiveState unit_active_state(Unit *u) {
713 assert(u);
714
715 if (u->load_state == UNIT_MERGED)
716 return unit_active_state(unit_follow_merge(u));
717
718 /* After a reload it might happen that a unit is not correctly
719 * loaded but still has a process around. That's why we won't
720 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
721
722 return UNIT_VTABLE(u)->active_state(u);
723 }
724
725 const char* unit_sub_state_to_string(Unit *u) {
726 assert(u);
727
728 return UNIT_VTABLE(u)->sub_state_to_string(u);
729 }
730
731 static int set_complete_move(Set **s, Set **other) {
732 assert(s);
733 assert(other);
734
735 if (!other)
736 return 0;
737
738 if (*s)
739 return set_move(*s, *other);
740 else
741 *s = TAKE_PTR(*other);
742
743 return 0;
744 }
745
746 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
747 assert(s);
748 assert(other);
749
750 if (!*other)
751 return 0;
752
753 if (*s)
754 return hashmap_move(*s, *other);
755 else
756 *s = TAKE_PTR(*other);
757
758 return 0;
759 }
760
761 static int merge_names(Unit *u, Unit *other) {
762 char *t;
763 Iterator i;
764 int r;
765
766 assert(u);
767 assert(other);
768
769 r = set_complete_move(&u->names, &other->names);
770 if (r < 0)
771 return r;
772
773 set_free_free(other->names);
774 other->names = NULL;
775 other->id = NULL;
776
777 SET_FOREACH(t, u->names, i)
778 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
779
780 return 0;
781 }
782
783 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
784 unsigned n_reserve;
785
786 assert(u);
787 assert(other);
788 assert(d < _UNIT_DEPENDENCY_MAX);
789
790 /*
791 * If u does not have this dependency set allocated, there is no need
792 * to reserve anything. In that case other's set will be transferred
793 * as a whole to u by complete_move().
794 */
795 if (!u->dependencies[d])
796 return 0;
797
798 /* merge_dependencies() will skip a u-on-u dependency */
799 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
800
801 return hashmap_reserve(u->dependencies[d], n_reserve);
802 }
803
804 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
805 Iterator i;
806 Unit *back;
807 void *v;
808 int r;
809
810 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
811
812 assert(u);
813 assert(other);
814 assert(d < _UNIT_DEPENDENCY_MAX);
815
816 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
817 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
818 UnitDependency k;
819
820 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
821 * pointers back, and let's fix them up, to instead point to 'u'. */
822
823 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
824 if (back == u) {
825 /* Do not add dependencies between u and itself. */
826 if (hashmap_remove(back->dependencies[k], other))
827 maybe_warn_about_dependency(u, other_id, k);
828 } else {
829 UnitDependencyInfo di_u, di_other, di_merged;
830
831 /* Let's drop this dependency between "back" and "other", and let's create it between
832 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
833 * and any such dependency which might already exist */
834
835 di_other.data = hashmap_get(back->dependencies[k], other);
836 if (!di_other.data)
837 continue; /* dependency isn't set, let's try the next one */
838
839 di_u.data = hashmap_get(back->dependencies[k], u);
840
841 di_merged = (UnitDependencyInfo) {
842 .origin_mask = di_u.origin_mask | di_other.origin_mask,
843 .destination_mask = di_u.destination_mask | di_other.destination_mask,
844 };
845
846 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
847 if (r < 0)
848 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
849 assert(r >= 0);
850
851 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
852 }
853 }
854
855 }
856
857 /* Also do not move dependencies on u to itself */
858 back = hashmap_remove(other->dependencies[d], u);
859 if (back)
860 maybe_warn_about_dependency(u, other_id, d);
861
862 /* The move cannot fail. The caller must have performed a reservation. */
863 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
864
865 other->dependencies[d] = hashmap_free(other->dependencies[d]);
866 }
867
868 int unit_merge(Unit *u, Unit *other) {
869 UnitDependency d;
870 const char *other_id = NULL;
871 int r;
872
873 assert(u);
874 assert(other);
875 assert(u->manager == other->manager);
876 assert(u->type != _UNIT_TYPE_INVALID);
877
878 other = unit_follow_merge(other);
879
880 if (other == u)
881 return 0;
882
883 if (u->type != other->type)
884 return -EINVAL;
885
886 if (!u->instance != !other->instance)
887 return -EINVAL;
888
889 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
890 return -EEXIST;
891
892 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
893 return -EEXIST;
894
895 if (other->job)
896 return -EEXIST;
897
898 if (other->nop_job)
899 return -EEXIST;
900
901 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
902 return -EEXIST;
903
904 if (other->id)
905 other_id = strdupa(other->id);
906
907 /* Make reservations to ensure merge_dependencies() won't fail */
908 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
909 r = reserve_dependencies(u, other, d);
910 /*
911 * We don't rollback reservations if we fail. We don't have
912 * a way to undo reservations. A reservation is not a leak.
913 */
914 if (r < 0)
915 return r;
916 }
917
918 /* Merge names */
919 r = merge_names(u, other);
920 if (r < 0)
921 return r;
922
923 /* Redirect all references */
924 while (other->refs_by_target)
925 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
926
927 /* Merge dependencies */
928 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
929 merge_dependencies(u, other, other_id, d);
930
931 other->load_state = UNIT_MERGED;
932 other->merged_into = u;
933
934 /* If there is still some data attached to the other node, we
935 * don't need it anymore, and can free it. */
936 if (other->load_state != UNIT_STUB)
937 if (UNIT_VTABLE(other)->done)
938 UNIT_VTABLE(other)->done(other);
939
940 unit_add_to_dbus_queue(u);
941 unit_add_to_cleanup_queue(other);
942
943 return 0;
944 }
945
946 int unit_merge_by_name(Unit *u, const char *name) {
947 _cleanup_free_ char *s = NULL;
948 Unit *other;
949 int r;
950
951 assert(u);
952 assert(name);
953
954 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
955 if (!u->instance)
956 return -EINVAL;
957
958 r = unit_name_replace_instance(name, u->instance, &s);
959 if (r < 0)
960 return r;
961
962 name = s;
963 }
964
965 other = manager_get_unit(u->manager, name);
966 if (other)
967 return unit_merge(u, other);
968
969 return unit_add_name(u, name);
970 }
971
972 Unit* unit_follow_merge(Unit *u) {
973 assert(u);
974
975 while (u->load_state == UNIT_MERGED)
976 assert_se(u = u->merged_into);
977
978 return u;
979 }
980
981 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
982 ExecDirectoryType dt;
983 char **dp;
984 int r;
985
986 assert(u);
987 assert(c);
988
989 if (c->working_directory && !c->working_directory_missing_ok) {
990 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
991 if (r < 0)
992 return r;
993 }
994
995 if (c->root_directory) {
996 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
997 if (r < 0)
998 return r;
999 }
1000
1001 if (c->root_image) {
1002 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1003 if (r < 0)
1004 return r;
1005 }
1006
1007 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1008 if (!u->manager->prefix[dt])
1009 continue;
1010
1011 STRV_FOREACH(dp, c->directories[dt].paths) {
1012 _cleanup_free_ char *p;
1013
1014 p = path_join(u->manager->prefix[dt], *dp);
1015 if (!p)
1016 return -ENOMEM;
1017
1018 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1019 if (r < 0)
1020 return r;
1021 }
1022 }
1023
1024 if (!MANAGER_IS_SYSTEM(u->manager))
1025 return 0;
1026
1027 if (c->private_tmp) {
1028 const char *p;
1029
1030 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1031 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1032 if (r < 0)
1033 return r;
1034 }
1035
1036 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1037 if (r < 0)
1038 return r;
1039 }
1040
1041 if (!IN_SET(c->std_output,
1042 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1043 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1044 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1045 !IN_SET(c->std_error,
1046 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1047 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1048 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1049 return 0;
1050
1051 /* If syslog or kernel logging is requested, make sure our own
1052 * logging daemon is run first. */
1053
1054 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1055 if (r < 0)
1056 return r;
1057
1058 return 0;
1059 }
1060
1061 const char *unit_description(Unit *u) {
1062 assert(u);
1063
1064 if (u->description)
1065 return u->description;
1066
1067 return strna(u->id);
1068 }
1069
1070 const char *unit_status_string(Unit *u) {
1071 assert(u);
1072
1073 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1074 return u->id;
1075
1076 return unit_description(u);
1077 }
1078
1079 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1080 const struct {
1081 UnitDependencyMask mask;
1082 const char *name;
1083 } table[] = {
1084 { UNIT_DEPENDENCY_FILE, "file" },
1085 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1086 { UNIT_DEPENDENCY_DEFAULT, "default" },
1087 { UNIT_DEPENDENCY_UDEV, "udev" },
1088 { UNIT_DEPENDENCY_PATH, "path" },
1089 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1090 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1091 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1092 };
1093 size_t i;
1094
1095 assert(f);
1096 assert(kind);
1097 assert(space);
1098
1099 for (i = 0; i < ELEMENTSOF(table); i++) {
1100
1101 if (mask == 0)
1102 break;
1103
1104 if (FLAGS_SET(mask, table[i].mask)) {
1105 if (*space)
1106 fputc(' ', f);
1107 else
1108 *space = true;
1109
1110 fputs(kind, f);
1111 fputs("-", f);
1112 fputs(table[i].name, f);
1113
1114 mask &= ~table[i].mask;
1115 }
1116 }
1117
1118 assert(mask == 0);
1119 }
1120
1121 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1122 char *t, **j;
1123 UnitDependency d;
1124 Iterator i;
1125 const char *prefix2;
1126 char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1127 Unit *following;
1128 _cleanup_set_free_ Set *following_set = NULL;
1129 const char *n;
1130 CGroupMask m;
1131 int r;
1132
1133 assert(u);
1134 assert(u->type >= 0);
1135
1136 prefix = strempty(prefix);
1137 prefix2 = strjoina(prefix, "\t");
1138
1139 fprintf(f,
1140 "%s-> Unit %s:\n",
1141 prefix, u->id);
1142
1143 SET_FOREACH(t, u->names, i)
1144 if (!streq(t, u->id))
1145 fprintf(f, "%s\tAlias: %s\n", prefix, t);
1146
1147 fprintf(f,
1148 "%s\tDescription: %s\n"
1149 "%s\tInstance: %s\n"
1150 "%s\tUnit Load State: %s\n"
1151 "%s\tUnit Active State: %s\n"
1152 "%s\tState Change Timestamp: %s\n"
1153 "%s\tInactive Exit Timestamp: %s\n"
1154 "%s\tActive Enter Timestamp: %s\n"
1155 "%s\tActive Exit Timestamp: %s\n"
1156 "%s\tInactive Enter Timestamp: %s\n"
1157 "%s\tMay GC: %s\n"
1158 "%s\tNeed Daemon Reload: %s\n"
1159 "%s\tTransient: %s\n"
1160 "%s\tPerpetual: %s\n"
1161 "%s\tGarbage Collection Mode: %s\n"
1162 "%s\tSlice: %s\n"
1163 "%s\tCGroup: %s\n"
1164 "%s\tCGroup realized: %s\n",
1165 prefix, unit_description(u),
1166 prefix, strna(u->instance),
1167 prefix, unit_load_state_to_string(u->load_state),
1168 prefix, unit_active_state_to_string(unit_active_state(u)),
1169 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1170 prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1171 prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1172 prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1173 prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1174 prefix, yes_no(unit_may_gc(u)),
1175 prefix, yes_no(unit_need_daemon_reload(u)),
1176 prefix, yes_no(u->transient),
1177 prefix, yes_no(u->perpetual),
1178 prefix, collect_mode_to_string(u->collect_mode),
1179 prefix, strna(unit_slice_name(u)),
1180 prefix, strna(u->cgroup_path),
1181 prefix, yes_no(u->cgroup_realized));
1182
1183 if (u->cgroup_realized_mask != 0) {
1184 _cleanup_free_ char *s = NULL;
1185 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1186 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1187 }
1188
1189 if (u->cgroup_enabled_mask != 0) {
1190 _cleanup_free_ char *s = NULL;
1191 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1192 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1193 }
1194
1195 m = unit_get_own_mask(u);
1196 if (m != 0) {
1197 _cleanup_free_ char *s = NULL;
1198 (void) cg_mask_to_string(m, &s);
1199 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1200 }
1201
1202 m = unit_get_members_mask(u);
1203 if (m != 0) {
1204 _cleanup_free_ char *s = NULL;
1205 (void) cg_mask_to_string(m, &s);
1206 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1207 }
1208
1209 m = unit_get_delegate_mask(u);
1210 if (m != 0) {
1211 _cleanup_free_ char *s = NULL;
1212 (void) cg_mask_to_string(m, &s);
1213 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1214 }
1215
1216 if (!sd_id128_is_null(u->invocation_id))
1217 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1218 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1219
1220 STRV_FOREACH(j, u->documentation)
1221 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1222
1223 following = unit_following(u);
1224 if (following)
1225 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1226
1227 r = unit_following_set(u, &following_set);
1228 if (r >= 0) {
1229 Unit *other;
1230
1231 SET_FOREACH(other, following_set, i)
1232 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1233 }
1234
1235 if (u->fragment_path)
1236 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1237
1238 if (u->source_path)
1239 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1240
1241 STRV_FOREACH(j, u->dropin_paths)
1242 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1243
1244 if (u->failure_action != EMERGENCY_ACTION_NONE)
1245 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1246 if (u->failure_action_exit_status >= 0)
1247 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1248 if (u->success_action != EMERGENCY_ACTION_NONE)
1249 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1250 if (u->success_action_exit_status >= 0)
1251 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1252
1253 if (u->job_timeout != USEC_INFINITY)
1254 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1255
1256 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1257 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1258
1259 if (u->job_timeout_reboot_arg)
1260 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1261
1262 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1263 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1264
1265 if (dual_timestamp_is_set(&u->condition_timestamp))
1266 fprintf(f,
1267 "%s\tCondition Timestamp: %s\n"
1268 "%s\tCondition Result: %s\n",
1269 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1270 prefix, yes_no(u->condition_result));
1271
1272 if (dual_timestamp_is_set(&u->assert_timestamp))
1273 fprintf(f,
1274 "%s\tAssert Timestamp: %s\n"
1275 "%s\tAssert Result: %s\n",
1276 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1277 prefix, yes_no(u->assert_result));
1278
1279 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1280 UnitDependencyInfo di;
1281 Unit *other;
1282
1283 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1284 bool space = false;
1285
1286 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1287
1288 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1289 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1290
1291 fputs(")\n", f);
1292 }
1293 }
1294
1295 if (!hashmap_isempty(u->requires_mounts_for)) {
1296 UnitDependencyInfo di;
1297 const char *path;
1298
1299 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1300 bool space = false;
1301
1302 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1303
1304 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1305 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1306
1307 fputs(")\n", f);
1308 }
1309 }
1310
1311 if (u->load_state == UNIT_LOADED) {
1312
1313 fprintf(f,
1314 "%s\tStopWhenUnneeded: %s\n"
1315 "%s\tRefuseManualStart: %s\n"
1316 "%s\tRefuseManualStop: %s\n"
1317 "%s\tDefaultDependencies: %s\n"
1318 "%s\tOnFailureJobMode: %s\n"
1319 "%s\tIgnoreOnIsolate: %s\n",
1320 prefix, yes_no(u->stop_when_unneeded),
1321 prefix, yes_no(u->refuse_manual_start),
1322 prefix, yes_no(u->refuse_manual_stop),
1323 prefix, yes_no(u->default_dependencies),
1324 prefix, job_mode_to_string(u->on_failure_job_mode),
1325 prefix, yes_no(u->ignore_on_isolate));
1326
1327 if (UNIT_VTABLE(u)->dump)
1328 UNIT_VTABLE(u)->dump(u, f, prefix2);
1329
1330 } else if (u->load_state == UNIT_MERGED)
1331 fprintf(f,
1332 "%s\tMerged into: %s\n",
1333 prefix, u->merged_into->id);
1334 else if (u->load_state == UNIT_ERROR)
1335 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1336
1337 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1338 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1339
1340 if (u->job)
1341 job_dump(u->job, f, prefix2);
1342
1343 if (u->nop_job)
1344 job_dump(u->nop_job, f, prefix2);
1345 }
1346
1347 /* Common implementation for multiple backends */
1348 int unit_load_fragment_and_dropin(Unit *u) {
1349 int r;
1350
1351 assert(u);
1352
1353 /* Load a .{service,socket,...} file */
1354 r = unit_load_fragment(u);
1355 if (r < 0)
1356 return r;
1357
1358 if (u->load_state == UNIT_STUB)
1359 return -ENOENT;
1360
1361 /* Load drop-in directory data. If u is an alias, we might be reloading the
1362 * target unit needlessly. But we cannot be sure which drops-ins have already
1363 * been loaded and which not, at least without doing complicated book-keeping,
1364 * so let's always reread all drop-ins. */
1365 return unit_load_dropin(unit_follow_merge(u));
1366 }
1367
1368 /* Common implementation for multiple backends */
1369 int unit_load_fragment_and_dropin_optional(Unit *u) {
1370 int r;
1371
1372 assert(u);
1373
1374 /* Same as unit_load_fragment_and_dropin(), but whether
1375 * something can be loaded or not doesn't matter. */
1376
1377 /* Load a .service/.socket/.slice/… file */
1378 r = unit_load_fragment(u);
1379 if (r < 0)
1380 return r;
1381
1382 if (u->load_state == UNIT_STUB)
1383 u->load_state = UNIT_LOADED;
1384
1385 /* Load drop-in directory data */
1386 return unit_load_dropin(unit_follow_merge(u));
1387 }
1388
1389 void unit_add_to_target_deps_queue(Unit *u) {
1390 Manager *m = u->manager;
1391
1392 assert(u);
1393
1394 if (u->in_target_deps_queue)
1395 return;
1396
1397 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1398 u->in_target_deps_queue = true;
1399 }
1400
1401 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1402 assert(u);
1403 assert(target);
1404
1405 if (target->type != UNIT_TARGET)
1406 return 0;
1407
1408 /* Only add the dependency if both units are loaded, so that
1409 * that loop check below is reliable */
1410 if (u->load_state != UNIT_LOADED ||
1411 target->load_state != UNIT_LOADED)
1412 return 0;
1413
1414 /* If either side wants no automatic dependencies, then let's
1415 * skip this */
1416 if (!u->default_dependencies ||
1417 !target->default_dependencies)
1418 return 0;
1419
1420 /* Don't create loops */
1421 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1422 return 0;
1423
1424 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1425 }
1426
1427 static int unit_add_slice_dependencies(Unit *u) {
1428 UnitDependencyMask mask;
1429 assert(u);
1430
1431 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1432 return 0;
1433
1434 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1435 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1436 relationship). */
1437 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1438
1439 if (UNIT_ISSET(u->slice))
1440 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1441
1442 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1443 return 0;
1444
1445 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1446 }
1447
1448 static int unit_add_mount_dependencies(Unit *u) {
1449 UnitDependencyInfo di;
1450 const char *path;
1451 Iterator i;
1452 int r;
1453
1454 assert(u);
1455
1456 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1457 char prefix[strlen(path) + 1];
1458
1459 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1460 _cleanup_free_ char *p = NULL;
1461 Unit *m;
1462
1463 r = unit_name_from_path(prefix, ".mount", &p);
1464 if (r < 0)
1465 return r;
1466
1467 m = manager_get_unit(u->manager, p);
1468 if (!m) {
1469 /* Make sure to load the mount unit if
1470 * it exists. If so the dependencies
1471 * on this unit will be added later
1472 * during the loading of the mount
1473 * unit. */
1474 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1475 continue;
1476 }
1477 if (m == u)
1478 continue;
1479
1480 if (m->load_state != UNIT_LOADED)
1481 continue;
1482
1483 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1484 if (r < 0)
1485 return r;
1486
1487 if (m->fragment_path) {
1488 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1489 if (r < 0)
1490 return r;
1491 }
1492 }
1493 }
1494
1495 return 0;
1496 }
1497
1498 static int unit_add_startup_units(Unit *u) {
1499 CGroupContext *c;
1500 int r;
1501
1502 c = unit_get_cgroup_context(u);
1503 if (!c)
1504 return 0;
1505
1506 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1507 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1508 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1509 return 0;
1510
1511 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1512 if (r < 0)
1513 return r;
1514
1515 return set_put(u->manager->startup_units, u);
1516 }
1517
1518 int unit_load(Unit *u) {
1519 int r;
1520
1521 assert(u);
1522
1523 if (u->in_load_queue) {
1524 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1525 u->in_load_queue = false;
1526 }
1527
1528 if (u->type == _UNIT_TYPE_INVALID)
1529 return -EINVAL;
1530
1531 if (u->load_state != UNIT_STUB)
1532 return 0;
1533
1534 if (u->transient_file) {
1535 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1536 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1537
1538 r = fflush_and_check(u->transient_file);
1539 if (r < 0)
1540 goto fail;
1541
1542 u->transient_file = safe_fclose(u->transient_file);
1543 u->fragment_mtime = now(CLOCK_REALTIME);
1544 }
1545
1546 if (UNIT_VTABLE(u)->load) {
1547 r = UNIT_VTABLE(u)->load(u);
1548 if (r < 0)
1549 goto fail;
1550 }
1551
1552 if (u->load_state == UNIT_STUB) {
1553 r = -ENOENT;
1554 goto fail;
1555 }
1556
1557 if (u->load_state == UNIT_LOADED) {
1558 unit_add_to_target_deps_queue(u);
1559
1560 r = unit_add_slice_dependencies(u);
1561 if (r < 0)
1562 goto fail;
1563
1564 r = unit_add_mount_dependencies(u);
1565 if (r < 0)
1566 goto fail;
1567
1568 r = unit_add_startup_units(u);
1569 if (r < 0)
1570 goto fail;
1571
1572 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1573 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1574 r = -ENOEXEC;
1575 goto fail;
1576 }
1577
1578 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1579 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1580
1581 /* We finished loading, let's ensure our parents recalculate the members mask */
1582 unit_invalidate_cgroup_members_masks(u);
1583 }
1584
1585 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1586
1587 unit_add_to_dbus_queue(unit_follow_merge(u));
1588 unit_add_to_gc_queue(u);
1589
1590 return 0;
1591
1592 fail:
1593 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1594 * return ENOEXEC to ensure units are placed in this state after loading */
1595
1596 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1597 r == -ENOEXEC ? UNIT_BAD_SETTING :
1598 UNIT_ERROR;
1599 u->load_error = r;
1600
1601 unit_add_to_dbus_queue(u);
1602 unit_add_to_gc_queue(u);
1603
1604 return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1605 }
1606
1607 _printf_(7, 8)
1608 static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1609 Unit *u = userdata;
1610 va_list ap;
1611 int r;
1612
1613 va_start(ap, format);
1614 if (u)
1615 r = log_object_internalv(level, error, file, line, func,
1616 u->manager->unit_log_field,
1617 u->id,
1618 u->manager->invocation_log_field,
1619 u->invocation_id_string,
1620 format, ap);
1621 else
1622 r = log_internalv(level, error, file, line, func, format, ap);
1623 va_end(ap);
1624
1625 return r;
1626 }
1627
1628 static bool unit_test_condition(Unit *u) {
1629 assert(u);
1630
1631 dual_timestamp_get(&u->condition_timestamp);
1632 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1633
1634 unit_add_to_dbus_queue(u);
1635
1636 return u->condition_result;
1637 }
1638
1639 static bool unit_test_assert(Unit *u) {
1640 assert(u);
1641
1642 dual_timestamp_get(&u->assert_timestamp);
1643 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1644
1645 unit_add_to_dbus_queue(u);
1646
1647 return u->assert_result;
1648 }
1649
1650 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1651 const char *d;
1652
1653 d = unit_status_string(u);
1654 if (log_get_show_color())
1655 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1656
1657 DISABLE_WARNING_FORMAT_NONLITERAL;
1658 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1659 REENABLE_WARNING;
1660 }
1661
1662 int unit_test_start_limit(Unit *u) {
1663 const char *reason;
1664
1665 assert(u);
1666
1667 if (ratelimit_below(&u->start_limit)) {
1668 u->start_limit_hit = false;
1669 return 0;
1670 }
1671
1672 log_unit_warning(u, "Start request repeated too quickly.");
1673 u->start_limit_hit = true;
1674
1675 reason = strjoina("unit ", u->id, " failed");
1676
1677 emergency_action(u->manager, u->start_limit_action,
1678 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1679 u->reboot_arg, -1, reason);
1680
1681 return -ECANCELED;
1682 }
1683
1684 bool unit_shall_confirm_spawn(Unit *u) {
1685 assert(u);
1686
1687 if (manager_is_confirm_spawn_disabled(u->manager))
1688 return false;
1689
1690 /* For some reasons units remaining in the same process group
1691 * as PID 1 fail to acquire the console even if it's not used
1692 * by any process. So skip the confirmation question for them. */
1693 return !unit_get_exec_context(u)->same_pgrp;
1694 }
1695
1696 static bool unit_verify_deps(Unit *u) {
1697 Unit *other;
1698 Iterator j;
1699 void *v;
1700
1701 assert(u);
1702
1703 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1704 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1705 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1706 * conjunction with After= as for them any such check would make things entirely racy. */
1707
1708 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1709
1710 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1711 continue;
1712
1713 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1714 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1715 return false;
1716 }
1717 }
1718
1719 return true;
1720 }
1721
1722 /* Errors that aren't really errors:
1723 * -EALREADY: Unit is already started.
1724 * -ECOMM: Condition failed
1725 * -EAGAIN: An operation is already in progress. Retry later.
1726 *
1727 * Errors that are real errors:
1728 * -EBADR: This unit type does not support starting.
1729 * -ECANCELED: Start limit hit, too many requests for now
1730 * -EPROTO: Assert failed
1731 * -EINVAL: Unit not loaded
1732 * -EOPNOTSUPP: Unit type not supported
1733 * -ENOLINK: The necessary dependencies are not fulfilled.
1734 * -ESTALE: This unit has been started before and can't be started a second time
1735 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1736 */
1737 int unit_start(Unit *u) {
1738 UnitActiveState state;
1739 Unit *following;
1740 int r;
1741
1742 assert(u);
1743
1744 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1745 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1746 * waiting is finished. */
1747 state = unit_active_state(u);
1748 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1749 return -EALREADY;
1750 if (state == UNIT_MAINTENANCE)
1751 return -EAGAIN;
1752
1753 /* Units that aren't loaded cannot be started */
1754 if (u->load_state != UNIT_LOADED)
1755 return -EINVAL;
1756
1757 /* Refuse starting scope units more than once */
1758 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1759 return -ESTALE;
1760
1761 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1762 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1763 * recheck the condition in that case. */
1764 if (state != UNIT_ACTIVATING &&
1765 !unit_test_condition(u)) {
1766
1767 /* Let's also check the start limit here. Normally, the start limit is only checked by the
1768 * .start() method of the unit type after it did some additional checks verifying everything
1769 * is in order (so that those other checks can propagate errors properly). However, if a
1770 * condition check doesn't hold we don't get that far but we should still ensure we are not
1771 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
1772 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
1773 * hence the chance is big that any triggering unit for us will trigger us again. Note this
1774 * condition check is a bit different from the condition check inside the per-unit .start()
1775 * function, as this one will not change the unit's state in any way (and we shouldn't here,
1776 * after all the condition failed). */
1777
1778 r = unit_test_start_limit(u);
1779 if (r < 0)
1780 return r;
1781
1782 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1783 }
1784
1785 /* If the asserts failed, fail the entire job */
1786 if (state != UNIT_ACTIVATING &&
1787 !unit_test_assert(u))
1788 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1789
1790 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1791 * condition checks, so that we rather return condition check errors (which are usually not
1792 * considered a true failure) than "not supported" errors (which are considered a failure).
1793 */
1794 if (!unit_type_supported(u->type))
1795 return -EOPNOTSUPP;
1796
1797 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1798 * should have taken care of this already, but let's check this here again. After all, our
1799 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1800 if (!unit_verify_deps(u))
1801 return -ENOLINK;
1802
1803 /* Forward to the main object, if we aren't it. */
1804 following = unit_following(u);
1805 if (following) {
1806 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1807 return unit_start(following);
1808 }
1809
1810 /* If it is stopped, but we cannot start it, then fail */
1811 if (!UNIT_VTABLE(u)->start)
1812 return -EBADR;
1813
1814 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1815 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1816 * waits for a holdoff timer to elapse before it will start again. */
1817
1818 unit_add_to_dbus_queue(u);
1819
1820 return UNIT_VTABLE(u)->start(u);
1821 }
1822
1823 bool unit_can_start(Unit *u) {
1824 assert(u);
1825
1826 if (u->load_state != UNIT_LOADED)
1827 return false;
1828
1829 if (!unit_type_supported(u->type))
1830 return false;
1831
1832 /* Scope units may be started only once */
1833 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1834 return false;
1835
1836 return !!UNIT_VTABLE(u)->start;
1837 }
1838
1839 bool unit_can_isolate(Unit *u) {
1840 assert(u);
1841
1842 return unit_can_start(u) &&
1843 u->allow_isolate;
1844 }
1845
1846 /* Errors:
1847 * -EBADR: This unit type does not support stopping.
1848 * -EALREADY: Unit is already stopped.
1849 * -EAGAIN: An operation is already in progress. Retry later.
1850 */
1851 int unit_stop(Unit *u) {
1852 UnitActiveState state;
1853 Unit *following;
1854
1855 assert(u);
1856
1857 state = unit_active_state(u);
1858 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1859 return -EALREADY;
1860
1861 following = unit_following(u);
1862 if (following) {
1863 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1864 return unit_stop(following);
1865 }
1866
1867 if (!UNIT_VTABLE(u)->stop)
1868 return -EBADR;
1869
1870 unit_add_to_dbus_queue(u);
1871
1872 return UNIT_VTABLE(u)->stop(u);
1873 }
1874
1875 bool unit_can_stop(Unit *u) {
1876 assert(u);
1877
1878 if (!unit_type_supported(u->type))
1879 return false;
1880
1881 if (u->perpetual)
1882 return false;
1883
1884 return !!UNIT_VTABLE(u)->stop;
1885 }
1886
1887 /* Errors:
1888 * -EBADR: This unit type does not support reloading.
1889 * -ENOEXEC: Unit is not started.
1890 * -EAGAIN: An operation is already in progress. Retry later.
1891 */
1892 int unit_reload(Unit *u) {
1893 UnitActiveState state;
1894 Unit *following;
1895
1896 assert(u);
1897
1898 if (u->load_state != UNIT_LOADED)
1899 return -EINVAL;
1900
1901 if (!unit_can_reload(u))
1902 return -EBADR;
1903
1904 state = unit_active_state(u);
1905 if (state == UNIT_RELOADING)
1906 return -EAGAIN;
1907
1908 if (state != UNIT_ACTIVE) {
1909 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1910 return -ENOEXEC;
1911 }
1912
1913 following = unit_following(u);
1914 if (following) {
1915 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1916 return unit_reload(following);
1917 }
1918
1919 unit_add_to_dbus_queue(u);
1920
1921 if (!UNIT_VTABLE(u)->reload) {
1922 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1923 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1924 return 0;
1925 }
1926
1927 return UNIT_VTABLE(u)->reload(u);
1928 }
1929
1930 bool unit_can_reload(Unit *u) {
1931 assert(u);
1932
1933 if (UNIT_VTABLE(u)->can_reload)
1934 return UNIT_VTABLE(u)->can_reload(u);
1935
1936 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1937 return true;
1938
1939 return UNIT_VTABLE(u)->reload;
1940 }
1941
1942 bool unit_is_unneeded(Unit *u) {
1943 static const UnitDependency deps[] = {
1944 UNIT_REQUIRED_BY,
1945 UNIT_REQUISITE_OF,
1946 UNIT_WANTED_BY,
1947 UNIT_BOUND_BY,
1948 };
1949 size_t j;
1950
1951 assert(u);
1952
1953 if (!u->stop_when_unneeded)
1954 return false;
1955
1956 /* Don't clean up while the unit is transitioning or is even inactive. */
1957 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1958 return false;
1959 if (u->job)
1960 return false;
1961
1962 for (j = 0; j < ELEMENTSOF(deps); j++) {
1963 Unit *other;
1964 Iterator i;
1965 void *v;
1966
1967 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1968 * restart, then don't clean this one up. */
1969
1970 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1971 if (other->job)
1972 return false;
1973
1974 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1975 return false;
1976
1977 if (unit_will_restart(other))
1978 return false;
1979 }
1980 }
1981
1982 return true;
1983 }
1984
1985 static void check_unneeded_dependencies(Unit *u) {
1986
1987 static const UnitDependency deps[] = {
1988 UNIT_REQUIRES,
1989 UNIT_REQUISITE,
1990 UNIT_WANTS,
1991 UNIT_BINDS_TO,
1992 };
1993 size_t j;
1994
1995 assert(u);
1996
1997 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1998
1999 for (j = 0; j < ELEMENTSOF(deps); j++) {
2000 Unit *other;
2001 Iterator i;
2002 void *v;
2003
2004 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2005 unit_submit_to_stop_when_unneeded_queue(other);
2006 }
2007 }
2008
2009 static void unit_check_binds_to(Unit *u) {
2010 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2011 bool stop = false;
2012 Unit *other;
2013 Iterator i;
2014 void *v;
2015 int r;
2016
2017 assert(u);
2018
2019 if (u->job)
2020 return;
2021
2022 if (unit_active_state(u) != UNIT_ACTIVE)
2023 return;
2024
2025 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2026 if (other->job)
2027 continue;
2028
2029 if (!other->coldplugged)
2030 /* We might yet create a job for the other unit… */
2031 continue;
2032
2033 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2034 continue;
2035
2036 stop = true;
2037 break;
2038 }
2039
2040 if (!stop)
2041 return;
2042
2043 /* If stopping a unit fails continuously we might enter a stop
2044 * loop here, hence stop acting on the service being
2045 * unnecessary after a while. */
2046 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2047 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2048 return;
2049 }
2050
2051 assert(other);
2052 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2053
2054 /* A unit we need to run is gone. Sniff. Let's stop this. */
2055 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2056 if (r < 0)
2057 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2058 }
2059
2060 static void retroactively_start_dependencies(Unit *u) {
2061 Iterator i;
2062 Unit *other;
2063 void *v;
2064
2065 assert(u);
2066 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2067
2068 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2069 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2070 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2071 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2072
2073 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2074 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2075 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2076 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2077
2078 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2079 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2080 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2081 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2082
2083 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2084 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2085 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2086
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2090 }
2091
2092 static void retroactively_stop_dependencies(Unit *u) {
2093 Unit *other;
2094 Iterator i;
2095 void *v;
2096
2097 assert(u);
2098 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2099
2100 /* Pull down units which are bound to us recursively if enabled */
2101 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2102 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2104 }
2105
2106 void unit_start_on_failure(Unit *u) {
2107 Unit *other;
2108 Iterator i;
2109 void *v;
2110 int r;
2111
2112 assert(u);
2113
2114 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2115 return;
2116
2117 log_unit_info(u, "Triggering OnFailure= dependencies.");
2118
2119 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2120 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2121
2122 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2123 if (r < 0)
2124 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2125 }
2126 }
2127
2128 void unit_trigger_notify(Unit *u) {
2129 Unit *other;
2130 Iterator i;
2131 void *v;
2132
2133 assert(u);
2134
2135 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2136 if (UNIT_VTABLE(other)->trigger_notify)
2137 UNIT_VTABLE(other)->trigger_notify(other, u);
2138 }
2139
2140 static int unit_log_resources(Unit *u) {
2141 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2142 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2143 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2144 size_t n_message_parts = 0, n_iovec = 0;
2145 char* message_parts[1 + 2 + 2 + 1], *t;
2146 nsec_t nsec = NSEC_INFINITY;
2147 CGroupIPAccountingMetric m;
2148 size_t i;
2149 int r;
2150 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2151 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2152 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2153 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2154 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2155 };
2156 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2157 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2158 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2159 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2160 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2161 };
2162
2163 assert(u);
2164
2165 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2166 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2167 * information and the complete data in structured fields. */
2168
2169 (void) unit_get_cpu_usage(u, &nsec);
2170 if (nsec != NSEC_INFINITY) {
2171 char buf[FORMAT_TIMESPAN_MAX] = "";
2172
2173 /* Format the CPU time for inclusion in the structured log message */
2174 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2175 r = log_oom();
2176 goto finish;
2177 }
2178 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2179
2180 /* Format the CPU time for inclusion in the human language message string */
2181 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2182 t = strjoin("consumed ", buf, " CPU time");
2183 if (!t) {
2184 r = log_oom();
2185 goto finish;
2186 }
2187
2188 message_parts[n_message_parts++] = t;
2189 }
2190
2191 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2192 char buf[FORMAT_BYTES_MAX] = "";
2193 uint64_t value = UINT64_MAX;
2194
2195 assert(io_fields[k]);
2196
2197 (void) unit_get_io_accounting(u, k, k > 0, &value);
2198 if (value == UINT64_MAX)
2199 continue;
2200
2201 have_io_accounting = true;
2202 if (value > 0)
2203 any_io = true;
2204
2205 /* Format IO accounting data for inclusion in the structured log message */
2206 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2207 r = log_oom();
2208 goto finish;
2209 }
2210 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2211
2212 /* Format the IO accounting data for inclusion in the human language message string, but only
2213 * for the bytes counters (and not for the operations counters) */
2214 if (k == CGROUP_IO_READ_BYTES) {
2215 assert(!rr);
2216 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2217 if (!rr) {
2218 r = log_oom();
2219 goto finish;
2220 }
2221 } else if (k == CGROUP_IO_WRITE_BYTES) {
2222 assert(!wr);
2223 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2224 if (!wr) {
2225 r = log_oom();
2226 goto finish;
2227 }
2228 }
2229 }
2230
2231 if (have_io_accounting) {
2232 if (any_io) {
2233 if (rr)
2234 message_parts[n_message_parts++] = TAKE_PTR(rr);
2235 if (wr)
2236 message_parts[n_message_parts++] = TAKE_PTR(wr);
2237
2238 } else {
2239 char *k;
2240
2241 k = strdup("no IO");
2242 if (!k) {
2243 r = log_oom();
2244 goto finish;
2245 }
2246
2247 message_parts[n_message_parts++] = k;
2248 }
2249 }
2250
2251 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2252 char buf[FORMAT_BYTES_MAX] = "";
2253 uint64_t value = UINT64_MAX;
2254
2255 assert(ip_fields[m]);
2256
2257 (void) unit_get_ip_accounting(u, m, &value);
2258 if (value == UINT64_MAX)
2259 continue;
2260
2261 have_ip_accounting = true;
2262 if (value > 0)
2263 any_traffic = true;
2264
2265 /* Format IP accounting data for inclusion in the structured log message */
2266 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2267 r = log_oom();
2268 goto finish;
2269 }
2270 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2271
2272 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2273 * bytes counters (and not for the packets counters) */
2274 if (m == CGROUP_IP_INGRESS_BYTES) {
2275 assert(!igress);
2276 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2277 if (!igress) {
2278 r = log_oom();
2279 goto finish;
2280 }
2281 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2282 assert(!egress);
2283 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2284 if (!egress) {
2285 r = log_oom();
2286 goto finish;
2287 }
2288 }
2289 }
2290
2291 if (have_ip_accounting) {
2292 if (any_traffic) {
2293 if (igress)
2294 message_parts[n_message_parts++] = TAKE_PTR(igress);
2295 if (egress)
2296 message_parts[n_message_parts++] = TAKE_PTR(egress);
2297
2298 } else {
2299 char *k;
2300
2301 k = strdup("no IP traffic");
2302 if (!k) {
2303 r = log_oom();
2304 goto finish;
2305 }
2306
2307 message_parts[n_message_parts++] = k;
2308 }
2309 }
2310
2311 /* Is there any accounting data available at all? */
2312 if (n_iovec == 0) {
2313 r = 0;
2314 goto finish;
2315 }
2316
2317 if (n_message_parts == 0)
2318 t = strjoina("MESSAGE=", u->id, ": Completed.");
2319 else {
2320 _cleanup_free_ char *joined;
2321
2322 message_parts[n_message_parts] = NULL;
2323
2324 joined = strv_join(message_parts, ", ");
2325 if (!joined) {
2326 r = log_oom();
2327 goto finish;
2328 }
2329
2330 joined[0] = ascii_toupper(joined[0]);
2331 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2332 }
2333
2334 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2335 * and hence don't increase n_iovec for them */
2336 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2337 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2338
2339 t = strjoina(u->manager->unit_log_field, u->id);
2340 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2341
2342 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2343 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2344
2345 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2346 r = 0;
2347
2348 finish:
2349 for (i = 0; i < n_message_parts; i++)
2350 free(message_parts[i]);
2351
2352 for (i = 0; i < n_iovec; i++)
2353 free(iovec[i].iov_base);
2354
2355 return r;
2356
2357 }
2358
2359 static void unit_update_on_console(Unit *u) {
2360 bool b;
2361
2362 assert(u);
2363
2364 b = unit_needs_console(u);
2365 if (u->on_console == b)
2366 return;
2367
2368 u->on_console = b;
2369 if (b)
2370 manager_ref_console(u->manager);
2371 else
2372 manager_unref_console(u->manager);
2373 }
2374
2375 static void unit_emit_audit_start(Unit *u) {
2376 assert(u);
2377
2378 if (u->type != UNIT_SERVICE)
2379 return;
2380
2381 /* Write audit record if we have just finished starting up */
2382 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2383 u->in_audit = true;
2384 }
2385
2386 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2387 assert(u);
2388
2389 if (u->type != UNIT_SERVICE)
2390 return;
2391
2392 if (u->in_audit) {
2393 /* Write audit record if we have just finished shutting down */
2394 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2395 u->in_audit = false;
2396 } else {
2397 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2398 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2399
2400 if (state == UNIT_INACTIVE)
2401 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2402 }
2403 }
2404
2405 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2406 bool unexpected = false;
2407 JobResult result;
2408
2409 assert(j);
2410
2411 if (j->state == JOB_WAITING)
2412
2413 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2414 * due to EAGAIN. */
2415 job_add_to_run_queue(j);
2416
2417 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2418 * hence needs to invalidate jobs. */
2419
2420 switch (j->type) {
2421
2422 case JOB_START:
2423 case JOB_VERIFY_ACTIVE:
2424
2425 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2426 job_finish_and_invalidate(j, JOB_DONE, true, false);
2427 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2428 unexpected = true;
2429
2430 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2431 if (ns == UNIT_FAILED)
2432 result = JOB_FAILED;
2433 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2434 result = JOB_SKIPPED;
2435 else
2436 result = JOB_DONE;
2437
2438 job_finish_and_invalidate(j, result, true, false);
2439 }
2440 }
2441
2442 break;
2443
2444 case JOB_RELOAD:
2445 case JOB_RELOAD_OR_START:
2446 case JOB_TRY_RELOAD:
2447
2448 if (j->state == JOB_RUNNING) {
2449 if (ns == UNIT_ACTIVE)
2450 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2451 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2452 unexpected = true;
2453
2454 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2455 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2456 }
2457 }
2458
2459 break;
2460
2461 case JOB_STOP:
2462 case JOB_RESTART:
2463 case JOB_TRY_RESTART:
2464
2465 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2466 job_finish_and_invalidate(j, JOB_DONE, true, false);
2467 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2468 unexpected = true;
2469 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2470 }
2471
2472 break;
2473
2474 default:
2475 assert_not_reached("Job type unknown");
2476 }
2477
2478 return unexpected;
2479 }
2480
2481 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2482 const char *reason;
2483 Manager *m;
2484
2485 assert(u);
2486 assert(os < _UNIT_ACTIVE_STATE_MAX);
2487 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2488
2489 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2490 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2491 * remounted this function will be called too! */
2492
2493 m = u->manager;
2494
2495 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2496 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2497 unit_add_to_dbus_queue(u);
2498
2499 /* Update timestamps for state changes */
2500 if (!MANAGER_IS_RELOADING(m)) {
2501 dual_timestamp_get(&u->state_change_timestamp);
2502
2503 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2504 u->inactive_exit_timestamp = u->state_change_timestamp;
2505 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2506 u->inactive_enter_timestamp = u->state_change_timestamp;
2507
2508 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2509 u->active_enter_timestamp = u->state_change_timestamp;
2510 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2511 u->active_exit_timestamp = u->state_change_timestamp;
2512 }
2513
2514 /* Keep track of failed units */
2515 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2516
2517 /* Make sure the cgroup and state files are always removed when we become inactive */
2518 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2519 unit_prune_cgroup(u);
2520 unit_unlink_state_files(u);
2521 }
2522
2523 unit_update_on_console(u);
2524
2525 if (!MANAGER_IS_RELOADING(m)) {
2526 bool unexpected;
2527
2528 /* Let's propagate state changes to the job */
2529 if (u->job)
2530 unexpected = unit_process_job(u->job, ns, flags);
2531 else
2532 unexpected = true;
2533
2534 /* If this state change happened without being requested by a job, then let's retroactively start or
2535 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2536 * additional jobs just because something is already activated. */
2537
2538 if (unexpected) {
2539 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2540 retroactively_start_dependencies(u);
2541 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2542 retroactively_stop_dependencies(u);
2543 }
2544
2545 /* stop unneeded units regardless if going down was expected or not */
2546 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2547 check_unneeded_dependencies(u);
2548
2549 if (ns != os && ns == UNIT_FAILED) {
2550 log_unit_debug(u, "Unit entered failed state.");
2551
2552 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2553 unit_start_on_failure(u);
2554 }
2555
2556 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2557 /* This unit just finished starting up */
2558
2559 unit_emit_audit_start(u);
2560 manager_send_unit_plymouth(m, u);
2561 }
2562
2563 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2564 /* This unit just stopped/failed. */
2565
2566 unit_emit_audit_stop(u, ns);
2567 unit_log_resources(u);
2568 }
2569 }
2570
2571 manager_recheck_journal(m);
2572 manager_recheck_dbus(m);
2573
2574 unit_trigger_notify(u);
2575
2576 if (!MANAGER_IS_RELOADING(m)) {
2577 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2578 unit_submit_to_stop_when_unneeded_queue(u);
2579
2580 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2581 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2582 * without ever entering started.) */
2583 unit_check_binds_to(u);
2584
2585 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2586 reason = strjoina("unit ", u->id, " failed");
2587 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2588 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2589 reason = strjoina("unit ", u->id, " succeeded");
2590 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2591 }
2592 }
2593
2594 unit_add_to_gc_queue(u);
2595 }
2596
2597 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2598 int r;
2599
2600 assert(u);
2601 assert(pid_is_valid(pid));
2602
2603 /* Watch a specific PID */
2604
2605 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2606 * opportunity to remove any stalled references to this PID as they can be created
2607 * easily (when watching a process which is not our direct child). */
2608 if (exclusive)
2609 manager_unwatch_pid(u->manager, pid);
2610
2611 r = set_ensure_allocated(&u->pids, NULL);
2612 if (r < 0)
2613 return r;
2614
2615 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2616 if (r < 0)
2617 return r;
2618
2619 /* First try, let's add the unit keyed by "pid". */
2620 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2621 if (r == -EEXIST) {
2622 Unit **array;
2623 bool found = false;
2624 size_t n = 0;
2625
2626 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2627 * to an array of Units rather than just a Unit), lists us already. */
2628
2629 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2630 if (array)
2631 for (; array[n]; n++)
2632 if (array[n] == u)
2633 found = true;
2634
2635 if (found) /* Found it already? if so, do nothing */
2636 r = 0;
2637 else {
2638 Unit **new_array;
2639
2640 /* Allocate a new array */
2641 new_array = new(Unit*, n + 2);
2642 if (!new_array)
2643 return -ENOMEM;
2644
2645 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2646 new_array[n] = u;
2647 new_array[n+1] = NULL;
2648
2649 /* Add or replace the old array */
2650 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2651 if (r < 0) {
2652 free(new_array);
2653 return r;
2654 }
2655
2656 free(array);
2657 }
2658 } else if (r < 0)
2659 return r;
2660
2661 r = set_put(u->pids, PID_TO_PTR(pid));
2662 if (r < 0)
2663 return r;
2664
2665 return 0;
2666 }
2667
2668 void unit_unwatch_pid(Unit *u, pid_t pid) {
2669 Unit **array;
2670
2671 assert(u);
2672 assert(pid_is_valid(pid));
2673
2674 /* First let's drop the unit in case it's keyed as "pid". */
2675 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2676
2677 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2678 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2679 if (array) {
2680 size_t n, m = 0;
2681
2682 /* Let's iterate through the array, dropping our own entry */
2683 for (n = 0; array[n]; n++)
2684 if (array[n] != u)
2685 array[m++] = array[n];
2686 array[m] = NULL;
2687
2688 if (m == 0) {
2689 /* The array is now empty, remove the entire entry */
2690 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2691 free(array);
2692 }
2693 }
2694
2695 (void) set_remove(u->pids, PID_TO_PTR(pid));
2696 }
2697
2698 void unit_unwatch_all_pids(Unit *u) {
2699 assert(u);
2700
2701 while (!set_isempty(u->pids))
2702 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2703
2704 u->pids = set_free(u->pids);
2705 }
2706
2707 static void unit_tidy_watch_pids(Unit *u) {
2708 pid_t except1, except2;
2709 Iterator i;
2710 void *e;
2711
2712 assert(u);
2713
2714 /* Cleans dead PIDs from our list */
2715
2716 except1 = unit_main_pid(u);
2717 except2 = unit_control_pid(u);
2718
2719 SET_FOREACH(e, u->pids, i) {
2720 pid_t pid = PTR_TO_PID(e);
2721
2722 if (pid == except1 || pid == except2)
2723 continue;
2724
2725 if (!pid_is_unwaited(pid))
2726 unit_unwatch_pid(u, pid);
2727 }
2728 }
2729
2730 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2731 Unit *u = userdata;
2732
2733 assert(s);
2734 assert(u);
2735
2736 unit_tidy_watch_pids(u);
2737 unit_watch_all_pids(u);
2738
2739 /* If the PID set is empty now, then let's finish this off. */
2740 unit_synthesize_cgroup_empty_event(u);
2741
2742 return 0;
2743 }
2744
2745 int unit_enqueue_rewatch_pids(Unit *u) {
2746 int r;
2747
2748 assert(u);
2749
2750 if (!u->cgroup_path)
2751 return -ENOENT;
2752
2753 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2754 if (r < 0)
2755 return r;
2756 if (r > 0) /* On unified we can use proper notifications */
2757 return 0;
2758
2759 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2760 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2761 * involves issuing kill(pid, 0) on all processes we watch. */
2762
2763 if (!u->rewatch_pids_event_source) {
2764 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2765
2766 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2767 if (r < 0)
2768 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2769
2770 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2771 if (r < 0)
2772 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2773
2774 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2775
2776 u->rewatch_pids_event_source = TAKE_PTR(s);
2777 }
2778
2779 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2780 if (r < 0)
2781 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2782
2783 return 0;
2784 }
2785
2786 void unit_dequeue_rewatch_pids(Unit *u) {
2787 int r;
2788 assert(u);
2789
2790 if (!u->rewatch_pids_event_source)
2791 return;
2792
2793 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2794 if (r < 0)
2795 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2796
2797 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2798 }
2799
2800 bool unit_job_is_applicable(Unit *u, JobType j) {
2801 assert(u);
2802 assert(j >= 0 && j < _JOB_TYPE_MAX);
2803
2804 switch (j) {
2805
2806 case JOB_VERIFY_ACTIVE:
2807 case JOB_START:
2808 case JOB_NOP:
2809 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2810 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2811 * jobs for it. */
2812 return true;
2813
2814 case JOB_STOP:
2815 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2816 * external events), hence it makes no sense to permit enqueing such a request either. */
2817 return !u->perpetual;
2818
2819 case JOB_RESTART:
2820 case JOB_TRY_RESTART:
2821 return unit_can_stop(u) && unit_can_start(u);
2822
2823 case JOB_RELOAD:
2824 case JOB_TRY_RELOAD:
2825 return unit_can_reload(u);
2826
2827 case JOB_RELOAD_OR_START:
2828 return unit_can_reload(u) && unit_can_start(u);
2829
2830 default:
2831 assert_not_reached("Invalid job type");
2832 }
2833 }
2834
2835 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2836 assert(u);
2837
2838 /* Only warn about some unit types */
2839 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2840 return;
2841
2842 if (streq_ptr(u->id, other))
2843 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2844 else
2845 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2846 }
2847
2848 static int unit_add_dependency_hashmap(
2849 Hashmap **h,
2850 Unit *other,
2851 UnitDependencyMask origin_mask,
2852 UnitDependencyMask destination_mask) {
2853
2854 UnitDependencyInfo info;
2855 int r;
2856
2857 assert(h);
2858 assert(other);
2859 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2860 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2861 assert(origin_mask > 0 || destination_mask > 0);
2862
2863 r = hashmap_ensure_allocated(h, NULL);
2864 if (r < 0)
2865 return r;
2866
2867 assert_cc(sizeof(void*) == sizeof(info));
2868
2869 info.data = hashmap_get(*h, other);
2870 if (info.data) {
2871 /* Entry already exists. Add in our mask. */
2872
2873 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2874 FLAGS_SET(destination_mask, info.destination_mask))
2875 return 0; /* NOP */
2876
2877 info.origin_mask |= origin_mask;
2878 info.destination_mask |= destination_mask;
2879
2880 r = hashmap_update(*h, other, info.data);
2881 } else {
2882 info = (UnitDependencyInfo) {
2883 .origin_mask = origin_mask,
2884 .destination_mask = destination_mask,
2885 };
2886
2887 r = hashmap_put(*h, other, info.data);
2888 }
2889 if (r < 0)
2890 return r;
2891
2892 return 1;
2893 }
2894
2895 int unit_add_dependency(
2896 Unit *u,
2897 UnitDependency d,
2898 Unit *other,
2899 bool add_reference,
2900 UnitDependencyMask mask) {
2901
2902 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2903 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2904 [UNIT_WANTS] = UNIT_WANTED_BY,
2905 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2906 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2907 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2908 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2909 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2910 [UNIT_WANTED_BY] = UNIT_WANTS,
2911 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2912 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2913 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2914 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2915 [UNIT_BEFORE] = UNIT_AFTER,
2916 [UNIT_AFTER] = UNIT_BEFORE,
2917 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2918 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2919 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2920 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2921 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2922 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2923 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2924 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2925 };
2926 Unit *original_u = u, *original_other = other;
2927 int r;
2928
2929 assert(u);
2930 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2931 assert(other);
2932
2933 u = unit_follow_merge(u);
2934 other = unit_follow_merge(other);
2935
2936 /* We won't allow dependencies on ourselves. We will not
2937 * consider them an error however. */
2938 if (u == other) {
2939 maybe_warn_about_dependency(original_u, original_other->id, d);
2940 return 0;
2941 }
2942
2943 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2944 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2945 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2946 return 0;
2947 }
2948
2949 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2950 if (r < 0)
2951 return r;
2952
2953 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2954 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2955 if (r < 0)
2956 return r;
2957 }
2958
2959 if (add_reference) {
2960 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2961 if (r < 0)
2962 return r;
2963
2964 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2965 if (r < 0)
2966 return r;
2967 }
2968
2969 unit_add_to_dbus_queue(u);
2970 return 0;
2971 }
2972
2973 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2974 int r;
2975
2976 assert(u);
2977
2978 r = unit_add_dependency(u, d, other, add_reference, mask);
2979 if (r < 0)
2980 return r;
2981
2982 return unit_add_dependency(u, e, other, add_reference, mask);
2983 }
2984
2985 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2986 int r;
2987
2988 assert(u);
2989 assert(name);
2990 assert(buf);
2991 assert(ret);
2992
2993 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2994 *buf = NULL;
2995 *ret = name;
2996 return 0;
2997 }
2998
2999 if (u->instance)
3000 r = unit_name_replace_instance(name, u->instance, buf);
3001 else {
3002 _cleanup_free_ char *i = NULL;
3003
3004 r = unit_name_to_prefix(u->id, &i);
3005 if (r < 0)
3006 return r;
3007
3008 r = unit_name_replace_instance(name, i, buf);
3009 }
3010 if (r < 0)
3011 return r;
3012
3013 *ret = *buf;
3014 return 0;
3015 }
3016
3017 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3018 _cleanup_free_ char *buf = NULL;
3019 Unit *other;
3020 int r;
3021
3022 assert(u);
3023 assert(name);
3024
3025 r = resolve_template(u, name, &buf, &name);
3026 if (r < 0)
3027 return r;
3028
3029 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3030 if (r < 0)
3031 return r;
3032
3033 return unit_add_dependency(u, d, other, add_reference, mask);
3034 }
3035
3036 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3037 _cleanup_free_ char *buf = NULL;
3038 Unit *other;
3039 int r;
3040
3041 assert(u);
3042 assert(name);
3043
3044 r = resolve_template(u, name, &buf, &name);
3045 if (r < 0)
3046 return r;
3047
3048 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3049 if (r < 0)
3050 return r;
3051
3052 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3053 }
3054
3055 int set_unit_path(const char *p) {
3056 /* This is mostly for debug purposes */
3057 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3058 return -errno;
3059
3060 return 0;
3061 }
3062
3063 char *unit_dbus_path(Unit *u) {
3064 assert(u);
3065
3066 if (!u->id)
3067 return NULL;
3068
3069 return unit_dbus_path_from_name(u->id);
3070 }
3071
3072 char *unit_dbus_path_invocation_id(Unit *u) {
3073 assert(u);
3074
3075 if (sd_id128_is_null(u->invocation_id))
3076 return NULL;
3077
3078 return unit_dbus_path_from_name(u->invocation_id_string);
3079 }
3080
3081 int unit_set_slice(Unit *u, Unit *slice) {
3082 assert(u);
3083 assert(slice);
3084
3085 /* Sets the unit slice if it has not been set before. Is extra
3086 * careful, to only allow this for units that actually have a
3087 * cgroup context. Also, we don't allow to set this for slices
3088 * (since the parent slice is derived from the name). Make
3089 * sure the unit we set is actually a slice. */
3090
3091 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3092 return -EOPNOTSUPP;
3093
3094 if (u->type == UNIT_SLICE)
3095 return -EINVAL;
3096
3097 if (unit_active_state(u) != UNIT_INACTIVE)
3098 return -EBUSY;
3099
3100 if (slice->type != UNIT_SLICE)
3101 return -EINVAL;
3102
3103 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3104 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3105 return -EPERM;
3106
3107 if (UNIT_DEREF(u->slice) == slice)
3108 return 0;
3109
3110 /* Disallow slice changes if @u is already bound to cgroups */
3111 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3112 return -EBUSY;
3113
3114 unit_ref_set(&u->slice, u, slice);
3115 return 1;
3116 }
3117
3118 int unit_set_default_slice(Unit *u) {
3119 const char *slice_name;
3120 Unit *slice;
3121 int r;
3122
3123 assert(u);
3124
3125 if (UNIT_ISSET(u->slice))
3126 return 0;
3127
3128 if (u->instance) {
3129 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3130
3131 /* Implicitly place all instantiated units in their
3132 * own per-template slice */
3133
3134 r = unit_name_to_prefix(u->id, &prefix);
3135 if (r < 0)
3136 return r;
3137
3138 /* The prefix is already escaped, but it might include
3139 * "-" which has a special meaning for slice units,
3140 * hence escape it here extra. */
3141 escaped = unit_name_escape(prefix);
3142 if (!escaped)
3143 return -ENOMEM;
3144
3145 if (MANAGER_IS_SYSTEM(u->manager))
3146 slice_name = strjoina("system-", escaped, ".slice");
3147 else
3148 slice_name = strjoina(escaped, ".slice");
3149 } else
3150 slice_name =
3151 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3152 ? SPECIAL_SYSTEM_SLICE
3153 : SPECIAL_ROOT_SLICE;
3154
3155 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3156 if (r < 0)
3157 return r;
3158
3159 return unit_set_slice(u, slice);
3160 }
3161
3162 const char *unit_slice_name(Unit *u) {
3163 assert(u);
3164
3165 if (!UNIT_ISSET(u->slice))
3166 return NULL;
3167
3168 return UNIT_DEREF(u->slice)->id;
3169 }
3170
3171 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3172 _cleanup_free_ char *t = NULL;
3173 int r;
3174
3175 assert(u);
3176 assert(type);
3177 assert(_found);
3178
3179 r = unit_name_change_suffix(u->id, type, &t);
3180 if (r < 0)
3181 return r;
3182 if (unit_has_name(u, t))
3183 return -EINVAL;
3184
3185 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3186 assert(r < 0 || *_found != u);
3187 return r;
3188 }
3189
3190 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3191 const char *name, *old_owner, *new_owner;
3192 Unit *u = userdata;
3193 int r;
3194
3195 assert(message);
3196 assert(u);
3197
3198 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3199 if (r < 0) {
3200 bus_log_parse_error(r);
3201 return 0;
3202 }
3203
3204 old_owner = empty_to_null(old_owner);
3205 new_owner = empty_to_null(new_owner);
3206
3207 if (UNIT_VTABLE(u)->bus_name_owner_change)
3208 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3209
3210 return 0;
3211 }
3212
3213 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3214 const char *match;
3215
3216 assert(u);
3217 assert(bus);
3218 assert(name);
3219
3220 if (u->match_bus_slot)
3221 return -EBUSY;
3222
3223 match = strjoina("type='signal',"
3224 "sender='org.freedesktop.DBus',"
3225 "path='/org/freedesktop/DBus',"
3226 "interface='org.freedesktop.DBus',"
3227 "member='NameOwnerChanged',"
3228 "arg0='", name, "'");
3229
3230 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3231 }
3232
3233 int unit_watch_bus_name(Unit *u, const char *name) {
3234 int r;
3235
3236 assert(u);
3237 assert(name);
3238
3239 /* Watch a specific name on the bus. We only support one unit
3240 * watching each name for now. */
3241
3242 if (u->manager->api_bus) {
3243 /* If the bus is already available, install the match directly.
3244 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3245 r = unit_install_bus_match(u, u->manager->api_bus, name);
3246 if (r < 0)
3247 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3248 }
3249
3250 r = hashmap_put(u->manager->watch_bus, name, u);
3251 if (r < 0) {
3252 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3253 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3254 }
3255
3256 return 0;
3257 }
3258
3259 void unit_unwatch_bus_name(Unit *u, const char *name) {
3260 assert(u);
3261 assert(name);
3262
3263 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3264 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3265 }
3266
3267 bool unit_can_serialize(Unit *u) {
3268 assert(u);
3269
3270 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3271 }
3272
3273 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3274 _cleanup_free_ char *s = NULL;
3275 int r;
3276
3277 assert(f);
3278 assert(key);
3279
3280 if (mask == 0)
3281 return 0;
3282
3283 r = cg_mask_to_string(mask, &s);
3284 if (r < 0)
3285 return log_error_errno(r, "Failed to format cgroup mask: %m");
3286
3287 return serialize_item(f, key, s);
3288 }
3289
3290 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3291 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3292 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3293 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3294 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3295 };
3296
3297 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3298 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3299 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3300 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3301 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3302 };
3303
3304 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3305 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3306 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3307 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3308 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3309 };
3310
3311 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3312 CGroupIPAccountingMetric m;
3313 int r;
3314
3315 assert(u);
3316 assert(f);
3317 assert(fds);
3318
3319 if (unit_can_serialize(u)) {
3320 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3321 if (r < 0)
3322 return r;
3323 }
3324
3325 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3326
3327 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3328 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3329 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3330 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3331
3332 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3333 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3334
3335 if (dual_timestamp_is_set(&u->condition_timestamp))
3336 (void) serialize_bool(f, "condition-result", u->condition_result);
3337
3338 if (dual_timestamp_is_set(&u->assert_timestamp))
3339 (void) serialize_bool(f, "assert-result", u->assert_result);
3340
3341 (void) serialize_bool(f, "transient", u->transient);
3342 (void) serialize_bool(f, "in-audit", u->in_audit);
3343
3344 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3345 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3346 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3347 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3348 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3349
3350 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3351 if (u->cpu_usage_last != NSEC_INFINITY)
3352 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3353
3354 if (u->oom_kill_last > 0)
3355 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3356
3357 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3358 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3359
3360 if (u->io_accounting_last[im] != UINT64_MAX)
3361 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3362 }
3363
3364 if (u->cgroup_path)
3365 (void) serialize_item(f, "cgroup", u->cgroup_path);
3366
3367 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3368 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3369 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3370 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3371
3372 if (uid_is_valid(u->ref_uid))
3373 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3374 if (gid_is_valid(u->ref_gid))
3375 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3376
3377 if (!sd_id128_is_null(u->invocation_id))
3378 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3379
3380 bus_track_serialize(u->bus_track, f, "ref");
3381
3382 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3383 uint64_t v;
3384
3385 r = unit_get_ip_accounting(u, m, &v);
3386 if (r >= 0)
3387 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3388 }
3389
3390 if (serialize_jobs) {
3391 if (u->job) {
3392 fputs("job\n", f);
3393 job_serialize(u->job, f);
3394 }
3395
3396 if (u->nop_job) {
3397 fputs("job\n", f);
3398 job_serialize(u->nop_job, f);
3399 }
3400 }
3401
3402 /* End marker */
3403 fputc('\n', f);
3404 return 0;
3405 }
3406
3407 static int unit_deserialize_job(Unit *u, FILE *f) {
3408 _cleanup_(job_freep) Job *j = NULL;
3409 int r;
3410
3411 assert(u);
3412 assert(f);
3413
3414 j = job_new_raw(u);
3415 if (!j)
3416 return log_oom();
3417
3418 r = job_deserialize(j, f);
3419 if (r < 0)
3420 return r;
3421
3422 r = job_install_deserialized(j);
3423 if (r < 0)
3424 return r;
3425
3426 TAKE_PTR(j);
3427 return 0;
3428 }
3429
3430 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3431 int r;
3432
3433 assert(u);
3434 assert(f);
3435 assert(fds);
3436
3437 for (;;) {
3438 _cleanup_free_ char *line = NULL;
3439 char *l, *v;
3440 ssize_t m;
3441 size_t k;
3442
3443 r = read_line(f, LONG_LINE_MAX, &line);
3444 if (r < 0)
3445 return log_error_errno(r, "Failed to read serialization line: %m");
3446 if (r == 0) /* eof */
3447 break;
3448
3449 l = strstrip(line);
3450 if (isempty(l)) /* End marker */
3451 break;
3452
3453 k = strcspn(l, "=");
3454
3455 if (l[k] == '=') {
3456 l[k] = 0;
3457 v = l+k+1;
3458 } else
3459 v = l+k;
3460
3461 if (streq(l, "job")) {
3462 if (v[0] == '\0') {
3463 /* New-style serialized job */
3464 r = unit_deserialize_job(u, f);
3465 if (r < 0)
3466 return r;
3467 } else /* Legacy for pre-44 */
3468 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3469 continue;
3470 } else if (streq(l, "state-change-timestamp")) {
3471 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3472 continue;
3473 } else if (streq(l, "inactive-exit-timestamp")) {
3474 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3475 continue;
3476 } else if (streq(l, "active-enter-timestamp")) {
3477 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3478 continue;
3479 } else if (streq(l, "active-exit-timestamp")) {
3480 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3481 continue;
3482 } else if (streq(l, "inactive-enter-timestamp")) {
3483 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3484 continue;
3485 } else if (streq(l, "condition-timestamp")) {
3486 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3487 continue;
3488 } else if (streq(l, "assert-timestamp")) {
3489 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3490 continue;
3491 } else if (streq(l, "condition-result")) {
3492
3493 r = parse_boolean(v);
3494 if (r < 0)
3495 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3496 else
3497 u->condition_result = r;
3498
3499 continue;
3500
3501 } else if (streq(l, "assert-result")) {
3502
3503 r = parse_boolean(v);
3504 if (r < 0)
3505 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3506 else
3507 u->assert_result = r;
3508
3509 continue;
3510
3511 } else if (streq(l, "transient")) {
3512
3513 r = parse_boolean(v);
3514 if (r < 0)
3515 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3516 else
3517 u->transient = r;
3518
3519 continue;
3520
3521 } else if (streq(l, "in-audit")) {
3522
3523 r = parse_boolean(v);
3524 if (r < 0)
3525 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3526 else
3527 u->in_audit = r;
3528
3529 continue;
3530
3531 } else if (streq(l, "exported-invocation-id")) {
3532
3533 r = parse_boolean(v);
3534 if (r < 0)
3535 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3536 else
3537 u->exported_invocation_id = r;
3538
3539 continue;
3540
3541 } else if (streq(l, "exported-log-level-max")) {
3542
3543 r = parse_boolean(v);
3544 if (r < 0)
3545 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3546 else
3547 u->exported_log_level_max = r;
3548
3549 continue;
3550
3551 } else if (streq(l, "exported-log-extra-fields")) {
3552
3553 r = parse_boolean(v);
3554 if (r < 0)
3555 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3556 else
3557 u->exported_log_extra_fields = r;
3558
3559 continue;
3560
3561 } else if (streq(l, "exported-log-rate-limit-interval")) {
3562
3563 r = parse_boolean(v);
3564 if (r < 0)
3565 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3566 else
3567 u->exported_log_rate_limit_interval = r;
3568
3569 continue;
3570
3571 } else if (streq(l, "exported-log-rate-limit-burst")) {
3572
3573 r = parse_boolean(v);
3574 if (r < 0)
3575 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3576 else
3577 u->exported_log_rate_limit_burst = r;
3578
3579 continue;
3580
3581 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3582
3583 r = safe_atou64(v, &u->cpu_usage_base);
3584 if (r < 0)
3585 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3586
3587 continue;
3588
3589 } else if (streq(l, "cpu-usage-last")) {
3590
3591 r = safe_atou64(v, &u->cpu_usage_last);
3592 if (r < 0)
3593 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3594
3595 continue;
3596
3597 } else if (streq(l, "oom-kill-last")) {
3598
3599 r = safe_atou64(v, &u->oom_kill_last);
3600 if (r < 0)
3601 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3602
3603 continue;
3604
3605 } else if (streq(l, "cgroup")) {
3606
3607 r = unit_set_cgroup_path(u, v);
3608 if (r < 0)
3609 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3610
3611 (void) unit_watch_cgroup(u);
3612 (void) unit_watch_cgroup_memory(u);
3613
3614 continue;
3615 } else if (streq(l, "cgroup-realized")) {
3616 int b;
3617
3618 b = parse_boolean(v);
3619 if (b < 0)
3620 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3621 else
3622 u->cgroup_realized = b;
3623
3624 continue;
3625
3626 } else if (streq(l, "cgroup-realized-mask")) {
3627
3628 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3629 if (r < 0)
3630 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3631 continue;
3632
3633 } else if (streq(l, "cgroup-enabled-mask")) {
3634
3635 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3636 if (r < 0)
3637 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3638 continue;
3639
3640 } else if (streq(l, "cgroup-invalidated-mask")) {
3641
3642 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3643 if (r < 0)
3644 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3645 continue;
3646
3647 } else if (streq(l, "ref-uid")) {
3648 uid_t uid;
3649
3650 r = parse_uid(v, &uid);
3651 if (r < 0)
3652 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3653 else
3654 unit_ref_uid_gid(u, uid, GID_INVALID);
3655
3656 continue;
3657
3658 } else if (streq(l, "ref-gid")) {
3659 gid_t gid;
3660
3661 r = parse_gid(v, &gid);
3662 if (r < 0)
3663 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3664 else
3665 unit_ref_uid_gid(u, UID_INVALID, gid);
3666
3667 continue;
3668
3669 } else if (streq(l, "ref")) {
3670
3671 r = strv_extend(&u->deserialized_refs, v);
3672 if (r < 0)
3673 return log_oom();
3674
3675 continue;
3676 } else if (streq(l, "invocation-id")) {
3677 sd_id128_t id;
3678
3679 r = sd_id128_from_string(v, &id);
3680 if (r < 0)
3681 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3682 else {
3683 r = unit_set_invocation_id(u, id);
3684 if (r < 0)
3685 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3686 }
3687
3688 continue;
3689 }
3690
3691 /* Check if this is an IP accounting metric serialization field */
3692 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3693 if (m >= 0) {
3694 uint64_t c;
3695
3696 r = safe_atou64(v, &c);
3697 if (r < 0)
3698 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3699 else
3700 u->ip_accounting_extra[m] = c;
3701 continue;
3702 }
3703
3704 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3705 if (m >= 0) {
3706 uint64_t c;
3707
3708 r = safe_atou64(v, &c);
3709 if (r < 0)
3710 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3711 else
3712 u->io_accounting_base[m] = c;
3713 continue;
3714 }
3715
3716 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3717 if (m >= 0) {
3718 uint64_t c;
3719
3720 r = safe_atou64(v, &c);
3721 if (r < 0)
3722 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3723 else
3724 u->io_accounting_last[m] = c;
3725 continue;
3726 }
3727
3728 if (unit_can_serialize(u)) {
3729 r = exec_runtime_deserialize_compat(u, l, v, fds);
3730 if (r < 0) {
3731 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3732 continue;
3733 }
3734
3735 /* Returns positive if key was handled by the call */
3736 if (r > 0)
3737 continue;
3738
3739 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3740 if (r < 0)
3741 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3742 }
3743 }
3744
3745 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3746 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3747 * before 228 where the base for timeouts was not persistent across reboots. */
3748
3749 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3750 dual_timestamp_get(&u->state_change_timestamp);
3751
3752 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3753 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3754 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3755 unit_invalidate_cgroup_bpf(u);
3756
3757 return 0;
3758 }
3759
3760 int unit_deserialize_skip(FILE *f) {
3761 int r;
3762 assert(f);
3763
3764 /* Skip serialized data for this unit. We don't know what it is. */
3765
3766 for (;;) {
3767 _cleanup_free_ char *line = NULL;
3768 char *l;
3769
3770 r = read_line(f, LONG_LINE_MAX, &line);
3771 if (r < 0)
3772 return log_error_errno(r, "Failed to read serialization line: %m");
3773 if (r == 0)
3774 return 0;
3775
3776 l = strstrip(line);
3777
3778 /* End marker */
3779 if (isempty(l))
3780 return 1;
3781 }
3782 }
3783
3784 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3785 Unit *device;
3786 _cleanup_free_ char *e = NULL;
3787 int r;
3788
3789 assert(u);
3790
3791 /* Adds in links to the device node that this unit is based on */
3792 if (isempty(what))
3793 return 0;
3794
3795 if (!is_device_path(what))
3796 return 0;
3797
3798 /* When device units aren't supported (such as in a
3799 * container), don't create dependencies on them. */
3800 if (!unit_type_supported(UNIT_DEVICE))
3801 return 0;
3802
3803 r = unit_name_from_path(what, ".device", &e);
3804 if (r < 0)
3805 return r;
3806
3807 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3808 if (r < 0)
3809 return r;
3810
3811 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3812 dep = UNIT_BINDS_TO;
3813
3814 r = unit_add_two_dependencies(u, UNIT_AFTER,
3815 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3816 device, true, mask);
3817 if (r < 0)
3818 return r;
3819
3820 if (wants) {
3821 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3822 if (r < 0)
3823 return r;
3824 }
3825
3826 return 0;
3827 }
3828
3829 int unit_coldplug(Unit *u) {
3830 int r = 0, q;
3831 char **i;
3832
3833 assert(u);
3834
3835 /* Make sure we don't enter a loop, when coldplugging recursively. */
3836 if (u->coldplugged)
3837 return 0;
3838
3839 u->coldplugged = true;
3840
3841 STRV_FOREACH(i, u->deserialized_refs) {
3842 q = bus_unit_track_add_name(u, *i);
3843 if (q < 0 && r >= 0)
3844 r = q;
3845 }
3846 u->deserialized_refs = strv_free(u->deserialized_refs);
3847
3848 if (UNIT_VTABLE(u)->coldplug) {
3849 q = UNIT_VTABLE(u)->coldplug(u);
3850 if (q < 0 && r >= 0)
3851 r = q;
3852 }
3853
3854 if (u->job) {
3855 q = job_coldplug(u->job);
3856 if (q < 0 && r >= 0)
3857 r = q;
3858 }
3859
3860 return r;
3861 }
3862
3863 void unit_catchup(Unit *u) {
3864 assert(u);
3865
3866 if (UNIT_VTABLE(u)->catchup)
3867 UNIT_VTABLE(u)->catchup(u);
3868 }
3869
3870 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3871 struct stat st;
3872
3873 if (!path)
3874 return false;
3875
3876 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3877 * are never out-of-date. */
3878 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3879 return false;
3880
3881 if (stat(path, &st) < 0)
3882 /* What, cannot access this anymore? */
3883 return true;
3884
3885 if (path_masked)
3886 /* For masked files check if they are still so */
3887 return !null_or_empty(&st);
3888 else
3889 /* For non-empty files check the mtime */
3890 return timespec_load(&st.st_mtim) > mtime;
3891
3892 return false;
3893 }
3894
3895 bool unit_need_daemon_reload(Unit *u) {
3896 _cleanup_strv_free_ char **t = NULL;
3897 char **path;
3898
3899 assert(u);
3900
3901 /* For unit files, we allow masking… */
3902 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3903 u->load_state == UNIT_MASKED))
3904 return true;
3905
3906 /* Source paths should not be masked… */
3907 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3908 return true;
3909
3910 if (u->load_state == UNIT_LOADED)
3911 (void) unit_find_dropin_paths(u, &t);
3912 if (!strv_equal(u->dropin_paths, t))
3913 return true;
3914
3915 /* … any drop-ins that are masked are simply omitted from the list. */
3916 STRV_FOREACH(path, u->dropin_paths)
3917 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3918 return true;
3919
3920 return false;
3921 }
3922
3923 void unit_reset_failed(Unit *u) {
3924 assert(u);
3925
3926 if (UNIT_VTABLE(u)->reset_failed)
3927 UNIT_VTABLE(u)->reset_failed(u);
3928
3929 RATELIMIT_RESET(u->start_limit);
3930 u->start_limit_hit = false;
3931 }
3932
3933 Unit *unit_following(Unit *u) {
3934 assert(u);
3935
3936 if (UNIT_VTABLE(u)->following)
3937 return UNIT_VTABLE(u)->following(u);
3938
3939 return NULL;
3940 }
3941
3942 bool unit_stop_pending(Unit *u) {
3943 assert(u);
3944
3945 /* This call does check the current state of the unit. It's
3946 * hence useful to be called from state change calls of the
3947 * unit itself, where the state isn't updated yet. This is
3948 * different from unit_inactive_or_pending() which checks both
3949 * the current state and for a queued job. */
3950
3951 return u->job && u->job->type == JOB_STOP;
3952 }
3953
3954 bool unit_inactive_or_pending(Unit *u) {
3955 assert(u);
3956
3957 /* Returns true if the unit is inactive or going down */
3958
3959 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3960 return true;
3961
3962 if (unit_stop_pending(u))
3963 return true;
3964
3965 return false;
3966 }
3967
3968 bool unit_active_or_pending(Unit *u) {
3969 assert(u);
3970
3971 /* Returns true if the unit is active or going up */
3972
3973 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3974 return true;
3975
3976 if (u->job &&
3977 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3978 return true;
3979
3980 return false;
3981 }
3982
3983 bool unit_will_restart(Unit *u) {
3984 assert(u);
3985
3986 if (!UNIT_VTABLE(u)->will_restart)
3987 return false;
3988
3989 return UNIT_VTABLE(u)->will_restart(u);
3990 }
3991
3992 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3993 assert(u);
3994 assert(w >= 0 && w < _KILL_WHO_MAX);
3995 assert(SIGNAL_VALID(signo));
3996
3997 if (!UNIT_VTABLE(u)->kill)
3998 return -EOPNOTSUPP;
3999
4000 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4001 }
4002
4003 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4004 _cleanup_set_free_ Set *pid_set = NULL;
4005 int r;
4006
4007 pid_set = set_new(NULL);
4008 if (!pid_set)
4009 return NULL;
4010
4011 /* Exclude the main/control pids from being killed via the cgroup */
4012 if (main_pid > 0) {
4013 r = set_put(pid_set, PID_TO_PTR(main_pid));
4014 if (r < 0)
4015 return NULL;
4016 }
4017
4018 if (control_pid > 0) {
4019 r = set_put(pid_set, PID_TO_PTR(control_pid));
4020 if (r < 0)
4021 return NULL;
4022 }
4023
4024 return TAKE_PTR(pid_set);
4025 }
4026
4027 int unit_kill_common(
4028 Unit *u,
4029 KillWho who,
4030 int signo,
4031 pid_t main_pid,
4032 pid_t control_pid,
4033 sd_bus_error *error) {
4034
4035 int r = 0;
4036 bool killed = false;
4037
4038 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4039 if (main_pid < 0)
4040 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4041 else if (main_pid == 0)
4042 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4043 }
4044
4045 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4046 if (control_pid < 0)
4047 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4048 else if (control_pid == 0)
4049 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4050 }
4051
4052 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4053 if (control_pid > 0) {
4054 if (kill(control_pid, signo) < 0)
4055 r = -errno;
4056 else
4057 killed = true;
4058 }
4059
4060 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4061 if (main_pid > 0) {
4062 if (kill(main_pid, signo) < 0)
4063 r = -errno;
4064 else
4065 killed = true;
4066 }
4067
4068 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4069 _cleanup_set_free_ Set *pid_set = NULL;
4070 int q;
4071
4072 /* Exclude the main/control pids from being killed via the cgroup */
4073 pid_set = unit_pid_set(main_pid, control_pid);
4074 if (!pid_set)
4075 return -ENOMEM;
4076
4077 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4078 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4079 r = q;
4080 else
4081 killed = true;
4082 }
4083
4084 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4085 return -ESRCH;
4086
4087 return r;
4088 }
4089
4090 int unit_following_set(Unit *u, Set **s) {
4091 assert(u);
4092 assert(s);
4093
4094 if (UNIT_VTABLE(u)->following_set)
4095 return UNIT_VTABLE(u)->following_set(u, s);
4096
4097 *s = NULL;
4098 return 0;
4099 }
4100
4101 UnitFileState unit_get_unit_file_state(Unit *u) {
4102 int r;
4103
4104 assert(u);
4105
4106 if (u->unit_file_state < 0 && u->fragment_path) {
4107 r = unit_file_get_state(
4108 u->manager->unit_file_scope,
4109 NULL,
4110 u->id,
4111 &u->unit_file_state);
4112 if (r < 0)
4113 u->unit_file_state = UNIT_FILE_BAD;
4114 }
4115
4116 return u->unit_file_state;
4117 }
4118
4119 int unit_get_unit_file_preset(Unit *u) {
4120 assert(u);
4121
4122 if (u->unit_file_preset < 0 && u->fragment_path)
4123 u->unit_file_preset = unit_file_query_preset(
4124 u->manager->unit_file_scope,
4125 NULL,
4126 basename(u->fragment_path));
4127
4128 return u->unit_file_preset;
4129 }
4130
4131 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4132 assert(ref);
4133 assert(source);
4134 assert(target);
4135
4136 if (ref->target)
4137 unit_ref_unset(ref);
4138
4139 ref->source = source;
4140 ref->target = target;
4141 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4142 return target;
4143 }
4144
4145 void unit_ref_unset(UnitRef *ref) {
4146 assert(ref);
4147
4148 if (!ref->target)
4149 return;
4150
4151 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4152 * be unreferenced now. */
4153 unit_add_to_gc_queue(ref->target);
4154
4155 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4156 ref->source = ref->target = NULL;
4157 }
4158
4159 static int user_from_unit_name(Unit *u, char **ret) {
4160
4161 static const uint8_t hash_key[] = {
4162 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4163 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4164 };
4165
4166 _cleanup_free_ char *n = NULL;
4167 int r;
4168
4169 r = unit_name_to_prefix(u->id, &n);
4170 if (r < 0)
4171 return r;
4172
4173 if (valid_user_group_name(n)) {
4174 *ret = TAKE_PTR(n);
4175 return 0;
4176 }
4177
4178 /* If we can't use the unit name as a user name, then let's hash it and use that */
4179 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4180 return -ENOMEM;
4181
4182 return 0;
4183 }
4184
4185 int unit_patch_contexts(Unit *u) {
4186 CGroupContext *cc;
4187 ExecContext *ec;
4188 unsigned i;
4189 int r;
4190
4191 assert(u);
4192
4193 /* Patch in the manager defaults into the exec and cgroup
4194 * contexts, _after_ the rest of the settings have been
4195 * initialized */
4196
4197 ec = unit_get_exec_context(u);
4198 if (ec) {
4199 /* This only copies in the ones that need memory */
4200 for (i = 0; i < _RLIMIT_MAX; i++)
4201 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4202 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4203 if (!ec->rlimit[i])
4204 return -ENOMEM;
4205 }
4206
4207 if (MANAGER_IS_USER(u->manager) &&
4208 !ec->working_directory) {
4209
4210 r = get_home_dir(&ec->working_directory);
4211 if (r < 0)
4212 return r;
4213
4214 /* Allow user services to run, even if the
4215 * home directory is missing */
4216 ec->working_directory_missing_ok = true;
4217 }
4218
4219 if (ec->private_devices)
4220 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4221
4222 if (ec->protect_kernel_modules)
4223 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4224
4225 if (ec->dynamic_user) {
4226 if (!ec->user) {
4227 r = user_from_unit_name(u, &ec->user);
4228 if (r < 0)
4229 return r;
4230 }
4231
4232 if (!ec->group) {
4233 ec->group = strdup(ec->user);
4234 if (!ec->group)
4235 return -ENOMEM;
4236 }
4237
4238 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4239 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4240 * sandbox. */
4241
4242 ec->private_tmp = true;
4243 ec->remove_ipc = true;
4244 ec->protect_system = PROTECT_SYSTEM_STRICT;
4245 if (ec->protect_home == PROTECT_HOME_NO)
4246 ec->protect_home = PROTECT_HOME_READ_ONLY;
4247
4248 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4249 * them. */
4250 ec->no_new_privileges = true;
4251 ec->restrict_suid_sgid = true;
4252 }
4253 }
4254
4255 cc = unit_get_cgroup_context(u);
4256 if (cc && ec) {
4257
4258 if (ec->private_devices &&
4259 cc->device_policy == CGROUP_AUTO)
4260 cc->device_policy = CGROUP_CLOSED;
4261
4262 if (ec->root_image &&
4263 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4264
4265 /* When RootImage= is specified, the following devices are touched. */
4266 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4267 if (r < 0)
4268 return r;
4269
4270 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4271 if (r < 0)
4272 return r;
4273
4274 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4275 if (r < 0)
4276 return r;
4277 }
4278 }
4279
4280 return 0;
4281 }
4282
4283 ExecContext *unit_get_exec_context(Unit *u) {
4284 size_t offset;
4285 assert(u);
4286
4287 if (u->type < 0)
4288 return NULL;
4289
4290 offset = UNIT_VTABLE(u)->exec_context_offset;
4291 if (offset <= 0)
4292 return NULL;
4293
4294 return (ExecContext*) ((uint8_t*) u + offset);
4295 }
4296
4297 KillContext *unit_get_kill_context(Unit *u) {
4298 size_t offset;
4299 assert(u);
4300
4301 if (u->type < 0)
4302 return NULL;
4303
4304 offset = UNIT_VTABLE(u)->kill_context_offset;
4305 if (offset <= 0)
4306 return NULL;
4307
4308 return (KillContext*) ((uint8_t*) u + offset);
4309 }
4310
4311 CGroupContext *unit_get_cgroup_context(Unit *u) {
4312 size_t offset;
4313
4314 if (u->type < 0)
4315 return NULL;
4316
4317 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4318 if (offset <= 0)
4319 return NULL;
4320
4321 return (CGroupContext*) ((uint8_t*) u + offset);
4322 }
4323
4324 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4325 size_t offset;
4326
4327 if (u->type < 0)
4328 return NULL;
4329
4330 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4331 if (offset <= 0)
4332 return NULL;
4333
4334 return *(ExecRuntime**) ((uint8_t*) u + offset);
4335 }
4336
4337 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4338 assert(u);
4339
4340 if (UNIT_WRITE_FLAGS_NOOP(flags))
4341 return NULL;
4342
4343 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4344 return u->manager->lookup_paths.transient;
4345
4346 if (flags & UNIT_PERSISTENT)
4347 return u->manager->lookup_paths.persistent_control;
4348
4349 if (flags & UNIT_RUNTIME)
4350 return u->manager->lookup_paths.runtime_control;
4351
4352 return NULL;
4353 }
4354
4355 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4356 char *ret = NULL;
4357
4358 if (!s)
4359 return NULL;
4360
4361 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4362 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4363 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4364 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4365 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4366 * allocations. */
4367
4368 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4369 ret = specifier_escape(s);
4370 if (!ret)
4371 return NULL;
4372
4373 s = ret;
4374 }
4375
4376 if (flags & UNIT_ESCAPE_C) {
4377 char *a;
4378
4379 a = cescape(s);
4380 free(ret);
4381 if (!a)
4382 return NULL;
4383
4384 ret = a;
4385 }
4386
4387 if (buf) {
4388 *buf = ret;
4389 return ret ?: (char*) s;
4390 }
4391
4392 return ret ?: strdup(s);
4393 }
4394
4395 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4396 _cleanup_free_ char *result = NULL;
4397 size_t n = 0, allocated = 0;
4398 char **i;
4399
4400 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4401 * way suitable for ExecStart= stanzas */
4402
4403 STRV_FOREACH(i, l) {
4404 _cleanup_free_ char *buf = NULL;
4405 const char *p;
4406 size_t a;
4407 char *q;
4408
4409 p = unit_escape_setting(*i, flags, &buf);
4410 if (!p)
4411 return NULL;
4412
4413 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4414 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4415 return NULL;
4416
4417 q = result + n;
4418 if (n > 0)
4419 *(q++) = ' ';
4420
4421 *(q++) = '"';
4422 q = stpcpy(q, p);
4423 *(q++) = '"';
4424
4425 n += a;
4426 }
4427
4428 if (!GREEDY_REALLOC(result, allocated, n + 1))
4429 return NULL;
4430
4431 result[n] = 0;
4432
4433 return TAKE_PTR(result);
4434 }
4435
4436 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4437 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4438 const char *dir, *wrapped;
4439 int r;
4440
4441 assert(u);
4442 assert(name);
4443 assert(data);
4444
4445 if (UNIT_WRITE_FLAGS_NOOP(flags))
4446 return 0;
4447
4448 data = unit_escape_setting(data, flags, &escaped);
4449 if (!data)
4450 return -ENOMEM;
4451
4452 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4453 * previous section header is the same */
4454
4455 if (flags & UNIT_PRIVATE) {
4456 if (!UNIT_VTABLE(u)->private_section)
4457 return -EINVAL;
4458
4459 if (!u->transient_file || u->last_section_private < 0)
4460 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4461 else if (u->last_section_private == 0)
4462 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4463 } else {
4464 if (!u->transient_file || u->last_section_private < 0)
4465 data = strjoina("[Unit]\n", data);
4466 else if (u->last_section_private > 0)
4467 data = strjoina("\n[Unit]\n", data);
4468 }
4469
4470 if (u->transient_file) {
4471 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4472 * write to the transient unit file. */
4473 fputs(data, u->transient_file);
4474
4475 if (!endswith(data, "\n"))
4476 fputc('\n', u->transient_file);
4477
4478 /* Remember which section we wrote this entry to */
4479 u->last_section_private = !!(flags & UNIT_PRIVATE);
4480 return 0;
4481 }
4482
4483 dir = unit_drop_in_dir(u, flags);
4484 if (!dir)
4485 return -EINVAL;
4486
4487 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4488 "# or an equivalent operation. Do not edit.\n",
4489 data,
4490 "\n");
4491
4492 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4493 if (r < 0)
4494 return r;
4495
4496 (void) mkdir_p_label(p, 0755);
4497 r = write_string_file_atomic_label(q, wrapped);
4498 if (r < 0)
4499 return r;
4500
4501 r = strv_push(&u->dropin_paths, q);
4502 if (r < 0)
4503 return r;
4504 q = NULL;
4505
4506 strv_uniq(u->dropin_paths);
4507
4508 u->dropin_mtime = now(CLOCK_REALTIME);
4509
4510 return 0;
4511 }
4512
4513 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4514 _cleanup_free_ char *p = NULL;
4515 va_list ap;
4516 int r;
4517
4518 assert(u);
4519 assert(name);
4520 assert(format);
4521
4522 if (UNIT_WRITE_FLAGS_NOOP(flags))
4523 return 0;
4524
4525 va_start(ap, format);
4526 r = vasprintf(&p, format, ap);
4527 va_end(ap);
4528
4529 if (r < 0)
4530 return -ENOMEM;
4531
4532 return unit_write_setting(u, flags, name, p);
4533 }
4534
4535 int unit_make_transient(Unit *u) {
4536 _cleanup_free_ char *path = NULL;
4537 FILE *f;
4538
4539 assert(u);
4540
4541 if (!UNIT_VTABLE(u)->can_transient)
4542 return -EOPNOTSUPP;
4543
4544 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4545
4546 path = path_join(u->manager->lookup_paths.transient, u->id);
4547 if (!path)
4548 return -ENOMEM;
4549
4550 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4551 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4552
4553 RUN_WITH_UMASK(0022) {
4554 f = fopen(path, "we");
4555 if (!f)
4556 return -errno;
4557 }
4558
4559 safe_fclose(u->transient_file);
4560 u->transient_file = f;
4561
4562 free_and_replace(u->fragment_path, path);
4563
4564 u->source_path = mfree(u->source_path);
4565 u->dropin_paths = strv_free(u->dropin_paths);
4566 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4567
4568 u->load_state = UNIT_STUB;
4569 u->load_error = 0;
4570 u->transient = true;
4571
4572 unit_add_to_dbus_queue(u);
4573 unit_add_to_gc_queue(u);
4574
4575 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4576 u->transient_file);
4577
4578 return 0;
4579 }
4580
4581 static int log_kill(pid_t pid, int sig, void *userdata) {
4582 _cleanup_free_ char *comm = NULL;
4583
4584 (void) get_process_comm(pid, &comm);
4585
4586 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4587 only, like for example systemd's own PAM stub process. */
4588 if (comm && comm[0] == '(')
4589 return 0;
4590
4591 log_unit_notice(userdata,
4592 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4593 pid,
4594 strna(comm),
4595 signal_to_string(sig));
4596
4597 return 1;
4598 }
4599
4600 static int operation_to_signal(KillContext *c, KillOperation k) {
4601 assert(c);
4602
4603 switch (k) {
4604
4605 case KILL_TERMINATE:
4606 case KILL_TERMINATE_AND_LOG:
4607 return c->kill_signal;
4608
4609 case KILL_KILL:
4610 return c->final_kill_signal;
4611
4612 case KILL_WATCHDOG:
4613 return c->watchdog_signal;
4614
4615 default:
4616 assert_not_reached("KillOperation unknown");
4617 }
4618 }
4619
4620 int unit_kill_context(
4621 Unit *u,
4622 KillContext *c,
4623 KillOperation k,
4624 pid_t main_pid,
4625 pid_t control_pid,
4626 bool main_pid_alien) {
4627
4628 bool wait_for_exit = false, send_sighup;
4629 cg_kill_log_func_t log_func = NULL;
4630 int sig, r;
4631
4632 assert(u);
4633 assert(c);
4634
4635 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4636 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4637
4638 if (c->kill_mode == KILL_NONE)
4639 return 0;
4640
4641 sig = operation_to_signal(c, k);
4642
4643 send_sighup =
4644 c->send_sighup &&
4645 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4646 sig != SIGHUP;
4647
4648 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4649 log_func = log_kill;
4650
4651 if (main_pid > 0) {
4652 if (log_func)
4653 log_func(main_pid, sig, u);
4654
4655 r = kill_and_sigcont(main_pid, sig);
4656 if (r < 0 && r != -ESRCH) {
4657 _cleanup_free_ char *comm = NULL;
4658 (void) get_process_comm(main_pid, &comm);
4659
4660 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4661 } else {
4662 if (!main_pid_alien)
4663 wait_for_exit = true;
4664
4665 if (r != -ESRCH && send_sighup)
4666 (void) kill(main_pid, SIGHUP);
4667 }
4668 }
4669
4670 if (control_pid > 0) {
4671 if (log_func)
4672 log_func(control_pid, sig, u);
4673
4674 r = kill_and_sigcont(control_pid, sig);
4675 if (r < 0 && r != -ESRCH) {
4676 _cleanup_free_ char *comm = NULL;
4677 (void) get_process_comm(control_pid, &comm);
4678
4679 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4680 } else {
4681 wait_for_exit = true;
4682
4683 if (r != -ESRCH && send_sighup)
4684 (void) kill(control_pid, SIGHUP);
4685 }
4686 }
4687
4688 if (u->cgroup_path &&
4689 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4690 _cleanup_set_free_ Set *pid_set = NULL;
4691
4692 /* Exclude the main/control pids from being killed via the cgroup */
4693 pid_set = unit_pid_set(main_pid, control_pid);
4694 if (!pid_set)
4695 return -ENOMEM;
4696
4697 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4698 sig,
4699 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4700 pid_set,
4701 log_func, u);
4702 if (r < 0) {
4703 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4704 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4705
4706 } else if (r > 0) {
4707
4708 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4709 * we are running in a container or if this is a delegation unit, simply because cgroup
4710 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4711 * of containers it can be confused easily by left-over directories in the cgroup — which
4712 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4713 * there we get proper events. Hence rely on them. */
4714
4715 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4716 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4717 wait_for_exit = true;
4718
4719 if (send_sighup) {
4720 set_free(pid_set);
4721
4722 pid_set = unit_pid_set(main_pid, control_pid);
4723 if (!pid_set)
4724 return -ENOMEM;
4725
4726 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4727 SIGHUP,
4728 CGROUP_IGNORE_SELF,
4729 pid_set,
4730 NULL, NULL);
4731 }
4732 }
4733 }
4734
4735 return wait_for_exit;
4736 }
4737
4738 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4739 _cleanup_free_ char *p = NULL;
4740 UnitDependencyInfo di;
4741 int r;
4742
4743 assert(u);
4744 assert(path);
4745
4746 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4747 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4748 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4749 * determine which units to make themselves a dependency of. */
4750
4751 if (!path_is_absolute(path))
4752 return -EINVAL;
4753
4754 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4755 if (r < 0)
4756 return r;
4757
4758 p = strdup(path);
4759 if (!p)
4760 return -ENOMEM;
4761
4762 path = path_simplify(p, true);
4763
4764 if (!path_is_normalized(path))
4765 return -EPERM;
4766
4767 if (hashmap_contains(u->requires_mounts_for, path))
4768 return 0;
4769
4770 di = (UnitDependencyInfo) {
4771 .origin_mask = mask
4772 };
4773
4774 r = hashmap_put(u->requires_mounts_for, path, di.data);
4775 if (r < 0)
4776 return r;
4777 p = NULL;
4778
4779 char prefix[strlen(path) + 1];
4780 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4781 Set *x;
4782
4783 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4784 if (!x) {
4785 _cleanup_free_ char *q = NULL;
4786
4787 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4788 if (r < 0)
4789 return r;
4790
4791 q = strdup(prefix);
4792 if (!q)
4793 return -ENOMEM;
4794
4795 x = set_new(NULL);
4796 if (!x)
4797 return -ENOMEM;
4798
4799 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4800 if (r < 0) {
4801 set_free(x);
4802 return r;
4803 }
4804 q = NULL;
4805 }
4806
4807 r = set_put(x, u);
4808 if (r < 0)
4809 return r;
4810 }
4811
4812 return 0;
4813 }
4814
4815 int unit_setup_exec_runtime(Unit *u) {
4816 ExecRuntime **rt;
4817 size_t offset;
4818 Unit *other;
4819 Iterator i;
4820 void *v;
4821 int r;
4822
4823 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4824 assert(offset > 0);
4825
4826 /* Check if there already is an ExecRuntime for this unit? */
4827 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4828 if (*rt)
4829 return 0;
4830
4831 /* Try to get it from somebody else */
4832 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4833 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4834 if (r == 1)
4835 return 1;
4836 }
4837
4838 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4839 }
4840
4841 int unit_setup_dynamic_creds(Unit *u) {
4842 ExecContext *ec;
4843 DynamicCreds *dcreds;
4844 size_t offset;
4845
4846 assert(u);
4847
4848 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4849 assert(offset > 0);
4850 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4851
4852 ec = unit_get_exec_context(u);
4853 assert(ec);
4854
4855 if (!ec->dynamic_user)
4856 return 0;
4857
4858 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4859 }
4860
4861 bool unit_type_supported(UnitType t) {
4862 if (_unlikely_(t < 0))
4863 return false;
4864 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4865 return false;
4866
4867 if (!unit_vtable[t]->supported)
4868 return true;
4869
4870 return unit_vtable[t]->supported();
4871 }
4872
4873 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4874 int r;
4875
4876 assert(u);
4877 assert(where);
4878
4879 r = dir_is_empty(where);
4880 if (r > 0 || r == -ENOTDIR)
4881 return;
4882 if (r < 0) {
4883 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4884 return;
4885 }
4886
4887 log_struct(LOG_NOTICE,
4888 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4889 LOG_UNIT_ID(u),
4890 LOG_UNIT_INVOCATION_ID(u),
4891 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4892 "WHERE=%s", where);
4893 }
4894
4895 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4896 _cleanup_free_ char *canonical_where = NULL;
4897 int r;
4898
4899 assert(u);
4900 assert(where);
4901
4902 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4903 if (r < 0) {
4904 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4905 return 0;
4906 }
4907
4908 /* We will happily ignore a trailing slash (or any redundant slashes) */
4909 if (path_equal(where, canonical_where))
4910 return 0;
4911
4912 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4913 log_struct(LOG_ERR,
4914 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4915 LOG_UNIT_ID(u),
4916 LOG_UNIT_INVOCATION_ID(u),
4917 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4918 "WHERE=%s", where);
4919
4920 return -ELOOP;
4921 }
4922
4923 bool unit_is_pristine(Unit *u) {
4924 assert(u);
4925
4926 /* Check if the unit already exists or is already around,
4927 * in a number of different ways. Note that to cater for unit
4928 * types such as slice, we are generally fine with units that
4929 * are marked UNIT_LOADED even though nothing was actually
4930 * loaded, as those unit types don't require a file on disk. */
4931
4932 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4933 u->fragment_path ||
4934 u->source_path ||
4935 !strv_isempty(u->dropin_paths) ||
4936 u->job ||
4937 u->merged_into);
4938 }
4939
4940 pid_t unit_control_pid(Unit *u) {
4941 assert(u);
4942
4943 if (UNIT_VTABLE(u)->control_pid)
4944 return UNIT_VTABLE(u)->control_pid(u);
4945
4946 return 0;
4947 }
4948
4949 pid_t unit_main_pid(Unit *u) {
4950 assert(u);
4951
4952 if (UNIT_VTABLE(u)->main_pid)
4953 return UNIT_VTABLE(u)->main_pid(u);
4954
4955 return 0;
4956 }
4957
4958 static void unit_unref_uid_internal(
4959 Unit *u,
4960 uid_t *ref_uid,
4961 bool destroy_now,
4962 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4963
4964 assert(u);
4965 assert(ref_uid);
4966 assert(_manager_unref_uid);
4967
4968 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4969 * gid_t are actually the same time, with the same validity rules.
4970 *
4971 * Drops a reference to UID/GID from a unit. */
4972
4973 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4974 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4975
4976 if (!uid_is_valid(*ref_uid))
4977 return;
4978
4979 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4980 *ref_uid = UID_INVALID;
4981 }
4982
4983 void unit_unref_uid(Unit *u, bool destroy_now) {
4984 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4985 }
4986
4987 void unit_unref_gid(Unit *u, bool destroy_now) {
4988 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4989 }
4990
4991 static int unit_ref_uid_internal(
4992 Unit *u,
4993 uid_t *ref_uid,
4994 uid_t uid,
4995 bool clean_ipc,
4996 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4997
4998 int r;
4999
5000 assert(u);
5001 assert(ref_uid);
5002 assert(uid_is_valid(uid));
5003 assert(_manager_ref_uid);
5004
5005 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5006 * are actually the same type, and have the same validity rules.
5007 *
5008 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5009 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5010 * drops to zero. */
5011
5012 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5013 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5014
5015 if (*ref_uid == uid)
5016 return 0;
5017
5018 if (uid_is_valid(*ref_uid)) /* Already set? */
5019 return -EBUSY;
5020
5021 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5022 if (r < 0)
5023 return r;
5024
5025 *ref_uid = uid;
5026 return 1;
5027 }
5028
5029 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5030 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5031 }
5032
5033 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5034 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5035 }
5036
5037 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5038 int r = 0, q = 0;
5039
5040 assert(u);
5041
5042 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5043
5044 if (uid_is_valid(uid)) {
5045 r = unit_ref_uid(u, uid, clean_ipc);
5046 if (r < 0)
5047 return r;
5048 }
5049
5050 if (gid_is_valid(gid)) {
5051 q = unit_ref_gid(u, gid, clean_ipc);
5052 if (q < 0) {
5053 if (r > 0)
5054 unit_unref_uid(u, false);
5055
5056 return q;
5057 }
5058 }
5059
5060 return r > 0 || q > 0;
5061 }
5062
5063 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5064 ExecContext *c;
5065 int r;
5066
5067 assert(u);
5068
5069 c = unit_get_exec_context(u);
5070
5071 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5072 if (r < 0)
5073 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5074
5075 return r;
5076 }
5077
5078 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5079 assert(u);
5080
5081 unit_unref_uid(u, destroy_now);
5082 unit_unref_gid(u, destroy_now);
5083 }
5084
5085 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5086 int r;
5087
5088 assert(u);
5089
5090 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5091 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5092 * objects when no service references the UID/GID anymore. */
5093
5094 r = unit_ref_uid_gid(u, uid, gid);
5095 if (r > 0)
5096 unit_add_to_dbus_queue(u);
5097 }
5098
5099 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5100 int r;
5101
5102 assert(u);
5103
5104 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5105
5106 if (sd_id128_equal(u->invocation_id, id))
5107 return 0;
5108
5109 if (!sd_id128_is_null(u->invocation_id))
5110 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5111
5112 if (sd_id128_is_null(id)) {
5113 r = 0;
5114 goto reset;
5115 }
5116
5117 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5118 if (r < 0)
5119 goto reset;
5120
5121 u->invocation_id = id;
5122 sd_id128_to_string(id, u->invocation_id_string);
5123
5124 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5125 if (r < 0)
5126 goto reset;
5127
5128 return 0;
5129
5130 reset:
5131 u->invocation_id = SD_ID128_NULL;
5132 u->invocation_id_string[0] = 0;
5133 return r;
5134 }
5135
5136 int unit_acquire_invocation_id(Unit *u) {
5137 sd_id128_t id;
5138 int r;
5139
5140 assert(u);
5141
5142 r = sd_id128_randomize(&id);
5143 if (r < 0)
5144 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5145
5146 r = unit_set_invocation_id(u, id);
5147 if (r < 0)
5148 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5149
5150 unit_add_to_dbus_queue(u);
5151 return 0;
5152 }
5153
5154 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5155 int r;
5156
5157 assert(u);
5158 assert(p);
5159
5160 /* Copy parameters from manager */
5161 r = manager_get_effective_environment(u->manager, &p->environment);
5162 if (r < 0)
5163 return r;
5164
5165 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5166 p->cgroup_supported = u->manager->cgroup_supported;
5167 p->prefix = u->manager->prefix;
5168 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5169
5170 /* Copy parameters from unit */
5171 p->cgroup_path = u->cgroup_path;
5172 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5173
5174 return 0;
5175 }
5176
5177 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5178 int r;
5179
5180 assert(u);
5181 assert(ret);
5182
5183 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5184 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5185
5186 (void) unit_realize_cgroup(u);
5187
5188 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5189 if (r != 0)
5190 return r;
5191
5192 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5193 (void) ignore_signals(SIGPIPE, -1);
5194
5195 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5196
5197 if (u->cgroup_path) {
5198 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5199 if (r < 0) {
5200 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5201 _exit(EXIT_CGROUP);
5202 }
5203 }
5204
5205 return 0;
5206 }
5207
5208 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5209 assert(u);
5210 assert(d >= 0);
5211 assert(d < _UNIT_DEPENDENCY_MAX);
5212 assert(other);
5213
5214 if (di.origin_mask == 0 && di.destination_mask == 0) {
5215 /* No bit set anymore, let's drop the whole entry */
5216 assert_se(hashmap_remove(u->dependencies[d], other));
5217 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5218 } else
5219 /* Mask was reduced, let's update the entry */
5220 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5221 }
5222
5223 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5224 UnitDependency d;
5225
5226 assert(u);
5227
5228 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5229
5230 if (mask == 0)
5231 return;
5232
5233 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5234 bool done;
5235
5236 do {
5237 UnitDependencyInfo di;
5238 Unit *other;
5239 Iterator i;
5240
5241 done = true;
5242
5243 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5244 UnitDependency q;
5245
5246 if ((di.origin_mask & ~mask) == di.origin_mask)
5247 continue;
5248 di.origin_mask &= ~mask;
5249 unit_update_dependency_mask(u, d, other, di);
5250
5251 /* We updated the dependency from our unit to the other unit now. But most dependencies
5252 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5253 * all dependency types on the other unit and delete all those which point to us and
5254 * have the right mask set. */
5255
5256 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5257 UnitDependencyInfo dj;
5258
5259 dj.data = hashmap_get(other->dependencies[q], u);
5260 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5261 continue;
5262 dj.destination_mask &= ~mask;
5263
5264 unit_update_dependency_mask(other, q, u, dj);
5265 }
5266
5267 unit_add_to_gc_queue(other);
5268
5269 done = false;
5270 break;
5271 }
5272
5273 } while (!done);
5274 }
5275 }
5276
5277 static int unit_export_invocation_id(Unit *u) {
5278 const char *p;
5279 int r;
5280
5281 assert(u);
5282
5283 if (u->exported_invocation_id)
5284 return 0;
5285
5286 if (sd_id128_is_null(u->invocation_id))
5287 return 0;
5288
5289 p = strjoina("/run/systemd/units/invocation:", u->id);
5290 r = symlink_atomic(u->invocation_id_string, p);
5291 if (r < 0)
5292 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5293
5294 u->exported_invocation_id = true;
5295 return 0;
5296 }
5297
5298 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5299 const char *p;
5300 char buf[2];
5301 int r;
5302
5303 assert(u);
5304 assert(c);
5305
5306 if (u->exported_log_level_max)
5307 return 0;
5308
5309 if (c->log_level_max < 0)
5310 return 0;
5311
5312 assert(c->log_level_max <= 7);
5313
5314 buf[0] = '0' + c->log_level_max;
5315 buf[1] = 0;
5316
5317 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5318 r = symlink_atomic(buf, p);
5319 if (r < 0)
5320 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5321
5322 u->exported_log_level_max = true;
5323 return 0;
5324 }
5325
5326 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5327 _cleanup_close_ int fd = -1;
5328 struct iovec *iovec;
5329 const char *p;
5330 char *pattern;
5331 le64_t *sizes;
5332 ssize_t n;
5333 size_t i;
5334 int r;
5335
5336 if (u->exported_log_extra_fields)
5337 return 0;
5338
5339 if (c->n_log_extra_fields <= 0)
5340 return 0;
5341
5342 sizes = newa(le64_t, c->n_log_extra_fields);
5343 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5344
5345 for (i = 0; i < c->n_log_extra_fields; i++) {
5346 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5347
5348 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5349 iovec[i*2+1] = c->log_extra_fields[i];
5350 }
5351
5352 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5353 pattern = strjoina(p, ".XXXXXX");
5354
5355 fd = mkostemp_safe(pattern);
5356 if (fd < 0)
5357 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5358
5359 n = writev(fd, iovec, c->n_log_extra_fields*2);
5360 if (n < 0) {
5361 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5362 goto fail;
5363 }
5364
5365 (void) fchmod(fd, 0644);
5366
5367 if (rename(pattern, p) < 0) {
5368 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5369 goto fail;
5370 }
5371
5372 u->exported_log_extra_fields = true;
5373 return 0;
5374
5375 fail:
5376 (void) unlink(pattern);
5377 return r;
5378 }
5379
5380 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5381 _cleanup_free_ char *buf = NULL;
5382 const char *p;
5383 int r;
5384
5385 assert(u);
5386 assert(c);
5387
5388 if (u->exported_log_rate_limit_interval)
5389 return 0;
5390
5391 if (c->log_rate_limit_interval_usec == 0)
5392 return 0;
5393
5394 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5395
5396 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5397 return log_oom();
5398
5399 r = symlink_atomic(buf, p);
5400 if (r < 0)
5401 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5402
5403 u->exported_log_rate_limit_interval = true;
5404 return 0;
5405 }
5406
5407 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5408 _cleanup_free_ char *buf = NULL;
5409 const char *p;
5410 int r;
5411
5412 assert(u);
5413 assert(c);
5414
5415 if (u->exported_log_rate_limit_burst)
5416 return 0;
5417
5418 if (c->log_rate_limit_burst == 0)
5419 return 0;
5420
5421 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5422
5423 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5424 return log_oom();
5425
5426 r = symlink_atomic(buf, p);
5427 if (r < 0)
5428 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5429
5430 u->exported_log_rate_limit_burst = true;
5431 return 0;
5432 }
5433
5434 void unit_export_state_files(Unit *u) {
5435 const ExecContext *c;
5436
5437 assert(u);
5438
5439 if (!u->id)
5440 return;
5441
5442 if (!MANAGER_IS_SYSTEM(u->manager))
5443 return;
5444
5445 if (MANAGER_IS_TEST_RUN(u->manager))
5446 return;
5447
5448 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5449 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5450 * the IPC system itself and PID 1 also log to the journal.
5451 *
5452 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5453 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5454 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5455 * namespace at least.
5456 *
5457 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5458 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5459 * them with one. */
5460
5461 (void) unit_export_invocation_id(u);
5462
5463 c = unit_get_exec_context(u);
5464 if (c) {
5465 (void) unit_export_log_level_max(u, c);
5466 (void) unit_export_log_extra_fields(u, c);
5467 (void) unit_export_log_rate_limit_interval(u, c);
5468 (void) unit_export_log_rate_limit_burst(u, c);
5469 }
5470 }
5471
5472 void unit_unlink_state_files(Unit *u) {
5473 const char *p;
5474
5475 assert(u);
5476
5477 if (!u->id)
5478 return;
5479
5480 if (!MANAGER_IS_SYSTEM(u->manager))
5481 return;
5482
5483 /* Undoes the effect of unit_export_state() */
5484
5485 if (u->exported_invocation_id) {
5486 p = strjoina("/run/systemd/units/invocation:", u->id);
5487 (void) unlink(p);
5488
5489 u->exported_invocation_id = false;
5490 }
5491
5492 if (u->exported_log_level_max) {
5493 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5494 (void) unlink(p);
5495
5496 u->exported_log_level_max = false;
5497 }
5498
5499 if (u->exported_log_extra_fields) {
5500 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5501 (void) unlink(p);
5502
5503 u->exported_log_extra_fields = false;
5504 }
5505
5506 if (u->exported_log_rate_limit_interval) {
5507 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5508 (void) unlink(p);
5509
5510 u->exported_log_rate_limit_interval = false;
5511 }
5512
5513 if (u->exported_log_rate_limit_burst) {
5514 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5515 (void) unlink(p);
5516
5517 u->exported_log_rate_limit_burst = false;
5518 }
5519 }
5520
5521 int unit_prepare_exec(Unit *u) {
5522 int r;
5523
5524 assert(u);
5525
5526 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5527 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5528 r = bpf_firewall_load_custom(u);
5529 if (r < 0)
5530 return r;
5531
5532 /* Prepares everything so that we can fork of a process for this unit */
5533
5534 (void) unit_realize_cgroup(u);
5535
5536 if (u->reset_accounting) {
5537 (void) unit_reset_accounting(u);
5538 u->reset_accounting = false;
5539 }
5540
5541 unit_export_state_files(u);
5542
5543 r = unit_setup_exec_runtime(u);
5544 if (r < 0)
5545 return r;
5546
5547 r = unit_setup_dynamic_creds(u);
5548 if (r < 0)
5549 return r;
5550
5551 return 0;
5552 }
5553
5554 static int log_leftover(pid_t pid, int sig, void *userdata) {
5555 _cleanup_free_ char *comm = NULL;
5556
5557 (void) get_process_comm(pid, &comm);
5558
5559 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5560 return 0;
5561
5562 log_unit_warning(userdata,
5563 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5564 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5565 pid, strna(comm));
5566
5567 return 1;
5568 }
5569
5570 int unit_warn_leftover_processes(Unit *u) {
5571 assert(u);
5572
5573 (void) unit_pick_cgroup_path(u);
5574
5575 if (!u->cgroup_path)
5576 return 0;
5577
5578 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5579 }
5580
5581 bool unit_needs_console(Unit *u) {
5582 ExecContext *ec;
5583 UnitActiveState state;
5584
5585 assert(u);
5586
5587 state = unit_active_state(u);
5588
5589 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5590 return false;
5591
5592 if (UNIT_VTABLE(u)->needs_console)
5593 return UNIT_VTABLE(u)->needs_console(u);
5594
5595 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5596 ec = unit_get_exec_context(u);
5597 if (!ec)
5598 return false;
5599
5600 return exec_context_may_touch_console(ec);
5601 }
5602
5603 const char *unit_label_path(Unit *u) {
5604 const char *p;
5605
5606 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5607 * when validating access checks. */
5608
5609 p = u->source_path ?: u->fragment_path;
5610 if (!p)
5611 return NULL;
5612
5613 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5614 if (path_equal(p, "/dev/null"))
5615 return NULL;
5616
5617 return p;
5618 }
5619
5620 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5621 int r;
5622
5623 assert(u);
5624
5625 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5626 * and not a kernel thread either */
5627
5628 /* First, a simple range check */
5629 if (!pid_is_valid(pid))
5630 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5631
5632 /* Some extra safety check */
5633 if (pid == 1 || pid == getpid_cached())
5634 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5635
5636 /* Don't even begin to bother with kernel threads */
5637 r = is_kernel_thread(pid);
5638 if (r == -ESRCH)
5639 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5640 if (r < 0)
5641 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5642 if (r > 0)
5643 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5644
5645 return 0;
5646 }
5647
5648 void unit_log_success(Unit *u) {
5649 assert(u);
5650
5651 log_struct(LOG_INFO,
5652 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5653 LOG_UNIT_ID(u),
5654 LOG_UNIT_INVOCATION_ID(u),
5655 LOG_UNIT_MESSAGE(u, "Succeeded."));
5656 }
5657
5658 void unit_log_failure(Unit *u, const char *result) {
5659 assert(u);
5660 assert(result);
5661
5662 log_struct(LOG_WARNING,
5663 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5664 LOG_UNIT_ID(u),
5665 LOG_UNIT_INVOCATION_ID(u),
5666 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5667 "UNIT_RESULT=%s", result);
5668 }
5669
5670 void unit_log_skip(Unit *u, const char *result) {
5671 assert(u);
5672 assert(result);
5673
5674 log_struct(LOG_INFO,
5675 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5676 LOG_UNIT_ID(u),
5677 LOG_UNIT_INVOCATION_ID(u),
5678 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5679 "UNIT_RESULT=%s", result);
5680 }
5681
5682 void unit_log_process_exit(
5683 Unit *u,
5684 int level,
5685 const char *kind,
5686 const char *command,
5687 int code,
5688 int status) {
5689
5690 assert(u);
5691 assert(kind);
5692
5693 if (code != CLD_EXITED)
5694 level = LOG_WARNING;
5695
5696 log_struct(level,
5697 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5698 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5699 kind,
5700 sigchld_code_to_string(code), status,
5701 strna(code == CLD_EXITED
5702 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5703 : signal_to_string(status))),
5704 "EXIT_CODE=%s", sigchld_code_to_string(code),
5705 "EXIT_STATUS=%i", status,
5706 "COMMAND=%s", strna(command),
5707 LOG_UNIT_ID(u),
5708 LOG_UNIT_INVOCATION_ID(u));
5709 }
5710
5711 int unit_exit_status(Unit *u) {
5712 assert(u);
5713
5714 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5715 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5716 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5717 * service process has exited abnormally (signal/coredump). */
5718
5719 if (!UNIT_VTABLE(u)->exit_status)
5720 return -EOPNOTSUPP;
5721
5722 return UNIT_VTABLE(u)->exit_status(u);
5723 }
5724
5725 int unit_failure_action_exit_status(Unit *u) {
5726 int r;
5727
5728 assert(u);
5729
5730 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5731
5732 if (u->failure_action_exit_status >= 0)
5733 return u->failure_action_exit_status;
5734
5735 r = unit_exit_status(u);
5736 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5737 return 255;
5738
5739 return r;
5740 }
5741
5742 int unit_success_action_exit_status(Unit *u) {
5743 int r;
5744
5745 assert(u);
5746
5747 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5748
5749 if (u->success_action_exit_status >= 0)
5750 return u->success_action_exit_status;
5751
5752 r = unit_exit_status(u);
5753 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5754 return 255;
5755
5756 return r;
5757 }
5758
5759 int unit_test_trigger_loaded(Unit *u) {
5760 Unit *trigger;
5761
5762 /* Tests whether the unit to trigger is loaded */
5763
5764 trigger = UNIT_TRIGGER(u);
5765 if (!trigger)
5766 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit to trigger not loaded.");
5767 if (trigger->load_state != UNIT_LOADED)
5768 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit %s to trigger not loaded.", u->id);
5769
5770 return 0;
5771 }
5772
5773 int unit_clean(Unit *u, ExecCleanMask mask) {
5774 UnitActiveState state;
5775
5776 assert(u);
5777
5778 /* Special return values:
5779 *
5780 * -EOPNOTSUPP → cleaning not supported for this unit type
5781 * -EUNATCH → cleaning not defined for this resource type
5782 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5783 * a job queued or similar
5784 */
5785
5786 if (!UNIT_VTABLE(u)->clean)
5787 return -EOPNOTSUPP;
5788
5789 if (mask == 0)
5790 return -EUNATCH;
5791
5792 if (u->load_state != UNIT_LOADED)
5793 return -EBUSY;
5794
5795 if (u->job)
5796 return -EBUSY;
5797
5798 state = unit_active_state(u);
5799 if (!IN_SET(state, UNIT_INACTIVE))
5800 return -EBUSY;
5801
5802 return UNIT_VTABLE(u)->clean(u, mask);
5803 }
5804
5805 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5806 assert(u);
5807
5808 if (!UNIT_VTABLE(u)->clean ||
5809 u->load_state != UNIT_LOADED) {
5810 *ret = 0;
5811 return 0;
5812 }
5813
5814 /* When the clean() method is set, can_clean() really should be set too */
5815 assert(UNIT_VTABLE(u)->can_clean);
5816
5817 return UNIT_VTABLE(u)->can_clean(u, ret);
5818 }
5819
5820 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5821 [COLLECT_INACTIVE] = "inactive",
5822 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5823 };
5824
5825 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);