/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "io-util.h"
#include "install.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);

Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}
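
/* Illustrative sketch, not part of the original file: every concrete unit type
 * embeds Unit as its first member, which is what the "size >= sizeof(Unit)"
 * contract of unit_new() relies on. The type and name below are hypothetical. */
#if 0
typedef struct ExampleService {
        Unit meta;       /* must come first, so that a Unit* can alias the object */
        int extra_state; /* type-specific fields follow */
} ExampleService;

static int example_allocate(Manager *m, Unit **ret) {
        /* Allocates the full ExampleService object, registers the name and
         * returns the Unit* on success. */
        return unit_new_for_name(m, sizeof(ExampleService), "example.service", ret);
}
#endif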

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
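
/* Illustrative example, not part of the original file: when a template name is
 * added to an instanced unit, the template is resolved against the unit's own
 * instance first. The names are hypothetical. */
#if 0
/* Suppose u->instance == "tty1". Then: */
r = unit_add_name(u, "getty@.service");
/* ...registers "getty@tty1.service" as an additional name of u, both in
 * u->names and in u->manager->units. */
#endif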

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
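
/* Illustrative note, not part of the original file: dependencies are stored
 * bidirectionally, which is the invariant bidi_set_free() maintains on
 * destruction. For hypothetical units a and b: */
#if 0
/* If a Wants b, then both of these hold: */
assert(hashmap_contains(a->dependencies[UNIT_WANTS], b));
assert(hashmap_contains(b->dependencies[UNIT_WANTED_BY], a));
/* bidi_set_free(a, h) walks h and drops a from every inverse map. */
#endif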

static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
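
/* Illustrative note, not part of the original file: PATH_FOREACH_PREFIX_MORE
 * visits the path itself and each of its parent prefixes, which is how the
 * cleanup above finds every per-prefix set the unit was registered in. For a
 * hypothetical path: */
#if 0
char s[strlen("/var/lib/foo") + 1];
PATH_FOREACH_PREFIX_MORE(s, "/var/lib/foo") {
        /* s iterates over "/var/lib/foo", "/var/lib", "/var", ... */
}
#endif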

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        set_free(u->ip_bpf_custom_ingress);
        set_free(u->ip_bpf_custom_egress);
        set_free(u->ip_bpf_custom_ingress_installed);
        set_free(u->ip_bpf_custom_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int set_complete_move(Set **s, Set **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return set_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return hashmap_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}
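
/* Illustrative sketch, not part of the original file: after either "complete
 * move" helper succeeds, the destination owns all entries and the source is
 * drained or gone. "dst", "src" and "my_map" are hypothetical: */
#if 0
Hashmap *dst = NULL, *src = my_map; /* my_map: some existing, populated map */
assert_se(hashmap_complete_move(&dst, &src) == 0);
/* dst now holds what src held; src is NULL (ownership transferred) or empty. */
#endif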

static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}

static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}

static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependent unit, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}

int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
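
/* Illustrative sketch, not part of the original file: merging typically happens
 * when two names turn out to refer to the same unit file, e.g. via an alias
 * symlink. The names below are hypothetical. */
#if 0
/* Suppose "b.service" was created as a stub while resolving a dependency and
 * then turns out to be an alias of the already-loaded "a.service": */
r = unit_merge(u_a, u_b);
/* On success u_b->load_state == UNIT_MERGED, its names and dependencies now
 * live on u_a, and lookups on u_b are redirected via unit_follow_merge(). */
#endif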

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = path_join(u->manager->prefix[dt], *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}

const char *unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char *unit_status_string(Unit *u) {
        assert(u);

        if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
                return u->id;

        return unit_description(u);
}

static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (mask == 0)
                        break;

                if (FLAGS_SET(mask, table[i].mask)) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}
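
/* Illustrative note, not part of the original file: for a mask combining, say,
 * UNIT_DEPENDENCY_FILE and UNIT_DEPENDENCY_UDEV with kind "origin", the helper
 * above prints "origin-file origin-udev". The shared *space flag keeps the
 * origin and destination lists in unit_dump() separated by single spaces. */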

void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

        fprintf(f,
                "%s-> Unit %s:\n",
                prefix, u->id);

        SET_FOREACH(t, u->names, i)
                if (!streq(t, u->id))
                        fprintf(f, "%s\tAlias: %s\n", prefix, t);

        fprintf(f,
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tMay GC: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }

        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_delegate_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
        }

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->failure_action_exit_status >= 0)
                fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
        if (u->success_action_exit_status >= 0)
                fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                return -ENOENT;

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin_optional(Unit *u) {
        int r;

        assert(u);

        /* Same as unit_load_fragment_and_dropin(), but whether
         * something can be loaded or not doesn't matter. */

        /* Load a .service/.socket/.slice/… file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                u->load_state = UNIT_LOADED;

        /* Load drop-in directory data */
        return unit_load_dropin(unit_follow_merge(u));
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}
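
/* Illustrative note, not part of the original file: slice containment is encoded
 * in the unit name itself, so no configuration is needed for the implicit case.
 * A hypothetical "foo-bar.slice" is implicitly After= and Requires= its parent
 * "foo.slice" (UNIT_DEPENDENCY_IMPLICIT), while a service placed in a slice via
 * Slice= gets the same two dependencies with the UNIT_DEPENDENCY_FILE mask. */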

static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
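
/* Illustrative note, not part of the original file: unit_name_from_path()
 * escapes a path into a mount unit name, e.g. "/var/lib" becomes
 * "var-lib.mount". A hypothetical RequiresMountsFor=/var/lib/foo therefore
 * probes "var.mount", "var-lib.mount" and "var-lib-foo.mount", adding After=
 * (and Requires= where a fragment exists) for each one that is loaded. */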

static int unit_add_startup_units(Unit *u) {
        CGroupContext *c;
        int r;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
            c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
            c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
                return 0;

        r = set_ensure_allocated(&u->manager->startup_units, NULL);
        if (r < 0)
                return r;

        return set_put(u->manager->startup_units, u);
}

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate is set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->assert_result;
}

void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_status_string(u);
        if (log_get_show_color())
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}

int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_limit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason, units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 *         -EALREADY:   Unit is already started.
 *         -ECOMM:      Condition failed
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:      This unit type does not support starting.
 *         -ECANCELED:  Start limit hit, too many requests for now
 *         -EPROTO:     Assert failed
 *         -EINVAL:     Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 *         -ESTALE:     This unit has been started before and can't be started a second time
 *         -ENOENT:     This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u)) {

                /* Let's also check the start limit here. Normally, the start limit is only checked by the
                 * .start() method of the unit type after it did some additional checks verifying everything
                 * is in order (so that those other checks can propagate errors properly). However, if a
                 * condition check doesn't hold we don't get that far but we should still ensure we are not
                 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
                 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
                 * hence the chance is big that any triggering unit for us will trigger us again. Note this
                 * condition check is a bit different from the condition check inside the per-unit .start()
                 * function, as this one will not change the unit's state in any way (and we shouldn't here,
                 * after all the condition failed). */

                r = unit_test_start_limit(u);
                if (r < 0)
                        return r;

                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
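
/* Illustrative sketch, not part of the original file: a hypothetical caller
 * showing how the "not really errors" listed above are usually treated. */
#if 0
static int example_start_and_tolerate(Unit *u) {
        int r;

        r = unit_start(u);
        if (IN_SET(r, -EALREADY, -ECOMM))
                return 0; /* already running, or a condition said no: not a failure */
        if (r == -EAGAIN)
                return 0; /* an operation is in flight; retry later */
        return r;         /* everything else is a real error */
}
#endif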
1826
1827 bool unit_can_start(Unit *u) {
1828 assert(u);
1829
1830 if (u->load_state != UNIT_LOADED)
1831 return false;
1832
1833 if (!unit_type_supported(u->type))
1834 return false;
1835
1836 /* Scope units may be started only once */
1837 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1838 return false;
1839
1840 return !!UNIT_VTABLE(u)->start;
1841 }
1842
1843 bool unit_can_isolate(Unit *u) {
1844 assert(u);
1845
1846 return unit_can_start(u) &&
1847 u->allow_isolate;
1848 }
1849
1850 /* Errors:
1851 * -EBADR: This unit type does not support stopping.
1852 * -EALREADY: Unit is already stopped.
1853 * -EAGAIN: An operation is already in progress. Retry later.
1854 */
1855 int unit_stop(Unit *u) {
1856 UnitActiveState state;
1857 Unit *following;
1858
1859 assert(u);
1860
1861 state = unit_active_state(u);
1862 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1863 return -EALREADY;
1864
1865 following = unit_following(u);
1866 if (following) {
1867 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1868 return unit_stop(following);
1869 }
1870
1871 if (!UNIT_VTABLE(u)->stop)
1872 return -EBADR;
1873
1874 unit_add_to_dbus_queue(u);
1875
1876 return UNIT_VTABLE(u)->stop(u);
1877 }
1878
1879 bool unit_can_stop(Unit *u) {
1880 assert(u);
1881
1882 if (!unit_type_supported(u->type))
1883 return false;
1884
1885 if (u->perpetual)
1886 return false;
1887
1888 return !!UNIT_VTABLE(u)->stop;
1889 }
1890
1891 /* Errors:
1892 * -EBADR: This unit type does not support reloading.
1893 * -ENOEXEC: Unit is not started.
1894 * -EAGAIN: An operation is already in progress. Retry later.
1895 */
1896 int unit_reload(Unit *u) {
1897 UnitActiveState state;
1898 Unit *following;
1899
1900 assert(u);
1901
1902 if (u->load_state != UNIT_LOADED)
1903 return -EINVAL;
1904
1905 if (!unit_can_reload(u))
1906 return -EBADR;
1907
1908 state = unit_active_state(u);
1909 if (state == UNIT_RELOADING)
1910 return -EAGAIN;
1911
1912 if (state != UNIT_ACTIVE) {
1913 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1914 return -ENOEXEC;
1915 }
1916
1917 following = unit_following(u);
1918 if (following) {
1919 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1920 return unit_reload(following);
1921 }
1922
1923 unit_add_to_dbus_queue(u);
1924
1925 if (!UNIT_VTABLE(u)->reload) {
1926 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1927 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1928 return 0;
1929 }
1930
1931 return UNIT_VTABLE(u)->reload(u);
1932 }
1933
1934 bool unit_can_reload(Unit *u) {
1935 assert(u);
1936
1937 if (UNIT_VTABLE(u)->can_reload)
1938 return UNIT_VTABLE(u)->can_reload(u);
1939
1940 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1941 return true;
1942
1943 return UNIT_VTABLE(u)->reload;
1944 }
1945
1946 bool unit_is_unneeded(Unit *u) {
1947 static const UnitDependency deps[] = {
1948 UNIT_REQUIRED_BY,
1949 UNIT_REQUISITE_OF,
1950 UNIT_WANTED_BY,
1951 UNIT_BOUND_BY,
1952 };
1953 size_t j;
1954
1955 assert(u);
1956
1957 if (!u->stop_when_unneeded)
1958 return false;
1959
1960 /* Don't clean up while the unit is transitioning or is even inactive. */
1961 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1962 return false;
1963 if (u->job)
1964 return false;
1965
1966 for (j = 0; j < ELEMENTSOF(deps); j++) {
1967 Unit *other;
1968 Iterator i;
1969 void *v;
1970
1971 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1972 * restart, then don't clean this one up. */
1973
1974 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1975 if (other->job)
1976 return false;
1977
1978 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1979 return false;
1980
1981 if (unit_will_restart(other))
1982 return false;
1983 }
1984 }
1985
1986 return true;
1987 }
1988
1989 static void check_unneeded_dependencies(Unit *u) {
1990
1991 static const UnitDependency deps[] = {
1992 UNIT_REQUIRES,
1993 UNIT_REQUISITE,
1994 UNIT_WANTS,
1995 UNIT_BINDS_TO,
1996 };
1997 size_t j;
1998
1999 assert(u);
2000
2001 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2002
2003 for (j = 0; j < ELEMENTSOF(deps); j++) {
2004 Unit *other;
2005 Iterator i;
2006 void *v;
2007
2008 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2009 unit_submit_to_stop_when_unneeded_queue(other);
2010 }
2011 }
2012
2013 static void unit_check_binds_to(Unit *u) {
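/* Checks whether this unit should go down because a unit it is bound to via BindsTo= became inactive;
* if so, enqueues a stop job. Invoked from unit_notify() whenever unit states change. */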
2014 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2015 bool stop = false;
2016 Unit *other;
2017 Iterator i;
2018 void *v;
2019 int r;
2020
2021 assert(u);
2022
2023 if (u->job)
2024 return;
2025
2026 if (unit_active_state(u) != UNIT_ACTIVE)
2027 return;
2028
2029 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2030 if (other->job)
2031 continue;
2032
2033 if (!other->coldplugged)
2034 /* We might yet create a job for the other unit… */
2035 continue;
2036
2037 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2038 continue;
2039
2040 stop = true;
2041 break;
2042 }
2043
2044 if (!stop)
2045 return;
2046
2047 /* If stopping a unit fails continuously we might enter a stop
2048 * loop here; hence rate-limit our attempts and, after a while,
2049 * stop reacting to the bound unit being inactive. */
2050 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2051 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2052 return;
2053 }
2054
2055 assert(other);
2056 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2057
2058 /* A unit we need to run is gone. Sniff. Let's stop this. */
2059 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2060 if (r < 0)
2061 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2062 }
2063
2064 static void retroactively_start_dependencies(Unit *u) {
2065 Iterator i;
2066 Unit *other;
2067 void *v;
2068
2069 assert(u);
2070 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2071
2072 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2073 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2074 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2075 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2076
2077 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2078 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2079 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2080 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2081
2082 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2083 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2084 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2085 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2086
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2090
2091 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2092 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2093 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2094 }
2095
2096 static void retroactively_stop_dependencies(Unit *u) {
2097 Unit *other;
2098 Iterator i;
2099 void *v;
2100
2101 assert(u);
2102 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2103
2104 /* Pull down units which are bound to us recursively if enabled */
2105 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2106 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2107 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2108 }
2109
2110 void unit_start_on_failure(Unit *u) {
2111 Unit *other;
2112 Iterator i;
2113 void *v;
2114 int r;
2115
2116 assert(u);
2117
2118 if (hashmap_isempty(u->dependencies[UNIT_ON_FAILURE]))
2119 return;
2120
2121 log_unit_info(u, "Triggering OnFailure= dependencies.");
2122
2123 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2124 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2125
2126 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2127 if (r < 0)
2128 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2129 }
2130 }
2131
2132 void unit_trigger_notify(Unit *u) {
2133 Unit *other;
2134 Iterator i;
2135 void *v;
2136
2137 assert(u);
2138
2139 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2140 if (UNIT_VTABLE(other)->trigger_notify)
2141 UNIT_VTABLE(other)->trigger_notify(other, u);
2142 }
2143
2144 static int unit_log_resources(Unit *u) {
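/* Sizing: the iovec needs room for one CPU usage entry, one entry per IP and per IO accounting metric,
* plus the four trailing fields (MESSAGE=, MESSAGE_ID=, and the unit and invocation ID fields) appended
* below; similarly, message_parts holds at most the CPU part, two IO parts, two IP parts and a
* terminating NULL. */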
2145 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2146 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2147 _cleanup_free_ char *ingress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2148 size_t n_message_parts = 0, n_iovec = 0;
2149 char *message_parts[1 + 2 + 2 + 1], *t;
2150 nsec_t nsec = NSEC_INFINITY;
2151 CGroupIPAccountingMetric m;
2152 size_t i;
2153 int r;
2154 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2155 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2156 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2157 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2158 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2159 };
2160 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2161 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2162 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2163 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2164 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2165 };
2166
2167 assert(u);
2168
2169 /* Invoked whenever a unit enters a failed or dead state. Logs information about consumed resources if resource
2170 * accounting was enabled for the unit. It does this in two ways: as a friendly, human-readable string with reduced
2171 * information, and as the complete data in structured fields. */
2172
2173 (void) unit_get_cpu_usage(u, &nsec);
2174 if (nsec != NSEC_INFINITY) {
2175 char buf[FORMAT_TIMESPAN_MAX] = "";
2176
2177 /* Format the CPU time for inclusion in the structured log message */
2178 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2179 r = log_oom();
2180 goto finish;
2181 }
2182 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2183
2184 /* Format the CPU time for inclusion in the human language message string */
2185 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2186 t = strjoin("consumed ", buf, " CPU time");
2187 if (!t) {
2188 r = log_oom();
2189 goto finish;
2190 }
2191
2192 message_parts[n_message_parts++] = t;
2193 }
2194
2195 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2196 char buf[FORMAT_BYTES_MAX] = "";
2197 uint64_t value = UINT64_MAX;
2198
2199 assert(io_fields[k]);
2200
2201 (void) unit_get_io_accounting(u, k, k > 0, &value);
2202 if (value == UINT64_MAX)
2203 continue;
2204
2205 have_io_accounting = true;
2206 if (value > 0)
2207 any_io = true;
2208
2209 /* Format IO accounting data for inclusion in the structured log message */
2210 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2211 r = log_oom();
2212 goto finish;
2213 }
2214 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2215
2216 /* Format the IO accounting data for inclusion in the human language message string, but only
2217 * for the bytes counters (and not for the operations counters) */
2218 if (k == CGROUP_IO_READ_BYTES) {
2219 assert(!rr);
2220 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2221 if (!rr) {
2222 r = log_oom();
2223 goto finish;
2224 }
2225 } else if (k == CGROUP_IO_WRITE_BYTES) {
2226 assert(!wr);
2227 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2228 if (!wr) {
2229 r = log_oom();
2230 goto finish;
2231 }
2232 }
2233 }
2234
2235 if (have_io_accounting) {
2236 if (any_io) {
2237 if (rr)
2238 message_parts[n_message_parts++] = TAKE_PTR(rr);
2239 if (wr)
2240 message_parts[n_message_parts++] = TAKE_PTR(wr);
2241
2242 } else {
2243 char *k;
2244
2245 k = strdup("no IO");
2246 if (!k) {
2247 r = log_oom();
2248 goto finish;
2249 }
2250
2251 message_parts[n_message_parts++] = k;
2252 }
2253 }
2254
2255 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2256 char buf[FORMAT_BYTES_MAX] = "";
2257 uint64_t value = UINT64_MAX;
2258
2259 assert(ip_fields[m]);
2260
2261 (void) unit_get_ip_accounting(u, m, &value);
2262 if (value == UINT64_MAX)
2263 continue;
2264
2265 have_ip_accounting = true;
2266 if (value > 0)
2267 any_traffic = true;
2268
2269 /* Format IP accounting data for inclusion in the structured log message */
2270 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2271 r = log_oom();
2272 goto finish;
2273 }
2274 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2275
2276 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2277 * bytes counters (and not for the packets counters) */
2278 if (m == CGROUP_IP_INGRESS_BYTES) {
2279 assert(!ingress);
2280 ingress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2281 if (!ingress) {
2282 r = log_oom();
2283 goto finish;
2284 }
2285 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2286 assert(!egress);
2287 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2288 if (!egress) {
2289 r = log_oom();
2290 goto finish;
2291 }
2292 }
2293 }
2294
2295 if (have_ip_accounting) {
2296 if (any_traffic) {
2297 if (ingress)
2298 message_parts[n_message_parts++] = TAKE_PTR(ingress);
2299 if (egress)
2300 message_parts[n_message_parts++] = TAKE_PTR(egress);
2301
2302 } else {
2303 char *k;
2304
2305 k = strdup("no IP traffic");
2306 if (!k) {
2307 r = log_oom();
2308 goto finish;
2309 }
2310
2311 message_parts[n_message_parts++] = k;
2312 }
2313 }
2314
2315 /* Is there any accounting data available at all? */
2316 if (n_iovec == 0) {
2317 r = 0;
2318 goto finish;
2319 }
2320
2321 if (n_message_parts == 0)
2322 t = strjoina("MESSAGE=", u->id, ": Completed.");
2323 else {
2324 _cleanup_free_ char *joined = NULL;
2325
2326 message_parts[n_message_parts] = NULL;
2327
2328 joined = strv_join(message_parts, ", ");
2329 if (!joined) {
2330 r = log_oom();
2331 goto finish;
2332 }
2333
2334 joined[0] = ascii_toupper(joined[0]);
2335 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2336 }
2337
2338 /* The following four fields are allocated on the stack or are static strings; hence we must not free them,
2339 * and hence we don't increase n_iovec for them. */
2340 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2341 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2342
2343 t = strjoina(u->manager->unit_log_field, u->id);
2344 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2345
2346 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2347 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2348
2349 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2350 r = 0;
2351
2352 finish:
2353 for (i = 0; i < n_message_parts; i++)
2354 free(message_parts[i]);
2355
2356 for (i = 0; i < n_iovec; i++)
2357 free(iovec[i].iov_base);
2358
2359 return r;
2361 }
2362
2363 static void unit_update_on_console(Unit *u) {
2364 bool b;
2365
2366 assert(u);
2367
2368 b = unit_needs_console(u);
2369 if (u->on_console == b)
2370 return;
2371
2372 u->on_console = b;
2373 if (b)
2374 manager_ref_console(u->manager);
2375 else
2376 manager_unref_console(u->manager);
2377 }
2378
2379 static void unit_emit_audit_start(Unit *u) {
2380 assert(u);
2381
2382 if (u->type != UNIT_SERVICE)
2383 return;
2384
2385 /* Write audit record if we have just finished starting up */
2386 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2387 u->in_audit = true;
2388 }
2389
2390 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2391 assert(u);
2392
2393 if (u->type != UNIT_SERVICE)
2394 return;
2395
2396 if (u->in_audit) {
2397 /* Write audit record if we have just finished shutting down */
2398 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2399 u->in_audit = false;
2400 } else {
2401 /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */
2402 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2403
2404 if (state == UNIT_INACTIVE)
2405 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2406 }
2407 }
2408
2409 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
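/* Returns true if the new unit state was "unexpected", i.e. not brought about by the job currently
* installed for the unit; the caller uses this to decide whether dependencies should be started or
* stopped retroactively. */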
2410 bool unexpected = false;
2411 JobResult result;
2412
2413 assert(j);
2414
2415 if (j->state == JOB_WAITING)
2416
2417 /* So we reached a different state for this job. Let's see if we can run it now, in case it previously
2418 * failed due to EAGAIN. */
2419 job_add_to_run_queue(j);
2420
2421 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2422 * hence needs to invalidate jobs. */
2423
2424 switch (j->type) {
2425
2426 case JOB_START:
2427 case JOB_VERIFY_ACTIVE:
2428
2429 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2430 job_finish_and_invalidate(j, JOB_DONE, true, false);
2431 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2432 unexpected = true;
2433
2434 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2435 if (ns == UNIT_FAILED)
2436 result = JOB_FAILED;
2437 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2438 result = JOB_SKIPPED;
2439 else
2440 result = JOB_DONE;
2441
2442 job_finish_and_invalidate(j, result, true, false);
2443 }
2444 }
2445
2446 break;
2447
2448 case JOB_RELOAD:
2449 case JOB_RELOAD_OR_START:
2450 case JOB_TRY_RELOAD:
2451
2452 if (j->state == JOB_RUNNING) {
2453 if (ns == UNIT_ACTIVE)
2454 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2455 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2456 unexpected = true;
2457
2458 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2459 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2460 }
2461 }
2462
2463 break;
2464
2465 case JOB_STOP:
2466 case JOB_RESTART:
2467 case JOB_TRY_RESTART:
2468
2469 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2470 job_finish_and_invalidate(j, JOB_DONE, true, false);
2471 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2472 unexpected = true;
2473 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2474 }
2475
2476 break;
2477
2478 default:
2479 assert_not_reached("Job type unknown");
2480 }
2481
2482 return unexpected;
2483 }
2484
2485 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2486 const char *reason;
2487 Manager *m;
2488
2489 assert(u);
2490 assert(os < _UNIT_ACTIVE_STATE_MAX);
2491 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2492
2493 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2494 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2495 * remounted this function will be called too! */
2496
2497 m = u->manager;
2498
2499 /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be in
2500 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2501 unit_add_to_dbus_queue(u);
2502
2503 /* Update timestamps for state changes */
2504 if (!MANAGER_IS_RELOADING(m)) {
2505 dual_timestamp_get(&u->state_change_timestamp);
2506
2507 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2508 u->inactive_exit_timestamp = u->state_change_timestamp;
2509 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2510 u->inactive_enter_timestamp = u->state_change_timestamp;
2511
2512 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2513 u->active_enter_timestamp = u->state_change_timestamp;
2514 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2515 u->active_exit_timestamp = u->state_change_timestamp;
2516 }
2517
2518 /* Keep track of failed units */
2519 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2520
2521 /* Make sure the cgroup and state files are always removed when we become inactive */
2522 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2523 unit_prune_cgroup(u);
2524 unit_unlink_state_files(u);
2525 }
2526
2527 unit_update_on_console(u);
2528
2529 if (!MANAGER_IS_RELOADING(m)) {
2530 bool unexpected;
2531
2532 /* Let's propagate state changes to the job */
2533 if (u->job)
2534 unexpected = unit_process_job(u->job, ns, flags);
2535 else
2536 unexpected = true;
2537
2538 /* If this state change happened without being requested by a job, then let's retroactively start or
2539 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2540 * additional jobs just because something is already activated. */
2541
2542 if (unexpected) {
2543 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2544 retroactively_start_dependencies(u);
2545 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2546 retroactively_stop_dependencies(u);
2547 }
2548
2549 /* Stop unneeded units regardless of whether going down was expected or not */
2550 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2551 check_unneeded_dependencies(u);
2552
2553 if (ns != os && ns == UNIT_FAILED) {
2554 log_unit_debug(u, "Unit entered failed state.");
2555
2556 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2557 unit_start_on_failure(u);
2558 }
2559
2560 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2561 /* This unit just finished starting up */
2562
2563 unit_emit_audit_start(u);
2564 manager_send_unit_plymouth(m, u);
2565 }
2566
2567 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2568 /* This unit just stopped/failed. */
2569
2570 unit_emit_audit_stop(u, ns);
2571 unit_log_resources(u);
2572 }
2573 }
2574
2575 manager_recheck_journal(m);
2576 manager_recheck_dbus(m);
2577
2578 unit_trigger_notify(u);
2579
2580 if (!MANAGER_IS_RELOADING(m)) {
2581 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2582 unit_submit_to_stop_when_unneeded_queue(u);
2583
2584 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2585 * a unit uses BindsTo= on a Type=oneshot unit, as such units go directly from starting to inactive,
2586 * without ever entering started.) */
2587 unit_check_binds_to(u);
2588
2589 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2590 reason = strjoina("unit ", u->id, " failed");
2591 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2592 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2593 reason = strjoina("unit ", u->id, " succeeded");
2594 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2595 }
2596 }
2597
2598 unit_add_to_gc_queue(u);
2599 }
2600
2601 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2602 int r;
2603
2604 assert(u);
2605 assert(pid_is_valid(pid));
2606
2607 /* Watch a specific PID */
2608
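/* The manager's watch_pids hashmap uses two kinds of keys: a positive PID maps to a single Unit (the
* common case), while the negated PID maps to a NULL-terminated array of Units, covering the rare case
* that multiple units watch the same PID. Both variants are handled below. */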
2609 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2610 * opportunity to remove any stale references to this PID as they can be created
2611 * easily (when watching a process which is not our direct child). */
2612 if (exclusive)
2613 manager_unwatch_pid(u->manager, pid);
2614
2615 r = set_ensure_allocated(&u->pids, NULL);
2616 if (r < 0)
2617 return r;
2618
2619 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2620 if (r < 0)
2621 return r;
2622
2623 /* First try, let's add the unit keyed by "pid". */
2624 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2625 if (r == -EEXIST) {
2626 Unit **array;
2627 bool found = false;
2628 size_t n = 0;
2629
2630 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2631 * to an array of Units rather than just a Unit) already lists us. */
2632
2633 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2634 if (array)
2635 for (; array[n]; n++)
2636 if (array[n] == u)
2637 found = true;
2638
2639 if (found) /* Found it already? If so, do nothing */
2640 r = 0;
2641 else {
2642 Unit **new_array;
2643
2644 /* Allocate a new array */
2645 new_array = new(Unit*, n + 2);
2646 if (!new_array)
2647 return -ENOMEM;
2648
2649 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2650 new_array[n] = u;
2651 new_array[n+1] = NULL;
2652
2653 /* Add or replace the old array */
2654 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2655 if (r < 0) {
2656 free(new_array);
2657 return r;
2658 }
2659
2660 free(array);
2661 }
2662 } else if (r < 0)
2663 return r;
2664
2665 r = set_put(u->pids, PID_TO_PTR(pid));
2666 if (r < 0)
2667 return r;
2668
2669 return 0;
2670 }
2671
2672 void unit_unwatch_pid(Unit *u, pid_t pid) {
2673 Unit **array;
2674
2675 assert(u);
2676 assert(pid_is_valid(pid));
2677
2678 /* First let's drop the unit in case it's keyed as "pid". */
2679 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2680
2681 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2682 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2683 if (array) {
2684 size_t n, m = 0;
2685
2686 /* Let's iterate through the array, dropping our own entry */
2687 for (n = 0; array[n]; n++)
2688 if (array[n] != u)
2689 array[m++] = array[n];
2690 array[m] = NULL;
2691
2692 if (m == 0) {
2693 /* The array is now empty, remove the entire entry */
2694 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2695 free(array);
2696 }
2697 }
2698
2699 (void) set_remove(u->pids, PID_TO_PTR(pid));
2700 }
2701
2702 void unit_unwatch_all_pids(Unit *u) {
2703 assert(u);
2704
2705 while (!set_isempty(u->pids))
2706 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2707
2708 u->pids = set_free(u->pids);
2709 }
2710
2711 static void unit_tidy_watch_pids(Unit *u) {
2712 pid_t except1, except2;
2713 Iterator i;
2714 void *e;
2715
2716 assert(u);
2717
2718 /* Cleans dead PIDs from our list */
2719
2720 except1 = unit_main_pid(u);
2721 except2 = unit_control_pid(u);
2722
2723 SET_FOREACH(e, u->pids, i) {
2724 pid_t pid = PTR_TO_PID(e);
2725
2726 if (pid == except1 || pid == except2)
2727 continue;
2728
2729 if (!pid_is_unwaited(pid))
2730 unit_unwatch_pid(u, pid);
2731 }
2732 }
2733
2734 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2735 Unit *u = userdata;
2736
2737 assert(s);
2738 assert(u);
2739
2740 unit_tidy_watch_pids(u);
2741 unit_watch_all_pids(u);
2742
2743 /* If the PID set is empty now, then let's finish this off. */
2744 unit_synthesize_cgroup_empty_event(u);
2745
2746 return 0;
2747 }
2748
2749 int unit_enqueue_rewatch_pids(Unit *u) {
2750 int r;
2751
2752 assert(u);
2753
2754 if (!u->cgroup_path)
2755 return -ENOENT;
2756
2757 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2758 if (r < 0)
2759 return r;
2760 if (r > 0) /* On unified we can use proper notifications */
2761 return 0;
2762
2763 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2764 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2765 * involves issuing kill(pid, 0) on all processes we watch. */
2766
2767 if (!u->rewatch_pids_event_source) {
2768 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2769
2770 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2771 if (r < 0)
2772 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2773
2774 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2775 if (r < 0)
2776 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2777
2778 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2779
2780 u->rewatch_pids_event_source = TAKE_PTR(s);
2781 }
2782
2783 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2784 if (r < 0)
2785 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2786
2787 return 0;
2788 }
2789
2790 void unit_dequeue_rewatch_pids(Unit *u) {
2791 int r;
2792 assert(u);
2793
2794 if (!u->rewatch_pids_event_source)
2795 return;
2796
2797 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2798 if (r < 0)
2799 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2800
2801 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2802 }
2803
2804 bool unit_job_is_applicable(Unit *u, JobType j) {
2805 assert(u);
2806 assert(j >= 0 && j < _JOB_TYPE_MAX);
2807
2808 switch (j) {
2809
2810 case JOB_VERIFY_ACTIVE:
2811 case JOB_START:
2812 case JOB_NOP:
2813 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2814 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2815 * jobs for them. */
2816 return true;
2817
2818 case JOB_STOP:
2819 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2820 * external events), hence it makes no sense to permit enqueuing such a request either. */
2821 return !u->perpetual;
2822
2823 case JOB_RESTART:
2824 case JOB_TRY_RESTART:
2825 return unit_can_stop(u) && unit_can_start(u);
2826
2827 case JOB_RELOAD:
2828 case JOB_TRY_RELOAD:
2829 return unit_can_reload(u);
2830
2831 case JOB_RELOAD_OR_START:
2832 return unit_can_reload(u) && unit_can_start(u);
2833
2834 default:
2835 assert_not_reached("Invalid job type");
2836 }
2837 }
2838
2839 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2840 assert(u);
2841
2842 /* Only warn about some dependency types */
2843 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2844 return;
2845
2846 if (streq_ptr(u->id, other))
2847 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2848 else
2849 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2850 }
2851
2852 static int unit_add_dependency_hashmap(
2853 Hashmap **h,
2854 Unit *other,
2855 UnitDependencyMask origin_mask,
2856 UnitDependencyMask destination_mask) {
2857
2858 UnitDependencyInfo info;
2859 int r;
2860
2861 assert(h);
2862 assert(other);
2863 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2864 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2865 assert(origin_mask > 0 || destination_mask > 0);
2866
2867 r = hashmap_ensure_allocated(h, NULL);
2868 if (r < 0)
2869 return r;
2870
2871 assert_cc(sizeof(void*) == sizeof(info));
2872
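/* UnitDependencyInfo packs the two masks into a single pointer-sized value (the assert_cc above
* guarantees this fits), so that it can be stored directly as the hashmap value. */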
2873 info.data = hashmap_get(*h, other);
2874 if (info.data) {
2875 /* Entry already exists. Add in our mask. */
2876
2877 if (FLAGS_SET(info.origin_mask, origin_mask) &&
2878 FLAGS_SET(info.destination_mask, destination_mask))
2879 return 0; /* NOP */
2880
2881 info.origin_mask |= origin_mask;
2882 info.destination_mask |= destination_mask;
2883
2884 r = hashmap_update(*h, other, info.data);
2885 } else {
2886 info = (UnitDependencyInfo) {
2887 .origin_mask = origin_mask,
2888 .destination_mask = destination_mask,
2889 };
2890
2891 r = hashmap_put(*h, other, info.data);
2892 }
2893 if (r < 0)
2894 return r;
2895
2896 return 1;
2897 }
2898
2899 int unit_add_dependency(
2900 Unit *u,
2901 UnitDependency d,
2902 Unit *other,
2903 bool add_reference,
2904 UnitDependencyMask mask) {
2905
2906 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2907 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2908 [UNIT_WANTS] = UNIT_WANTED_BY,
2909 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2910 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2911 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2912 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2913 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2914 [UNIT_WANTED_BY] = UNIT_WANTS,
2915 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2916 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2917 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2918 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2919 [UNIT_BEFORE] = UNIT_AFTER,
2920 [UNIT_AFTER] = UNIT_BEFORE,
2921 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2922 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2923 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2924 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2925 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2926 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2927 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2928 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2929 };
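/* Note that UNIT_JOINS_NAMESPACE_OF is its own inverse (the relation is symmetric), while
* UNIT_ON_FAILURE has no inverse dependency at all. */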
2930 Unit *original_u = u, *original_other = other;
2931 int r;
2932
2933 assert(u);
2934 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2935 assert(other);
2936
2937 u = unit_follow_merge(u);
2938 other = unit_follow_merge(other);
2939
2940 /* We won't allow dependencies on ourselves. We will not
2941 * consider them an error however. */
2942 if (u == other) {
2943 maybe_warn_about_dependency(original_u, original_other->id, d);
2944 return 0;
2945 }
2946
2947 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2948 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2949 log_unit_warning(u, "Dependency %s=%s ignored (.device units cannot be delayed)", unit_dependency_to_string(d), other->id);
2950 return 0;
2951 }
2952
2953 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2954 if (r < 0)
2955 return r;
2956
2957 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2958 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2959 if (r < 0)
2960 return r;
2961 }
2962
2963 if (add_reference) {
2964 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2965 if (r < 0)
2966 return r;
2967
2968 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2969 if (r < 0)
2970 return r;
2971 }
2972
2973 unit_add_to_dbus_queue(u);
2974 return 0;
2975 }
2976
2977 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2978 int r;
2979
2980 assert(u);
2981
2982 r = unit_add_dependency(u, d, other, add_reference, mask);
2983 if (r < 0)
2984 return r;
2985
2986 return unit_add_dependency(u, e, other, add_reference, mask);
2987 }
2988
2989 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
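/* Resolves a template name such as "foo@.service" against this unit: our instance name (or, if we have
* none, our own prefix) is filled in as the instance. Non-template names are passed through unmodified;
* *ret then points either into 'name' or into the newly allocated *buf. */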
2990 int r;
2991
2992 assert(u);
2993 assert(name);
2994 assert(buf);
2995 assert(ret);
2996
2997 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2998 *buf = NULL;
2999 *ret = name;
3000 return 0;
3001 }
3002
3003 if (u->instance)
3004 r = unit_name_replace_instance(name, u->instance, buf);
3005 else {
3006 _cleanup_free_ char *i = NULL;
3007
3008 r = unit_name_to_prefix(u->id, &i);
3009 if (r < 0)
3010 return r;
3011
3012 r = unit_name_replace_instance(name, i, buf);
3013 }
3014 if (r < 0)
3015 return r;
3016
3017 *ret = *buf;
3018 return 0;
3019 }
3020
3021 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3022 _cleanup_free_ char *buf = NULL;
3023 Unit *other;
3024 int r;
3025
3026 assert(u);
3027 assert(name);
3028
3029 r = resolve_template(u, name, &buf, &name);
3030 if (r < 0)
3031 return r;
3032
3033 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3034 if (r < 0)
3035 return r;
3036
3037 return unit_add_dependency(u, d, other, add_reference, mask);
3038 }
3039
3040 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3041 _cleanup_free_ char *buf = NULL;
3042 Unit *other;
3043 int r;
3044
3045 assert(u);
3046 assert(name);
3047
3048 r = resolve_template(u, name, &buf, &name);
3049 if (r < 0)
3050 return r;
3051
3052 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3053 if (r < 0)
3054 return r;
3055
3056 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3057 }
3058
3059 int set_unit_path(const char *p) {
3060 /* This is mostly for debug purposes */
3061 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3062 return -errno;
3063
3064 return 0;
3065 }
3066
3067 char *unit_dbus_path(Unit *u) {
3068 assert(u);
3069
3070 if (!u->id)
3071 return NULL;
3072
3073 return unit_dbus_path_from_name(u->id);
3074 }
3075
3076 char *unit_dbus_path_invocation_id(Unit *u) {
3077 assert(u);
3078
3079 if (sd_id128_is_null(u->invocation_id))
3080 return NULL;
3081
3082 return unit_dbus_path_from_name(u->invocation_id_string);
3083 }
3084
3085 int unit_set_slice(Unit *u, Unit *slice) {
3086 assert(u);
3087 assert(slice);
3088
3089 /* Sets the unit slice if it has not been set before. We are extra
3090 * careful to only allow this for units that actually have a
3091 * cgroup context. Also, we don't allow setting this for slices
3092 * (since the parent slice is derived from the name). Make
3093 * sure the unit we set is actually a slice. */
3094
3095 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3096 return -EOPNOTSUPP;
3097
3098 if (u->type == UNIT_SLICE)
3099 return -EINVAL;
3100
3101 if (unit_active_state(u) != UNIT_INACTIVE)
3102 return -EBUSY;
3103
3104 if (slice->type != UNIT_SLICE)
3105 return -EINVAL;
3106
3107 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3108 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3109 return -EPERM;
3110
3111 if (UNIT_DEREF(u->slice) == slice)
3112 return 0;
3113
3114 /* Disallow slice changes if @u is already bound to cgroups */
3115 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3116 return -EBUSY;
3117
3118 unit_ref_set(&u->slice, u, slice);
3119 return 1;
3120 }
3121
3122 int unit_set_default_slice(Unit *u) {
3123 const char *slice_name;
3124 Unit *slice;
3125 int r;
3126
3127 assert(u);
3128
3129 if (UNIT_ISSET(u->slice))
3130 return 0;
3131
3132 if (u->instance) {
3133 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3134
3135 /* Implicitly place all instantiated units in their
3136 * own per-template slice */
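/* For example, under the system manager an instance foo@bar.service ends up in system-foo.slice. */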
3137
3138 r = unit_name_to_prefix(u->id, &prefix);
3139 if (r < 0)
3140 return r;
3141
3142 /* The prefix is already escaped, but it might include
3143 * "-" which has a special meaning for slice units,
3144 * hence escape it once more here. */
3145 escaped = unit_name_escape(prefix);
3146 if (!escaped)
3147 return -ENOMEM;
3148
3149 if (MANAGER_IS_SYSTEM(u->manager))
3150 slice_name = strjoina("system-", escaped, ".slice");
3151 else
3152 slice_name = strjoina(escaped, ".slice");
3153 } else
3154 slice_name =
3155 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3156 ? SPECIAL_SYSTEM_SLICE
3157 : SPECIAL_ROOT_SLICE;
3158
3159 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3160 if (r < 0)
3161 return r;
3162
3163 return unit_set_slice(u, slice);
3164 }
3165
3166 const char *unit_slice_name(Unit *u) {
3167 assert(u);
3168
3169 if (!UNIT_ISSET(u->slice))
3170 return NULL;
3171
3172 return UNIT_DEREF(u->slice)->id;
3173 }
3174
3175 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3176 _cleanup_free_ char *t = NULL;
3177 int r;
3178
3179 assert(u);
3180 assert(type);
3181 assert(_found);
3182
3183 r = unit_name_change_suffix(u->id, type, &t);
3184 if (r < 0)
3185 return r;
3186 if (unit_has_name(u, t))
3187 return -EINVAL;
3188
3189 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3190 assert(r < 0 || *_found != u);
3191 return r;
3192 }
3193
3194 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3195 const char *name, *old_owner, *new_owner;
3196 Unit *u = userdata;
3197 int r;
3198
3199 assert(message);
3200 assert(u);
3201
3202 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3203 if (r < 0) {
3204 bus_log_parse_error(r);
3205 return 0;
3206 }
3207
3208 old_owner = empty_to_null(old_owner);
3209 new_owner = empty_to_null(new_owner);
3210
3211 if (UNIT_VTABLE(u)->bus_name_owner_change)
3212 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3213
3214 return 0;
3215 }
3216
3217 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3218 const char *match;
3219
3220 assert(u);
3221 assert(bus);
3222 assert(name);
3223
3224 if (u->match_bus_slot)
3225 return -EBUSY;
3226
3227 match = strjoina("type='signal',"
3228 "sender='org.freedesktop.DBus',"
3229 "path='/org/freedesktop/DBus',"
3230 "interface='org.freedesktop.DBus',"
3231 "member='NameOwnerChanged',"
3232 "arg0='", name, "'");
3233
3234 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3235 }
3236
3237 int unit_watch_bus_name(Unit *u, const char *name) {
3238 int r;
3239
3240 assert(u);
3241 assert(name);
3242
3243 /* Watch a specific name on the bus. We only support one unit
3244 * watching each name for now. */
3245
3246 if (u->manager->api_bus) {
3247 /* If the bus is already available, install the match directly.
3248 * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
3249 r = unit_install_bus_match(u, u->manager->api_bus, name);
3250 if (r < 0)
3251 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3252 }
3253
3254 r = hashmap_put(u->manager->watch_bus, name, u);
3255 if (r < 0) {
3256 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3257 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
3258 }
3259
3260 return 0;
3261 }
3262
3263 void unit_unwatch_bus_name(Unit *u, const char *name) {
3264 assert(u);
3265 assert(name);
3266
3267 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3268 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3269 }
3270
3271 bool unit_can_serialize(Unit *u) {
3272 assert(u);
3273
3274 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3275 }
3276
3277 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3278 _cleanup_free_ char *s = NULL;
3279 int r;
3280
3281 assert(f);
3282 assert(key);
3283
3284 if (mask == 0)
3285 return 0;
3286
3287 r = cg_mask_to_string(mask, &s);
3288 if (r < 0)
3289 return log_error_errno(r, "Failed to format cgroup mask: %m");
3290
3291 return serialize_item(f, key, s);
3292 }
3293
3294 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3295 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3296 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3297 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3298 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3299 };
3300
3301 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3302 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3303 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3304 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3305 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3306 };
3307
3308 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3309 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3310 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3311 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3312 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3313 };
3314
3315 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3316 CGroupIPAccountingMetric m;
3317 int r;
3318
3319 assert(u);
3320 assert(f);
3321 assert(fds);
3322
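/* The serialization format is a simple sequence of "key=value" lines, terminated by an empty line (the
* end marker written at the bottom of this function). A rough sketch, with illustrative values only:
*
*     transient=no
*     cgroup=/system.slice/foo.service
*     cpu-usage-base=1234567
*/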
3323 if (unit_can_serialize(u)) {
3324 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3325 if (r < 0)
3326 return r;
3327 }
3328
3329 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3330
3331 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3332 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3333 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3334 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3335
3336 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3337 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3338
3339 if (dual_timestamp_is_set(&u->condition_timestamp))
3340 (void) serialize_bool(f, "condition-result", u->condition_result);
3341
3342 if (dual_timestamp_is_set(&u->assert_timestamp))
3343 (void) serialize_bool(f, "assert-result", u->assert_result);
3344
3345 (void) serialize_bool(f, "transient", u->transient);
3346 (void) serialize_bool(f, "in-audit", u->in_audit);
3347
3348 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3349 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3350 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3351 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3352 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3353
3354 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3355 if (u->cpu_usage_last != NSEC_INFINITY)
3356 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3357
3358 if (u->oom_kill_last > 0)
3359 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3360
3361 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3362 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3363
3364 if (u->io_accounting_last[im] != UINT64_MAX)
3365 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3366 }
3367
3368 if (u->cgroup_path)
3369 (void) serialize_item(f, "cgroup", u->cgroup_path);
3370
3371 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3372 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3373 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3374 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3375
3376 if (uid_is_valid(u->ref_uid))
3377 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3378 if (gid_is_valid(u->ref_gid))
3379 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3380
3381 if (!sd_id128_is_null(u->invocation_id))
3382 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3383
3384 bus_track_serialize(u->bus_track, f, "ref");
3385
3386 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3387 uint64_t v;
3388
3389 r = unit_get_ip_accounting(u, m, &v);
3390 if (r >= 0)
3391 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3392 }
3393
3394 if (serialize_jobs) {
3395 if (u->job) {
3396 fputs("job\n", f);
3397 job_serialize(u->job, f);
3398 }
3399
3400 if (u->nop_job) {
3401 fputs("job\n", f);
3402 job_serialize(u->nop_job, f);
3403 }
3404 }
3405
3406 /* End marker */
3407 fputc('\n', f);
3408 return 0;
3409 }
3410
3411 static int unit_deserialize_job(Unit *u, FILE *f) {
3412 _cleanup_(job_freep) Job *j = NULL;
3413 int r;
3414
3415 assert(u);
3416 assert(f);
3417
3418 j = job_new_raw(u);
3419 if (!j)
3420 return log_oom();
3421
3422 r = job_deserialize(j, f);
3423 if (r < 0)
3424 return r;
3425
3426 r = job_install_deserialized(j);
3427 if (r < 0)
3428 return r;
3429
3430 TAKE_PTR(j);
3431 return 0;
3432 }
3433
3434 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3435 int r;
3436
3437 assert(u);
3438 assert(f);
3439 assert(fds);
3440
3441 for (;;) {
3442 _cleanup_free_ char *line = NULL;
3443 char *l, *v;
3444 ssize_t m;
3445 size_t k;
3446
3447 r = read_line(f, LONG_LINE_MAX, &line);
3448 if (r < 0)
3449 return log_error_errno(r, "Failed to read serialization line: %m");
3450 if (r == 0) /* eof */
3451 break;
3452
3453 l = strstrip(line);
3454 if (isempty(l)) /* End marker */
3455 break;
3456
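/* Split the line into key and value at the first '='; if there is no '=', v ends up pointing at the
* empty string at the end of the line. */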
3457 k = strcspn(l, "=");
3458
3459 if (l[k] == '=') {
3460 l[k] = 0;
3461 v = l+k+1;
3462 } else
3463 v = l+k;
3464
3465 if (streq(l, "job")) {
3466 if (v[0] == '\0') {
3467 /* New-style serialized job */
3468 r = unit_deserialize_job(u, f);
3469 if (r < 0)
3470 return r;
3471 } else /* Legacy for pre-44 */
3472 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3473 continue;
3474 } else if (streq(l, "state-change-timestamp")) {
3475 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3476 continue;
3477 } else if (streq(l, "inactive-exit-timestamp")) {
3478 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3479 continue;
3480 } else if (streq(l, "active-enter-timestamp")) {
3481 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3482 continue;
3483 } else if (streq(l, "active-exit-timestamp")) {
3484 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3485 continue;
3486 } else if (streq(l, "inactive-enter-timestamp")) {
3487 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3488 continue;
3489 } else if (streq(l, "condition-timestamp")) {
3490 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3491 continue;
3492 } else if (streq(l, "assert-timestamp")) {
3493 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3494 continue;
3495 } else if (streq(l, "condition-result")) {
3496
3497 r = parse_boolean(v);
3498 if (r < 0)
3499 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3500 else
3501 u->condition_result = r;
3502
3503 continue;
3504
3505 } else if (streq(l, "assert-result")) {
3506
3507 r = parse_boolean(v);
3508 if (r < 0)
3509 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3510 else
3511 u->assert_result = r;
3512
3513 continue;
3514
3515 } else if (streq(l, "transient")) {
3516
3517 r = parse_boolean(v);
3518 if (r < 0)
3519 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3520 else
3521 u->transient = r;
3522
3523 continue;
3524
3525 } else if (streq(l, "in-audit")) {
3526
3527 r = parse_boolean(v);
3528 if (r < 0)
3529 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3530 else
3531 u->in_audit = r;
3532
3533 continue;
3534
3535 } else if (streq(l, "exported-invocation-id")) {
3536
3537 r = parse_boolean(v);
3538 if (r < 0)
3539 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3540 else
3541 u->exported_invocation_id = r;
3542
3543 continue;
3544
3545 } else if (streq(l, "exported-log-level-max")) {
3546
3547 r = parse_boolean(v);
3548 if (r < 0)
3549 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3550 else
3551 u->exported_log_level_max = r;
3552
3553 continue;
3554
3555 } else if (streq(l, "exported-log-extra-fields")) {
3556
3557 r = parse_boolean(v);
3558 if (r < 0)
3559 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3560 else
3561 u->exported_log_extra_fields = r;
3562
3563 continue;
3564
3565 } else if (streq(l, "exported-log-rate-limit-interval")) {
3566
3567 r = parse_boolean(v);
3568 if (r < 0)
3569 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3570 else
3571 u->exported_log_rate_limit_interval = r;
3572
3573 continue;
3574
3575 } else if (streq(l, "exported-log-rate-limit-burst")) {
3576
3577 r = parse_boolean(v);
3578 if (r < 0)
3579 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3580 else
3581 u->exported_log_rate_limit_burst = r;
3582
3583 continue;
3584
3585 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3586
3587 r = safe_atou64(v, &u->cpu_usage_base);
3588 if (r < 0)
3589 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3590
3591 continue;
3592
3593 } else if (streq(l, "cpu-usage-last")) {
3594
3595 r = safe_atou64(v, &u->cpu_usage_last);
3596 if (r < 0)
3597 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3598
3599 continue;
3600
3601 } else if (streq(l, "oom-kill-last")) {
3602
3603 r = safe_atou64(v, &u->oom_kill_last);
3604 if (r < 0)
3605 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3606
3607 continue;
3608
3609 } else if (streq(l, "cgroup")) {
3610
3611 r = unit_set_cgroup_path(u, v);
3612 if (r < 0)
3613 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3614
3615 (void) unit_watch_cgroup(u);
3616 (void) unit_watch_cgroup_memory(u);
3617
3618 continue;
3619 } else if (streq(l, "cgroup-realized")) {
3620 int b;
3621
3622 b = parse_boolean(v);
3623 if (b < 0)
3624 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3625 else
3626 u->cgroup_realized = b;
3627
3628 continue;
3629
3630 } else if (streq(l, "cgroup-realized-mask")) {
3631
3632 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3633 if (r < 0)
3634 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3635 continue;
3636
3637 } else if (streq(l, "cgroup-enabled-mask")) {
3638
3639 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3640 if (r < 0)
3641 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3642 continue;
3643
3644 } else if (streq(l, "cgroup-invalidated-mask")) {
3645
3646 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3647 if (r < 0)
3648 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3649 continue;
3650
3651 } else if (streq(l, "ref-uid")) {
3652 uid_t uid;
3653
3654 r = parse_uid(v, &uid);
3655 if (r < 0)
3656 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3657 else
3658 unit_ref_uid_gid(u, uid, GID_INVALID);
3659
3660 continue;
3661
3662 } else if (streq(l, "ref-gid")) {
3663 gid_t gid;
3664
3665 r = parse_gid(v, &gid);
3666 if (r < 0)
3667 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3668 else
3669 unit_ref_uid_gid(u, UID_INVALID, gid);
3670
3671 continue;
3672
3673 } else if (streq(l, "ref")) {
3674
3675 r = strv_extend(&u->deserialized_refs, v);
3676 if (r < 0)
3677 return log_oom();
3678
3679 continue;
3680 } else if (streq(l, "invocation-id")) {
3681 sd_id128_t id;
3682
3683 r = sd_id128_from_string(v, &id);
3684 if (r < 0)
3685 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3686 else {
3687 r = unit_set_invocation_id(u, id);
3688 if (r < 0)
3689 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3690 }
3691
3692 continue;
3693 }
3694
3695 /* Check if this is an IP accounting metric serialization field */
3696 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3697 if (m >= 0) {
3698 uint64_t c;
3699
3700 r = safe_atou64(v, &c);
3701 if (r < 0)
3702 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3703 else
3704 u->ip_accounting_extra[m] = c;
3705 continue;
3706 }
3707
3708 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3709 if (m >= 0) {
3710 uint64_t c;
3711
3712 r = safe_atou64(v, &c);
3713 if (r < 0)
3714 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3715 else
3716 u->io_accounting_base[m] = c;
3717 continue;
3718 }
3719
3720 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3721 if (m >= 0) {
3722 uint64_t c;
3723
3724 r = safe_atou64(v, &c);
3725 if (r < 0)
3726 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3727 else
3728 u->io_accounting_last[m] = c;
3729 continue;
3730 }
3731
3732 if (unit_can_serialize(u)) {
3733 r = exec_runtime_deserialize_compat(u, l, v, fds);
3734 if (r < 0) {
3735 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3736 continue;
3737 }
3738
3739 /* Returns positive if key was handled by the call */
3740 if (r > 0)
3741 continue;
3742
3743 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3744 if (r < 0)
3745 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3746 }
3747 }
3748
3749 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3750 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3751 * before 228 where the base for timeouts was not persistent across reboots. */
3752
3753 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3754 dual_timestamp_get(&u->state_change_timestamp);
3755
3756 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3757 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3758 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3759 unit_invalidate_cgroup_bpf(u);
3760
3761 return 0;
3762 }
3763
3764 int unit_deserialize_skip(FILE *f) {
3765 int r;
3766 assert(f);
3767
3768 /* Skip serialized data for this unit. We don't know what it is. */
3769
3770 for (;;) {
3771 _cleanup_free_ char *line = NULL;
3772 char *l;
3773
3774 r = read_line(f, LONG_LINE_MAX, &line);
3775 if (r < 0)
3776 return log_error_errno(r, "Failed to read serialization line: %m");
3777 if (r == 0)
3778 return 0;
3779
3780 l = strstrip(line);
3781
3782 /* End marker */
3783 if (isempty(l))
3784 return 1;
3785 }
3786 }
3787
3788 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3789 Unit *device;
3790 _cleanup_free_ char *e = NULL;
3791 int r;
3792
3793 assert(u);
3794
3795 /* Adds in links to the device node that this unit is based on */
3796 if (isempty(what))
3797 return 0;
3798
3799 if (!is_device_path(what))
3800 return 0;
3801
3802 /* When device units aren't supported (such as in a
3803 * container), don't create dependencies on them. */
3804 if (!unit_type_supported(UNIT_DEVICE))
3805 return 0;
3806
3807 r = unit_name_from_path(what, ".device", &e);
3808 if (r < 0)
3809 return r;
3810
3811 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3812 if (r < 0)
3813 return r;
3814
3815 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3816 dep = UNIT_BINDS_TO;
3817
3818 r = unit_add_two_dependencies(u, UNIT_AFTER,
3819 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3820 device, true, mask);
3821 if (r < 0)
3822 return r;
3823
3824 if (wants) {
3825 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3826 if (r < 0)
3827 return r;
3828 }
3829
3830 return 0;
3831 }
3832
3833 int unit_coldplug(Unit *u) {
3834 int r = 0, q;
3835 char **i;
3836
3837 assert(u);
3838
3839 /* Make sure we don't enter a loop when coldplugging recursively. */
3840 if (u->coldplugged)
3841 return 0;
3842
3843 u->coldplugged = true;
3844
3845 STRV_FOREACH(i, u->deserialized_refs) {
3846 q = bus_unit_track_add_name(u, *i);
3847 if (q < 0 && r >= 0)
3848 r = q;
3849 }
3850 u->deserialized_refs = strv_free(u->deserialized_refs);
3851
3852 if (UNIT_VTABLE(u)->coldplug) {
3853 q = UNIT_VTABLE(u)->coldplug(u);
3854 if (q < 0 && r >= 0)
3855 r = q;
3856 }
3857
3858 if (u->job) {
3859 q = job_coldplug(u->job);
3860 if (q < 0 && r >= 0)
3861 r = q;
3862 }
3863
3864 return r;
3865 }
3866
3867 void unit_catchup(Unit *u) {
3868 assert(u);
3869
3870 if (UNIT_VTABLE(u)->catchup)
3871 UNIT_VTABLE(u)->catchup(u);
3872 }
3873
3874 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3875 struct stat st;
3876
3877 if (!path)
3878 return false;
3879
3880 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3881 * are never out-of-date. */
3882 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3883 return false;
3884
3885 if (stat(path, &st) < 0)
3886 /* What, cannot access this anymore? */
3887 return true;
3888
3889 if (path_masked)
3890 /* For masked files check if they are still so */
3891 return !null_or_empty(&st);
3892 
3893 /* For non-empty files check the mtime */
3894 return timespec_load(&st.st_mtim) > mtime;
3897 }
3898
3899 bool unit_need_daemon_reload(Unit *u) {
3900 _cleanup_strv_free_ char **t = NULL;
3901 char **path;
3902
3903 assert(u);
3904
3905 /* For unit files, we allow masking… */
3906 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3907 u->load_state == UNIT_MASKED))
3908 return true;
3909
3910 /* Source paths should not be masked… */
3911 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3912 return true;
3913
3914 if (u->load_state == UNIT_LOADED)
3915 (void) unit_find_dropin_paths(u, &t);
3916 if (!strv_equal(u->dropin_paths, t))
3917 return true;
3918
3919 /* … any drop-ins that are masked are simply omitted from the list. */
3920 STRV_FOREACH(path, u->dropin_paths)
3921 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3922 return true;
3923
3924 return false;
3925 }
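
/* This is what backs the NeedDaemonReload bus property, i.e. the data behind the
 * "changed on disk, run 'systemctl daemon-reload'" hint that systemctl prints. */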
3926
3927 void unit_reset_failed(Unit *u) {
3928 assert(u);
3929
3930 if (UNIT_VTABLE(u)->reset_failed)
3931 UNIT_VTABLE(u)->reset_failed(u);
3932
3933 RATELIMIT_RESET(u->start_limit);
3934 u->start_limit_hit = false;
3935 }
3936
3937 Unit *unit_following(Unit *u) {
3938 assert(u);
3939
3940 if (UNIT_VTABLE(u)->following)
3941 return UNIT_VTABLE(u)->following(u);
3942
3943 return NULL;
3944 }
3945
3946 bool unit_stop_pending(Unit *u) {
3947 assert(u);
3948
3949 /* This call does not check the current state of the unit, only
3950 * whether a stop job is queued. It's hence safe to call from
3951 * state change calls of the unit itself, where the state isn't
3952 * updated yet. This is different from unit_inactive_or_pending()
3953 * which checks both the current state and for a queued job. */
3954
3955 return u->job && u->job->type == JOB_STOP;
3956 }
3957
3958 bool unit_inactive_or_pending(Unit *u) {
3959 assert(u);
3960
3961 /* Returns true if the unit is inactive or going down */
3962
3963 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3964 return true;
3965
3966 if (unit_stop_pending(u))
3967 return true;
3968
3969 return false;
3970 }
3971
3972 bool unit_active_or_pending(Unit *u) {
3973 assert(u);
3974
3975 /* Returns true if the unit is active or going up */
3976
3977 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3978 return true;
3979
3980 if (u->job &&
3981 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3982 return true;
3983
3984 return false;
3985 }
3986
3987 bool unit_will_restart(Unit *u) {
3988 assert(u);
3989
3990 if (!UNIT_VTABLE(u)->will_restart)
3991 return false;
3992
3993 return UNIT_VTABLE(u)->will_restart(u);
3994 }
3995
3996 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3997 assert(u);
3998 assert(w >= 0 && w < _KILL_WHO_MAX);
3999 assert(SIGNAL_VALID(signo));
4000
4001 if (!UNIT_VTABLE(u)->kill)
4002 return -EOPNOTSUPP;
4003
4004 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4005 }
4006
4007 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4008 _cleanup_set_free_ Set *pid_set = NULL;
4009 int r;
4010
4011 pid_set = set_new(NULL);
4012 if (!pid_set)
4013 return NULL;
4014
4015 /* Exclude the main/control pids from being killed via the cgroup */
4016 if (main_pid > 0) {
4017 r = set_put(pid_set, PID_TO_PTR(main_pid));
4018 if (r < 0)
4019 return NULL;
4020 }
4021
4022 if (control_pid > 0) {
4023 r = set_put(pid_set, PID_TO_PTR(control_pid));
4024 if (r < 0)
4025 return NULL;
4026 }
4027
4028 return TAKE_PTR(pid_set);
4029 }
4030
4031 int unit_kill_common(
4032 Unit *u,
4033 KillWho who,
4034 int signo,
4035 pid_t main_pid,
4036 pid_t control_pid,
4037 sd_bus_error *error) {
4038
4039 int r = 0;
4040 bool killed = false;
4041
4042 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4043 if (main_pid < 0)
4044 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4045 else if (main_pid == 0)
4046 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4047 }
4048
4049 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4050 if (control_pid < 0)
4051 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4052 else if (control_pid == 0)
4053 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4054 }
4055
4056 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4057 if (control_pid > 0) {
4058 if (kill(control_pid, signo) < 0)
4059 r = -errno;
4060 else
4061 killed = true;
4062 }
4063
4064 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4065 if (main_pid > 0) {
4066 if (kill(main_pid, signo) < 0)
4067 r = -errno;
4068 else
4069 killed = true;
4070 }
4071
4072 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4073 _cleanup_set_free_ Set *pid_set = NULL;
4074 int q;
4075
4076 /* Exclude the main/control pids from being killed via the cgroup */
4077 pid_set = unit_pid_set(main_pid, control_pid);
4078 if (!pid_set)
4079 return -ENOMEM;
4080
4081 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4082 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4083 r = q;
4084 else
4085 killed = true;
4086 }
4087
4088 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4089 return -ESRCH;
4090
4091 return r;
4092 }
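
/* Note the asymmetry above: the *_FAIL variants of KillWho report -ESRCH when nothing
 * was actually signalled, while the plain variants treat that as success. */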
4093
4094 int unit_following_set(Unit *u, Set **s) {
4095 assert(u);
4096 assert(s);
4097
4098 if (UNIT_VTABLE(u)->following_set)
4099 return UNIT_VTABLE(u)->following_set(u, s);
4100
4101 *s = NULL;
4102 return 0;
4103 }
4104
4105 UnitFileState unit_get_unit_file_state(Unit *u) {
4106 int r;
4107
4108 assert(u);
4109
4110 if (u->unit_file_state < 0 && u->fragment_path) {
4111 r = unit_file_get_state(
4112 u->manager->unit_file_scope,
4113 NULL,
4114 u->id,
4115 &u->unit_file_state);
4116 if (r < 0)
4117 u->unit_file_state = UNIT_FILE_BAD;
4118 }
4119
4120 return u->unit_file_state;
4121 }
4122
4123 int unit_get_unit_file_preset(Unit *u) {
4124 assert(u);
4125
4126 if (u->unit_file_preset < 0 && u->fragment_path)
4127 u->unit_file_preset = unit_file_query_preset(
4128 u->manager->unit_file_scope,
4129 NULL,
4130 basename(u->fragment_path));
4131
4132 return u->unit_file_preset;
4133 }
4134
4135 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4136 assert(ref);
4137 assert(source);
4138 assert(target);
4139
4140 if (ref->target)
4141 unit_ref_unset(ref);
4142
4143 ref->source = source;
4144 ref->target = target;
4145 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4146 return target;
4147 }
4148
4149 void unit_ref_unset(UnitRef *ref) {
4150 assert(ref);
4151
4152 if (!ref->target)
4153 return;
4154
4155 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4156 * be unreferenced now. */
4157 unit_add_to_gc_queue(ref->target);
4158
4159 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4160 ref->source = ref->target = NULL;
4161 }
4162
4163 static int user_from_unit_name(Unit *u, char **ret) {
4164
4165 static const uint8_t hash_key[] = {
4166 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4167 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4168 };
4169
4170 _cleanup_free_ char *n = NULL;
4171 int r;
4172
4173 r = unit_name_to_prefix(u->id, &n);
4174 if (r < 0)
4175 return r;
4176
4177 if (valid_user_group_name(n)) {
4178 *ret = TAKE_PTR(n);
4179 return 0;
4180 }
4181
4182 /* If we can't use the unit name as a user name, then let's hash it and use that */
4183 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4184 return -ENOMEM;
4185
4186 return 0;
4187 }
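
/* For example (sketch): for "foo.service" this simply yields "foo", while for a unit
 * whose prefix is not usable as a user name (say "0ad.service", whose prefix starts
 * with a digit) it yields a hashed name of the form "_du" plus 16 hex digits. */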
4188
4189 int unit_patch_contexts(Unit *u) {
4190 CGroupContext *cc;
4191 ExecContext *ec;
4192 unsigned i;
4193 int r;
4194
4195 assert(u);
4196
4197 /* Patch in the manager defaults into the exec and cgroup
4198 * contexts, _after_ the rest of the settings have been
4199 * initialized */
4200
4201 ec = unit_get_exec_context(u);
4202 if (ec) {
4203 /* This only copies in the ones that need memory */
4204 for (i = 0; i < _RLIMIT_MAX; i++)
4205 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4206 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4207 if (!ec->rlimit[i])
4208 return -ENOMEM;
4209 }
4210
4211 if (MANAGER_IS_USER(u->manager) &&
4212 !ec->working_directory) {
4213
4214 r = get_home_dir(&ec->working_directory);
4215 if (r < 0)
4216 return r;
4217
4218 /* Allow user services to run, even if the
4219 * home directory is missing */
4220 ec->working_directory_missing_ok = true;
4221 }
4222
4223 if (ec->private_devices)
4224 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4225
4226 if (ec->protect_kernel_modules)
4227 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4228
4229 if (ec->dynamic_user) {
4230 if (!ec->user) {
4231 r = user_from_unit_name(u, &ec->user);
4232 if (r < 0)
4233 return r;
4234 }
4235
4236 if (!ec->group) {
4237 ec->group = strdup(ec->user);
4238 if (!ec->group)
4239 return -ENOMEM;
4240 }
4241
4242 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4243 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4244 * sandbox. */
4245
4246 ec->private_tmp = true;
4247 ec->remove_ipc = true;
4248 ec->protect_system = PROTECT_SYSTEM_STRICT;
4249 if (ec->protect_home == PROTECT_HOME_NO)
4250 ec->protect_home = PROTECT_HOME_READ_ONLY;
4251
4252 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4253 * them. */
4254 ec->no_new_privileges = true;
4255 ec->restrict_suid_sgid = true;
4256 }
4257 }
4258
4259 cc = unit_get_cgroup_context(u);
4260 if (cc && ec) {
4261
4262 if (ec->private_devices &&
4263 cc->device_policy == CGROUP_AUTO)
4264 cc->device_policy = CGROUP_CLOSED;
4265
4266 if (ec->root_image &&
4267 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4268
4269 /* When RootImage= is specified, the following devices are touched. */
4270 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4271 if (r < 0)
4272 return r;
4273
4274 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4275 if (r < 0)
4276 return r;
4277
4278 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4279 if (r < 0)
4280 return r;
4281 }
4282 }
4283
4284 return 0;
4285 }
4286
4287 ExecContext *unit_get_exec_context(Unit *u) {
4288 size_t offset;
4289 assert(u);
4290
4291 if (u->type < 0)
4292 return NULL;
4293
4294 offset = UNIT_VTABLE(u)->exec_context_offset;
4295 if (offset <= 0)
4296 return NULL;
4297
4298 return (ExecContext*) ((uint8_t*) u + offset);
4299 }
4300
4301 KillContext *unit_get_kill_context(Unit *u) {
4302 size_t offset;
4303 assert(u);
4304
4305 if (u->type < 0)
4306 return NULL;
4307
4308 offset = UNIT_VTABLE(u)->kill_context_offset;
4309 if (offset <= 0)
4310 return NULL;
4311
4312 return (KillContext*) ((uint8_t*) u + offset);
4313 }
4314
4315 CGroupContext *unit_get_cgroup_context(Unit *u) {
4316 size_t offset;
4317
4318 if (u->type < 0)
4319 return NULL;
4320
4321 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4322 if (offset <= 0)
4323 return NULL;
4324
4325 return (CGroupContext*) ((uint8_t*) u + offset);
4326 }
4327
4328 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4329 size_t offset;
4330
4331 if (u->type < 0)
4332 return NULL;
4333
4334 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4335 if (offset <= 0)
4336 return NULL;
4337
4338 return *(ExecRuntime**) ((uint8_t*) u + offset);
4339 }
4340
4341 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4342 assert(u);
4343
4344 if (UNIT_WRITE_FLAGS_NOOP(flags))
4345 return NULL;
4346
4347 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4348 return u->manager->lookup_paths.transient;
4349
4350 if (flags & UNIT_PERSISTENT)
4351 return u->manager->lookup_paths.persistent_control;
4352
4353 if (flags & UNIT_RUNTIME)
4354 return u->manager->lookup_paths.runtime_control;
4355
4356 return NULL;
4357 }
4358
4359 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4360 char *ret = NULL;
4361
4362 if (!s)
4363 return NULL;
4364
4365 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4366 * return buffer pointer is also written to *buf, except when no escaping was necessary, in which case *buf is
4367 * set to NULL and the input pointer is returned as-is. This means the return value always contains a properly
4368 * escaped version, but *buf only points to an allocation if escaping was actually necessary. If 'buf' is
4369 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4370 * allocations. */
4371
4372 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4373 ret = specifier_escape(s);
4374 if (!ret)
4375 return NULL;
4376
4377 s = ret;
4378 }
4379
4380 if (flags & UNIT_ESCAPE_C) {
4381 char *a;
4382
4383 a = cescape(s);
4384 free(ret);
4385 if (!a)
4386 return NULL;
4387
4388 ret = a;
4389 }
4390
4391 if (buf) {
4392 *buf = ret;
4393 return ret ?: (char*) s;
4394 }
4395
4396 return ret ?: strdup(s);
4397 }
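
/* Two illustrative cases: with UNIT_ESCAPE_SPECIFIERS, "50%" becomes "50%%"; with
 * UNIT_ESCAPE_C, an embedded newline becomes the two characters "\n". */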
4398
4399 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4400 _cleanup_free_ char *result = NULL;
4401 size_t n = 0, allocated = 0;
4402 char **i;
4403
4404 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4405 * way suitable for ExecStart= stanzas */
4406
4407 STRV_FOREACH(i, l) {
4408 _cleanup_free_ char *buf = NULL;
4409 const char *p;
4410 size_t a;
4411 char *q;
4412
4413 p = unit_escape_setting(*i, flags, &buf);
4414 if (!p)
4415 return NULL;
4416
4417 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4418 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4419 return NULL;
4420
4421 q = result + n;
4422 if (n > 0)
4423 *(q++) = ' ';
4424
4425 *(q++) = '"';
4426 q = stpcpy(q, p);
4427 *(q++) = '"';
4428
4429 n += a;
4430 }
4431
4432 if (!GREEDY_REALLOC(result, allocated, n + 1))
4433 return NULL;
4434
4435 result[n] = 0;
4436
4437 return TAKE_PTR(result);
4438 }
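
/* For example, with no escape flags set, the strv { "echo", "hello world" } is
 * rendered as:
 *
 *     "echo" "hello world"
 *
 * i.e. every entry is double-quoted and entries are separated by single spaces. */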
4439
4440 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4441 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4442 const char *dir, *wrapped;
4443 int r;
4444
4445 assert(u);
4446 assert(name);
4447 assert(data);
4448
4449 if (UNIT_WRITE_FLAGS_NOOP(flags))
4450 return 0;
4451
4452 data = unit_escape_setting(data, flags, &escaped);
4453 if (!data)
4454 return -ENOMEM;
4455
4456 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4457 * previous section header is the same. */
4458
4459 if (flags & UNIT_PRIVATE) {
4460 if (!UNIT_VTABLE(u)->private_section)
4461 return -EINVAL;
4462
4463 if (!u->transient_file || u->last_section_private < 0)
4464 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4465 else if (u->last_section_private == 0)
4466 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4467 } else {
4468 if (!u->transient_file || u->last_section_private < 0)
4469 data = strjoina("[Unit]\n", data);
4470 else if (u->last_section_private > 0)
4471 data = strjoina("\n[Unit]\n", data);
4472 }
4473
4474 if (u->transient_file) {
4475 /* While this transient unit file is still being created, let's not create a new drop-in but instead
4476 * write to the transient unit file itself. */
4477 fputs(data, u->transient_file);
4478
4479 if (!endswith(data, "\n"))
4480 fputc('\n', u->transient_file);
4481
4482 /* Remember which section we wrote this entry to */
4483 u->last_section_private = !!(flags & UNIT_PRIVATE);
4484 return 0;
4485 }
4486
4487 dir = unit_drop_in_dir(u, flags);
4488 if (!dir)
4489 return -EINVAL;
4490
4491 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4492 "# or an equivalent operation. Do not edit.\n",
4493 data,
4494 "\n");
4495
4496 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4497 if (r < 0)
4498 return r;
4499
4500 (void) mkdir_p_label(p, 0755);
4501 r = write_string_file_atomic_label(q, wrapped);
4502 if (r < 0)
4503 return r;
4504
4505 r = strv_push(&u->dropin_paths, q);
4506 if (r < 0)
4507 return r;
4508 q = NULL;
4509
4510 strv_uniq(u->dropin_paths);
4511
4512 u->dropin_mtime = now(CLOCK_REALTIME);
4513
4514 return 0;
4515 }
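
/* As a sketch of where this lands: a persistent write of a setting named "MemoryMax"
 * for unit foo.service on a system instance would produce something like
 *
 *     /etc/systemd/system.control/foo.service.d/50-MemoryMax.conf
 *
 * (the exact directory depends on the lookup paths and the UNIT_PERSISTENT/UNIT_RUNTIME
 * flag; transient units write into the transient unit file instead). */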
4516
4517 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4518 _cleanup_free_ char *p = NULL;
4519 va_list ap;
4520 int r;
4521
4522 assert(u);
4523 assert(name);
4524 assert(format);
4525
4526 if (UNIT_WRITE_FLAGS_NOOP(flags))
4527 return 0;
4528
4529 va_start(ap, format);
4530 r = vasprintf(&p, format, ap);
4531 va_end(ap);
4532
4533 if (r < 0)
4534 return -ENOMEM;
4535
4536 return unit_write_setting(u, flags, name, p);
4537 }
4538
4539 int unit_make_transient(Unit *u) {
4540 _cleanup_free_ char *path = NULL;
4541 FILE *f;
4542
4543 assert(u);
4544
4545 if (!UNIT_VTABLE(u)->can_transient)
4546 return -EOPNOTSUPP;
4547
4548 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4549
4550 path = path_join(u->manager->lookup_paths.transient, u->id);
4551 if (!path)
4552 return -ENOMEM;
4553
4554 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4555 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4556
4557 RUN_WITH_UMASK(0022) {
4558 f = fopen(path, "we");
4559 if (!f)
4560 return -errno;
4561 }
4562
4563 safe_fclose(u->transient_file);
4564 u->transient_file = f;
4565
4566 free_and_replace(u->fragment_path, path);
4567
4568 u->source_path = mfree(u->source_path);
4569 u->dropin_paths = strv_free(u->dropin_paths);
4570 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4571
4572 u->load_state = UNIT_STUB;
4573 u->load_error = 0;
4574 u->transient = true;
4575
4576 unit_add_to_dbus_queue(u);
4577 unit_add_to_gc_queue(u);
4578
4579 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4580 u->transient_file);
4581
4582 return 0;
4583 }
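
/* For the system instance the transient fragment ends up under the transient lookup
 * path, e.g. (sketch) /run/systemd/transient/foo.service. */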
4584
4585 static int log_kill(pid_t pid, int sig, void *userdata) {
4586 _cleanup_free_ char *comm = NULL;
4587
4588 (void) get_process_comm(pid, &comm);
4589
4590 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4591 * only, like for example systemd's own PAM stub process. */
4592 if (comm && comm[0] == '(')
4593 return 0;
4594
4595 log_unit_notice(userdata,
4596 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4597 pid,
4598 strna(comm),
4599 signal_to_string(sig));
4600
4601 return 1;
4602 }
4603
4604 static int operation_to_signal(KillContext *c, KillOperation k) {
4605 assert(c);
4606
4607 switch (k) {
4608
4609 case KILL_TERMINATE:
4610 case KILL_TERMINATE_AND_LOG:
4611 return c->kill_signal;
4612
4613 case KILL_KILL:
4614 return c->final_kill_signal;
4615
4616 case KILL_WATCHDOG:
4617 return c->watchdog_signal;
4618
4619 default:
4620 assert_not_reached("KillOperation unknown");
4621 }
4622 }
4623
4624 int unit_kill_context(
4625 Unit *u,
4626 KillContext *c,
4627 KillOperation k,
4628 pid_t main_pid,
4629 pid_t control_pid,
4630 bool main_pid_alien) {
4631
4632 bool wait_for_exit = false, send_sighup;
4633 cg_kill_log_func_t log_func = NULL;
4634 int sig, r;
4635
4636 assert(u);
4637 assert(c);
4638
4639 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4640 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4641
4642 if (c->kill_mode == KILL_NONE)
4643 return 0;
4644
4645 sig = operation_to_signal(c, k);
4646
4647 send_sighup =
4648 c->send_sighup &&
4649 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4650 sig != SIGHUP;
4651
4652 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4653 log_func = log_kill;
4654
4655 if (main_pid > 0) {
4656 if (log_func)
4657 log_func(main_pid, sig, u);
4658
4659 r = kill_and_sigcont(main_pid, sig);
4660 if (r < 0 && r != -ESRCH) {
4661 _cleanup_free_ char *comm = NULL;
4662 (void) get_process_comm(main_pid, &comm);
4663
4664 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4665 } else {
4666 if (!main_pid_alien)
4667 wait_for_exit = true;
4668
4669 if (r != -ESRCH && send_sighup)
4670 (void) kill(main_pid, SIGHUP);
4671 }
4672 }
4673
4674 if (control_pid > 0) {
4675 if (log_func)
4676 log_func(control_pid, sig, u);
4677
4678 r = kill_and_sigcont(control_pid, sig);
4679 if (r < 0 && r != -ESRCH) {
4680 _cleanup_free_ char *comm = NULL;
4681 (void) get_process_comm(control_pid, &comm);
4682
4683 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4684 } else {
4685 wait_for_exit = true;
4686
4687 if (r != -ESRCH && send_sighup)
4688 (void) kill(control_pid, SIGHUP);
4689 }
4690 }
4691
4692 if (u->cgroup_path &&
4693 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4694 _cleanup_set_free_ Set *pid_set = NULL;
4695
4696 /* Exclude the main/control pids from being killed via the cgroup */
4697 pid_set = unit_pid_set(main_pid, control_pid);
4698 if (!pid_set)
4699 return -ENOMEM;
4700
4701 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4702 sig,
4703 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4704 pid_set,
4705 log_func, u);
4706 if (r < 0) {
4707 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4708 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4709
4710 } else if (r > 0) {
4711
4712 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4713 * we are running in a container or if this is a delegation unit, simply because cgroup
4714 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4715 * of containers it can be confused easily by left-over directories in the cgroup — which
4716 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4717 * there we get proper events. Hence rely on them. */
4718
4719 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4720 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4721 wait_for_exit = true;
4722
4723 if (send_sighup) {
4724 set_free(pid_set);
4725
4726 pid_set = unit_pid_set(main_pid, control_pid);
4727 if (!pid_set)
4728 return -ENOMEM;
4729
4730 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4731 SIGHUP,
4732 CGROUP_IGNORE_SELF,
4733 pid_set,
4734 NULL, NULL);
4735 }
4736 }
4737 }
4738
4739 return wait_for_exit;
4740 }
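
/* In other words: with KillMode=mixed only the final SIGKILL phase (KILL_KILL) extends
 * to the whole control group, while the initial SIGTERM goes to the main and control
 * processes only; KillMode=control-group extends every phase to the cgroup. */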
4741
4742 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4743 _cleanup_free_ char *p = NULL;
4744 UnitDependencyInfo di;
4745 int r;
4746
4747 assert(u);
4748 assert(path);
4749
4750 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4751 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4752 * be). In addition, we build a prefix table of all possible prefixes so that newly appearing mount units can easily
4753 * determine which units to make themselves a dependency of. */
4754
4755 if (!path_is_absolute(path))
4756 return -EINVAL;
4757
4758 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4759 if (r < 0)
4760 return r;
4761
4762 p = strdup(path);
4763 if (!p)
4764 return -ENOMEM;
4765
4766 path = path_simplify(p, true);
4767
4768 if (!path_is_normalized(path))
4769 return -EPERM;
4770
4771 if (hashmap_contains(u->requires_mounts_for, path))
4772 return 0;
4773
4774 di = (UnitDependencyInfo) {
4775 .origin_mask = mask
4776 };
4777
4778 r = hashmap_put(u->requires_mounts_for, path, di.data);
4779 if (r < 0)
4780 return r;
4781 p = NULL;
4782
4783 char prefix[strlen(path) + 1];
4784 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4785 Set *x;
4786
4787 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4788 if (!x) {
4789 _cleanup_free_ char *q = NULL;
4790
4791 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4792 if (r < 0)
4793 return r;
4794
4795 q = strdup(prefix);
4796 if (!q)
4797 return -ENOMEM;
4798
4799 x = set_new(NULL);
4800 if (!x)
4801 return -ENOMEM;
4802
4803 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4804 if (r < 0) {
4805 set_free(x);
4806 return r;
4807 }
4808 q = NULL;
4809 }
4810
4811 r = set_put(x, u);
4812 if (r < 0)
4813 return r;
4814 }
4815
4816 return 0;
4817 }
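
/* For example (sketch): registering "/var/lib/foo" files the unit under the prefixes
 * "/", "/var", "/var/lib" and "/var/lib/foo" in the manager-wide table, so that a
 * mount unit showing up for any of those paths can find the units depending on it. */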
4818
4819 int unit_setup_exec_runtime(Unit *u) {
4820 ExecRuntime **rt;
4821 size_t offset;
4822 Unit *other;
4823 Iterator i;
4824 void *v;
4825 int r;
4826
4827 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4828 assert(offset > 0);
4829
4830 /* Check whether there already is an ExecRuntime for this unit. */
4831 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4832 if (*rt)
4833 return 0;
4834
4835 /* Try to get it from somebody else */
4836 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4837 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4838 if (r == 1)
4839 return 1;
4840 }
4841
4842 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4843 }
4844
4845 int unit_setup_dynamic_creds(Unit *u) {
4846 ExecContext *ec;
4847 DynamicCreds *dcreds;
4848 size_t offset;
4849
4850 assert(u);
4851
4852 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4853 assert(offset > 0);
4854 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4855
4856 ec = unit_get_exec_context(u);
4857 assert(ec);
4858
4859 if (!ec->dynamic_user)
4860 return 0;
4861
4862 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4863 }
4864
4865 bool unit_type_supported(UnitType t) {
4866 if (_unlikely_(t < 0))
4867 return false;
4868 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4869 return false;
4870
4871 if (!unit_vtable[t]->supported)
4872 return true;
4873
4874 return unit_vtable[t]->supported();
4875 }
4876
4877 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4878 int r;
4879
4880 assert(u);
4881 assert(where);
4882
4883 r = dir_is_empty(where);
4884 if (r > 0 || r == -ENOTDIR)
4885 return;
4886 if (r < 0) {
4887 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4888 return;
4889 }
4890
4891 log_struct(LOG_NOTICE,
4892 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4893 LOG_UNIT_ID(u),
4894 LOG_UNIT_INVOCATION_ID(u),
4895 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4896 "WHERE=%s", where);
4897 }
4898
4899 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4900 _cleanup_free_ char *canonical_where = NULL;
4901 int r;
4902
4903 assert(u);
4904 assert(where);
4905
4906 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4907 if (r < 0) {
4908 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4909 return 0;
4910 }
4911
4912 /* We will happily ignore a trailing slash (or any redundant slashes) */
4913 if (path_equal(where, canonical_where))
4914 return 0;
4915
4916 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4917 log_struct(LOG_ERR,
4918 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4919 LOG_UNIT_ID(u),
4920 LOG_UNIT_INVOCATION_ID(u),
4921 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4922 "WHERE=%s", where);
4923
4924 return -ELOOP;
4925 }
4926
4927 bool unit_is_pristine(Unit *u) {
4928 assert(u);
4929
4930 /* Check if the unit already exists or is already around,
4931 * in a number of different ways. Note that to cater for unit
4932 * types such as slice, we are generally fine with units that
4933 * are marked UNIT_LOADED even though nothing was actually
4934 * loaded, as those unit types don't require a file on disk. */
4935
4936 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4937 u->fragment_path ||
4938 u->source_path ||
4939 !strv_isempty(u->dropin_paths) ||
4940 u->job ||
4941 u->merged_into);
4942 }
4943
4944 pid_t unit_control_pid(Unit *u) {
4945 assert(u);
4946
4947 if (UNIT_VTABLE(u)->control_pid)
4948 return UNIT_VTABLE(u)->control_pid(u);
4949
4950 return 0;
4951 }
4952
4953 pid_t unit_main_pid(Unit *u) {
4954 assert(u);
4955
4956 if (UNIT_VTABLE(u)->main_pid)
4957 return UNIT_VTABLE(u)->main_pid(u);
4958
4959 return 0;
4960 }
4961
4962 static void unit_unref_uid_internal(
4963 Unit *u,
4964 uid_t *ref_uid,
4965 bool destroy_now,
4966 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4967
4968 assert(u);
4969 assert(ref_uid);
4970 assert(_manager_unref_uid);
4971
4972 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4973 * gid_t are actually the same type, with the same validity rules.
4974 *
4975 * Drops a reference to UID/GID from a unit. */
4976
4977 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4978 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4979
4980 if (!uid_is_valid(*ref_uid))
4981 return;
4982
4983 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4984 *ref_uid = UID_INVALID;
4985 }
4986
4987 void unit_unref_uid(Unit *u, bool destroy_now) {
4988 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4989 }
4990
4991 void unit_unref_gid(Unit *u, bool destroy_now) {
4992 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4993 }
4994
4995 static int unit_ref_uid_internal(
4996 Unit *u,
4997 uid_t *ref_uid,
4998 uid_t uid,
4999 bool clean_ipc,
5000 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5001
5002 int r;
5003
5004 assert(u);
5005 assert(ref_uid);
5006 assert(uid_is_valid(uid));
5007 assert(_manager_ref_uid);
5008
5009 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5010 * are actually the same type, and have the same validity rules.
5011 *
5012 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5013 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5014 * drops to zero. */
5015
5016 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5017 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5018
5019 if (*ref_uid == uid)
5020 return 0;
5021
5022 if (uid_is_valid(*ref_uid)) /* Already set? */
5023 return -EBUSY;
5024
5025 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5026 if (r < 0)
5027 return r;
5028
5029 *ref_uid = uid;
5030 return 1;
5031 }
5032
5033 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5034 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5035 }
5036
5037 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5038 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5039 }
5040
5041 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5042 int r = 0, q = 0;
5043
5044 assert(u);
5045
5046 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5047
5048 if (uid_is_valid(uid)) {
5049 r = unit_ref_uid(u, uid, clean_ipc);
5050 if (r < 0)
5051 return r;
5052 }
5053
5054 if (gid_is_valid(gid)) {
5055 q = unit_ref_gid(u, gid, clean_ipc);
5056 if (q < 0) {
5057 if (r > 0)
5058 unit_unref_uid(u, false);
5059
5060 return q;
5061 }
5062 }
5063
5064 return r > 0 || q > 0;
5065 }
5066
5067 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5068 ExecContext *c;
5069 int r;
5070
5071 assert(u);
5072
5073 c = unit_get_exec_context(u);
5074
5075 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5076 if (r < 0)
5077 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5078
5079 return r;
5080 }
5081
5082 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5083 assert(u);
5084
5085 unit_unref_uid(u, destroy_now);
5086 unit_unref_gid(u, destroy_now);
5087 }
5088
5089 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5090 int r;
5091
5092 assert(u);
5093
5094 /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user name/group name
5095 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5096 * objects when no service references the UID/GID anymore. */
5097
5098 r = unit_ref_uid_gid(u, uid, gid);
5099 if (r > 0)
5100 unit_add_to_dbus_queue(u);
5101 }
5102
5103 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5104 int r;
5105
5106 assert(u);
5107
5108 /* Set the invocation ID for this unit. If this fails, we don't roll back to the old ID, but reset it altogether. */
5109
5110 if (sd_id128_equal(u->invocation_id, id))
5111 return 0;
5112
5113 if (!sd_id128_is_null(u->invocation_id))
5114 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5115
5116 if (sd_id128_is_null(id)) {
5117 r = 0;
5118 goto reset;
5119 }
5120
5121 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5122 if (r < 0)
5123 goto reset;
5124
5125 u->invocation_id = id;
5126 sd_id128_to_string(id, u->invocation_id_string);
5127
5128 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5129 if (r < 0)
5130 goto reset;
5131
5132 return 0;
5133
5134 reset:
5135 u->invocation_id = SD_ID128_NULL;
5136 u->invocation_id_string[0] = 0;
5137 return r;
5138 }
5139
5140 int unit_acquire_invocation_id(Unit *u) {
5141 sd_id128_t id;
5142 int r;
5143
5144 assert(u);
5145
5146 r = sd_id128_randomize(&id);
5147 if (r < 0)
5148 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5149
5150 r = unit_set_invocation_id(u, id);
5151 if (r < 0)
5152 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5153
5154 unit_add_to_dbus_queue(u);
5155 return 0;
5156 }
5157
5158 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5159 int r;
5160
5161 assert(u);
5162 assert(p);
5163
5164 /* Copy parameters from manager */
5165 r = manager_get_effective_environment(u->manager, &p->environment);
5166 if (r < 0)
5167 return r;
5168
5169 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5170 p->cgroup_supported = u->manager->cgroup_supported;
5171 p->prefix = u->manager->prefix;
5172 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5173
5174 /* Copy parameters from unit */
5175 p->cgroup_path = u->cgroup_path;
5176 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5177
5178 return 0;
5179 }
5180
5181 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5182 int r;
5183
5184 assert(u);
5185 assert(ret);
5186
5187 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5188 * and > 0 in the parent. The ret parameter is always filled in with the child's PID. */
5189
5190 (void) unit_realize_cgroup(u);
5191
5192 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5193 if (r != 0)
5194 return r;
5195
5196 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5197 (void) ignore_signals(SIGPIPE, -1);
5198
5199 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5200
5201 if (u->cgroup_path) {
5202 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5203 if (r < 0) {
5204 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5205 _exit(EXIT_CGROUP);
5206 }
5207 }
5208
5209 return 0;
5210 }
5211
5212 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5213 assert(u);
5214 assert(d >= 0);
5215 assert(d < _UNIT_DEPENDENCY_MAX);
5216 assert(other);
5217
5218 if (di.origin_mask == 0 && di.destination_mask == 0) {
5219 /* No bit set anymore, let's drop the whole entry */
5220 assert_se(hashmap_remove(u->dependencies[d], other));
5221 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5222 } else
5223 /* Mask was reduced, let's update the entry */
5224 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5225 }
5226
5227 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5228 UnitDependency d;
5229
5230 assert(u);
5231
5232 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5233
5234 if (mask == 0)
5235 return;
5236
5237 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5238 bool done;
5239
5240 do {
5241 UnitDependencyInfo di;
5242 Unit *other;
5243 Iterator i;
5244
5245 done = true;
5246
5247 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5248 UnitDependency q;
5249
5250 if ((di.origin_mask & ~mask) == di.origin_mask)
5251 continue;
5252 di.origin_mask &= ~mask;
5253 unit_update_dependency_mask(u, d, other, di);
5254
5255 /* We updated the dependency from our unit to the other unit now. But most dependencies
5256 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5257 * all dependency types on the other unit and delete all those which point to us and
5258 * have the right mask set. */
5259
5260 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5261 UnitDependencyInfo dj;
5262
5263 dj.data = hashmap_get(other->dependencies[q], u);
5264 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5265 continue;
5266 dj.destination_mask &= ~mask;
5267
5268 unit_update_dependency_mask(other, q, u, dj);
5269 }
5270
5271 unit_add_to_gc_queue(other);
5272
5273 done = false;
5274 break;
5275 }
5276
5277 } while (!done);
5278 }
5279 }
5280
5281 static int unit_export_invocation_id(Unit *u) {
5282 const char *p;
5283 int r;
5284
5285 assert(u);
5286
5287 if (u->exported_invocation_id)
5288 return 0;
5289
5290 if (sd_id128_is_null(u->invocation_id))
5291 return 0;
5292
5293 p = strjoina("/run/systemd/units/invocation:", u->id);
5294 r = symlink_atomic(u->invocation_id_string, p);
5295 if (r < 0)
5296 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5297
5298 u->exported_invocation_id = true;
5299 return 0;
5300 }
5301
5302 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5303 const char *p;
5304 char buf[2];
5305 int r;
5306
5307 assert(u);
5308 assert(c);
5309
5310 if (u->exported_log_level_max)
5311 return 0;
5312
5313 if (c->log_level_max < 0)
5314 return 0;
5315
5316 assert(c->log_level_max <= 7);
5317
5318 buf[0] = '0' + c->log_level_max;
5319 buf[1] = 0;
5320
5321 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5322 r = symlink_atomic(buf, p);
5323 if (r < 0)
5324 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5325
5326 u->exported_log_level_max = true;
5327 return 0;
5328 }
5329
5330 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5331 _cleanup_close_ int fd = -1;
5332 struct iovec *iovec;
5333 const char *p;
5334 char *pattern;
5335 le64_t *sizes;
5336 ssize_t n;
5337 size_t i;
5338 int r;
5339
5340 if (u->exported_log_extra_fields)
5341 return 0;
5342
5343 if (c->n_log_extra_fields <= 0)
5344 return 0;
5345
5346 sizes = newa(le64_t, c->n_log_extra_fields);
5347 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5348
5349 for (i = 0; i < c->n_log_extra_fields; i++) {
5350 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5351
5352 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5353 iovec[i*2+1] = c->log_extra_fields[i];
5354 }
5355
5356 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5357 pattern = strjoina(p, ".XXXXXX");
5358
5359 fd = mkostemp_safe(pattern);
5360 if (fd < 0)
5361 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5362
5363 n = writev(fd, iovec, c->n_log_extra_fields*2);
5364 if (n < 0) {
5365 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5366 goto fail;
5367 }
5368
5369 (void) fchmod(fd, 0644);
5370
5371 if (rename(pattern, p) < 0) {
5372 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5373 goto fail;
5374 }
5375
5376 u->exported_log_extra_fields = true;
5377 return 0;
5378
5379 fail:
5380 (void) unlink(pattern);
5381 return r;
5382 }
5383
5384 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5385 _cleanup_free_ char *buf = NULL;
5386 const char *p;
5387 int r;
5388
5389 assert(u);
5390 assert(c);
5391
5392 if (u->exported_log_rate_limit_interval)
5393 return 0;
5394
5395 if (c->log_rate_limit_interval_usec == 0)
5396 return 0;
5397
5398 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5399
5400 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5401 return log_oom();
5402
5403 r = symlink_atomic(buf, p);
5404 if (r < 0)
5405 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5406
5407 u->exported_log_rate_limit_interval = true;
5408 return 0;
5409 }
5410
5411 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5412 _cleanup_free_ char *buf = NULL;
5413 const char *p;
5414 int r;
5415
5416 assert(u);
5417 assert(c);
5418
5419 if (u->exported_log_rate_limit_burst)
5420 return 0;
5421
5422 if (c->log_rate_limit_burst == 0)
5423 return 0;
5424
5425 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5426
5427 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5428 return log_oom();
5429
5430 r = symlink_atomic(buf, p);
5431 if (r < 0)
5432 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5433
5434 u->exported_log_rate_limit_burst = true;
5435 return 0;
5436 }
5437
5438 void unit_export_state_files(Unit *u) {
5439 const ExecContext *c;
5440
5441 assert(u);
5442
5443 if (!u->id)
5444 return;
5445
5446 if (!MANAGER_IS_SYSTEM(u->manager))
5447 return;
5448
5449 if (MANAGER_IS_TEST_RUN(u->manager))
5450 return;
5451
5452 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5453 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5454 * the IPC system itself and PID 1 also log to the journal.
5455 *
5456 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
5457 * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5458 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5459 * namespace at least.
5460 *
5461 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5462 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5463 * them with one. */
5464
5465 (void) unit_export_invocation_id(u);
5466
5467 c = unit_get_exec_context(u);
5468 if (c) {
5469 (void) unit_export_log_level_max(u, c);
5470 (void) unit_export_log_extra_fields(u, c);
5471 (void) unit_export_log_rate_limit_interval(u, c);
5472 (void) unit_export_log_rate_limit_burst(u, c);
5473 }
5474 }
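
/* The resulting layout looks roughly like this (sketch, values illustrative):
 *
 *     /run/systemd/units/invocation:foo.service        -> symlink to the 128bit ID string
 *     /run/systemd/units/log-level-max:foo.service     -> symlink to e.g. "4"
 *     /run/systemd/units/log-extra-fields:foo.service  (regular file with the fields)
 *
 * where the symlink *target* carries the payload, as described above. */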
5475
5476 void unit_unlink_state_files(Unit *u) {
5477 const char *p;
5478
5479 assert(u);
5480
5481 if (!u->id)
5482 return;
5483
5484 if (!MANAGER_IS_SYSTEM(u->manager))
5485 return;
5486
5487 /* Undoes the effect of unit_export_state() */
5488
5489 if (u->exported_invocation_id) {
5490 p = strjoina("/run/systemd/units/invocation:", u->id);
5491 (void) unlink(p);
5492
5493 u->exported_invocation_id = false;
5494 }
5495
5496 if (u->exported_log_level_max) {
5497 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5498 (void) unlink(p);
5499
5500 u->exported_log_level_max = false;
5501 }
5502
5503 if (u->exported_log_extra_fields) {
5504 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5505 (void) unlink(p);
5506
5507 u->exported_log_extra_fields = false;
5508 }
5509
5510 if (u->exported_log_rate_limit_interval) {
5511 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5512 (void) unlink(p);
5513
5514 u->exported_log_rate_limit_interval = false;
5515 }
5516
5517 if (u->exported_log_rate_limit_burst) {
5518 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5519 (void) unlink(p);
5520
5521 u->exported_log_rate_limit_burst = false;
5522 }
5523 }
5524
5525 int unit_prepare_exec(Unit *u) {
5526 int r;
5527
5528 assert(u);
5529
5530 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5531 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5532 r = bpf_firewall_load_custom(u);
5533 if (r < 0)
5534 return r;
5535
5536 /* Prepares everything so that we can fork off a process for this unit */
5537
5538 (void) unit_realize_cgroup(u);
5539
5540 if (u->reset_accounting) {
5541 (void) unit_reset_accounting(u);
5542 u->reset_accounting = false;
5543 }
5544
5545 unit_export_state_files(u);
5546
5547 r = unit_setup_exec_runtime(u);
5548 if (r < 0)
5549 return r;
5550
5551 r = unit_setup_dynamic_creds(u);
5552 if (r < 0)
5553 return r;
5554
5555 return 0;
5556 }
5557
5558 static int log_leftover(pid_t pid, int sig, void *userdata) {
5559 _cleanup_free_ char *comm = NULL;
5560
5561 (void) get_process_comm(pid, &comm);
5562
5563 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5564 return 0;
5565
5566 log_unit_warning(userdata,
5567 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5568 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5569 pid, strna(comm));
5570
5571 return 1;
5572 }
5573
5574 int unit_warn_leftover_processes(Unit *u) {
5575 assert(u);
5576
5577 (void) unit_pick_cgroup_path(u);
5578
5579 if (!u->cgroup_path)
5580 return 0;
5581
5582 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5583 }
5584
5585 bool unit_needs_console(Unit *u) {
5586 ExecContext *ec;
5587 UnitActiveState state;
5588
5589 assert(u);
5590
5591 state = unit_active_state(u);
5592
5593 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5594 return false;
5595
5596 if (UNIT_VTABLE(u)->needs_console)
5597 return UNIT_VTABLE(u)->needs_console(u);
5598
5599 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5600 ec = unit_get_exec_context(u);
5601 if (!ec)
5602 return false;
5603
5604 return exec_context_may_touch_console(ec);
5605 }
5606
5607 const char *unit_label_path(Unit *u) {
5608 const char *p;
5609
5610 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5611 * when validating access checks. */
5612
5613 p = u->source_path ?: u->fragment_path;
5614 if (!p)
5615 return NULL;
5616
5617 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5618 if (path_equal(p, "/dev/null"))
5619 return NULL;
5620
5621 return p;
5622 }
5623
5624 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5625 int r;
5626
5627 assert(u);
5628
5629 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5630 * and not a kernel thread either */
5631
5632 /* First, a simple range check */
5633 if (!pid_is_valid(pid))
5634 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5635
5636 /* Some extra safety check */
5637 if (pid == 1 || pid == getpid_cached())
5638 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5639
5640 /* Don't even begin to bother with kernel threads */
5641 r = is_kernel_thread(pid);
5642 if (r == -ESRCH)
5643 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5644 if (r < 0)
5645 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5646 if (r > 0)
5647 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5648
5649 return 0;
5650 }
5651
5652 void unit_log_success(Unit *u) {
5653 assert(u);
5654
5655 log_struct(LOG_INFO,
5656 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5657 LOG_UNIT_ID(u),
5658 LOG_UNIT_INVOCATION_ID(u),
5659 LOG_UNIT_MESSAGE(u, "Succeeded."));
5660 }
5661
5662 void unit_log_failure(Unit *u, const char *result) {
5663 assert(u);
5664 assert(result);
5665
5666 log_struct(LOG_WARNING,
5667 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5668 LOG_UNIT_ID(u),
5669 LOG_UNIT_INVOCATION_ID(u),
5670 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5671 "UNIT_RESULT=%s", result);
5672 }
5673
5674 void unit_log_skip(Unit *u, const char *result) {
5675 assert(u);
5676 assert(result);
5677
5678 log_struct(LOG_INFO,
5679 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5680 LOG_UNIT_ID(u),
5681 LOG_UNIT_INVOCATION_ID(u),
5682 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5683 "UNIT_RESULT=%s", result);
5684 }
5685
5686 void unit_log_process_exit(
5687 Unit *u,
5688 int level,
5689 const char *kind,
5690 const char *command,
5691 int code,
5692 int status) {
5693
5694 assert(u);
5695 assert(kind);
5696
5697 if (code != CLD_EXITED)
5698 level = LOG_WARNING;
5699
5700 log_struct(level,
5701 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5702 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5703 kind,
5704 sigchld_code_to_string(code), status,
5705 strna(code == CLD_EXITED
5706 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5707 : signal_to_string(status))),
5708 "EXIT_CODE=%s", sigchld_code_to_string(code),
5709 "EXIT_STATUS=%i", status,
5710 "COMMAND=%s", strna(command),
5711 LOG_UNIT_ID(u),
5712 LOG_UNIT_INVOCATION_ID(u));
5713 }
5714
5715 int unit_exit_status(Unit *u) {
5716 assert(u);
5717
5718 /* Returns the exit status to propagate for the most recent cycle of this unit: a value in the range
5719 * 0…255 if there's something to propagate; -EOPNOTSUPP if the concept does not apply to this unit type; -ENODATA
5720 * if no data is currently known (for example because the unit hasn't deactivated yet); and -EBADE if the main
5721 * service process exited abnormally (signal/coredump). */
5722
5723 if (!UNIT_VTABLE(u)->exit_status)
5724 return -EOPNOTSUPP;
5725
5726 return UNIT_VTABLE(u)->exit_status(u);
5727 }
5728
5729 int unit_failure_action_exit_status(Unit *u) {
5730 int r;
5731
5732 assert(u);
5733
5734 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5735
5736 if (u->failure_action_exit_status >= 0)
5737 return u->failure_action_exit_status;
5738
5739 r = unit_exit_status(u);
5740 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5741 return 255;
5742
5743 return r;
5744 }
5745
5746 int unit_success_action_exit_status(Unit *u) {
5747 int r;
5748
5749 assert(u);
5750
5751 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5752
5753 if (u->success_action_exit_status >= 0)
5754 return u->success_action_exit_status;
5755
5756 r = unit_exit_status(u);
5757 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5758 return 255;
5759
5760 return r;
5761 }
5762
5763 int unit_test_trigger_loaded(Unit *u) {
5764 Unit *trigger;
5765
5766 /* Tests whether the unit to trigger is loaded */
5767
5768 trigger = UNIT_TRIGGER(u);
5769 if (!trigger)
5770 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit to trigger not loaded.");
5771 if (trigger->load_state != UNIT_LOADED)
5772 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5773
5774 return 0;
5775 }
5776
5777 int unit_clean(Unit *u, ExecCleanMask mask) {
5778 UnitActiveState state;
5779
5780 assert(u);
5781
5782 /* Special return values:
5783 *
5784 * -EOPNOTSUPP → cleaning not supported for this unit type
5785 * -EUNATCH → cleaning not defined for this resource type
5786 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5787 * a job queued or similar
5788 */
5789
5790 if (!UNIT_VTABLE(u)->clean)
5791 return -EOPNOTSUPP;
5792
5793 if (mask == 0)
5794 return -EUNATCH;
5795
5796 if (u->load_state != UNIT_LOADED)
5797 return -EBUSY;
5798
5799 if (u->job)
5800 return -EBUSY;
5801
5802 state = unit_active_state(u);
5803 if (!IN_SET(state, UNIT_INACTIVE))
5804 return -EBUSY;
5805
5806 return UNIT_VTABLE(u)->clean(u, mask);
5807 }
5808
5809 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5810 assert(u);
5811
5812 if (!UNIT_VTABLE(u)->clean ||
5813 u->load_state != UNIT_LOADED) {
5814 *ret = 0;
5815 return 0;
5816 }
5817
5818 /* When the clean() method is set, can_clean() really should be set too */
5819 assert(UNIT_VTABLE(u)->can_clean);
5820
5821 return UNIT_VTABLE(u)->can_clean(u, ret);
5822 }
5823
5824 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5825 [COLLECT_INACTIVE] = "inactive",
5826 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5827 };
5828
5829 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);