]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #12218 from keszybz/use-libmount-more
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "fileio.h"
26 #include "format-util.h"
27 #include "fs-util.h"
28 #include "id128-util.h"
29 #include "io-util.h"
30 #include "load-dropin.h"
31 #include "load-fragment.h"
32 #include "log.h"
33 #include "macro.h"
34 #include "missing.h"
35 #include "mkdir.h"
36 #include "parse-util.h"
37 #include "path-util.h"
38 #include "process-util.h"
39 #include "serialize.h"
40 #include "set.h"
41 #include "signal-util.h"
42 #include "sparse-endian.h"
43 #include "special.h"
44 #include "specifier.h"
45 #include "stat-util.h"
46 #include "stdio-util.h"
47 #include "string-table.h"
48 #include "string-util.h"
49 #include "strv.h"
50 #include "terminal-util.h"
51 #include "tmpfile-util.h"
52 #include "umask-util.h"
53 #include "unit-name.h"
54 #include "unit.h"
55 #include "user-util.h"
56 #include "virt.h"
57
/* Dispatch table mapping each UnitType to its type-specific implementation vtable.
 * Indexed via the UNIT_VTABLE() macro; every concrete unit type must have an entry. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
71
72 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
73
/* Allocates and zero-initializes a new Unit object. 'size' is at least sizeof(Unit) so
 * unit types can allocate their larger type-specific struct in one go. The unit starts
 * out typeless (_UNIT_TYPE_INVALID) and nameless; names are registered later via
 * unit_add_name(). Returns NULL on OOM; the caller owns the returned object. */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        /* Non-zero defaults; everything else relies on the malloc0() above. */
        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All fd fields start out "closed", marked with -1. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        /* "No data yet" marker for IO accounting deltas. */
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}
121
122 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
123 _cleanup_(unit_freep) Unit *u = NULL;
124 int r;
125
126 u = unit_new(m, size);
127 if (!u)
128 return -ENOMEM;
129
130 r = unit_add_name(u, name);
131 if (r < 0)
132 return r;
133
134 *ret = TAKE_PTR(u);
135
136 return r;
137 }
138
139 bool unit_has_name(const Unit *u, const char *name) {
140 assert(u);
141 assert(name);
142
143 return set_contains(u->names, (char*) name);
144 }
145
/* Performs type-dependent initialization once the unit's type is known (called from
 * unit_add_name() when the first name fixes the type): seeds the cgroup, exec and kill
 * contexts with manager-wide defaults, then invokes the type's own init() hook. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                /* Slices are exempt from the default tasks limit. */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                /* System manager gets a shared session keyring; user instances inherit. */
                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
189
/* Registers an additional name (alias) for the unit. 'text' may be a template name
 * (e.g. "foo@.service"), in which case the unit's instance string is spliced in first.
 * The first successfully added name also fixes the unit's type, id and instance and
 * triggers type-specific initialization. Returns 0 on success (also if the name was
 * already attached to this unit), negative errno otherwise. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* Templates can only be resolved for instantiated units. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Nothing to do if we already carry this name; refuse if another unit owns it. */
        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both containers stay consistent. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name: it determines the unit's type, id and instance. */
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' was handed to u->names / manager->units above. */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
272
/* Switches the unit's primary id to 'name', which must already be one of the unit's
 * registered names (template names are resolved against the instance first). The
 * instance string is re-derived from the chosen id. Returns -ENOENT if 'name' is not
 * one of the unit's names. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* 's' is owned by u->names, so no ownership transfer happens here. */
        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
312
313 int unit_set_description(Unit *u, const char *description) {
314 int r;
315
316 assert(u);
317
318 r = free_and_strdup(&u->description, empty_to_null(description));
319 if (r < 0)
320 return r;
321 if (r > 0)
322 unit_add_to_dbus_queue(u);
323
324 return 0;
325 }
326
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending jobs always pin the unit. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units (e.g. -.slice) are never collected. */
        if (u->perpetual)
                return false;

        /* Bus clients holding a reference pin the unit, too. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* On error (r < 0) err on the side of keeping the unit around. */
                if (r <= 0)
                        return false;
        }

        /* Finally give the unit type a veto. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
395
396 void unit_add_to_load_queue(Unit *u) {
397 assert(u);
398 assert(u->type != _UNIT_TYPE_INVALID);
399
400 if (u->load_state != UNIT_STUB || u->in_load_queue)
401 return;
402
403 LIST_PREPEND(load_queue, u->manager->load_queue, u);
404 u->in_load_queue = true;
405 }
406
407 void unit_add_to_cleanup_queue(Unit *u) {
408 assert(u);
409
410 if (u->in_cleanup_queue)
411 return;
412
413 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
414 u->in_cleanup_queue = true;
415 }
416
417 void unit_add_to_gc_queue(Unit *u) {
418 assert(u);
419
420 if (u->in_gc_queue || u->in_cleanup_queue)
421 return;
422
423 if (!unit_may_gc(u))
424 return;
425
426 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
427 u->in_gc_queue = true;
428 }
429
430 void unit_add_to_dbus_queue(Unit *u) {
431 assert(u);
432 assert(u->type != _UNIT_TYPE_INVALID);
433
434 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
435 return;
436
437 /* Shortcut things if nobody cares */
438 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
439 sd_bus_track_count(u->bus_track) <= 0 &&
440 set_isempty(u->manager->private_buses)) {
441 u->sent_dbus_new_signal = true;
442 return;
443 }
444
445 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
446 u->in_dbus_queue = true;
447 }
448
449 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
450 assert(u);
451
452 if (u->in_stop_when_unneeded_queue)
453 return;
454
455 if (!u->stop_when_unneeded)
456 return;
457
458 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
459 return;
460
461 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
462 u->in_stop_when_unneeded_queue = true;
463 }
464
/* Frees one dependency hashmap of 'u' (keyed by the peer units), after removing 'u'
 * from every peer's own dependency maps so no dangling back-pointers remain. Each
 * peer is queued for GC, since losing a reference may make it collectable. */
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Scrub 'u' from every dependency type of the peer, since we don't
                 * know under which type(s) the back-pointer was registered. */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
485
/* For transient units only: deletes the transient unit file and any drop-ins that live
 * below the transient lookup path, so the unit does not survive unloading. All removal
 * failures are ignored (best effort). */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* Succeeds only once the directory is empty. */
        }
}
516
/* Releases u->requires_mounts_for, and removes 'u' from the manager's reverse map
 * (units_requiring_mounts_for), which is keyed by every path prefix of every required
 * mount path. Reverse-map sets that become empty are destroyed together with their
 * (heap-allocated) keys. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Take ownership of one key at a time; freed when it goes out of scope. */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        /* VLA buffer for the prefix walk ("/", "/a", "/a/b", ...). */
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y receives the map's own copy of the key so we can free it below. */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
550
551 static void unit_done(Unit *u) {
552 ExecContext *ec;
553 CGroupContext *cc;
554
555 assert(u);
556
557 if (u->type < 0)
558 return;
559
560 if (UNIT_VTABLE(u)->done)
561 UNIT_VTABLE(u)->done(u);
562
563 ec = unit_get_exec_context(u);
564 if (ec)
565 exec_context_done(ec);
566
567 cc = unit_get_cgroup_context(u);
568 if (cc)
569 cgroup_context_done(cc);
570 }
571
/* Destroys a unit: detaches it from the manager's lookup tables and queues, cancels
 * its jobs, severs all dependency links, releases cgroup/BPF/fd resources and frees
 * all owned memory. NULL-safe. The teardown order matters: names and jobs must be
 * dropped before dependency sets, and queue membership before the final free(). */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        /* Don't delete transient files while reloading: the unit may come right back. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop every one of our names from the manager's name lookup table. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Cancel and free any installed jobs. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Sever dependency links in both directions, for every dependency type. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        /* Keep on-disk state across reloads, drop it on real removal. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Clear references we hold and references others hold on us. */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Unlink from every manager list/queue we might be on. */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close BPF map fds and drop the BPF programs used for IP accounting/filtering. */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally, free owned strings and the object itself. Note that u->id and
         * u->instance are owned via u->names, released by set_free_free() below. */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
705
706 UnitActiveState unit_active_state(Unit *u) {
707 assert(u);
708
709 if (u->load_state == UNIT_MERGED)
710 return unit_active_state(unit_follow_merge(u));
711
712 /* After a reload it might happen that a unit is not correctly
713 * loaded but still has a process around. That's why we won't
714 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
715
716 return UNIT_VTABLE(u)->active_state(u);
717 }
718
719 const char* unit_sub_state_to_string(Unit *u) {
720 assert(u);
721
722 return UNIT_VTABLE(u)->sub_state_to_string(u);
723 }
724
725 static int set_complete_move(Set **s, Set **other) {
726 assert(s);
727 assert(other);
728
729 if (!other)
730 return 0;
731
732 if (*s)
733 return set_move(*s, *other);
734 else
735 *s = TAKE_PTR(*other);
736
737 return 0;
738 }
739
740 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
741 assert(s);
742 assert(other);
743
744 if (!*other)
745 return 0;
746
747 if (*s)
748 return hashmap_move(*s, *other);
749 else
750 *s = TAKE_PTR(*other);
751
752 return 0;
753 }
754
/* Moves all names of 'other' over to 'u' and repoints the manager's name lookup table
 * at 'u' for every name. Afterwards 'other' is nameless (names == NULL, id == NULL). */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* After the move other->names is empty (or was taken over); free the husk. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* hashmap_replace() must succeed since every name is already registered. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
776
777 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
778 unsigned n_reserve;
779
780 assert(u);
781 assert(other);
782 assert(d < _UNIT_DEPENDENCY_MAX);
783
784 /*
785 * If u does not have this dependency set allocated, there is no need
786 * to reserve anything. In that case other's set will be transferred
787 * as a whole to u by complete_move().
788 */
789 if (!u->dependencies[d])
790 return 0;
791
792 /* merge_dependencies() will skip a u-on-u dependency */
793 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
794
795 return hashmap_reserve(u->dependencies[d], n_reserve);
796 }
797
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                /* di_u.data may be NULL here, which leaves both masks zero. */
                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
861
/* Merges unit 'other' into 'u': all of other's names and dependencies are transferred,
 * other is marked UNIT_MERGED (pointing at u) and queued for cleanup. Fails with
 * -EEXIST/-EINVAL when the two units are incompatible (different types, instancing,
 * non-aliasable type, other already loaded/active/has jobs). */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both must be instanced, or both non-instanced. */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        /* Only stub/not-found units may be merged away. */
        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Stack copy of the id, since merge_names() clears other->id. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
939
940 int unit_merge_by_name(Unit *u, const char *name) {
941 _cleanup_free_ char *s = NULL;
942 Unit *other;
943 int r;
944
945 assert(u);
946 assert(name);
947
948 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
949 if (!u->instance)
950 return -EINVAL;
951
952 r = unit_name_replace_instance(name, u->instance, &s);
953 if (r < 0)
954 return r;
955
956 name = s;
957 }
958
959 other = manager_get_unit(u->manager, name);
960 if (other)
961 return unit_merge(u, other);
962
963 return unit_add_name(u, name);
964 }
965
966 Unit* unit_follow_merge(Unit *u) {
967 assert(u);
968
969 while (u->load_state == UNIT_MERGED)
970 assert_se(u = u->merged_into);
971
972 return u;
973 }
974
/* Adds the implicit dependencies a unit with an ExecContext needs: mount dependencies
 * for its working/root directories and State/Cache/...Directory= paths, plus (for the
 * system manager) tmpfiles-setup ordering for PrivateTmp= and journald ordering when
 * output goes to the journal/syslog/kmsg. Returns 0 or negative errno. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require mounts for every configured RuntimeDirectory=/StateDirectory=/... path,
         * prefixed with the manager's base directory for that directory type. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1054
1055 const char *unit_description(Unit *u) {
1056 assert(u);
1057
1058 if (u->description)
1059 return u->description;
1060
1061 return strna(u->id);
1062 }
1063
1064 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1065 const struct {
1066 UnitDependencyMask mask;
1067 const char *name;
1068 } table[] = {
1069 { UNIT_DEPENDENCY_FILE, "file" },
1070 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1071 { UNIT_DEPENDENCY_DEFAULT, "default" },
1072 { UNIT_DEPENDENCY_UDEV, "udev" },
1073 { UNIT_DEPENDENCY_PATH, "path" },
1074 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1075 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1076 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1077 };
1078 size_t i;
1079
1080 assert(f);
1081 assert(kind);
1082 assert(space);
1083
1084 for (i = 0; i < ELEMENTSOF(table); i++) {
1085
1086 if (mask == 0)
1087 break;
1088
1089 if (FLAGS_SET(mask, table[i].mask)) {
1090 if (*space)
1091 fputc(' ', f);
1092 else
1093 *space = true;
1094
1095 fputs(kind, f);
1096 fputs("-", f);
1097 fputs(table[i].name, f);
1098
1099 mask &= ~table[i].mask;
1100 }
1101 }
1102
1103 assert(mask == 0);
1104 }
1105
1106 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1107 char *t, **j;
1108 UnitDependency d;
1109 Iterator i;
1110 const char *prefix2;
1111 char
1112 timestamp0[FORMAT_TIMESTAMP_MAX],
1113 timestamp1[FORMAT_TIMESTAMP_MAX],
1114 timestamp2[FORMAT_TIMESTAMP_MAX],
1115 timestamp3[FORMAT_TIMESTAMP_MAX],
1116 timestamp4[FORMAT_TIMESTAMP_MAX],
1117 timespan[FORMAT_TIMESPAN_MAX];
1118 Unit *following;
1119 _cleanup_set_free_ Set *following_set = NULL;
1120 const char *n;
1121 CGroupMask m;
1122 int r;
1123
1124 assert(u);
1125 assert(u->type >= 0);
1126
1127 prefix = strempty(prefix);
1128 prefix2 = strjoina(prefix, "\t");
1129
1130 fprintf(f,
1131 "%s-> Unit %s:\n"
1132 "%s\tDescription: %s\n"
1133 "%s\tInstance: %s\n"
1134 "%s\tUnit Load State: %s\n"
1135 "%s\tUnit Active State: %s\n"
1136 "%s\tState Change Timestamp: %s\n"
1137 "%s\tInactive Exit Timestamp: %s\n"
1138 "%s\tActive Enter Timestamp: %s\n"
1139 "%s\tActive Exit Timestamp: %s\n"
1140 "%s\tInactive Enter Timestamp: %s\n"
1141 "%s\tMay GC: %s\n"
1142 "%s\tNeed Daemon Reload: %s\n"
1143 "%s\tTransient: %s\n"
1144 "%s\tPerpetual: %s\n"
1145 "%s\tGarbage Collection Mode: %s\n"
1146 "%s\tSlice: %s\n"
1147 "%s\tCGroup: %s\n"
1148 "%s\tCGroup realized: %s\n",
1149 prefix, u->id,
1150 prefix, unit_description(u),
1151 prefix, strna(u->instance),
1152 prefix, unit_load_state_to_string(u->load_state),
1153 prefix, unit_active_state_to_string(unit_active_state(u)),
1154 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1155 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1156 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1157 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1158 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1159 prefix, yes_no(unit_may_gc(u)),
1160 prefix, yes_no(unit_need_daemon_reload(u)),
1161 prefix, yes_no(u->transient),
1162 prefix, yes_no(u->perpetual),
1163 prefix, collect_mode_to_string(u->collect_mode),
1164 prefix, strna(unit_slice_name(u)),
1165 prefix, strna(u->cgroup_path),
1166 prefix, yes_no(u->cgroup_realized));
1167
1168 if (u->cgroup_realized_mask != 0) {
1169 _cleanup_free_ char *s = NULL;
1170 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1171 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1172 }
1173
1174 if (u->cgroup_enabled_mask != 0) {
1175 _cleanup_free_ char *s = NULL;
1176 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1177 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1178 }
1179
1180 m = unit_get_own_mask(u);
1181 if (m != 0) {
1182 _cleanup_free_ char *s = NULL;
1183 (void) cg_mask_to_string(m, &s);
1184 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1185 }
1186
1187 m = unit_get_members_mask(u);
1188 if (m != 0) {
1189 _cleanup_free_ char *s = NULL;
1190 (void) cg_mask_to_string(m, &s);
1191 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1192 }
1193
1194 m = unit_get_delegate_mask(u);
1195 if (m != 0) {
1196 _cleanup_free_ char *s = NULL;
1197 (void) cg_mask_to_string(m, &s);
1198 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1199 }
1200
1201 SET_FOREACH(t, u->names, i)
1202 fprintf(f, "%s\tName: %s\n", prefix, t);
1203
1204 if (!sd_id128_is_null(u->invocation_id))
1205 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1206 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1207
1208 STRV_FOREACH(j, u->documentation)
1209 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1210
1211 following = unit_following(u);
1212 if (following)
1213 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1214
1215 r = unit_following_set(u, &following_set);
1216 if (r >= 0) {
1217 Unit *other;
1218
1219 SET_FOREACH(other, following_set, i)
1220 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1221 }
1222
1223 if (u->fragment_path)
1224 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1225
1226 if (u->source_path)
1227 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1228
1229 STRV_FOREACH(j, u->dropin_paths)
1230 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1231
1232 if (u->failure_action != EMERGENCY_ACTION_NONE)
1233 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1234 if (u->failure_action_exit_status >= 0)
1235 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1236 if (u->success_action != EMERGENCY_ACTION_NONE)
1237 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1238 if (u->success_action_exit_status >= 0)
1239 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1240
1241 if (u->job_timeout != USEC_INFINITY)
1242 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1243
1244 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1245 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1246
1247 if (u->job_timeout_reboot_arg)
1248 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1249
1250 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1251 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1252
1253 if (dual_timestamp_is_set(&u->condition_timestamp))
1254 fprintf(f,
1255 "%s\tCondition Timestamp: %s\n"
1256 "%s\tCondition Result: %s\n",
1257 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1258 prefix, yes_no(u->condition_result));
1259
1260 if (dual_timestamp_is_set(&u->assert_timestamp))
1261 fprintf(f,
1262 "%s\tAssert Timestamp: %s\n"
1263 "%s\tAssert Result: %s\n",
1264 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1265 prefix, yes_no(u->assert_result));
1266
1267 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1268 UnitDependencyInfo di;
1269 Unit *other;
1270
1271 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1272 bool space = false;
1273
1274 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1275
1276 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1277 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1278
1279 fputs(")\n", f);
1280 }
1281 }
1282
1283 if (!hashmap_isempty(u->requires_mounts_for)) {
1284 UnitDependencyInfo di;
1285 const char *path;
1286
1287 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1288 bool space = false;
1289
1290 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1291
1292 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1293 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1294
1295 fputs(")\n", f);
1296 }
1297 }
1298
1299 if (u->load_state == UNIT_LOADED) {
1300
1301 fprintf(f,
1302 "%s\tStopWhenUnneeded: %s\n"
1303 "%s\tRefuseManualStart: %s\n"
1304 "%s\tRefuseManualStop: %s\n"
1305 "%s\tDefaultDependencies: %s\n"
1306 "%s\tOnFailureJobMode: %s\n"
1307 "%s\tIgnoreOnIsolate: %s\n",
1308 prefix, yes_no(u->stop_when_unneeded),
1309 prefix, yes_no(u->refuse_manual_start),
1310 prefix, yes_no(u->refuse_manual_stop),
1311 prefix, yes_no(u->default_dependencies),
1312 prefix, job_mode_to_string(u->on_failure_job_mode),
1313 prefix, yes_no(u->ignore_on_isolate));
1314
1315 if (UNIT_VTABLE(u)->dump)
1316 UNIT_VTABLE(u)->dump(u, f, prefix2);
1317
1318 } else if (u->load_state == UNIT_MERGED)
1319 fprintf(f,
1320 "%s\tMerged into: %s\n",
1321 prefix, u->merged_into->id);
1322 else if (u->load_state == UNIT_ERROR)
1323 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1324
1325 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1326 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1327
1328 if (u->job)
1329 job_dump(u->job, f, prefix2);
1330
1331 if (u->nop_job)
1332 job_dump(u->nop_job, f, prefix2);
1333 }
1334
1335 /* Common implementation for multiple backends */
1336 int unit_load_fragment_and_dropin(Unit *u) {
1337 int r;
1338
1339 assert(u);
1340
1341 /* Load a .{service,socket,...} file */
1342 r = unit_load_fragment(u);
1343 if (r < 0)
1344 return r;
1345
1346 if (u->load_state == UNIT_STUB)
1347 return -ENOENT;
1348
1349 /* Load drop-in directory data. If u is an alias, we might be reloading the
1350 * target unit needlessly. But we cannot be sure which drops-ins have already
1351 * been loaded and which not, at least without doing complicated book-keeping,
1352 * so let's always reread all drop-ins. */
1353 return unit_load_dropin(unit_follow_merge(u));
1354 }
1355
1356 /* Common implementation for multiple backends */
1357 int unit_load_fragment_and_dropin_optional(Unit *u) {
1358 int r;
1359
1360 assert(u);
1361
1362 /* Same as unit_load_fragment_and_dropin(), but whether
1363 * something can be loaded or not doesn't matter. */
1364
1365 /* Load a .service/.socket/.slice/… file */
1366 r = unit_load_fragment(u);
1367 if (r < 0)
1368 return r;
1369
1370 if (u->load_state == UNIT_STUB)
1371 u->load_state = UNIT_LOADED;
1372
1373 /* Load drop-in directory data */
1374 return unit_load_dropin(unit_follow_merge(u));
1375 }
1376
1377 void unit_add_to_target_deps_queue(Unit *u) {
1378 Manager *m = u->manager;
1379
1380 assert(u);
1381
1382 if (u->in_target_deps_queue)
1383 return;
1384
1385 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1386 u->in_target_deps_queue = true;
1387 }
1388
1389 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1390 assert(u);
1391 assert(target);
1392
1393 if (target->type != UNIT_TARGET)
1394 return 0;
1395
1396 /* Only add the dependency if both units are loaded, so that
1397 * that loop check below is reliable */
1398 if (u->load_state != UNIT_LOADED ||
1399 target->load_state != UNIT_LOADED)
1400 return 0;
1401
1402 /* If either side wants no automatic dependencies, then let's
1403 * skip this */
1404 if (!u->default_dependencies ||
1405 !target->default_dependencies)
1406 return 0;
1407
1408 /* Don't create loops */
1409 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1410 return 0;
1411
1412 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1413 }
1414
1415 static int unit_add_slice_dependencies(Unit *u) {
1416 UnitDependencyMask mask;
1417 assert(u);
1418
1419 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1420 return 0;
1421
1422 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1423 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1424 relationship). */
1425 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1426
1427 if (UNIT_ISSET(u->slice))
1428 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1429
1430 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1431 return 0;
1432
1433 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1434 }
1435
/* For every path in RequiresMountsFor=, add After= (and, for units backed by a
 * fragment, Requires=) dependencies on the mount units covering that path and all of
 * its parent directories. Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk every prefix of the path ("/", "/a", "/a/b", …) so that nested
                 * mounts are all covered. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only require mounts that have a real unit file; others are
                         * presumably synthesized — TODO confirm against mount.c. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1485
1486 static int unit_add_startup_units(Unit *u) {
1487 CGroupContext *c;
1488 int r;
1489
1490 c = unit_get_cgroup_context(u);
1491 if (!c)
1492 return 0;
1493
1494 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1495 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1496 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1497 return 0;
1498
1499 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1500 if (r < 0)
1501 return r;
1502
1503 return set_put(u->manager->startup_units, u);
1504 }
1505
/* Load a unit that is still in UNIT_STUB state: flush any pending transient file,
 * run the type-specific load hook, and wire up implicit dependencies. On failure the
 * unit's load_state is set to UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR and the
 * error is recorded in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* If queued for loading, drop it from the queue first — we are handling it now. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything not in stub state has been dealt with already (loaded, merged, failed…). */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Hand over to the unit type's own load routine, if it has one. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after the type-specific load: no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        /* Sanity: merged_into is set if and only if the unit was merged. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1594
/* printf-style log callback (passed to condition_test_list() below): when a unit is
 * given as userdata, logs with the manager's unit/invocation log fields attached as
 * structured metadata; otherwise logs plainly. Returns the result of the underlying
 * log call. */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1615
1616 static bool unit_test_condition(Unit *u) {
1617 assert(u);
1618
1619 dual_timestamp_get(&u->condition_timestamp);
1620 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1621
1622 unit_add_to_dbus_queue(u);
1623
1624 return u->condition_result;
1625 }
1626
1627 static bool unit_test_assert(Unit *u) {
1628 assert(u);
1629
1630 dual_timestamp_get(&u->assert_timestamp);
1631 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1632
1633 unit_add_to_dbus_queue(u);
1634
1635 return u->assert_result;
1636 }
1637
1638 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1639 const char *d;
1640
1641 d = unit_description(u);
1642 if (log_get_show_color())
1643 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1644
1645 DISABLE_WARNING_FORMAT_NONLITERAL;
1646 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1647 REENABLE_WARNING;
1648 }
1649
1650 int unit_test_start_limit(Unit *u) {
1651 const char *reason;
1652
1653 assert(u);
1654
1655 if (ratelimit_below(&u->start_limit)) {
1656 u->start_limit_hit = false;
1657 return 0;
1658 }
1659
1660 log_unit_warning(u, "Start request repeated too quickly.");
1661 u->start_limit_hit = true;
1662
1663 reason = strjoina("unit ", u->id, " failed");
1664
1665 emergency_action(u->manager, u->start_limit_action,
1666 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1667 u->reboot_arg, -1, reason);
1668
1669 return -ECANCELED;
1670 }
1671
1672 bool unit_shall_confirm_spawn(Unit *u) {
1673 assert(u);
1674
1675 if (manager_is_confirm_spawn_disabled(u->manager))
1676 return false;
1677
1678 /* For some reasons units remaining in the same process group
1679 * as PID 1 fail to acquire the console even if it's not used
1680 * by any process. So skip the confirmation question for them. */
1681 return !unit_get_exec_context(u)->same_pgrp;
1682 }
1683
/* Verify that every BindsTo= partner that is also ordered After= this unit is
 * currently active or reloading; returns false (caller should refuse start) otherwise. */
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Skip BindsTo= partners without a matching After= ordering. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1709
/* Errors that aren't really errors:
 * -EALREADY: Unit is already started.
 * -ECOMM: Condition failed
 * -EAGAIN: An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 * -EBADR: This unit type does not support starting.
 * -ECANCELED: Start limit hit, too many requests for now
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u)) {

                /* Let's also check the start limit here. Normally, the start limit is only checked by the
                 * .start() method of the unit type after it did some additional checks verifying everything
                 * is in order (so that those other checks can propagate errors properly). However, if a
                 * condition check doesn't hold we don't get that far but we should still ensure we are not
                 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
                 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
                 * hence the chance is big that any triggering unit for us will trigger us again. Note this
                 * condition check is a bit different from the condition check inside the per-unit .start()
                 * function, as this one will not change the unit's state in any way (and we shouldn't here,
                 * after all the condition failed). */

                r = unit_test_start_limit(u);
                if (r < 0)
                        return r;

                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1808
1809 bool unit_can_start(Unit *u) {
1810 assert(u);
1811
1812 if (u->load_state != UNIT_LOADED)
1813 return false;
1814
1815 if (!unit_supported(u))
1816 return false;
1817
1818 /* Scope units may be started only once */
1819 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1820 return false;
1821
1822 return !!UNIT_VTABLE(u)->start;
1823 }
1824
1825 bool unit_can_isolate(Unit *u) {
1826 assert(u);
1827
1828 return unit_can_start(u) &&
1829 u->allow_isolate;
1830 }
1831
1832 /* Errors:
1833 * -EBADR: This unit type does not support stopping.
1834 * -EALREADY: Unit is already stopped.
1835 * -EAGAIN: An operation is already in progress. Retry later.
1836 */
1837 int unit_stop(Unit *u) {
1838 UnitActiveState state;
1839 Unit *following;
1840
1841 assert(u);
1842
1843 state = unit_active_state(u);
1844 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1845 return -EALREADY;
1846
1847 following = unit_following(u);
1848 if (following) {
1849 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1850 return unit_stop(following);
1851 }
1852
1853 if (!UNIT_VTABLE(u)->stop)
1854 return -EBADR;
1855
1856 unit_add_to_dbus_queue(u);
1857
1858 return UNIT_VTABLE(u)->stop(u);
1859 }
1860
1861 bool unit_can_stop(Unit *u) {
1862 assert(u);
1863
1864 if (!unit_supported(u))
1865 return false;
1866
1867 if (u->perpetual)
1868 return false;
1869
1870 return !!UNIT_VTABLE(u)->stop;
1871 }
1872
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        /* Only one reload at a time. */
        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        /* Only active units can be reloaded. */
        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
1915
1916 bool unit_can_reload(Unit *u) {
1917 assert(u);
1918
1919 if (UNIT_VTABLE(u)->can_reload)
1920 return UNIT_VTABLE(u)->can_reload(u);
1921
1922 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1923 return true;
1924
1925 return UNIT_VTABLE(u)->reload;
1926 }
1927
/* Whether a StopWhenUnneeded= unit has become unneeded: it is active and nothing
 * that requires, requisites, wants or binds to it is active, queued or restarting. */
bool unit_is_unneeded(Unit *u) {
        /* The "reverse" dependency kinds that count as "somebody needs us". */
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}
1970
1971 static void check_unneeded_dependencies(Unit *u) {
1972
1973 static const UnitDependency deps[] = {
1974 UNIT_REQUIRES,
1975 UNIT_REQUISITE,
1976 UNIT_WANTS,
1977 UNIT_BINDS_TO,
1978 };
1979 size_t j;
1980
1981 assert(u);
1982
1983 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1984
1985 for (j = 0; j < ELEMENTSOF(deps); j++) {
1986 Unit *other;
1987 Iterator i;
1988 void *v;
1989
1990 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
1991 unit_submit_to_stop_when_unneeded_queue(other);
1992 }
1993 }
1994
/* If any BindsTo= partner of this active unit has gone inactive (with no pending job
 * that might bring it back), enqueue a stop job for this unit, subject to the
 * auto-stop rate limit. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A queued job will resolve the situation on its own; don't interfere. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* NB: 'other' still points at the offending unit after this break and
                 * is used in the log messages below. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2045
/* A unit became active without its dependencies being pulled in through the job
 * engine (e.g. an externally started unit): enqueue start jobs for its Requires=,
 * BindsTo= and Wants= partners, and stop jobs for Conflicts= partners. Job results
 * are deliberately ignored — this is best-effort. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Skip units we are ordered After= — presumably the ordering means they were
         * handled already or intentionally not started first; TODO confirm rationale. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        /* Wants= uses JOB_FAIL rather than JOB_REPLACE: a weak dependency must not
         * displace existing jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2077
2078 static void retroactively_stop_dependencies(Unit *u) {
2079 Unit *other;
2080 Iterator i;
2081 void *v;
2082
2083 assert(u);
2084 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2085
2086 /* Pull down units which are bound to us recursively if enabled */
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2090 }
2091
2092 void unit_start_on_failure(Unit *u) {
2093 Unit *other;
2094 Iterator i;
2095 void *v;
2096 int r;
2097
2098 assert(u);
2099
2100 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2101 return;
2102
2103 log_unit_info(u, "Triggering OnFailure= dependencies.");
2104
2105 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2106 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2107
2108 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2109 if (r < 0)
2110 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2111 }
2112 }
2113
2114 void unit_trigger_notify(Unit *u) {
2115 Unit *other;
2116 Iterator i;
2117 void *v;
2118
2119 assert(u);
2120
2121 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2122 if (UNIT_VTABLE(other)->trigger_notify)
2123 UNIT_VTABLE(other)->trigger_notify(other, u);
2124 }
2125
2126 static int unit_log_resources(Unit *u) {
2127 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2128 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2129 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2130 size_t n_message_parts = 0, n_iovec = 0;
2131 char* message_parts[1 + 2 + 2 + 1], *t;
2132 nsec_t nsec = NSEC_INFINITY;
2133 CGroupIPAccountingMetric m;
2134 size_t i;
2135 int r;
2136 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2137 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2138 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2139 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2140 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2141 };
2142 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2143 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2144 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2145 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2146 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2147 };
2148
2149 assert(u);
2150
2151 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2152 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2153 * information and the complete data in structured fields. */
2154
2155 (void) unit_get_cpu_usage(u, &nsec);
2156 if (nsec != NSEC_INFINITY) {
2157 char buf[FORMAT_TIMESPAN_MAX] = "";
2158
2159 /* Format the CPU time for inclusion in the structured log message */
2160 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2161 r = log_oom();
2162 goto finish;
2163 }
2164 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2165
2166 /* Format the CPU time for inclusion in the human language message string */
2167 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2168 t = strjoin("consumed ", buf, " CPU time");
2169 if (!t) {
2170 r = log_oom();
2171 goto finish;
2172 }
2173
2174 message_parts[n_message_parts++] = t;
2175 }
2176
2177 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2178 char buf[FORMAT_BYTES_MAX] = "";
2179 uint64_t value = UINT64_MAX;
2180
2181 assert(io_fields[k]);
2182
2183 (void) unit_get_io_accounting(u, k, k > 0, &value);
2184 if (value == UINT64_MAX)
2185 continue;
2186
2187 have_io_accounting = true;
2188 if (value > 0)
2189 any_io = true;
2190
2191 /* Format IO accounting data for inclusion in the structured log message */
2192 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2193 r = log_oom();
2194 goto finish;
2195 }
2196 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2197
2198 /* Format the IO accounting data for inclusion in the human language message string, but only
2199 * for the bytes counters (and not for the operations counters) */
2200 if (k == CGROUP_IO_READ_BYTES) {
2201 assert(!rr);
2202 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2203 if (!rr) {
2204 r = log_oom();
2205 goto finish;
2206 }
2207 } else if (k == CGROUP_IO_WRITE_BYTES) {
2208 assert(!wr);
2209 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2210 if (!wr) {
2211 r = log_oom();
2212 goto finish;
2213 }
2214 }
2215 }
2216
2217 if (have_io_accounting) {
2218 if (any_io) {
2219 if (rr)
2220 message_parts[n_message_parts++] = TAKE_PTR(rr);
2221 if (wr)
2222 message_parts[n_message_parts++] = TAKE_PTR(wr);
2223
2224 } else {
2225 char *k;
2226
2227 k = strdup("no IO");
2228 if (!k) {
2229 r = log_oom();
2230 goto finish;
2231 }
2232
2233 message_parts[n_message_parts++] = k;
2234 }
2235 }
2236
2237 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2238 char buf[FORMAT_BYTES_MAX] = "";
2239 uint64_t value = UINT64_MAX;
2240
2241 assert(ip_fields[m]);
2242
2243 (void) unit_get_ip_accounting(u, m, &value);
2244 if (value == UINT64_MAX)
2245 continue;
2246
2247 have_ip_accounting = true;
2248 if (value > 0)
2249 any_traffic = true;
2250
2251 /* Format IP accounting data for inclusion in the structured log message */
2252 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2253 r = log_oom();
2254 goto finish;
2255 }
2256 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2257
2258 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2259 * bytes counters (and not for the packets counters) */
2260 if (m == CGROUP_IP_INGRESS_BYTES) {
2261 assert(!igress);
2262 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2263 if (!igress) {
2264 r = log_oom();
2265 goto finish;
2266 }
2267 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2268 assert(!egress);
2269 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2270 if (!egress) {
2271 r = log_oom();
2272 goto finish;
2273 }
2274 }
2275 }
2276
2277 if (have_ip_accounting) {
2278 if (any_traffic) {
2279 if (igress)
2280 message_parts[n_message_parts++] = TAKE_PTR(igress);
2281 if (egress)
2282 message_parts[n_message_parts++] = TAKE_PTR(egress);
2283
2284 } else {
2285 char *k;
2286
2287 k = strdup("no IP traffic");
2288 if (!k) {
2289 r = log_oom();
2290 goto finish;
2291 }
2292
2293 message_parts[n_message_parts++] = k;
2294 }
2295 }
2296
2297 /* Is there any accounting data available at all? */
2298 if (n_iovec == 0) {
2299 r = 0;
2300 goto finish;
2301 }
2302
2303 if (n_message_parts == 0)
2304 t = strjoina("MESSAGE=", u->id, ": Completed.");
2305 else {
2306 _cleanup_free_ char *joined;
2307
2308 message_parts[n_message_parts] = NULL;
2309
2310 joined = strv_join(message_parts, ", ");
2311 if (!joined) {
2312 r = log_oom();
2313 goto finish;
2314 }
2315
2316 joined[0] = ascii_toupper(joined[0]);
2317 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2318 }
2319
2320 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2321 * and hence don't increase n_iovec for them */
2322 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2323 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2324
2325 t = strjoina(u->manager->unit_log_field, u->id);
2326 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2327
2328 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2329 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2330
2331 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2332 r = 0;
2333
2334 finish:
2335 for (i = 0; i < n_message_parts; i++)
2336 free(message_parts[i]);
2337
2338 for (i = 0; i < n_iovec; i++)
2339 free(iovec[i].iov_base);
2340
2341 return r;
2342
2343 }
2344
2345 static void unit_update_on_console(Unit *u) {
2346 bool b;
2347
2348 assert(u);
2349
2350 b = unit_needs_console(u);
2351 if (u->on_console == b)
2352 return;
2353
2354 u->on_console = b;
2355 if (b)
2356 manager_ref_console(u->manager);
2357 else
2358 manager_unref_console(u->manager);
2359 }
2360
2361 static void unit_emit_audit_start(Unit *u) {
2362 assert(u);
2363
2364 if (u->type != UNIT_SERVICE)
2365 return;
2366
2367 /* Write audit record if we have just finished starting up */
2368 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2369 u->in_audit = true;
2370 }
2371
2372 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2373 assert(u);
2374
2375 if (u->type != UNIT_SERVICE)
2376 return;
2377
2378 if (u->in_audit) {
2379 /* Write audit record if we have just finished shutting down */
2380 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2381 u->in_audit = false;
2382 } else {
2383 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2384 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2385
2386 if (state == UNIT_INACTIVE)
2387 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2388 }
2389 }
2390
/* Reconciles the unit's new active state @ns with its pending job @j: finishes the job when the new state
 * satisfies it, fails/invalidates it when the state contradicts it. Returns true if the state change was
 * "unexpected", i.e. not something the running job itself would produce. */
static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected = false;

        assert(j);

        if (j->state == JOB_WAITING)

                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                /* Active or reloading satisfies a start/verify job */
                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        /* A start job that ends in an inactive/failed state has failed (unless the unit
                         * went down cleanly, which counts as JOB_DONE) */
                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        /* UNIT_NOTIFY_RELOAD_FAILURE tells us the reload itself went wrong even though
                         * the unit is active again */
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                /* Inactive or failed satisfies a stop job */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached("Job type unknown");
        }

        return unexpected;
}
2457
/* Central hook invoked by unit type implementations whenever a unit changes its (low-level) state from @os
 * to @ns. Updates timestamps, reconciles pending jobs, retroactively starts/stops dependencies, emits audit
 * and log records, and triggers failure/success emergency actions. Note that this is called for all
 * low-level state changes, even if they might map to the same high-level UnitActiveState! That means that
 * ns == os is an expected behavior here. For example: if a mount point is remounted this function will be
 * called too! */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update timestamps for state changes (skipped while deserializing, so that reload does not
         * clobber the real timestamps) */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job; without a job every change counts as
                 * "unexpected" */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, flags);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= handling is suppressed when the unit will be auto-restarted anyway */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        unit_add_to_gc_queue(u);
}
2573
/* Starts watching the process @pid on behalf of @u. The manager's watch_pids hashmap maps "pid" to a single
 * Unit*, and additionally may map "-pid" to a NULL-terminated Unit* array when more than one unit watches
 * the same PID. If @exclusive is true, any stale references other units hold on this PID are dropped first.
 * Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pid(u->manager, pid);

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                /* note: after the loop n is the length of the existing array */

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, one slot larger plus the NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also record the PID in the unit's own set, so it can be enumerated/unwatched later */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2644
2645 void unit_unwatch_pid(Unit *u, pid_t pid) {
2646 Unit **array;
2647
2648 assert(u);
2649 assert(pid_is_valid(pid));
2650
2651 /* First let's drop the unit in case it's keyed as "pid". */
2652 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2653
2654 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2655 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2656 if (array) {
2657 size_t n, m = 0;
2658
2659 /* Let's iterate through the array, dropping our own entry */
2660 for (n = 0; array[n]; n++)
2661 if (array[n] != u)
2662 array[m++] = array[n];
2663 array[m] = NULL;
2664
2665 if (m == 0) {
2666 /* The array is now empty, remove the entire entry */
2667 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2668 free(array);
2669 }
2670 }
2671
2672 (void) set_remove(u->pids, PID_TO_PTR(pid));
2673 }
2674
2675 void unit_unwatch_all_pids(Unit *u) {
2676 assert(u);
2677
2678 while (!set_isempty(u->pids))
2679 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2680
2681 u->pids = set_free(u->pids);
2682 }
2683
/* Drops watched PIDs whose processes no longer exist, keeping the unit's main and control PIDs
 * unconditionally. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                /* Never drop the unit's own main/control PID here */
                if (pid == except1 || pid == except2)
                        continue;

                /* pid_is_unwaited() reports whether the process still exists. NOTE(review): this removes
                 * entries from u->pids while SET_FOREACH iterates it — presumably the set iterator
                 * tolerates removal of the current entry; confirm against set.h/hashmap.c. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2706
2707 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2708 Unit *u = userdata;
2709
2710 assert(s);
2711 assert(u);
2712
2713 unit_tidy_watch_pids(u);
2714 unit_watch_all_pids(u);
2715
2716 /* If the PID set is empty now, then let's finish this off. */
2717 unit_synthesize_cgroup_empty_event(u);
2718
2719 return 0;
2720 }
2721
2722 int unit_enqueue_rewatch_pids(Unit *u) {
2723 int r;
2724
2725 assert(u);
2726
2727 if (!u->cgroup_path)
2728 return -ENOENT;
2729
2730 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2731 if (r < 0)
2732 return r;
2733 if (r > 0) /* On unified we can use proper notifications */
2734 return 0;
2735
2736 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2737 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2738 * involves issuing kill(pid, 0) on all processes we watch. */
2739
2740 if (!u->rewatch_pids_event_source) {
2741 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2742
2743 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2744 if (r < 0)
2745 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2746
2747 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2748 if (r < 0)
2749 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2750
2751 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2752
2753 u->rewatch_pids_event_source = TAKE_PTR(s);
2754 }
2755
2756 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2757 if (r < 0)
2758 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2759
2760 return 0;
2761 }
2762
2763 void unit_dequeue_rewatch_pids(Unit *u) {
2764 int r;
2765 assert(u);
2766
2767 if (!u->rewatch_pids_event_source)
2768 return;
2769
2770 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2771 if (r < 0)
2772 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2773
2774 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2775 }
2776
2777 bool unit_job_is_applicable(Unit *u, JobType j) {
2778 assert(u);
2779 assert(j >= 0 && j < _JOB_TYPE_MAX);
2780
2781 switch (j) {
2782
2783 case JOB_VERIFY_ACTIVE:
2784 case JOB_START:
2785 case JOB_NOP:
2786 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2787 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2788 * jobs for it. */
2789 return true;
2790
2791 case JOB_STOP:
2792 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2793 * external events), hence it makes no sense to permit enqueing such a request either. */
2794 return !u->perpetual;
2795
2796 case JOB_RESTART:
2797 case JOB_TRY_RESTART:
2798 return unit_can_stop(u) && unit_can_start(u);
2799
2800 case JOB_RELOAD:
2801 case JOB_TRY_RELOAD:
2802 return unit_can_reload(u);
2803
2804 case JOB_RELOAD_OR_START:
2805 return unit_can_reload(u) && unit_can_start(u);
2806
2807 default:
2808 assert_not_reached("Invalid job type");
2809 }
2810 }
2811
2812 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2813 assert(u);
2814
2815 /* Only warn about some unit types */
2816 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2817 return;
2818
2819 if (streq_ptr(u->id, other))
2820 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2821 else
2822 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2823 }
2824
/* Records a dependency on @other in the hashmap *@h, merging the given origin/destination mask bits into
 * any pre-existing entry. The two masks are packed directly into the hashmap value pointer through the
 * UnitDependencyInfo union — hence the assert_cc() below checking that the union fits a void*. Returns 1 if
 * the entry was created or extended, 0 if all requested bits were already present, negative errno on OOM. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* The masks are stored in the value pointer itself, so the union must be pointer-sized */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2871
2872 int unit_add_dependency(
2873 Unit *u,
2874 UnitDependency d,
2875 Unit *other,
2876 bool add_reference,
2877 UnitDependencyMask mask) {
2878
2879 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2880 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2881 [UNIT_WANTS] = UNIT_WANTED_BY,
2882 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2883 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2884 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2885 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2886 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2887 [UNIT_WANTED_BY] = UNIT_WANTS,
2888 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2889 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2890 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2891 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2892 [UNIT_BEFORE] = UNIT_AFTER,
2893 [UNIT_AFTER] = UNIT_BEFORE,
2894 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2895 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2896 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2897 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2898 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2899 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2900 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2901 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2902 };
2903 Unit *original_u = u, *original_other = other;
2904 int r;
2905
2906 assert(u);
2907 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2908 assert(other);
2909
2910 u = unit_follow_merge(u);
2911 other = unit_follow_merge(other);
2912
2913 /* We won't allow dependencies on ourselves. We will not
2914 * consider them an error however. */
2915 if (u == other) {
2916 maybe_warn_about_dependency(original_u, original_other->id, d);
2917 return 0;
2918 }
2919
2920 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2921 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2922 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2923 return 0;
2924 }
2925
2926 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2927 if (r < 0)
2928 return r;
2929
2930 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2931 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2932 if (r < 0)
2933 return r;
2934 }
2935
2936 if (add_reference) {
2937 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2938 if (r < 0)
2939 return r;
2940
2941 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2942 if (r < 0)
2943 return r;
2944 }
2945
2946 unit_add_to_dbus_queue(u);
2947 return 0;
2948 }
2949
2950 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2951 int r;
2952
2953 assert(u);
2954
2955 r = unit_add_dependency(u, d, other, add_reference, mask);
2956 if (r < 0)
2957 return r;
2958
2959 return unit_add_dependency(u, e, other, add_reference, mask);
2960 }
2961
2962 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2963 int r;
2964
2965 assert(u);
2966 assert(name);
2967 assert(buf);
2968 assert(ret);
2969
2970 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2971 *buf = NULL;
2972 *ret = name;
2973 return 0;
2974 }
2975
2976 if (u->instance)
2977 r = unit_name_replace_instance(name, u->instance, buf);
2978 else {
2979 _cleanup_free_ char *i = NULL;
2980
2981 r = unit_name_to_prefix(u->id, &i);
2982 if (r < 0)
2983 return r;
2984
2985 r = unit_name_replace_instance(name, i, buf);
2986 }
2987 if (r < 0)
2988 return r;
2989
2990 *ret = *buf;
2991 return 0;
2992 }
2993
2994 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2995 _cleanup_free_ char *buf = NULL;
2996 Unit *other;
2997 int r;
2998
2999 assert(u);
3000 assert(name);
3001
3002 r = resolve_template(u, name, &buf, &name);
3003 if (r < 0)
3004 return r;
3005
3006 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3007 if (r < 0)
3008 return r;
3009
3010 return unit_add_dependency(u, d, other, add_reference, mask);
3011 }
3012
3013 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3014 _cleanup_free_ char *buf = NULL;
3015 Unit *other;
3016 int r;
3017
3018 assert(u);
3019 assert(name);
3020
3021 r = resolve_template(u, name, &buf, &name);
3022 if (r < 0)
3023 return r;
3024
3025 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3026 if (r < 0)
3027 return r;
3028
3029 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3030 }
3031
int set_unit_path(const char *p) {
        /* Points the manager at an alternate unit search path via the environment.
         * This is mostly for debug purposes. */
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
3039
3040 char *unit_dbus_path(Unit *u) {
3041 assert(u);
3042
3043 if (!u->id)
3044 return NULL;
3045
3046 return unit_dbus_path_from_name(u->id);
3047 }
3048
3049 char *unit_dbus_path_invocation_id(Unit *u) {
3050 assert(u);
3051
3052 if (sd_id128_is_null(u->invocation_id))
3053 return NULL;
3054
3055 return unit_dbus_path_from_name(u->invocation_id_string);
3056 }
3057
3058 int unit_set_slice(Unit *u, Unit *slice) {
3059 assert(u);
3060 assert(slice);
3061
3062 /* Sets the unit slice if it has not been set before. Is extra
3063 * careful, to only allow this for units that actually have a
3064 * cgroup context. Also, we don't allow to set this for slices
3065 * (since the parent slice is derived from the name). Make
3066 * sure the unit we set is actually a slice. */
3067
3068 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3069 return -EOPNOTSUPP;
3070
3071 if (u->type == UNIT_SLICE)
3072 return -EINVAL;
3073
3074 if (unit_active_state(u) != UNIT_INACTIVE)
3075 return -EBUSY;
3076
3077 if (slice->type != UNIT_SLICE)
3078 return -EINVAL;
3079
3080 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3081 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3082 return -EPERM;
3083
3084 if (UNIT_DEREF(u->slice) == slice)
3085 return 0;
3086
3087 /* Disallow slice changes if @u is already bound to cgroups */
3088 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3089 return -EBUSY;
3090
3091 unit_ref_set(&u->slice, u, slice);
3092 return 1;
3093 }
3094
/* Picks and loads the default slice for @u if none was configured: instantiated units get a per-template
 * slice (e.g. "system-getty.slice"), everything else goes into system.slice (root.slice for the user
 * manager and for init.scope). Returns what unit_set_slice() returns, or 0 if a slice was already set. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                /* strjoina() allocates on the stack; the result stays valid through the rest of this
                 * function, which is all we need */
                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina(escaped, ".slice");
        } else
                slice_name =
                        MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3138
3139 const char *unit_slice_name(Unit *u) {
3140 assert(u);
3141
3142 if (!UNIT_ISSET(u->slice))
3143 return NULL;
3144
3145 return UNIT_DEREF(u->slice)->id;
3146 }
3147
3148 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3149 _cleanup_free_ char *t = NULL;
3150 int r;
3151
3152 assert(u);
3153 assert(type);
3154 assert(_found);
3155
3156 r = unit_name_change_suffix(u->id, type, &t);
3157 if (r < 0)
3158 return r;
3159 if (unit_has_name(u, t))
3160 return -EINVAL;
3161
3162 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3163 assert(r < 0 || *_found != u);
3164 return r;
3165 }
3166
3167 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3168 const char *name, *old_owner, *new_owner;
3169 Unit *u = userdata;
3170 int r;
3171
3172 assert(message);
3173 assert(u);
3174
3175 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3176 if (r < 0) {
3177 bus_log_parse_error(r);
3178 return 0;
3179 }
3180
3181 old_owner = empty_to_null(old_owner);
3182 new_owner = empty_to_null(new_owner);
3183
3184 if (UNIT_VTABLE(u)->bus_name_owner_change)
3185 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3186
3187 return 0;
3188 }
3189
3190 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3191 const char *match;
3192
3193 assert(u);
3194 assert(bus);
3195 assert(name);
3196
3197 if (u->match_bus_slot)
3198 return -EBUSY;
3199
3200 match = strjoina("type='signal',"
3201 "sender='org.freedesktop.DBus',"
3202 "path='/org/freedesktop/DBus',"
3203 "interface='org.freedesktop.DBus',"
3204 "member='NameOwnerChanged',"
3205 "arg0='", name, "'");
3206
3207 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3208 }
3209
3210 int unit_watch_bus_name(Unit *u, const char *name) {
3211 int r;
3212
3213 assert(u);
3214 assert(name);
3215
3216 /* Watch a specific name on the bus. We only support one unit
3217 * watching each name for now. */
3218
3219 if (u->manager->api_bus) {
3220 /* If the bus is already available, install the match directly.
3221 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3222 r = unit_install_bus_match(u, u->manager->api_bus, name);
3223 if (r < 0)
3224 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3225 }
3226
3227 r = hashmap_put(u->manager->watch_bus, name, u);
3228 if (r < 0) {
3229 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3230 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3231 }
3232
3233 return 0;
3234 }
3235
3236 void unit_unwatch_bus_name(Unit *u, const char *name) {
3237 assert(u);
3238 assert(name);
3239
3240 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3241 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3242 }
3243
3244 bool unit_can_serialize(Unit *u) {
3245 assert(u);
3246
3247 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3248 }
3249
3250 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3251 _cleanup_free_ char *s = NULL;
3252 int r;
3253
3254 assert(f);
3255 assert(key);
3256
3257 if (mask == 0)
3258 return 0;
3259
3260 r = cg_mask_to_string(mask, &s);
3261 if (r < 0)
3262 return log_error_errno(r, "Failed to format cgroup mask: %m");
3263
3264 return serialize_item(f, key, s);
3265 }
3266
/* Field names used to (de)serialize the per-unit IP accounting counters */
static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};

/* Field names for the IO accounting "base" values (NOTE(review): presumably the counter value accumulated
 * before the current cgroup incarnation — confirm against the IO accounting code in cgroup.c) */
static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
        [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
        [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
        [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
};

/* Field names for the IO accounting "last" values (NOTE(review): presumably the most recently sampled raw
 * counters — confirm against the IO accounting code in cgroup.c) */
static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
        [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
        [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
        [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
};
3287
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Writes the generic unit state as "key=value" lines to f, terminated by an empty line, with
         * type-specific state first. FDs that must survive the reexec/reload are registered in fds. The
         * counterpart reader is unit_deserialize(), so keys here must match the keys handled there. */

        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);

        (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
        (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
        (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
        (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);

        /* The condition/assert results are only written if the matching timestamp shows they were ever
         * evaluated. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                (void) serialize_bool(f, "condition-result", u->condition_result);

        if (dual_timestamp_is_set(&u->assert_timestamp))
                (void) serialize_bool(f, "assert-result", u->assert_result);

        (void) serialize_bool(f, "transient", u->transient);
        (void) serialize_bool(f, "in-audit", u->in_audit);

        (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
        (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
        (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
        (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
        (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY) /* NSEC_INFINITY appears to mean "never sampled" — skipped */
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->oom_kill_last > 0)
                (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);

        for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
                (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);

                if (u->io_accounting_last[im] != UINT64_MAX) /* UINT64_MAX appears to mean "never sampled" */
                        (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
        }

        if (u->cgroup_path)
                (void) serialize_item(f, "cgroup", u->cgroup_path);

        (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);

        if (uid_is_valid(u->ref_uid))
                (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        /* Each tracked bus peer is written as its own "ref=" line. */
        bus_track_serialize(u->bus_track, f, "ref");

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                /* Both the installed job and the nop job are written under the same "job" key; each is a
                 * nested section read back via unit_deserialize_job(). */
                if (u->job) {
                        fputs("job\n", f);
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fputs("job\n", f);
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3383
3384 static int unit_deserialize_job(Unit *u, FILE *f) {
3385 _cleanup_(job_freep) Job *j = NULL;
3386 int r;
3387
3388 assert(u);
3389 assert(f);
3390
3391 j = job_new_raw(u);
3392 if (!j)
3393 return log_oom();
3394
3395 r = job_deserialize(j, f);
3396 if (r < 0)
3397 return r;
3398
3399 r = job_install_deserialized(j);
3400 if (r < 0)
3401 return r;
3402
3403 TAKE_PTR(j);
3404 return 0;
3405 }
3406
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Reads back the "key=value" lines written by unit_serialize(), up to (and including) the empty
         * end-marker line. Unknown or malformed values are logged and skipped, so that deserialization from
         * a different systemd version degrades gracefully instead of failing. */

        for (;;) {
                _cleanup_free_ char *line = NULL;
                char *l, *v;
                ssize_t m;
                size_t k;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0) /* eof */
                        break;

                l = strstrip(line);
                if (isempty(l)) /* End marker */
                        break;

                /* Split into key and value at the first '='. If there is none, v points at the trailing
                 * NUL, i.e. the value is the empty string. */
                k = strcspn(l, "=");

                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;

                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* New-style serialized job */
                                r = unit_deserialize_job(u, f);
                                if (r < 0)
                                        return r;
                        } else /* Legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "state-change-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;

                        continue;

                } else if (streq(l, "assert-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;

                        continue;

                } else if (streq(l, "transient")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;

                        continue;

                } else if (streq(l, "in-audit")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
                        else
                                u->in_audit = r;

                        continue;

                } else if (streq(l, "exported-invocation-id")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
                        else
                                u->exported_invocation_id = r;

                        continue;

                } else if (streq(l, "exported-log-level-max")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
                        else
                                u->exported_log_level_max = r;

                        continue;

                } else if (streq(l, "exported-log-extra-fields")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
                        else
                                u->exported_log_extra_fields = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-interval")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_interval = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-burst")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_burst = r;

                        continue;

                /* "cpuacct-usage-base" is the legacy name of "cpu-usage-base", still accepted on input. */
                } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {

                        r = safe_atou64(v, &u->cpu_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cpu-usage-last")) {

                        r = safe_atou64(v, &u->cpu_usage_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "oom-kill-last")) {

                        r = safe_atou64(v, &u->oom_kill_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cgroup")) {

                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);

                        /* Re-establish inotify watches on the reattached cgroup. */
                        (void) unit_watch_cgroup(u);
                        (void) unit_watch_cgroup_memory(u);

                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;

                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;

                        continue;

                } else if (streq(l, "cgroup-realized-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_realized_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-enabled-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-invalidated-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "ref-uid")) {
                        uid_t uid;

                        r = parse_uid(v, &uid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, uid, GID_INVALID);

                        continue;

                } else if (streq(l, "ref-gid")) {
                        gid_t gid;

                        r = parse_gid(v, &gid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, UID_INVALID, gid);

                        continue;

                } else if (streq(l, "ref")) {

                        /* Bus peer names are collected here and re-registered later in unit_coldplug(). */
                        r = strv_extend(&u->deserialized_refs, v);
                        if (r < 0)
                                return log_oom();

                        continue;
                } else if (streq(l, "invocation-id")) {
                        sd_id128_t id;

                        r = sd_id128_from_string(v, &id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
                        else {
                                r = unit_set_invocation_id(u, id);
                                if (r < 0)
                                        log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
                        }

                        continue;
                }

                /* Check if this is an IP accounting metric serialization field */
                m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
                if (m >= 0) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
                        else
                                u->ip_accounting_extra[m] = c;
                        continue;
                }

                m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
                if (m >= 0) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
                        else
                                u->io_accounting_base[m] = c;
                        continue;
                }

                m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
                if (m >= 0) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
                        else
                                u->io_accounting_last[m] = c;
                        continue;
                }

                /* Anything else is either a compat exec-runtime key or type-specific state. */
                if (unit_can_serialize(u)) {
                        r = exec_runtime_deserialize_compat(u, l, v, fds);
                        if (r < 0) {
                                log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                continue;
                        }

                        /* Returns positive if key was handled by the call */
                        if (r > 0)
                                continue;

                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }

        /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
         * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
         * before 228 where the base for timeouts was not persistent across reboots. */

        if (!dual_timestamp_is_set(&u->state_change_timestamp))
                dual_timestamp_get(&u->state_change_timestamp);

        /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
         * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
        unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
        unit_invalidate_cgroup_bpf(u);

        return 0;
}
3736
3737 int unit_deserialize_skip(FILE *f) {
3738 int r;
3739 assert(f);
3740
3741 /* Skip serialized data for this unit. We don't know what it is. */
3742
3743 for (;;) {
3744 _cleanup_free_ char *line = NULL;
3745 char *l;
3746
3747 r = read_line(f, LONG_LINE_MAX, &line);
3748 if (r < 0)
3749 return log_error_errno(r, "Failed to read serialization line: %m");
3750 if (r == 0)
3751 return 0;
3752
3753 l = strstrip(line);
3754
3755 /* End marker */
3756 if (isempty(l))
3757 return 1;
3758 }
3759 }
3760
3761 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3762 Unit *device;
3763 _cleanup_free_ char *e = NULL;
3764 int r;
3765
3766 assert(u);
3767
3768 /* Adds in links to the device node that this unit is based on */
3769 if (isempty(what))
3770 return 0;
3771
3772 if (!is_device_path(what))
3773 return 0;
3774
3775 /* When device units aren't supported (such as in a
3776 * container), don't create dependencies on them. */
3777 if (!unit_type_supported(UNIT_DEVICE))
3778 return 0;
3779
3780 r = unit_name_from_path(what, ".device", &e);
3781 if (r < 0)
3782 return r;
3783
3784 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3785 if (r < 0)
3786 return r;
3787
3788 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3789 dep = UNIT_BINDS_TO;
3790
3791 r = unit_add_two_dependencies(u, UNIT_AFTER,
3792 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3793 device, true, mask);
3794 if (r < 0)
3795 return r;
3796
3797 if (wants) {
3798 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3799 if (r < 0)
3800 return r;
3801 }
3802
3803 return 0;
3804 }
3805
3806 int unit_coldplug(Unit *u) {
3807 int r = 0, q;
3808 char **i;
3809
3810 assert(u);
3811
3812 /* Make sure we don't enter a loop, when coldplugging recursively. */
3813 if (u->coldplugged)
3814 return 0;
3815
3816 u->coldplugged = true;
3817
3818 STRV_FOREACH(i, u->deserialized_refs) {
3819 q = bus_unit_track_add_name(u, *i);
3820 if (q < 0 && r >= 0)
3821 r = q;
3822 }
3823 u->deserialized_refs = strv_free(u->deserialized_refs);
3824
3825 if (UNIT_VTABLE(u)->coldplug) {
3826 q = UNIT_VTABLE(u)->coldplug(u);
3827 if (q < 0 && r >= 0)
3828 r = q;
3829 }
3830
3831 if (u->job) {
3832 q = job_coldplug(u->job);
3833 if (q < 0 && r >= 0)
3834 r = q;
3835 }
3836
3837 return r;
3838 }
3839
3840 void unit_catchup(Unit *u) {
3841 assert(u);
3842
3843 if (UNIT_VTABLE(u)->catchup)
3844 UNIT_VTABLE(u)->catchup(u);
3845 }
3846
3847 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3848 struct stat st;
3849
3850 if (!path)
3851 return false;
3852
3853 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3854 * are never out-of-date. */
3855 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3856 return false;
3857
3858 if (stat(path, &st) < 0)
3859 /* What, cannot access this anymore? */
3860 return true;
3861
3862 if (path_masked)
3863 /* For masked files check if they are still so */
3864 return !null_or_empty(&st);
3865 else
3866 /* For non-empty files check the mtime */
3867 return timespec_load(&st.st_mtim) > mtime;
3868
3869 return false;
3870 }
3871
3872 bool unit_need_daemon_reload(Unit *u) {
3873 _cleanup_strv_free_ char **t = NULL;
3874 char **path;
3875
3876 assert(u);
3877
3878 /* For unit files, we allow masking… */
3879 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3880 u->load_state == UNIT_MASKED))
3881 return true;
3882
3883 /* Source paths should not be masked… */
3884 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3885 return true;
3886
3887 if (u->load_state == UNIT_LOADED)
3888 (void) unit_find_dropin_paths(u, &t);
3889 if (!strv_equal(u->dropin_paths, t))
3890 return true;
3891
3892 /* … any drop-ins that are masked are simply omitted from the list. */
3893 STRV_FOREACH(path, u->dropin_paths)
3894 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3895 return true;
3896
3897 return false;
3898 }
3899
3900 void unit_reset_failed(Unit *u) {
3901 assert(u);
3902
3903 if (UNIT_VTABLE(u)->reset_failed)
3904 UNIT_VTABLE(u)->reset_failed(u);
3905
3906 RATELIMIT_RESET(u->start_limit);
3907 u->start_limit_hit = false;
3908 }
3909
3910 Unit *unit_following(Unit *u) {
3911 assert(u);
3912
3913 if (UNIT_VTABLE(u)->following)
3914 return UNIT_VTABLE(u)->following(u);
3915
3916 return NULL;
3917 }
3918
3919 bool unit_stop_pending(Unit *u) {
3920 assert(u);
3921
3922 /* This call does check the current state of the unit. It's
3923 * hence useful to be called from state change calls of the
3924 * unit itself, where the state isn't updated yet. This is
3925 * different from unit_inactive_or_pending() which checks both
3926 * the current state and for a queued job. */
3927
3928 return u->job && u->job->type == JOB_STOP;
3929 }
3930
3931 bool unit_inactive_or_pending(Unit *u) {
3932 assert(u);
3933
3934 /* Returns true if the unit is inactive or going down */
3935
3936 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3937 return true;
3938
3939 if (unit_stop_pending(u))
3940 return true;
3941
3942 return false;
3943 }
3944
3945 bool unit_active_or_pending(Unit *u) {
3946 assert(u);
3947
3948 /* Returns true if the unit is active or going up */
3949
3950 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3951 return true;
3952
3953 if (u->job &&
3954 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3955 return true;
3956
3957 return false;
3958 }
3959
3960 bool unit_will_restart(Unit *u) {
3961 assert(u);
3962
3963 if (!UNIT_VTABLE(u)->will_restart)
3964 return false;
3965
3966 return UNIT_VTABLE(u)->will_restart(u);
3967 }
3968
3969 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3970 assert(u);
3971 assert(w >= 0 && w < _KILL_WHO_MAX);
3972 assert(SIGNAL_VALID(signo));
3973
3974 if (!UNIT_VTABLE(u)->kill)
3975 return -EOPNOTSUPP;
3976
3977 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3978 }
3979
3980 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3981 _cleanup_set_free_ Set *pid_set = NULL;
3982 int r;
3983
3984 pid_set = set_new(NULL);
3985 if (!pid_set)
3986 return NULL;
3987
3988 /* Exclude the main/control pids from being killed via the cgroup */
3989 if (main_pid > 0) {
3990 r = set_put(pid_set, PID_TO_PTR(main_pid));
3991 if (r < 0)
3992 return NULL;
3993 }
3994
3995 if (control_pid > 0) {
3996 r = set_put(pid_set, PID_TO_PTR(control_pid));
3997 if (r < 0)
3998 return NULL;
3999 }
4000
4001 return TAKE_PTR(pid_set);
4002 }
4003
4004 int unit_kill_common(
4005 Unit *u,
4006 KillWho who,
4007 int signo,
4008 pid_t main_pid,
4009 pid_t control_pid,
4010 sd_bus_error *error) {
4011
4012 int r = 0;
4013 bool killed = false;
4014
4015 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4016 if (main_pid < 0)
4017 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4018 else if (main_pid == 0)
4019 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4020 }
4021
4022 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4023 if (control_pid < 0)
4024 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4025 else if (control_pid == 0)
4026 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4027 }
4028
4029 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4030 if (control_pid > 0) {
4031 if (kill(control_pid, signo) < 0)
4032 r = -errno;
4033 else
4034 killed = true;
4035 }
4036
4037 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4038 if (main_pid > 0) {
4039 if (kill(main_pid, signo) < 0)
4040 r = -errno;
4041 else
4042 killed = true;
4043 }
4044
4045 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4046 _cleanup_set_free_ Set *pid_set = NULL;
4047 int q;
4048
4049 /* Exclude the main/control pids from being killed via the cgroup */
4050 pid_set = unit_pid_set(main_pid, control_pid);
4051 if (!pid_set)
4052 return -ENOMEM;
4053
4054 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4055 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4056 r = q;
4057 else
4058 killed = true;
4059 }
4060
4061 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4062 return -ESRCH;
4063
4064 return r;
4065 }
4066
4067 int unit_following_set(Unit *u, Set **s) {
4068 assert(u);
4069 assert(s);
4070
4071 if (UNIT_VTABLE(u)->following_set)
4072 return UNIT_VTABLE(u)->following_set(u, s);
4073
4074 *s = NULL;
4075 return 0;
4076 }
4077
4078 UnitFileState unit_get_unit_file_state(Unit *u) {
4079 int r;
4080
4081 assert(u);
4082
4083 if (u->unit_file_state < 0 && u->fragment_path) {
4084 r = unit_file_get_state(
4085 u->manager->unit_file_scope,
4086 NULL,
4087 u->id,
4088 &u->unit_file_state);
4089 if (r < 0)
4090 u->unit_file_state = UNIT_FILE_BAD;
4091 }
4092
4093 return u->unit_file_state;
4094 }
4095
4096 int unit_get_unit_file_preset(Unit *u) {
4097 assert(u);
4098
4099 if (u->unit_file_preset < 0 && u->fragment_path)
4100 u->unit_file_preset = unit_file_query_preset(
4101 u->manager->unit_file_scope,
4102 NULL,
4103 basename(u->fragment_path));
4104
4105 return u->unit_file_preset;
4106 }
4107
4108 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4109 assert(ref);
4110 assert(source);
4111 assert(target);
4112
4113 if (ref->target)
4114 unit_ref_unset(ref);
4115
4116 ref->source = source;
4117 ref->target = target;
4118 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4119 return target;
4120 }
4121
4122 void unit_ref_unset(UnitRef *ref) {
4123 assert(ref);
4124
4125 if (!ref->target)
4126 return;
4127
4128 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4129 * be unreferenced now. */
4130 unit_add_to_gc_queue(ref->target);
4131
4132 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4133 ref->source = ref->target = NULL;
4134 }
4135
4136 static int user_from_unit_name(Unit *u, char **ret) {
4137
4138 static const uint8_t hash_key[] = {
4139 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4140 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4141 };
4142
4143 _cleanup_free_ char *n = NULL;
4144 int r;
4145
4146 r = unit_name_to_prefix(u->id, &n);
4147 if (r < 0)
4148 return r;
4149
4150 if (valid_user_group_name(n)) {
4151 *ret = TAKE_PTR(n);
4152 return 0;
4153 }
4154
4155 /* If we can't use the unit name as a user name, then let's hash it and use that */
4156 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4157 return -ENOMEM;
4158
4159 return 0;
4160 }
4161
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to $HOME. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities that would let the service recreate
                 * device nodes or do raw I/O anyway. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Fill in a user/group name derived from the unit name if none was configured. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* Mirror PrivateDevices= into the cgroup device policy, unless one was set explicitly. */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4259
4260 ExecContext *unit_get_exec_context(Unit *u) {
4261 size_t offset;
4262 assert(u);
4263
4264 if (u->type < 0)
4265 return NULL;
4266
4267 offset = UNIT_VTABLE(u)->exec_context_offset;
4268 if (offset <= 0)
4269 return NULL;
4270
4271 return (ExecContext*) ((uint8_t*) u + offset);
4272 }
4273
4274 KillContext *unit_get_kill_context(Unit *u) {
4275 size_t offset;
4276 assert(u);
4277
4278 if (u->type < 0)
4279 return NULL;
4280
4281 offset = UNIT_VTABLE(u)->kill_context_offset;
4282 if (offset <= 0)
4283 return NULL;
4284
4285 return (KillContext*) ((uint8_t*) u + offset);
4286 }
4287
4288 CGroupContext *unit_get_cgroup_context(Unit *u) {
4289 size_t offset;
4290
4291 if (u->type < 0)
4292 return NULL;
4293
4294 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4295 if (offset <= 0)
4296 return NULL;
4297
4298 return (CGroupContext*) ((uint8_t*) u + offset);
4299 }
4300
4301 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4302 size_t offset;
4303
4304 if (u->type < 0)
4305 return NULL;
4306
4307 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4308 if (offset <= 0)
4309 return NULL;
4310
4311 return *(ExecRuntime**) ((uint8_t*) u + offset);
4312 }
4313
4314 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4315 assert(u);
4316
4317 if (UNIT_WRITE_FLAGS_NOOP(flags))
4318 return NULL;
4319
4320 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4321 return u->manager->lookup_paths.transient;
4322
4323 if (flags & UNIT_PERSISTENT)
4324 return u->manager->lookup_paths.persistent_control;
4325
4326 if (flags & UNIT_RUNTIME)
4327 return u->manager->lookup_paths.runtime_control;
4328
4329 return NULL;
4330 }
4331
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* May run after specifier escaping; the intermediate result in 'ret' is freed once it has
                 * been C-escaped into 'a'. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No out-buffer: always hand back an allocation, duplicating the input if nothing was escaped. */
        return ret ?: strdup(s);
}
4371
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas. Each entry is escaped per 'flags', wrapped in double quotes, and
         * entries are separated by single spaces. Returns NULL on allocation (or escaping) failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1)) /* + 1 for the trailing NUL byte */
                        return NULL;

                /* Append at the current end; 'n' always tracks the length written so far. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure there's room for the terminating NUL even if the list was empty. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4412
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists one unit setting, either by appending it to the transient unit file currently being
         * generated, or by writing a drop-in file named after 'name'. 'data' is escaped according to
         * 'flags' before being written. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* u->last_section_private: < 0 → nothing written yet, 0 → last wrote [Unit],
                 * > 0 → last wrote the private section. A leading newline separates a new section from the
                 * previous one. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p is the drop-in directory for this unit, q the full path of the drop-in file. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in so unit_need_daemon_reload() doesn't immediately flag us as out-of-date. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4489
4490 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4491 _cleanup_free_ char *p = NULL;
4492 va_list ap;
4493 int r;
4494
4495 assert(u);
4496 assert(name);
4497 assert(format);
4498
4499 if (UNIT_WRITE_FLAGS_NOOP(flags))
4500 return 0;
4501
4502 va_start(ap, format);
4503 r = vasprintf(&p, format, ap);
4504 va_end(ap);
4505
4506 if (r < 0)
4507 return -ENOMEM;
4508
4509 return unit_write_setting(u, flags, name, p);
4510 }
4511
/* Converts the unit into a transient one: opens a fresh unit file in the transient lookup path,
 * resets all on-disk configuration sources and the load state, and marks the unit transient.
 * The opened FILE stays in u->transient_file until unit_load() closes it. Returns 0 on success,
 * -EOPNOTSUPP if the unit type can't be transient, or a negative errno. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previously open transient file */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* The new file becomes the fragment; 'path' ownership moves into the unit */
        free_and_replace(u->fragment_path, path);

        /* Drop all other configuration sources so only the transient file counts */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4557
4558 static int log_kill(pid_t pid, int sig, void *userdata) {
4559 _cleanup_free_ char *comm = NULL;
4560
4561 (void) get_process_comm(pid, &comm);
4562
4563 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4564 only, like for example systemd's own PAM stub process. */
4565 if (comm && comm[0] == '(')
4566 return 0;
4567
4568 log_unit_notice(userdata,
4569 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4570 pid,
4571 strna(comm),
4572 signal_to_string(sig));
4573
4574 return 1;
4575 }
4576
4577 static int operation_to_signal(KillContext *c, KillOperation k) {
4578 assert(c);
4579
4580 switch (k) {
4581
4582 case KILL_TERMINATE:
4583 case KILL_TERMINATE_AND_LOG:
4584 return c->kill_signal;
4585
4586 case KILL_KILL:
4587 return c->final_kill_signal;
4588
4589 case KILL_WATCHDOG:
4590 return c->watchdog_signal;
4591
4592 default:
4593 assert_not_reached("KillOperation unknown");
4594 }
4595 }
4596
/* Kills the processes belonging to this unit according to the configured KillMode= and the
 * requested operation: signals the main and control PIDs directly, optionally follows up with
 * SIGHUP, and sweeps the rest of the cgroup when the kill mode calls for it.
 * Returns > 0 if something worth waiting for was killed, 0 otherwise, negative errno on OOM. */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SIGHUP follow-up only applies to the terminate operations, and is pointless if the main
         * signal already is SIGHUP */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process, except for plain SIGTERM-style termination */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked ourselves, not adopted ones */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Sweep the whole cgroup for control-group mode, or for mixed mode on the final kill */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set, the kill above consumed/modified it */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4714
/* Registers 'path' (and implicitly all of its prefixes) as a mount requirement of the unit, so
 * that mount units appearing later can make themselves dependencies of it. 'mask' records how the
 * dependency came to be. Returns 0 on success (including if already registered), -EINVAL for
 * relative paths, -EPERM for non-normalized paths, or a negative errno on allocation failure. */
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private, simplified copy of the path */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, true);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of the path string, hence drop our reference */
        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL;

        /* Also register the unit in the manager's prefix table, once per path prefix */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        /* The hashmap takes ownership of both key (q) and value (x) on success */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4791
4792 int unit_setup_exec_runtime(Unit *u) {
4793 ExecRuntime **rt;
4794 size_t offset;
4795 Unit *other;
4796 Iterator i;
4797 void *v;
4798 int r;
4799
4800 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4801 assert(offset > 0);
4802
4803 /* Check if there already is an ExecRuntime for this unit? */
4804 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4805 if (*rt)
4806 return 0;
4807
4808 /* Try to get it from somebody else */
4809 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4810 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4811 if (r == 1)
4812 return 1;
4813 }
4814
4815 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4816 }
4817
4818 int unit_setup_dynamic_creds(Unit *u) {
4819 ExecContext *ec;
4820 DynamicCreds *dcreds;
4821 size_t offset;
4822
4823 assert(u);
4824
4825 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4826 assert(offset > 0);
4827 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4828
4829 ec = unit_get_exec_context(u);
4830 assert(ec);
4831
4832 if (!ec->dynamic_user)
4833 return 0;
4834
4835 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4836 }
4837
4838 bool unit_type_supported(UnitType t) {
4839 if (_unlikely_(t < 0))
4840 return false;
4841 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4842 return false;
4843
4844 if (!unit_vtable[t]->supported)
4845 return true;
4846
4847 return unit_vtable[t]->supported();
4848 }
4849
4850 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4851 int r;
4852
4853 assert(u);
4854 assert(where);
4855
4856 r = dir_is_empty(where);
4857 if (r > 0 || r == -ENOTDIR)
4858 return;
4859 if (r < 0) {
4860 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4861 return;
4862 }
4863
4864 log_struct(LOG_NOTICE,
4865 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4866 LOG_UNIT_ID(u),
4867 LOG_UNIT_INVOCATION_ID(u),
4868 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4869 "WHERE=%s", where);
4870 }
4871
4872 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4873 _cleanup_free_ char *canonical_where = NULL;
4874 int r;
4875
4876 assert(u);
4877 assert(where);
4878
4879 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4880 if (r < 0) {
4881 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4882 return 0;
4883 }
4884
4885 /* We will happily ignore a trailing slash (or any redundant slashes) */
4886 if (path_equal(where, canonical_where))
4887 return 0;
4888
4889 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4890 log_struct(LOG_ERR,
4891 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4892 LOG_UNIT_ID(u),
4893 LOG_UNIT_INVOCATION_ID(u),
4894 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4895 "WHERE=%s", where);
4896
4897 return -ELOOP;
4898 }
4899
4900 bool unit_is_pristine(Unit *u) {
4901 assert(u);
4902
4903 /* Check if the unit already exists or is already around,
4904 * in a number of different ways. Note that to cater for unit
4905 * types such as slice, we are generally fine with units that
4906 * are marked UNIT_LOADED even though nothing was actually
4907 * loaded, as those unit types don't require a file on disk. */
4908
4909 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4910 u->fragment_path ||
4911 u->source_path ||
4912 !strv_isempty(u->dropin_paths) ||
4913 u->job ||
4914 u->merged_into);
4915 }
4916
4917 pid_t unit_control_pid(Unit *u) {
4918 assert(u);
4919
4920 if (UNIT_VTABLE(u)->control_pid)
4921 return UNIT_VTABLE(u)->control_pid(u);
4922
4923 return 0;
4924 }
4925
4926 pid_t unit_main_pid(Unit *u) {
4927 assert(u);
4928
4929 if (UNIT_VTABLE(u)->main_pid)
4930 return UNIT_VTABLE(u)->main_pid(u);
4931
4932 return 0;
4933 }
4934
/* Shared implementation behind unit_unref_uid() and unit_unref_gid(): releases the manager-side
 * reference held in *ref_uid (if valid) and invalidates it. 'destroy_now' is forwarded to the
 * manager so it may clean up the UID/GID's IPC objects immediately. */
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same time, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time guarantee that treating a gid_t as a uid_t here is sound */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4959
/* Drops the unit's reference on its tracked UID, if any. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4963
/* Drops the unit's reference on its tracked GID, if any. The uid_t cast is safe, see the
 * assert_cc() checks in unit_unref_uid_internal(). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4967
/* Shared implementation behind unit_ref_uid() and unit_ref_gid(): records 'uid' in *ref_uid and
 * takes a manager-side reference on it. Returns 0 if the same UID was already referenced,
 * -EBUSY if a different one is, 1 when a new reference was taken, negative errno on failure. */
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        /* Compile-time guarantee that treating a gid_t as a uid_t here is sound */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
5005
/* Takes a reference on 'uid' for this unit; see unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
5009
/* Takes a reference on 'gid' for this unit; the uid_t casts are safe, see the assert_cc()
 * checks in unit_ref_uid_internal(). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
5013
/* References a UID and a GID atomically: either both references are taken or neither (the UID
 * reference is rolled back if the GID reference fails). Invalid IDs are skipped silently.
 * Returns > 0 if at least one new reference was taken, 0 if nothing changed, negative errno on
 * failure. */
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, to keep this all-or-nothing */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
5039
5040 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5041 ExecContext *c;
5042 int r;
5043
5044 assert(u);
5045
5046 c = unit_get_exec_context(u);
5047
5048 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5049 if (r < 0)
5050 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5051
5052 return r;
5053 }
5054
/* Drops both the UID and GID references of the unit, if any are held. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
5061
5062 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5063 int r;
5064
5065 assert(u);
5066
5067 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5068 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5069 * objects when no service references the UID/GID anymore. */
5070
5071 r = unit_ref_uid_gid(u, uid, gid);
5072 if (r > 0)
5073 unit_add_to_dbus_queue(u);
5074 }
5075
/* Sets the unit's invocation ID and (re)registers it in the manager's by-invocation-ID table.
 * Passing a null ID clears the invocation ID. On any failure the invocation ID is reset rather
 * than rolled back to the previous value. Returns 0 on success, negative errno on failure. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Deregister the old ID from the manager's lookup table first */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Either explicit clearing was requested, or registering failed: wipe the ID */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
5112
5113 int unit_acquire_invocation_id(Unit *u) {
5114 sd_id128_t id;
5115 int r;
5116
5117 assert(u);
5118
5119 r = sd_id128_randomize(&id);
5120 if (r < 0)
5121 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5122
5123 r = unit_set_invocation_id(u, id);
5124 if (r < 0)
5125 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5126
5127 unit_add_to_dbus_queue(u);
5128 return 0;
5129 }
5130
5131 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5132 int r;
5133
5134 assert(u);
5135 assert(p);
5136
5137 /* Copy parameters from manager */
5138 r = manager_get_effective_environment(u->manager, &p->environment);
5139 if (r < 0)
5140 return r;
5141
5142 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5143 p->cgroup_supported = u->manager->cgroup_supported;
5144 p->prefix = u->manager->prefix;
5145 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5146
5147 /* Copy parameters from unit */
5148 p->cgroup_path = u->cgroup_path;
5149 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5150
5151 return 0;
5152 }
5153
/* Forks off a helper process and moves it into the unit's cgroup. Returns 0 in the child and
 * > 0 in the parent; *ret always receives the child's PID. The child resets signal handling,
 * asks to be killed when the manager dies, and exits with EXIT_CGROUP if it cannot join the
 * cgroup. */
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* Everything below runs in the child only */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Die when the manager process goes away */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5184
5185 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5186 assert(u);
5187 assert(d >= 0);
5188 assert(d < _UNIT_DEPENDENCY_MAX);
5189 assert(other);
5190
5191 if (di.origin_mask == 0 && di.destination_mask == 0) {
5192 /* No bit set anymore, let's drop the whole entry */
5193 assert_se(hashmap_remove(u->dependencies[d], other));
5194 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5195 } else
5196 /* Mask was reduced, let's update the entry */
5197 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5198 }
5199
/* Removes from this unit all dependencies whose origin is covered by 'mask', along with the
 * matching reverse edges on the other units. Because removing entries invalidates the hashmap
 * iterator, each removal restarts the scan of the current dependency type until a full pass
 * makes no change. */
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries not touched by the mask at all */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The hashmap was modified, the iterator is stale: restart the scan */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5253
5254 static int unit_export_invocation_id(Unit *u) {
5255 const char *p;
5256 int r;
5257
5258 assert(u);
5259
5260 if (u->exported_invocation_id)
5261 return 0;
5262
5263 if (sd_id128_is_null(u->invocation_id))
5264 return 0;
5265
5266 p = strjoina("/run/systemd/units/invocation:", u->id);
5267 r = symlink_atomic(u->invocation_id_string, p);
5268 if (r < 0)
5269 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5270
5271 u->exported_invocation_id = true;
5272 return 0;
5273 }
5274
5275 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5276 const char *p;
5277 char buf[2];
5278 int r;
5279
5280 assert(u);
5281 assert(c);
5282
5283 if (u->exported_log_level_max)
5284 return 0;
5285
5286 if (c->log_level_max < 0)
5287 return 0;
5288
5289 assert(c->log_level_max <= 7);
5290
5291 buf[0] = '0' + c->log_level_max;
5292 buf[1] = 0;
5293
5294 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5295 r = symlink_atomic(buf, p);
5296 if (r < 0)
5297 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5298
5299 u->exported_log_level_max = true;
5300 return 0;
5301 }
5302
5303 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5304 _cleanup_close_ int fd = -1;
5305 struct iovec *iovec;
5306 const char *p;
5307 char *pattern;
5308 le64_t *sizes;
5309 ssize_t n;
5310 size_t i;
5311 int r;
5312
5313 if (u->exported_log_extra_fields)
5314 return 0;
5315
5316 if (c->n_log_extra_fields <= 0)
5317 return 0;
5318
5319 sizes = newa(le64_t, c->n_log_extra_fields);
5320 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5321
5322 for (i = 0; i < c->n_log_extra_fields; i++) {
5323 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5324
5325 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5326 iovec[i*2+1] = c->log_extra_fields[i];
5327 }
5328
5329 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5330 pattern = strjoina(p, ".XXXXXX");
5331
5332 fd = mkostemp_safe(pattern);
5333 if (fd < 0)
5334 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5335
5336 n = writev(fd, iovec, c->n_log_extra_fields*2);
5337 if (n < 0) {
5338 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5339 goto fail;
5340 }
5341
5342 (void) fchmod(fd, 0644);
5343
5344 if (rename(pattern, p) < 0) {
5345 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5346 goto fail;
5347 }
5348
5349 u->exported_log_extra_fields = true;
5350 return 0;
5351
5352 fail:
5353 (void) unlink(pattern);
5354 return r;
5355 }
5356
5357 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5358 _cleanup_free_ char *buf = NULL;
5359 const char *p;
5360 int r;
5361
5362 assert(u);
5363 assert(c);
5364
5365 if (u->exported_log_rate_limit_interval)
5366 return 0;
5367
5368 if (c->log_rate_limit_interval_usec == 0)
5369 return 0;
5370
5371 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5372
5373 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5374 return log_oom();
5375
5376 r = symlink_atomic(buf, p);
5377 if (r < 0)
5378 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5379
5380 u->exported_log_rate_limit_interval = true;
5381 return 0;
5382 }
5383
5384 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5385 _cleanup_free_ char *buf = NULL;
5386 const char *p;
5387 int r;
5388
5389 assert(u);
5390 assert(c);
5391
5392 if (u->exported_log_rate_limit_burst)
5393 return 0;
5394
5395 if (c->log_rate_limit_burst == 0)
5396 return 0;
5397
5398 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5399
5400 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5401 return log_oom();
5402
5403 r = symlink_atomic(buf, p);
5404 if (r < 0)
5405 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5406
5407 u->exported_log_rate_limit_burst = true;
5408 return 0;
5409 }
5410
5411 void unit_export_state_files(Unit *u) {
5412 const ExecContext *c;
5413
5414 assert(u);
5415
5416 if (!u->id)
5417 return;
5418
5419 if (!MANAGER_IS_SYSTEM(u->manager))
5420 return;
5421
5422 if (MANAGER_IS_TEST_RUN(u->manager))
5423 return;
5424
5425 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5426 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5427 * the IPC system itself and PID 1 also log to the journal.
5428 *
5429 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5430 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5431 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5432 * namespace at least.
5433 *
5434 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5435 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5436 * them with one. */
5437
5438 (void) unit_export_invocation_id(u);
5439
5440 c = unit_get_exec_context(u);
5441 if (c) {
5442 (void) unit_export_log_level_max(u, c);
5443 (void) unit_export_log_extra_fields(u, c);
5444 (void) unit_export_log_rate_limit_interval(u, c);
5445 (void) unit_export_log_rate_limit_burst(u, c);
5446 }
5447 }
5448
5449 void unit_unlink_state_files(Unit *u) {
5450 const char *p;
5451
5452 assert(u);
5453
5454 if (!u->id)
5455 return;
5456
5457 if (!MANAGER_IS_SYSTEM(u->manager))
5458 return;
5459
5460 /* Undoes the effect of unit_export_state() */
5461
5462 if (u->exported_invocation_id) {
5463 p = strjoina("/run/systemd/units/invocation:", u->id);
5464 (void) unlink(p);
5465
5466 u->exported_invocation_id = false;
5467 }
5468
5469 if (u->exported_log_level_max) {
5470 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5471 (void) unlink(p);
5472
5473 u->exported_log_level_max = false;
5474 }
5475
5476 if (u->exported_log_extra_fields) {
5477 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5478 (void) unlink(p);
5479
5480 u->exported_log_extra_fields = false;
5481 }
5482
5483 if (u->exported_log_rate_limit_interval) {
5484 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5485 (void) unlink(p);
5486
5487 u->exported_log_rate_limit_interval = false;
5488 }
5489
5490 if (u->exported_log_rate_limit_burst) {
5491 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5492 (void) unlink(p);
5493
5494 u->exported_log_rate_limit_burst = false;
5495 }
5496 }
5497
5498 int unit_prepare_exec(Unit *u) {
5499 int r;
5500
5501 assert(u);
5502
5503 /* Prepares everything so that we can fork of a process for this unit */
5504
5505 (void) unit_realize_cgroup(u);
5506
5507 if (u->reset_accounting) {
5508 (void) unit_reset_accounting(u);
5509 u->reset_accounting = false;
5510 }
5511
5512 unit_export_state_files(u);
5513
5514 r = unit_setup_exec_runtime(u);
5515 if (r < 0)
5516 return r;
5517
5518 r = unit_setup_dynamic_creds(u);
5519 if (r < 0)
5520 return r;
5521
5522 return 0;
5523 }
5524
5525 static int log_leftover(pid_t pid, int sig, void *userdata) {
5526 _cleanup_free_ char *comm = NULL;
5527
5528 (void) get_process_comm(pid, &comm);
5529
5530 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5531 return 0;
5532
5533 log_unit_warning(userdata,
5534 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5535 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5536 pid, strna(comm));
5537
5538 return 1;
5539 }
5540
5541 int unit_warn_leftover_processes(Unit *u) {
5542 assert(u);
5543
5544 (void) unit_pick_cgroup_path(u);
5545
5546 if (!u->cgroup_path)
5547 return 0;
5548
5549 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5550 }
5551
5552 bool unit_needs_console(Unit *u) {
5553 ExecContext *ec;
5554 UnitActiveState state;
5555
5556 assert(u);
5557
5558 state = unit_active_state(u);
5559
5560 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5561 return false;
5562
5563 if (UNIT_VTABLE(u)->needs_console)
5564 return UNIT_VTABLE(u)->needs_console(u);
5565
5566 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5567 ec = unit_get_exec_context(u);
5568 if (!ec)
5569 return false;
5570
5571 return exec_context_may_touch_console(ec);
5572 }
5573
5574 const char *unit_label_path(Unit *u) {
5575 const char *p;
5576
5577 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5578 * when validating access checks. */
5579
5580 p = u->source_path ?: u->fragment_path;
5581 if (!p)
5582 return NULL;
5583
5584 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5585 if (path_equal(p, "/dev/null"))
5586 return NULL;
5587
5588 return p;
5589 }
5590
5591 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5592 int r;
5593
5594 assert(u);
5595
5596 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5597 * and not a kernel thread either */
5598
5599 /* First, a simple range check */
5600 if (!pid_is_valid(pid))
5601 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5602
5603 /* Some extra safety check */
5604 if (pid == 1 || pid == getpid_cached())
5605 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5606
5607 /* Don't even begin to bother with kernel threads */
5608 r = is_kernel_thread(pid);
5609 if (r == -ESRCH)
5610 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5611 if (r < 0)
5612 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5613 if (r > 0)
5614 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5615
5616 return 0;
5617 }
5618
/* Emits the standard structured "Succeeded." log entry for a unit, tagged with the
 * unit-success message ID so journal consumers can match it. */
void unit_log_success(Unit *u) {
        assert(u);

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Succeeded."));
}
5628
/* Emits the standard structured failure log entry for a unit.
 *
 * 'result' is the unit's result string (e.g. as produced by the unit type's result
 * table) and is attached both to the human-readable message and as the UNIT_RESULT=
 * journal field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_struct(LOG_WARNING,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                   "UNIT_RESULT=%s", result);
}
5640
/* Logs the exit of one of the unit's processes in structured form.
 *
 * 'kind' names the process role (e.g. "Main process"; exact values are chosen by
 * callers), 'command' is the command line or command name (may be NULL), 'code' is
 * the SIGCHLD code (CLD_EXITED, CLD_KILLED, ...) and 'status' is the exit status or
 * signal number, depending on 'code'. */
void unit_log_process_exit(
                Unit *u,
                int level,
                const char *kind,
                const char *command,
                int code,
                int status) {

        assert(u);
        assert(kind);

        /* Abnormal termination (signal, coredump, ...) is always worth at least a
         * warning, regardless of the level the caller requested. */
        if (code != CLD_EXITED)
                level = LOG_WARNING;

        log_struct(level,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                   LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
                                    kind,
                                    sigchld_code_to_string(code), status,
                                    /* For clean exits render the exit status name, otherwise the signal name */
                                    strna(code == CLD_EXITED
                                          ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                          : signal_to_string(status))),
                   "EXIT_CODE=%s", sigchld_code_to_string(code),
                   "EXIT_STATUS=%i", status,
                   "COMMAND=%s", strna(command),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u));
}
5669
5670 int unit_exit_status(Unit *u) {
5671 assert(u);
5672
5673 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5674 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5675 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5676 * service process has exited abnormally (signal/coredump). */
5677
5678 if (!UNIT_VTABLE(u)->exit_status)
5679 return -EOPNOTSUPP;
5680
5681 return UNIT_VTABLE(u)->exit_status(u);
5682 }
5683
5684 int unit_failure_action_exit_status(Unit *u) {
5685 int r;
5686
5687 assert(u);
5688
5689 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5690
5691 if (u->failure_action_exit_status >= 0)
5692 return u->failure_action_exit_status;
5693
5694 r = unit_exit_status(u);
5695 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5696 return 255;
5697
5698 return r;
5699 }
5700
5701 int unit_success_action_exit_status(Unit *u) {
5702 int r;
5703
5704 assert(u);
5705
5706 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5707
5708 if (u->success_action_exit_status >= 0)
5709 return u->success_action_exit_status;
5710
5711 r = unit_exit_status(u);
5712 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5713 return 255;
5714
5715 return r;
5716 }
5717
5718 int unit_test_trigger_loaded(Unit *u) {
5719 Unit *trigger;
5720
5721 /* Tests whether the unit to trigger is loaded */
5722
5723 trigger = UNIT_TRIGGER(u);
5724 if (!trigger)
5725 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit to trigger not loaded.");
5726 if (trigger->load_state != UNIT_LOADED)
5727 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT), "Refusing to start, unit %s to trigger not loaded.", u->id);
5728
5729 return 0;
5730 }
5731
/* String names for the CollectMode enum — presumably the values accepted by the
 * CollectMode= unit setting; verify against the parser that consumes this table. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);