/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bpf-foreign.h"
#include "bpf-socket-bind.h"
#include "bus-common-errors.h"
#include "bus-internal.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "chase.h"
#include "core-varlink.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "env-util.h"
#include "escape.h"
#include "exec-credential.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "id128-util.h"
#include "install.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "logarithm.h"
#include "macro.h"
#include "mkdir-label.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"
#if BPF_FRAMEWORK
#include "bpf-link.h"
#endif

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC     (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES     (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES     UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC      (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES  (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES      (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES      (128 * U64_MB)

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

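/* Allocates a new Unit object of the given size (at least sizeof(Unit)) and fills in conservative
 * defaults: invalid/unset ids, uids and fds, infinite timeouts, and the manager's default start rate
 * limit. Returns NULL on OOM. */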
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;

        unit_reset_memory_accounting_last(u);

        unit_reset_io_accounting_last(u);

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -EBADF;
        u->ip_accounting_egress_map_fd = -EBADF;

        u->ipv4_allow_map_fd = -EBADF;
        u->ipv6_allow_map_fd = -EBADF;
        u->ipv4_deny_map_fd = -EBADF;
        u->ipv6_deny_map_fd = -EBADF;

        u->last_section_private = -1;

        u->start_ratelimit = (const RateLimit) {
                m->defaults.start_limit_interval,
                m->defaults.start_limit_burst,
        };

        u->auto_start_stop_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_SEC, .burst = 16 };

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup context, _before_ the rest of the
                 * settings have been initialized */

                cc->cpu_accounting = u->manager->defaults.cpu_accounting;
                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->blockio_accounting = u->manager->defaults.blockio_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* The user manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}

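/* Registers an additional name for the unit: resolves template names against the unit's instance,
 * validates the result, and then either records it as an alias or, if the unit has no id yet, makes
 * it the unit's primary id and initializes the unit. */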
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exists when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to add unit to hashmap for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}

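/* Promotes one of the unit's existing aliases to be its primary id, swapping the old id into the
 * alias set. Template names are resolved against the unit's instance first. */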
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}

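/* Releases runtime resources (such as runtime directories preserved only across restarts) of a unit
 * that is inactive or failed, has no pending jobs, and is not about to restart. */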
void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* If we saw a cgroup empty event for this unit, stay around until we have processed it, so that we
         * remove the empty cgroup if possible. Similarly, process any pending OOM events if they are
         * already queued before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0)
                        return false;
        }

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}

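/* The unit_add_to_*_queue() and unit_submit_to_*_queue() helpers below enqueue a unit on one of the
 * manager's work queues. Queue membership is tracked with an in_*_queue boolean on the unit, so
 * enqueuing is idempotent and dequeuing on unit_free() is cheap. */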
void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}

void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}

static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}

void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}

static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}

static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

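/* Drops all mounts-for entries of the unit, and removes the unit from the manager's reverse map of
 * units needing specific mount paths, freeing per-path sets that become empty. */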
static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

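/* Frees a unit and everything hanging off it: pending jobs, queue memberships, dependency links,
 * cgroup and BPF state, and all registered names. Transient unit files are removed unless the
 * manager is reloading. */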
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from the slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}

FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}

int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
        char *values[1] = {};
        int r;

        assert(u);

        r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                   STRV_MAKE("frozen"), values);
        if (r < 0)
                return r;

        r = _FREEZER_STATE_INVALID;

        if (values[0]) {
                if (streq(values[0], "0"))
                        r = FREEZER_RUNNING;
                else if (streq(values[0], "1"))
                        r = FREEZER_FROZEN;
        }

        free(values[0]);
        *ret = r;

        return 0;
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

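/* Transfers all names of 'other' to 'u': other's id becomes an alias of u, other's aliases are moved
 * over, and the manager's units hashmap is updated so that all of these names map to u. */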
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}

static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for it. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}

static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some dependency types */
        return IN_SET(dependency,
                      UNIT_CONFLICTS,
                      UNIT_CONFLICTED_BY,
                      UNIT_BEFORE,
                      UNIT_AFTER,
                      UNIT_ON_SUCCESS,
                      UNIT_ON_FAILURE,
                      UNIT_TRIGGERS,
                      UNIT_TRIGGERED_BY);
}

static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}

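/* Moves all dependencies of 'other' over to 'u': dependencies between the two units themselves are
 * dropped (with a warning for the more interesting dependency types), and third-party units' reverse
 * dependencies on 'other' are rewritten to point to 'u'. */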
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* If the target unit already has dependencies of this type, let's merge them
                         * individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}

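/* Merges 'other' into 'u'. This only works if the units have the same type and instance, 'other' is
 * inactive and has no jobs, and the type supports aliases. On success 'other' is left in the
 * UNIT_MERGED state, pointing at 'u' via merged_into. */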
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't roll back reservations if
         * we fail; we don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {
                r = unit_add_mounts_for(u, "/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        r = unit_add_default_credential_dependencies(u, c);
        if (r < 0)
                return r;

        return 0;
}

const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice)
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(
                                                        u->manager,
                                                        p,
                                                        /* path= */ NULL,
                                                        /* e= */ NULL,
                                                        &m);
                                        continue;
                                }
                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}

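/* Adds an After=/Wants= dependency on systemd-oomd.service for units that opted into ManagedOOM*=kill
 * behaviour, provided default dependencies are enabled and the unified cgroup hierarchy with the
 * memory controller is available. */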
static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        if (!cg_all_unified())
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}

static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}

static int unit_validate_on_failure_job_mode(
                Unit *u,
                const char *job_mode_setting,
                JobMode job_mode,
                const char *dependency_name,
                UnitDependencyAtom atom) {

        Unit *other, *found = NULL;

        if (job_mode != JOB_ISOLATE)
                return 0;

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!found)
                        found = other;
                else if (found != other)
                        return log_unit_error_errno(
                                        u, SYNTHETIC_ERRNO(ENOEXEC),
                                        "More than one %s dependency specified but %sisolate set. Refusing.",
                                        dependency_name, job_mode_setting);
        }

        return 0;
}

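/* Loads a unit that is still in the UNIT_STUB state: finalizes a transient unit file if one is being
 * written, dispatches to the per-type load() hook, and then sets up implicit slice, mount and oomd
 * dependencies. On failure the load state becomes UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR. */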
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

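/* Evaluates the unit's Condition*= settings (and, in the variant below, its Assert*= settings)
 * against the manager's effective environment, recording the result and timestamp on the unit. */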
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}

void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}

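/* Checks the unit's start rate limit. If the limit was hit, the configured StartLimitAction= is
 * triggered and -ECANCELED is returned. */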
1858 int unit_test_start_limit(Unit *u) {
1859 const char *reason;
1860
1861 assert(u);
1862
1863 if (ratelimit_below(&u->start_ratelimit)) {
1864 u->start_limit_hit = false;
1865 return 0;
1866 }
1867
1868 log_unit_warning(u, "Start request repeated too quickly.");
1869 u->start_limit_hit = true;
1870
1871 reason = strjoina("unit ", u->id, " failed");
1872
1873 emergency_action(u->manager, u->start_limit_action,
1874 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1875 u->reboot_arg, -1, reason);
1876
1877 return -ECANCELED;
1878 }
1879
1880 static bool unit_verify_deps(Unit *u) {
1881 Unit *other;
1882
1883 assert(u);
1884
1885 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1886 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1887 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1888 * that are not used in conjunction with After= as for them any such check would make things entirely
1889 * racy. */
1890
1891 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1892
1893 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1894 continue;
1895
1896 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1897 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1898 return false;
1899 }
1900 }
1901
1902 return true;
1903 }
1904
1905 /* Errors that aren't really errors:
1906 * -EALREADY: Unit is already started.
1907 * -ECOMM: Condition failed
1908 * -EAGAIN: An operation is already in progress. Retry later.
1909 *
1910 * Errors that are real errors:
1911 * -EBADR: This unit type does not support starting.
1912 * -ECANCELED: Start limit hit, too many requests for now
1913 * -EPROTO: Assert failed
1914 * -EINVAL: Unit not loaded
1915 * -EOPNOTSUPP: Unit type not supported
1916 * -ENOLINK: The necessary dependencies are not fulfilled.
1917 * -ESTALE: This unit has been started before and can't be started a second time
1918 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1919 */
1920 int unit_start(Unit *u, ActivationDetails *details) {
1921 UnitActiveState state;
1922 Unit *following;
1923 int r;
1924
1925 assert(u);
1926
1927 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1928 if (UNIT_VTABLE(u)->subsystem_ratelimited) {
1929 r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
1930 if (r < 0)
1931 return r;
1932 if (r > 0)
1933 return -EAGAIN;
1934 }
1935
1936 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1937 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1938 * waiting is finished. */
1939 state = unit_active_state(u);
1940 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1941 return -EALREADY;
1942 if (state == UNIT_MAINTENANCE)
1943 return -EAGAIN;
1944
1945 /* Units that aren't loaded cannot be started */
1946 if (u->load_state != UNIT_LOADED)
1947 return -EINVAL;
1948
1949 /* Refuse starting scope units more than once */
1950 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1951 return -ESTALE;
1952
1953 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1954 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1955 * recheck the condition in that case. */
1956 if (state != UNIT_ACTIVATING &&
1957 !unit_test_condition(u))
1958 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");
1959
1960 /* If the asserts failed, fail the entire job */
1961 if (state != UNIT_ACTIVATING &&
1962 !unit_test_assert(u))
1963 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1964
1965 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1966 * condition checks, so that we rather return condition check errors (which are usually not
1967 * considered a true failure) than "not supported" errors (which are considered a failure).
1968 */
1969 if (!unit_type_supported(u->type))
1970 return -EOPNOTSUPP;
1971
1972 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1973 * should have taken care of this already, but let's check this here again. After all, our
1974 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1975 if (!unit_verify_deps(u))
1976 return -ENOLINK;
1977
1978 /* Forward to the main object, if we aren't it. */
1979 following = unit_following(u);
1980 if (following) {
1981 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1982 return unit_start(following, details);
1983 }
1984
1985 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1986 if (UNIT_VTABLE(u)->can_start) {
1987 r = UNIT_VTABLE(u)->can_start(u);
1988 if (r < 0)
1989 return r;
1990 }
1991
1992 /* If it is stopped, but we cannot start it, then fail */
1993 if (!UNIT_VTABLE(u)->start)
1994 return -EBADR;
1995
1996 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1997 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1998 * waits for a holdoff timer to elapse before it will start again. */
1999
2000 unit_add_to_dbus_queue(u);
2001 unit_cgroup_freezer_action(u, FREEZER_THAW);
2002
2003 if (!u->activation_details) /* Older details object wins */
2004 u->activation_details = activation_details_ref(details);
2005
2006 return UNIT_VTABLE(u)->start(u);
2007 }
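
/* Illustrative sketch, not part of the original source: a hypothetical caller that applies the
 * error contract documented above for unit_start(), treating the "not really errors" as success.
 * The helper name is made up; IN_SET() is the macro used elsewhere in this file.
 *
 *     static int start_leniently(Unit *u) {
 *             int r = unit_start(u, NULL);
 *             if (IN_SET(r, -EALREADY, -ECOMM, -EAGAIN))
 *                     return 0;    // started, condition unmet, or in progress: benign
 *             return r;            // -EBADR, -EPROTO, -ENOLINK, ... are real errors
 *     }
 */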
2008
2009 bool unit_can_start(Unit *u) {
2010 assert(u);
2011
2012 if (u->load_state != UNIT_LOADED)
2013 return false;
2014
2015 if (!unit_type_supported(u->type))
2016 return false;
2017
2018 /* Scope units may be started only once */
2019 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
2020 return false;
2021
2022 return !!UNIT_VTABLE(u)->start;
2023 }
2024
2025 bool unit_can_isolate(Unit *u) {
2026 assert(u);
2027
2028 return unit_can_start(u) &&
2029 u->allow_isolate;
2030 }
2031
2032 /* Errors:
2033 * -EBADR: This unit type does not support stopping.
2034 * -EALREADY: Unit is already stopped.
2035 * -EAGAIN: An operation is already in progress. Retry later.
2036 */
2037 int unit_stop(Unit *u) {
2038 UnitActiveState state;
2039 Unit *following;
2040
2041 assert(u);
2042
2043 state = unit_active_state(u);
2044 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2045 return -EALREADY;
2046
2047 following = unit_following(u);
2048 if (following) {
2049 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2050 return unit_stop(following);
2051 }
2052
2053 if (!UNIT_VTABLE(u)->stop)
2054 return -EBADR;
2055
2056 unit_add_to_dbus_queue(u);
2057 unit_cgroup_freezer_action(u, FREEZER_THAW);
2058
2059 return UNIT_VTABLE(u)->stop(u);
2060 }
2061
2062 bool unit_can_stop(Unit *u) {
2063 assert(u);
2064
2065 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2066 * Extrinsic units follow external state and may stop in response to external state changes
2067 * (hence we return true here), but an attempt to stop them through the manager will fail. */
2068
2069 if (!unit_type_supported(u->type))
2070 return false;
2071
2072 if (u->perpetual)
2073 return false;
2074
2075 return !!UNIT_VTABLE(u)->stop;
2076 }
2077
2078 /* Errors:
2079 * -EBADR: This unit type does not support reloading.
2080 * -ENOEXEC: Unit is not started.
2081 * -EAGAIN: An operation is already in progress. Retry later.
2082 */
2083 int unit_reload(Unit *u) {
2084 UnitActiveState state;
2085 Unit *following;
2086
2087 assert(u);
2088
2089 if (u->load_state != UNIT_LOADED)
2090 return -EINVAL;
2091
2092 if (!unit_can_reload(u))
2093 return -EBADR;
2094
2095 state = unit_active_state(u);
2096 if (state == UNIT_RELOADING)
2097 return -EAGAIN;
2098
2099 if (state != UNIT_ACTIVE)
2100 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2101
2102 following = unit_following(u);
2103 if (following) {
2104 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2105 return unit_reload(following);
2106 }
2107
2108 unit_add_to_dbus_queue(u);
2109
2110 if (!UNIT_VTABLE(u)->reload) {
2111 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2112 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2113 return 0;
2114 }
2115
2116 unit_cgroup_freezer_action(u, FREEZER_THAW);
2117
2118 return UNIT_VTABLE(u)->reload(u);
2119 }
2120
2121 bool unit_can_reload(Unit *u) {
2122 assert(u);
2123
2124 if (UNIT_VTABLE(u)->can_reload)
2125 return UNIT_VTABLE(u)->can_reload(u);
2126
2127 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2128 return true;
2129
2130 return UNIT_VTABLE(u)->reload;
2131 }
2132
2133 bool unit_is_unneeded(Unit *u) {
2134 Unit *other;
2135 assert(u);
2136
2137 if (!u->stop_when_unneeded)
2138 return false;
2139
2140 /* Don't clean up while the unit is transitioning or is even inactive. */
2141 if (unit_active_state(u) != UNIT_ACTIVE)
2142 return false;
2143 if (u->job)
2144 return false;
2145
2146 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2147 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2148 * restart, then don't clean this one up. */
2149
2150 if (other->job)
2151 return false;
2152
2153 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2154 return false;
2155
2156 if (unit_will_restart(other))
2157 return false;
2158 }
2159
2160 return true;
2161 }
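
/* Illustrative example, not part of the original source: unit_is_unneeded() implements the
 * StopWhenUnneeded= directive. A hypothetical unit that opts in would carry:
 *
 *     [Unit]
 *     Description=Helper that stops once nothing needs it
 *     StopWhenUnneeded=yes
 *
 * Once every unit pinning it (via Requires=, Wants=, BindsTo=, ...) is inactive and has no
 * queued job, the check above returns true and a stop job may be enqueued for it. */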
2162
2163 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2164 Unit *other;
2165
2166 assert(u);
2167
2168 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2169 * that is active declared an Uphold= dependency on it. */
2170
2171 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2172 if (ret_culprit)
2173 *ret_culprit = NULL;
2174 return false;
2175 }
2176
2177 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2178 if (other->job)
2179 continue;
2180
2181 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2182 if (ret_culprit)
2183 *ret_culprit = other;
2184 return true;
2185 }
2186 }
2187
2188 if (ret_culprit)
2189 *ret_culprit = NULL;
2190 return false;
2191 }
2192
2193 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2194 Unit *other;
2195
2196 assert(u);
2197
2198 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2199 * because the other unit is down. */
2200
2201 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2202 /* Don't clean up while the unit is transitioning or is even inactive. */
2203 if (ret_culprit)
2204 *ret_culprit = NULL;
2205 return false;
2206 }
2207
2208 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2209 if (other->job)
2210 continue;
2211
2212 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2213 if (ret_culprit)
2214 *ret_culprit = other;
2215
2216 return true;
2217 }
2218 }
2219
2220 if (ret_culprit)
2221 *ret_culprit = NULL;
2222 return false;
2223 }
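
/* Illustrative sketch, not part of the original source: how a caller might consume the
 * ret_culprit out-parameter of the two predicates above (the log texts are made up):
 *
 *     Unit *culprit;
 *     if (unit_is_bound_by_inactive(u, &culprit))
 *             log_unit_info(u, "Will stop, bound to inactive unit %s.", culprit->id);
 *     else if (unit_is_upheld_by_active(u, &culprit))
 *             log_unit_info(u, "Will start, upheld by active unit %s.", culprit->id);
 */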
2224
2225 static void check_unneeded_dependencies(Unit *u) {
2226 Unit *other;
2227 assert(u);
2228
2229 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2230
2231 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2232 unit_submit_to_stop_when_unneeded_queue(other);
2233 }
2234
2235 static void check_uphold_dependencies(Unit *u) {
2236 Unit *other;
2237 assert(u);
2238
2239 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2240
2241 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2242 unit_submit_to_start_when_upheld_queue(other);
2243 }
2244
2245 static void check_bound_by_dependencies(Unit *u) {
2246 Unit *other;
2247 assert(u);
2248
2249 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2250
2251 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2252 unit_submit_to_stop_when_bound_queue(other);
2253 }
2254
2255 static void retroactively_start_dependencies(Unit *u) {
2256 Unit *other;
2257
2258 assert(u);
2259 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2260
2261 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
2262 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2263 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2264 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2265
2266 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
2267 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2268 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2269 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2270
2271 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
2272 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2273 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2274 }
2275
2276 static void retroactively_stop_dependencies(Unit *u) {
2277 Unit *other;
2278
2279 assert(u);
2280 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2281
2282 /* Pull down units which are bound to us recursively if enabled */
2283 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2284 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2285 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2286 }
2287
2288 void unit_start_on_failure(
2289 Unit *u,
2290 const char *dependency_name,
2291 UnitDependencyAtom atom,
2292 JobMode job_mode) {
2293
2294 int n_jobs = -1;
2295 Unit *other;
2296 int r;
2297
2298 assert(u);
2299 assert(dependency_name);
2300 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2301
2302 /* Act on OnFailure= and OnSuccess= dependencies */
2303
2304 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2305 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2306
2307 if (n_jobs < 0) {
2308 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2309 n_jobs = 0;
2310 }
2311
2312 r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
2313 if (r < 0)
2314 log_unit_warning_errno(
2315 u, r, "Failed to enqueue %s job, ignoring: %s",
2316 dependency_name, bus_error_message(&error, r));
2317 n_jobs++;
2318 }
2319
2320 if (n_jobs >= 0)
2321 log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
2322 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2323 }
2324
2325 void unit_trigger_notify(Unit *u) {
2326 Unit *other;
2327
2328 assert(u);
2329
2330 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2331 if (UNIT_VTABLE(other)->trigger_notify)
2332 UNIT_VTABLE(other)->trigger_notify(other, u);
2333 }
2334
2335 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2336 if (condition_notice && log_level > LOG_NOTICE)
2337 return LOG_NOTICE;
2338 if (condition_info && log_level > LOG_INFO)
2339 return LOG_INFO;
2340 return log_level;
2341 }
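
/* Illustrative example, not part of the original source: a unit that consumed 2s of CPU time
 * crosses the "mentionworthy" threshold but not the "noticeworthy" one, so a LOG_DEBUG level
 * is raised exactly one step:
 *
 *     int level = raise_level(LOG_DEBUG,
 *                             cpu_nsec > MENTIONWORTHY_CPU_NSEC,   // true for 2s
 *                             cpu_nsec > NOTICEWORTHY_CPU_NSEC);   // false for 2s
 *     // level == LOG_INFO
 */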
2342
2343 static int unit_log_resources(Unit *u) {
2344
2345 static const struct {
2346 const char *journal_field;
2347 const char *message_suffix;
2348 } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
2349 [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" },
2350 [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
2351 }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2352 [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
2353 [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
2354 [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL },
2355 [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL },
2356 }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2357 [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" },
2358 [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
2359 [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL },
2360 [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL },
2361 };
2362
2363 struct iovec *iovec = NULL;
2364 size_t n_iovec = 0;
2365 _cleanup_free_ char *message = NULL, *t = NULL;
2366 nsec_t cpu_nsec = NSEC_INFINITY;
2367 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2368
2369 assert(u);
2370
2371 CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);
2372
2373 iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
2374 _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
2375 if (!iovec)
2376 return log_oom();
2377
2378 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2379 * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2380 * information and the complete data in structured fields. */
2381
2382 (void) unit_get_cpu_usage(u, &cpu_nsec);
2383 if (cpu_nsec != NSEC_INFINITY) {
2384 /* Format the CPU time for inclusion in the structured log message */
2385 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
2386 return log_oom();
2387 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2388
2389 /* Format the CPU time for inclusion in the human language message string */
2390 if (strextendf_with_separator(&message, ", ",
2391 "Consumed %s CPU time",
2392 FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
2393 return log_oom();
2394
2395 log_level = raise_level(log_level,
2396 cpu_nsec > MENTIONWORTHY_CPU_NSEC,
2397 cpu_nsec > NOTICEWORTHY_CPU_NSEC);
2398 }
2399
2400 for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
2401 uint64_t v = UINT64_MAX;
2402
2403 assert(memory_fields[metric].journal_field);
2404 assert(memory_fields[metric].message_suffix);
2405
2406 (void) unit_get_memory_accounting(u, metric, &v);
2407 if (v == UINT64_MAX)
2408 continue;
2409
2410 if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, v) < 0)
2411 return log_oom();
2412 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2413
2414 if (strextendf_with_separator(&message, ", ", "%s %s",
2415 FORMAT_BYTES(v), memory_fields[metric].message_suffix) < 0)
2416 return log_oom();
2417
2418 log_level = raise_level(log_level,
2419 v > MENTIONWORTHY_MEMORY_BYTES,
2420 v > NOTICEWORTHY_MEMORY_BYTES);
2421 }
2422
2423 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2424 uint64_t value = UINT64_MAX;
2425
2426 assert(io_fields[k].journal_field);
2427
2428 (void) unit_get_io_accounting(u, k, k > 0, &value);
2429 if (value == UINT64_MAX)
2430 continue;
2431
2432 /* Format IO accounting data for inclusion in the structured log message */
2433 if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
2434 return log_oom();
2435 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2436
2437 /* Format the IO accounting data for inclusion in the human language message string, but only
2438 * for the bytes counters (and not for the operations counters) */
2439 if (io_fields[k].message_suffix) {
2440 if (strextendf_with_separator(&message, ", ", "%s %s",
2441 FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
2442 return log_oom();
2443
2444 log_level = raise_level(log_level,
2445 value > MENTIONWORTHY_IO_BYTES,
2446 value > NOTICEWORTHY_IO_BYTES);
2447 }
2448 }
2449
2450 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2451 uint64_t value = UINT64_MAX;
2452
2453 assert(ip_fields[m].journal_field);
2454
2455 (void) unit_get_ip_accounting(u, m, &value);
2456 if (value == UINT64_MAX)
2457 continue;
2458
2459 /* Format IP accounting data for inclusion in the structured log message */
2460 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
2461 return log_oom();
2462 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2463
2464 /* Format the IP accounting data for inclusion in the human language message string, but only
2465 * for the bytes counters (and not for the packets counters) */
2466 if (ip_fields[m].message_suffix) {
2467 if (strextendf_with_separator(&message, ", ", "%s %s",
2468 FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
2469 return log_oom();
2470
2471 log_level = raise_level(log_level,
2472 value > MENTIONWORTHY_IP_BYTES,
2473 value > NOTICEWORTHY_IP_BYTES);
2474 }
2475 }
2476
2477 /* This check is here because it is the earliest point following all possible log_level assignments.
2478 * (If log_level is assigned anywhere after this point, move this check.) */
2479 if (!unit_log_level_test(u, log_level))
2480 return 0;
2481
2482 /* Is there any accounting data available at all? */
2483 if (n_iovec == 0) {
2484 assert(!message);
2485 return 0;
2486 }
2487
2488 t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
2489 if (!t)
2490 return log_oom();
2491 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2492
2493 if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
2494 return log_oom();
2495
2496 if (!set_iovec_string_field(iovec, &n_iovec, u->manager->unit_log_field, u->id))
2497 return log_oom();
2498
2499 if (!set_iovec_string_field(iovec, &n_iovec, u->manager->invocation_log_field, u->invocation_id_string))
2500 return log_oom();
2501
2502 log_unit_struct_iovec(u, log_level, iovec, n_iovec);
2503
2504 return 0;
2505 }
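
/* Illustrative example, not part of the original source: for a unit with CPU and IO accounting
 * data, the human-readable part assembled above might read
 *
 *     foo.service: Consumed 2.5s CPU time, 1.0M read from disk, 512.0K written to disk.
 *
 * while the same journal entry carries CPU_USAGE_NSEC=, IO_METRIC_READ_BYTES= and friends as
 * structured fields. "foo.service" and the figures are made up. */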
2506
2507 static void unit_update_on_console(Unit *u) {
2508 bool b;
2509
2510 assert(u);
2511
2512 b = unit_needs_console(u);
2513 if (u->on_console == b)
2514 return;
2515
2516 u->on_console = b;
2517 if (b)
2518 manager_ref_console(u->manager);
2519 else
2520 manager_unref_console(u->manager);
2521 }
2522
2523 static void unit_emit_audit_start(Unit *u) {
2524 assert(u);
2525
2526 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2527 return;
2528
2529 /* Write audit record if we have just finished starting up */
2530 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2531 u->in_audit = true;
2532 }
2533
2534 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2535 assert(u);
2536
2537 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2538 return;
2539
2540 if (u->in_audit) {
2541 /* Write audit record if we have just finished shutting down */
2542 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
2543 u->in_audit = false;
2544 } else {
2545 /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */
2546 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);
2547
2548 if (state == UNIT_INACTIVE)
2549 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
2550 }
2551 }
2552
2553 static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
2554 bool unexpected = false;
2555 JobResult result;
2556
2557 assert(j);
2558
2559 if (j->state == JOB_WAITING)
2560 /* So we reached a different state for this job. Let's see if we can run it now, in case it
2561 * previously failed due to EAGAIN. */
2562 job_add_to_run_queue(j);
2563
2564 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2565 * hence needs to invalidate jobs. */
2566
2567 switch (j->type) {
2568
2569 case JOB_START:
2570 case JOB_VERIFY_ACTIVE:
2571
2572 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2573 job_finish_and_invalidate(j, JOB_DONE, true, false);
2574 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2575 unexpected = true;
2576
2577 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2578 if (ns == UNIT_FAILED)
2579 result = JOB_FAILED;
2580 else
2581 result = JOB_DONE;
2582
2583 job_finish_and_invalidate(j, result, true, false);
2584 }
2585 }
2586
2587 break;
2588
2589 case JOB_RELOAD:
2590 case JOB_RELOAD_OR_START:
2591 case JOB_TRY_RELOAD:
2592
2593 if (j->state == JOB_RUNNING) {
2594 if (ns == UNIT_ACTIVE)
2595 job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2596 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2597 unexpected = true;
2598
2599 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2600 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2601 }
2602 }
2603
2604 break;
2605
2606 case JOB_STOP:
2607 case JOB_RESTART:
2608 case JOB_TRY_RESTART:
2609
2610 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2611 job_finish_and_invalidate(j, JOB_DONE, true, false);
2612 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2613 unexpected = true;
2614 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2615 }
2616
2617 break;
2618
2619 default:
2620 assert_not_reached();
2621 }
2622
2623 return unexpected;
2624 }
2625
2626 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2627 const char *reason;
2628 Manager *m;
2629
2630 assert(u);
2631 assert(os < _UNIT_ACTIVE_STATE_MAX);
2632 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2633
2634 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2635 * UnitActiveState! That means that ns == os is expected behavior here. For example: if a mount point is
2636 * remounted, this function will be called too! */
2637
2638 m = u->manager;
2639
2640 /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be in
2641 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2642 unit_add_to_dbus_queue(u);
2643
2644 /* Update systemd-oomd on the property/state change */
2645 if (os != ns) {
2646 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2647 * monitoring.
2648 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2649 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2650 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2651 * have the information on the property. Thus, indiscriminately send an update. */
2652 if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
2653 (void) manager_varlink_send_managed_oom_update(u);
2654 }
2655
2656 /* Update timestamps for state changes */
2657 if (!MANAGER_IS_RELOADING(m)) {
2658 dual_timestamp_now(&u->state_change_timestamp);
2659
2660 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2661 u->inactive_exit_timestamp = u->state_change_timestamp;
2662 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2663 u->inactive_enter_timestamp = u->state_change_timestamp;
2664
2665 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2666 u->active_enter_timestamp = u->state_change_timestamp;
2667 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2668 u->active_exit_timestamp = u->state_change_timestamp;
2669 }
2670
2671 /* Keep track of failed units */
2672 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2673
2674 /* Make sure the cgroup and state files are always removed when we become inactive */
2675 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2676 SET_FLAG(u->markers,
2677 (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
2678 false);
2679 unit_prune_cgroup(u);
2680 unit_unlink_state_files(u);
2681 } else if (ns != os && ns == UNIT_RELOADING)
2682 SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);
2683
2684 unit_update_on_console(u);
2685
2686 if (!MANAGER_IS_RELOADING(m)) {
2687 bool unexpected;
2688
2689 /* Let's propagate state changes to the job */
2690 if (u->job)
2691 unexpected = unit_process_job(u->job, ns, reload_success);
2692 else
2693 unexpected = true;
2694
2695 /* If this state change happened without being requested by a job, then let's retroactively start or
2696 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2697 * additional jobs just because something is already activated. */
2698
2699 if (unexpected) {
2700 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2701 retroactively_start_dependencies(u);
2702 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2703 retroactively_stop_dependencies(u);
2704 }
2705
2706 if (ns != os && ns == UNIT_FAILED) {
2707 log_unit_debug(u, "Unit entered failed state.");
2708 unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
2709 }
2710
2711 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2712 /* This unit just finished starting up */
2713
2714 unit_emit_audit_start(u);
2715 manager_send_unit_plymouth(m, u);
2716 }
2717
2718 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2719 /* This unit just stopped/failed. */
2720
2721 unit_emit_audit_stop(u, ns);
2722 unit_log_resources(u);
2723 }
2724
2725 if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
2726 unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
2727 }
2728
2729 manager_recheck_journal(m);
2730 manager_recheck_dbus(m);
2731
2732 unit_trigger_notify(u);
2733
2734 if (!MANAGER_IS_RELOADING(m)) {
2735 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2736 reason = strjoina("unit ", u->id, " failed");
2737 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2738 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2739 reason = strjoina("unit ", u->id, " succeeded");
2740 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2741 }
2742 }
2743
2744 /* And now, add the unit or depending units to various queues that will act on the new situation if
2745 * needed. These queues generally check for continuous state changes rather than events (like most of
2746 * the state propagation above), and do work deferred instead of instantly, since they typically
2747 * don't want to run during reloading, and usually involve checking combined state of multiple units
2748 * at once. */
2749
2750 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2751 /* Stop unneeded units and bound-by units regardless of whether going down was expected */
2752 check_unneeded_dependencies(u);
2753 check_bound_by_dependencies(u);
2754
2755 /* Maybe someone wants us to remain up? */
2756 unit_submit_to_start_when_upheld_queue(u);
2757
2758 /* Maybe the unit should be GC'ed now? */
2759 unit_add_to_gc_queue(u);
2760
2761 /* Maybe we can release some resources now? */
2762 unit_submit_to_release_resources_queue(u);
2763 }
2764
2765 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2766 /* Start uphold units regardless of whether going up was expected */
2767 check_uphold_dependencies(u);
2768
2769 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2770 unit_submit_to_stop_when_unneeded_queue(u);
2771
2772 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2773 * when a unit uses BindsTo= on a Type=oneshot unit, as these units go directly from starting to
2774 * inactive, without ever entering started.) */
2775 unit_submit_to_stop_when_bound_queue(u);
2776 }
2777 }
2778
2779 int unit_watch_pidref(Unit *u, PidRef *pid, bool exclusive) {
2780 _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
2781 int r;
2782
2783 /* Adds a specific PID to the set of PIDs this unit watches. */
2784
2785 assert(u);
2786 assert(pidref_is_set(pid));
2787
2788 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2789 * opportunity to remove any stale references to this PID as they can be created
2790 * easily (when watching a process which is not our direct child). */
2791 if (exclusive)
2792 manager_unwatch_pidref(u->manager, pid);
2793
2794 if (set_contains(u->pids, pid)) /* early exit if already being watched */
2795 return 0;
2796
2797 r = pidref_dup(pid, &pid_dup);
2798 if (r < 0)
2799 return r;
2800
2801 /* First, insert into the set of PIDs maintained by the unit */
2802 r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
2803 if (r < 0)
2804 return r;
2805
2806 pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */
2807
2808 /* Second, insert it into the simple global table, see if that works */
2809 r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops_free, pid, u);
2810 if (r != -EEXIST)
2811 return r;
2812
2813 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2814 * hashmap that points to an array. */
2815
2816 PidRef *old_pid = NULL;
2817 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);
2818
2819 /* Count entries in array */
2820 size_t n = 0;
2821 for (; array && array[n]; n++)
2822 ;
2823
2824 /* Allocate a new array */
2825 _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
2826 if (!new_array)
2827 return -ENOMEM;
2828
2829 /* Append us to the end */
2830 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2831 new_array[n] = u;
2832 new_array[n+1] = NULL;
2833
2834 /* Make sure the hashmap is allocated */
2835 r = hashmap_ensure_allocated(&u->manager->watch_pids_more, &pidref_hash_ops_free);
2836 if (r < 0)
2837 return r;
2838
2839 /* Add or replace the old array */
2840 r = hashmap_replace(u->manager->watch_pids_more, old_pid ?: pid, new_array);
2841 if (r < 0)
2842 return r;
2843
2844 TAKE_PTR(new_array); /* Now part of the hash table */
2845 free(array); /* Which means we can now delete the old version */
2846 return 0;
2847 }
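
/* Illustrative sketch, not part of the original source: a typical watch/unwatch lifecycle as a
 * unit implementation might drive it after forking off a worker process (names are made up).
 * Note that unit_watch_pidref() duplicates the PidRef, so the caller keeps ownership:
 *
 *     _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
 *     r = pidref_set_pid(&pidref, worker_pid);
 *     if (r < 0)
 *             return r;
 *     r = unit_watch_pidref(u, &pidref, false);  // exclusive=false
 *     if (r < 0)
 *             return r;
 *     // ... later, once the process is known to be gone:
 *     unit_unwatch_pidref(u, &pidref);
 */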
2848
2849 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2850 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
2851 int r;
2852
2853 assert(u);
2854 assert(pid_is_valid(pid));
2855
2856 r = pidref_set_pid(&pidref, pid);
2857 if (r < 0)
2858 return r;
2859
2860 return unit_watch_pidref(u, &pidref, exclusive);
2861 }
2862
2863 void unit_unwatch_pidref(Unit *u, PidRef *pid) {
2864 assert(u);
2865 assert(pidref_is_set(pid));
2866
2867 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2868 _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
2869 if (!pid1)
2870 return; /* Early exit if this PID was never watched by us */
2871
2872 /* First let's drop the unit from the simple hash table, if it is included there */
2873 PidRef *pid2 = NULL;
2874 Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);
2875
2876 /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
2877 assert((uu == u) == (pid1 == pid2));
2878
2879 if (uu == u)
2880 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2881 assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
2882 else {
2883 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2884 PidRef *pid3 = NULL;
2885 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);
2886
2887 /* Let's iterate through the array, dropping our own entry */
2888 size_t m = 0, n = 0;
2889 for (; array && array[n]; n++)
2890 if (array[n] != u)
2891 array[m++] = array[n];
2892 if (n == m)
2893 return; /* Not there */
2894
2895 array[m] = NULL; /* set trailing NULL marker on the new end */
2896
2897 if (m == 0) {
2898 /* The array is now empty, remove the entire entry */
2899 assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
2900 free(array);
2901 } else {
2902 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2903 * we will delete, but by the PidRef object of the Unit that is now first in the
2904 * array. */
2905
2906 PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
2907 assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
2908 }
2909 }
2910 }
2911
2912 void unit_unwatch_pid(Unit *u, pid_t pid) {
2913 return unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(pid));
2914 }
2915
2916 void unit_unwatch_all_pids(Unit *u) {
2917 assert(u);
2918
2919 while (!set_isempty(u->pids))
2920 unit_unwatch_pidref(u, set_first(u->pids));
2921
2922 u->pids = set_free(u->pids);
2923 }
2924
2925 static void unit_tidy_watch_pids(Unit *u) {
2926 PidRef *except1, *except2, *e;
2927
2928 assert(u);
2929
2930 /* Cleans dead PIDs from our list */
2931
2932 except1 = unit_main_pid(u);
2933 except2 = unit_control_pid(u);
2934
2935 SET_FOREACH(e, u->pids) {
2936 if (pidref_equal(except1, e) || pidref_equal(except2, e))
2937 continue;
2938
2939 if (pidref_is_unwaited(e) <= 0)
2940 unit_unwatch_pidref(u, e);
2941 }
2942 }
2943
2944 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2945 Unit *u = ASSERT_PTR(userdata);
2946
2947 assert(s);
2948
2949 unit_tidy_watch_pids(u);
2950 unit_watch_all_pids(u);
2951
2952 /* If the PID set is empty now, then let's finish this off. */
2953 unit_synthesize_cgroup_empty_event(u);
2954
2955 return 0;
2956 }
2957
2958 int unit_enqueue_rewatch_pids(Unit *u) {
2959 int r;
2960
2961 assert(u);
2962
2963 if (!u->cgroup_path)
2964 return -ENOENT;
2965
2966 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2967 if (r < 0)
2968 return r;
2969 if (r > 0) /* On unified we can use proper notifications */
2970 return 0;
2971
2972 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2973 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2974 * involves issuing kill(pid, 0) on all processes we watch. */
2975
2976 if (!u->rewatch_pids_event_source) {
2977 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2978
2979 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2980 if (r < 0)
2981 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2982
2983 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2984 if (r < 0)
2985 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2986
2987 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2988
2989 u->rewatch_pids_event_source = TAKE_PTR(s);
2990 }
2991
2992 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2993 if (r < 0)
2994 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2995
2996 return 0;
2997 }
2998
2999 void unit_dequeue_rewatch_pids(Unit *u) {
3000 int r;
3001 assert(u);
3002
3003 if (!u->rewatch_pids_event_source)
3004 return;
3005
3006 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
3007 if (r < 0)
3008 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
3009
3010 u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
3011 }
3012
3013 bool unit_job_is_applicable(Unit *u, JobType j) {
3014 assert(u);
3015 assert(j >= 0 && j < _JOB_TYPE_MAX);
3016
3017 switch (j) {
3018
3019 case JOB_VERIFY_ACTIVE:
3020 case JOB_START:
3021 case JOB_NOP:
3022 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3023 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3024 * jobs for them. */
3025 return true;
3026
3027 case JOB_STOP:
3028 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
3029 * external events), hence it makes no sense to permit enqueuing such a request either. */
3030 return !u->perpetual;
3031
3032 case JOB_RESTART:
3033 case JOB_TRY_RESTART:
3034 return unit_can_stop(u) && unit_can_start(u);
3035
3036 case JOB_RELOAD:
3037 case JOB_TRY_RELOAD:
3038 return unit_can_reload(u);
3039
3040 case JOB_RELOAD_OR_START:
3041 return unit_can_reload(u) && unit_can_start(u);
3042
3043 default:
3044 assert_not_reached();
3045 }
3046 }
3047
3048 static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
3049 Hashmap *deps;
3050
3051 assert(u);
3052 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3053
3054 deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
3055 if (!deps) {
3056 _cleanup_hashmap_free_ Hashmap *h = NULL;
3057
3058 h = hashmap_new(NULL);
3059 if (!h)
3060 return NULL;
3061
3062 if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
3063 return NULL;
3064
3065 deps = TAKE_PTR(h);
3066 }
3067
3068 return deps;
3069 }
3070
3071 typedef enum NotifyDependencyFlags {
3072 NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0,
3073 NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,
3074 } NotifyDependencyFlags;
3075
3076 static int unit_add_dependency_impl(
3077 Unit *u,
3078 UnitDependency d,
3079 Unit *other,
3080 UnitDependencyMask mask) {
3081
3082 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
3083 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
3084 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
3085 [UNIT_WANTS] = UNIT_WANTED_BY,
3086 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
3087 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
3088 [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
3089 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
3090 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
3091 [UNIT_WANTED_BY] = UNIT_WANTS,
3092 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
3093 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
3094 [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
3095 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
3096 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
3097 [UNIT_BEFORE] = UNIT_AFTER,
3098 [UNIT_AFTER] = UNIT_BEFORE,
3099 [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
3100 [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
3101 [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
3102 [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
3103 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
3104 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
3105 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
3106 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
3107 [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
3108 [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
3109 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
3110 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
3111 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
3112 [UNIT_IN_SLICE] = UNIT_SLICE_OF,
3113 [UNIT_SLICE_OF] = UNIT_IN_SLICE,
3114 };
3115
3116 Hashmap *u_deps, *other_deps;
3117 UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
3118 NotifyDependencyFlags flags = 0;
3119 int r;
3120
3121 assert(u);
3122 assert(other);
3123 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3124 assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
3125 assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);
3126
3127 /* Ensure the following two hashmaps for each unit exist:
3128 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3129 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3130 u_deps = unit_get_dependency_hashmap_per_type(u, d);
3131 if (!u_deps)
3132 return -ENOMEM;
3133
3134 other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
3135 if (!other_deps)
3136 return -ENOMEM;
3137
3138 /* Save the original dependency info. */
3139 u_info.data = u_info_old.data = hashmap_get(u_deps, other);
3140 other_info.data = other_info_old.data = hashmap_get(other_deps, u);
3141
3142 /* Update dependency info. */
3143 u_info.origin_mask |= mask;
3144 other_info.destination_mask |= mask;
3145
3146 /* Save updated dependency info. */
3147 if (u_info.data != u_info_old.data) {
3148 r = hashmap_replace(u_deps, other, u_info.data);
3149 if (r < 0)
3150 return r;
3151
3152 flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
3153 }
3154
3155 if (other_info.data != other_info_old.data) {
3156 r = hashmap_replace(other_deps, u, other_info.data);
3157 if (r < 0) {
3158 if (u_info.data != u_info_old.data) {
3159 /* Restore the old dependency. */
3160 if (u_info_old.data)
3161 (void) hashmap_update(u_deps, other, u_info_old.data);
3162 else
3163 hashmap_remove(u_deps, other);
3164 }
3165 return r;
3166 }
3167
3168 flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
3169 }
3170
3171 return flags;
3172 }
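
/* Illustrative note, not part of the original source: the non-negative return value of
 * unit_add_dependency_impl() is a NotifyDependencyFlags bitmask. Adding a brand-new dependency
 * yields NOTIFY_DEPENDENCY_UPDATE_FROM|NOTIFY_DEPENDENCY_UPDATE_TO; re-adding one whose mask
 * bits are already recorded yields 0, which lets unit_add_dependency() below skip enqueuing
 * redundant D-Bus change signals. */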
3173
3174 int unit_add_dependency(
3175 Unit *u,
3176 UnitDependency d,
3177 Unit *other,
3178 bool add_reference,
3179 UnitDependencyMask mask) {
3180
3181 UnitDependencyAtom a;
3182 int r;
3183
3184 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3185 * there, no need to notify! */
3186 NotifyDependencyFlags notify_flags;
3187
3188 assert(u);
3189 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3190 assert(other);
3191
3192 u = unit_follow_merge(u);
3193 other = unit_follow_merge(other);
3194 a = unit_dependency_to_atom(d);
3195 assert(a >= 0);
3196
3197 /* We won't allow dependencies on ourselves. We will not consider them an error, however. */
3198 if (u == other) {
3199 if (unit_should_warn_about_dependency(d))
3200 log_unit_warning(u, "Dependency %s=%s is dropped.",
3201 unit_dependency_to_string(d), u->id);
3202 return 0;
3203 }
3204
3205 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3206 return 0;
3207
3208 /* Note that ordering a device unit after another unit is permitted, since it allows the job
3209 * running timeout to be started at a specific time. */
3210 if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
3211 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed).", other->id);
3212 return 0;
3213 }
3214
3215 if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
3216 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
3217 return 0;
3218 }
3219
3220 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
3221 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3222 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
3223 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
3224 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3225 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
3226
3227 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
3228 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3229 "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
3230 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
3231 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3232 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);
3233
3234 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
3235 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3236 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);
3237
3238 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
3239 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3240 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);
3241
3242 r = unit_add_dependency_impl(u, d, other, mask);
3243 if (r < 0)
3244 return r;
3245 notify_flags = r;
3246
3247 if (add_reference) {
3248 r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
3249 if (r < 0)
3250 return r;
3251 notify_flags |= r;
3252 }
3253
3254 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
3255 unit_add_to_dbus_queue(u);
3256 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
3257 unit_add_to_dbus_queue(other);
3258
3259 return notify_flags != 0;
3260 }
3261
3262 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3263 int r = 0, s = 0;
3264
3265 assert(u);
3266 assert(d >= 0 || e >= 0);
3267
3268 if (d >= 0) {
3269 r = unit_add_dependency(u, d, other, add_reference, mask);
3270 if (r < 0)
3271 return r;
3272 }
3273
3274 if (e >= 0) {
3275 s = unit_add_dependency(u, e, other, add_reference, mask);
3276 if (s < 0)
3277 return s;
3278 }
3279
3280 return r > 0 || s > 0;
3281 }
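
/* Illustrative example, not part of the original source: a typical use of the helper above is
 * adding ordering and requirement in one go, i.e. the programmatic equivalent of writing both
 * After= and Wants= on some already-loaded unit "other":
 *
 *     r = unit_add_two_dependencies(u, UNIT_AFTER, UNIT_WANTS, other,
 *                                   true, UNIT_DEPENDENCY_FILE);
 *     if (r < 0)
 *             return r;
 */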
3282
3283 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3284 int r;
3285
3286 assert(u);
3287 assert(name);
3288 assert(buf);
3289 assert(ret);
3290
3291 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3292 *buf = NULL;
3293 *ret = name;
3294 return 0;
3295 }
3296
3297 if (u->instance)
3298 r = unit_name_replace_instance(name, u->instance, buf);
3299 else {
3300 _cleanup_free_ char *i = NULL;
3301
3302 r = unit_name_to_prefix(u->id, &i);
3303 if (r < 0)
3304 return r;
3305
3306 r = unit_name_replace_instance(name, i, buf);
3307 }
3308 if (r < 0)
3309 return r;
3310
3311 *ret = *buf;
3312 return 0;
3313 }
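
/* Illustrative example, not part of the original source: for a unit u named
 * "getty@tty1.service" (instance "tty1"), resolve_template(u, "autovt@.service", &buf, &name)
 * sets name to "autovt@tty1.service"; for a non-template name the input is passed through
 * unchanged and *buf stays NULL. The unit names are only examples. */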
3314
3315 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3316 _cleanup_free_ char *buf = NULL;
3317 Unit *other;
3318 int r;
3319
3320 assert(u);
3321 assert(name);
3322
3323 r = resolve_template(u, name, &buf, &name);
3324 if (r < 0)
3325 return r;
3326
3327 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3328 return 0;
3329
3330 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3331 if (r < 0)
3332 return r;
3333
3334 return unit_add_dependency(u, d, other, add_reference, mask);
3335 }
3336
3337 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3338 _cleanup_free_ char *buf = NULL;
3339 Unit *other;
3340 int r;
3341
3342 assert(u);
3343 assert(name);
3344
3345 r = resolve_template(u, name, &buf, &name);
3346 if (r < 0)
3347 return r;
3348
3349 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3350 return 0;
3351
3352 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3353 if (r < 0)
3354 return r;
3355
3356 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3357 }
3358
3359 int set_unit_path(const char *p) {
3360 /* This is mostly for debug purposes */
3361 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, 1));
3362 }
3363
3364 char *unit_dbus_path(Unit *u) {
3365 assert(u);
3366
3367 if (!u->id)
3368 return NULL;
3369
3370 return unit_dbus_path_from_name(u->id);
3371 }
3372
3373 char *unit_dbus_path_invocation_id(Unit *u) {
3374 assert(u);
3375
3376 if (sd_id128_is_null(u->invocation_id))
3377 return NULL;
3378
3379 return unit_dbus_path_from_name(u->invocation_id_string);
3380 }
3381
3382 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
3383 int r;
3384
3385 assert(u);
3386
3387 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3388
3389 if (sd_id128_equal(u->invocation_id, id))
3390 return 0;
3391
3392 if (!sd_id128_is_null(u->invocation_id))
3393 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
3394
3395 if (sd_id128_is_null(id)) {
3396 r = 0;
3397 goto reset;
3398 }
3399
3400 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
3401 if (r < 0)
3402 goto reset;
3403
3404 u->invocation_id = id;
3405 sd_id128_to_string(id, u->invocation_id_string);
3406
3407 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
3408 if (r < 0)
3409 goto reset;
3410
3411 return 0;
3412
3413 reset:
3414 u->invocation_id = SD_ID128_NULL;
3415 u->invocation_id_string[0] = 0;
3416 return r;
3417 }
3418
3419 int unit_set_slice(Unit *u, Unit *slice) {
3420 int r;
3421
3422 assert(u);
3423 assert(slice);
3424
3425 /* Sets the unit slice if it has not been set before. Is extra careful to only allow this for units
3426 * that actually have a cgroup context. Also, we don't allow setting this for slices (since the parent
3427 * slice is derived from the name). Make sure the unit we set is actually a slice. */
3428
3429 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3430 return -EOPNOTSUPP;
3431
3432 if (u->type == UNIT_SLICE)
3433 return -EINVAL;
3434
3435 if (unit_active_state(u) != UNIT_INACTIVE)
3436 return -EBUSY;
3437
3438 if (slice->type != UNIT_SLICE)
3439 return -EINVAL;
3440
3441 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3442 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3443 return -EPERM;
3444
3445 if (UNIT_GET_SLICE(u) == slice)
3446 return 0;
3447
3448 /* Disallow slice changes if @u is already bound to cgroups */
3449 if (UNIT_GET_SLICE(u) && u->cgroup_realized)
3450 return -EBUSY;
3451
3452 /* Remove the previously assigned slice, if any; we should only have one UNIT_IN_SLICE dependency */
3453 if (UNIT_GET_SLICE(u))
3454 unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);
3455
3456 r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
3457 if (r < 0)
3458 return r;
3459
3460 return 1;
3461 }
3462
3463 int unit_set_default_slice(Unit *u) {
3464 const char *slice_name;
3465 Unit *slice;
3466 int r;
3467
3468 assert(u);
3469
3470 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3471 return 0;
3472
3473 if (UNIT_GET_SLICE(u))
3474 return 0;
3475
3476 if (u->instance) {
3477 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3478
3479 /* Implicitly place all instantiated units in their
3480 * own per-template slice */
3481
3482 r = unit_name_to_prefix(u->id, &prefix);
3483 if (r < 0)
3484 return r;
3485
3486 /* The prefix is already escaped, but it might include
3487 * "-" which has a special meaning for slice units,
3488 * hence escape it once more here. */
3489 escaped = unit_name_escape(prefix);
3490 if (!escaped)
3491 return -ENOMEM;
3492
3493 if (MANAGER_IS_SYSTEM(u->manager))
3494 slice_name = strjoina("system-", escaped, ".slice");
3495 else
3496 slice_name = strjoina("app-", escaped, ".slice");
3497
3498 } else if (unit_is_extrinsic(u))
3499 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3500 * the root slice. They don't really belong in one of the subslices. */
3501 slice_name = SPECIAL_ROOT_SLICE;
3502
3503 else if (MANAGER_IS_SYSTEM(u->manager))
3504 slice_name = SPECIAL_SYSTEM_SLICE;
3505 else
3506 slice_name = SPECIAL_APP_SLICE;
3507
3508 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3509 if (r < 0)
3510 return r;
3511
3512 return unit_set_slice(u, slice);
3513 }
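
/* Illustrative example, not part of the original source: under the rules above, a hypothetical
 * instantiated system unit "backup@home.service" defaults to "system-backup.slice" (prefix
 * "backup", escaped, wrapped as system-<prefix>.slice), while a plain system service defaults
 * to SPECIAL_SYSTEM_SLICE and extrinsic units stay in SPECIAL_ROOT_SLICE. */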
3514
3515 const char *unit_slice_name(Unit *u) {
3516 Unit *slice;
3517 assert(u);
3518
3519 slice = UNIT_GET_SLICE(u);
3520 if (!slice)
3521 return NULL;
3522
3523 return slice->id;
3524 }
3525
3526 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3527 _cleanup_free_ char *t = NULL;
3528 int r;
3529
3530 assert(u);
3531 assert(type);
3532 assert(_found);
3533
3534 r = unit_name_change_suffix(u->id, type, &t);
3535 if (r < 0)
3536 return r;
3537 if (unit_has_name(u, t))
3538 return -EINVAL;
3539
3540 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3541 assert(r < 0 || *_found != u);
3542 return r;
3543 }
3544
3545 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3546 const char *new_owner;
3547 Unit *u = ASSERT_PTR(userdata);
3548 int r;
3549
3550 assert(message);
3551
3552 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3553 if (r < 0) {
3554 bus_log_parse_error(r);
3555 return 0;
3556 }
3557
3558 if (UNIT_VTABLE(u)->bus_name_owner_change)
3559 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3560
3561 return 0;
3562 }
3563
3564 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3565 const sd_bus_error *e;
3566 const char *new_owner;
3567 Unit *u = ASSERT_PTR(userdata);
3568 int r;
3569
3570 assert(message);
3571
3572 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3573
3574 e = sd_bus_message_get_error(message);
3575 if (e) {
3576 if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
3577 r = sd_bus_error_get_errno(e);
3578 log_unit_error_errno(u, r,
3579 "Unexpected error response from GetNameOwner(): %s",
3580 bus_error_message(e, r));
3581 }
3582
3583 new_owner = NULL;
3584 } else {
3585 r = sd_bus_message_read(message, "s", &new_owner);
3586 if (r < 0)
3587 return bus_log_parse_error(r);
3588
3589 assert(!isempty(new_owner));
3590 }
3591
3592 if (UNIT_VTABLE(u)->bus_name_owner_change)
3593 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3594
3595 return 0;
3596 }
3597
3598 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3599 _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
3600 const char *match;
3601 usec_t timeout_usec = 0;
3602 int r;
3603
3604 assert(u);
3605 assert(bus);
3606 assert(name);
3607
3608 if (u->match_bus_slot || u->get_name_owner_slot)
3609 return -EBUSY;
3610
3611 /* NameOwnerChanged and GetNameOwner are used to detect when a service has finished starting up. The
3612 * D-Bus call timeout shouldn't be earlier than that. If we couldn't get the start timeout, the
3613 * default bus call timeout is used. */
3614 if (UNIT_VTABLE(u)->get_timeout_start_usec)
3615 timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);
3616
3617 match = strjoina("type='signal',"
3618 "sender='org.freedesktop.DBus',"
3619 "path='/org/freedesktop/DBus',"
3620 "interface='org.freedesktop.DBus',"
3621 "member='NameOwnerChanged',"
3622 "arg0='", name, "'");
3623
3624 r = bus_add_match_full(
3625 bus,
3626 &u->match_bus_slot,
3627 true,
3628 match,
3629 signal_name_owner_changed,
3630 NULL,
3631 u,
3632 timeout_usec);
3633 if (r < 0)
3634 return r;
3635
3636 r = sd_bus_message_new_method_call(
3637 bus,
3638 &m,
3639 "org.freedesktop.DBus",
3640 "/org/freedesktop/DBus",
3641 "org.freedesktop.DBus",
3642 "GetNameOwner");
3643 if (r < 0)
3644 return r;
3645
3646 r = sd_bus_message_append(m, "s", name);
3647 if (r < 0)
3648 return r;
3649
3650 r = sd_bus_call_async(
3651 bus,
3652 &u->get_name_owner_slot,
3653 m,
3654 get_name_owner_handler,
3655 u,
3656 timeout_usec);
3657
3658 if (r < 0) {
3659 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3660 return r;
3661 }
3662
3663 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3664 return 0;
3665 }
3666
3667 int unit_watch_bus_name(Unit *u, const char *name) {
3668 int r;
3669
3670 assert(u);
3671 assert(name);
3672
3673 /* Watch a specific name on the bus. We only support one unit
3674 * watching each name for now. */
3675
3676 if (u->manager->api_bus) {
3677 /* If the bus is already available, install the match directly.
3678 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3679 r = unit_install_bus_match(u, u->manager->api_bus, name);
3680 if (r < 0)
3681 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3682 }
3683
3684 r = hashmap_put(u->manager->watch_bus, name, u);
3685 if (r < 0) {
3686 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3687 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3688 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
3689 }
3690
3691 return 0;
3692 }
3693
3694 void unit_unwatch_bus_name(Unit *u, const char *name) {
3695 assert(u);
3696 assert(name);
3697
3698 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3699 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3700 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3701 }
3702
3703 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3704 _cleanup_free_ char *e = NULL;
3705 Unit *device;
3706 int r;
3707
3708 assert(u);
3709
3710 /* Adds in links to the device node that this unit is based on */
3711 if (isempty(what))
3712 return 0;
3713
3714 if (!is_device_path(what))
3715 return 0;
3716
3717 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3718 if (!unit_type_supported(UNIT_DEVICE))
3719 return 0;
3720
3721 r = unit_name_from_path(what, ".device", &e);
3722 if (r < 0)
3723 return r;
3724
3725 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3726 if (r < 0)
3727 return r;
3728
3729 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3730 dep = UNIT_BINDS_TO;
3731
3732 return unit_add_two_dependencies(u, UNIT_AFTER,
3733 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3734 device, true, mask);
3735 }
3736
3737 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3738 _cleanup_free_ char *escaped = NULL, *target = NULL;
3739 int r;
3740
3741 assert(u);
3742
3743 if (isempty(what))
3744 return 0;
3745
3746 if (!path_startswith(what, "/dev/"))
3747 return 0;
3748
3749 /* If we don't support devices, then also don't bother with blockdev@.target */
3750 if (!unit_type_supported(UNIT_DEVICE))
3751 return 0;
3752
3753 r = unit_name_path_escape(what, &escaped);
3754 if (r < 0)
3755 return r;
3756
3757 r = unit_name_build("blockdev", escaped, ".target", &target);
3758 if (r < 0)
3759 return r;
3760
3761 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3762 }
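
/* Editor's note (illustrative sketch): unit_name_path_escape() plus unit_name_build() map a
 * device path to a template instance. E.g., for a hypothetical unit backed by /dev/sda1:
 *
 *   unit_name_path_escape("/dev/sda1", &escaped)           -> "dev-sda1"
 *   unit_name_build("blockdev", "dev-sda1", ".target", &t) -> "blockdev@dev-sda1.target"
 *
 * and the unit is then ordered After= that target. */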
3763
3764 int unit_coldplug(Unit *u) {
3765 int r = 0;
3766
3767 assert(u);
3768
3769 /* Make sure we don't enter a loop, when coldplugging recursively. */
3770 if (u->coldplugged)
3771 return 0;
3772
3773 u->coldplugged = true;
3774
3775 STRV_FOREACH(i, u->deserialized_refs)
3776 RET_GATHER(r, bus_unit_track_add_name(u, *i));
3777
3778 u->deserialized_refs = strv_free(u->deserialized_refs);
3779
3780 if (UNIT_VTABLE(u)->coldplug)
3781 RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));
3782
3783 if (u->job)
3784 RET_GATHER(r, job_coldplug(u->job));
3785 if (u->nop_job)
3786 RET_GATHER(r, job_coldplug(u->nop_job));
3787
3788 unit_modify_nft_set(u, /* add = */ true);
3789 return r;
3790 }
3791
3792 void unit_catchup(Unit *u) {
3793 assert(u);
3794
3795 if (UNIT_VTABLE(u)->catchup)
3796 UNIT_VTABLE(u)->catchup(u);
3797
3798 unit_cgroup_catchup(u);
3799 }
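
/* Editor's note (illustrative): unit_coldplug() runs after deserialization (daemon-reload or
 * reexec) to restore each unit's saved state, while unit_catchup() afterwards lets unit types
 * catch up with external changes that happened while the manager wasn't watching. */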
3800
3801 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3802 struct stat st;
3803
3804 if (!path)
3805 return false;
3806
3807 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3808 * are never out-of-date. */
3809 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3810 return false;
3811
3812 if (stat(path, &st) < 0)
3813 /* What, cannot access this anymore? */
3814 return true;
3815
3816 if (path_masked)
3817 /* For masked files check if they are still so */
3818 return !null_or_empty(&st);
3819 else
3820 /* For non-empty files check the mtime */
3821 return timespec_load(&st.st_mtim) > mtime;
3824 }
3825
3826 bool unit_need_daemon_reload(Unit *u) {
3827 _cleanup_strv_free_ char **dropins = NULL;
3828
3829 assert(u);
3830 assert(u->manager);
3831
3832 if (u->manager->unit_file_state_outdated)
3833 return true;
3834
3835 /* For unit files, we allow masking… */
3836 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3837 u->load_state == UNIT_MASKED))
3838 return true;
3839
3840 /* Source paths should not be masked… */
3841 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3842 return true;
3843
3844 if (u->load_state == UNIT_LOADED)
3845 (void) unit_find_dropin_paths(u, &dropins);
3846 if (!strv_equal(u->dropin_paths, dropins))
3847 return true;
3848
3849 /* … any drop-ins that are masked are simply omitted from the list. */
3850 STRV_FOREACH(path, u->dropin_paths)
3851 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3852 return true;
3853
3854 return false;
3855 }
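
/* Editor's note (illustrative, not from the original source): the result of this check is
 * what systemctl surfaces as the NeedDaemonReload property, e.g.:
 *
 *   systemctl show -p NeedDaemonReload example.service
 *
 * reports "yes" once a fragment or drop-in was edited without a subsequent daemon-reload. */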
3856
3857 void unit_reset_failed(Unit *u) {
3858 assert(u);
3859
3860 if (UNIT_VTABLE(u)->reset_failed)
3861 UNIT_VTABLE(u)->reset_failed(u);
3862
3863 ratelimit_reset(&u->start_ratelimit);
3864 u->start_limit_hit = false;
3865 }
3866
3867 Unit *unit_following(Unit *u) {
3868 assert(u);
3869
3870 if (UNIT_VTABLE(u)->following)
3871 return UNIT_VTABLE(u)->following(u);
3872
3873 return NULL;
3874 }
3875
3876 bool unit_stop_pending(Unit *u) {
3877 assert(u);
3878
3879 /* This call does not check the current state of the unit, only
3880 * whether a stop job is queued. It's hence useful to be called from
3881 * state change calls of the unit itself, where the state isn't
3882 * updated yet. This is different from unit_inactive_or_pending()
3883 * which checks both the current state and for a queued job. */
3884
3885 return unit_has_job_type(u, JOB_STOP);
3886 }
3887
3888 bool unit_inactive_or_pending(Unit *u) {
3889 assert(u);
3890
3891 /* Returns true if the unit is inactive or going down */
3892
3893 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3894 return true;
3895
3896 if (unit_stop_pending(u))
3897 return true;
3898
3899 return false;
3900 }
3901
3902 bool unit_active_or_pending(Unit *u) {
3903 assert(u);
3904
3905 /* Returns true if the unit is active or going up */
3906
3907 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3908 return true;
3909
3910 if (u->job &&
3911 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3912 return true;
3913
3914 return false;
3915 }
3916
3917 bool unit_will_restart_default(Unit *u) {
3918 assert(u);
3919
3920 return unit_has_job_type(u, JOB_START);
3921 }
3922
3923 bool unit_will_restart(Unit *u) {
3924 assert(u);
3925
3926 if (!UNIT_VTABLE(u)->will_restart)
3927 return false;
3928
3929 return UNIT_VTABLE(u)->will_restart(u);
3930 }
3931
3932 void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
3933 assert(u);
3934
3935 if (UNIT_VTABLE(u)->notify_cgroup_oom)
3936 UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
3937 }
3938
3939 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3940 _cleanup_set_free_ Set *pid_set = NULL;
3941 int r;
3942
3943 pid_set = set_new(NULL);
3944 if (!pid_set)
3945 return NULL;
3946
3947 /* Exclude the main/control pids from being killed via the cgroup */
3948 if (main_pid > 0) {
3949 r = set_put(pid_set, PID_TO_PTR(main_pid));
3950 if (r < 0)
3951 return NULL;
3952 }
3953
3954 if (control_pid > 0) {
3955 r = set_put(pid_set, PID_TO_PTR(control_pid));
3956 if (r < 0)
3957 return NULL;
3958 }
3959
3960 return TAKE_PTR(pid_set);
3961 }
3962
3963 static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
3964 _cleanup_free_ char *comm = NULL;
3965 Unit *u = ASSERT_PTR(userdata);
3966
3967 (void) pidref_get_comm(pid, &comm);
3968
3969 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
3970 signal_to_string(signo), pid->pid, strna(comm));
3971
3972 return 1;
3973 }
3974
3975 static int kill_or_sigqueue(PidRef* pidref, int signo, int code, int value) {
3976 assert(pidref_is_set(pidref));
3977 assert(SIGNAL_VALID(signo));
3978
3979 switch (code) {
3980
3981 case SI_USER:
3982 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
3983 return pidref_kill(pidref, signo);
3984
3985 case SI_QUEUE:
3986 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
3987 return pidref_sigqueue(pidref, signo, value);
3988
3989 default:
3990 assert_not_reached();
3991 }
3992 }
3993
3994 int unit_kill(
3995 Unit *u,
3996 KillWho who,
3997 int signo,
3998 int code,
3999 int value,
4000 sd_bus_error *error) {
4001
4002 PidRef *main_pid, *control_pid;
4003 bool killed = false;
4004 int ret = 0, r;
4005
4006 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4007 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4008 * stop a service ourselves. */
4009
4010 assert(u);
4011 assert(who >= 0);
4012 assert(who < _KILL_WHO_MAX);
4013 assert(SIGNAL_VALID(signo));
4014 assert(IN_SET(code, SI_USER, SI_QUEUE));
4015
4016 main_pid = unit_main_pid(u);
4017 control_pid = unit_control_pid(u);
4018
4019 if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
4020 return sd_bus_error_setf(error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");
4021
4022 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4023 if (!main_pid)
4024 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4025 if (!pidref_is_set(main_pid))
4026 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4027 }
4028
4029 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4030 if (!control_pid)
4031 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4032 if (!pidref_is_set(control_pid))
4033 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4034 }
4035
4036 if (pidref_is_set(control_pid) &&
4037 IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4038 _cleanup_free_ char *comm = NULL;
4039 (void) pidref_get_comm(control_pid, &comm);
4040
4041 r = kill_or_sigqueue(control_pid, signo, code, value);
4042 if (r < 0) {
4043 ret = r;
4044
4045 /* Report this failure both to the logs and to the client */
4046 sd_bus_error_set_errnof(
4047 error, r,
4048 "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
4049 signal_to_string(signo), control_pid->pid, strna(comm));
4050 log_unit_warning_errno(
4051 u, r,
4052 "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
4053 signal_to_string(signo), control_pid->pid, strna(comm));
4054 } else {
4055 log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
4056 signal_to_string(signo), control_pid->pid, strna(comm));
4057 killed = true;
4058 }
4059 }
4060
4061 if (pidref_is_set(main_pid) &&
4062 IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4063 _cleanup_free_ char *comm = NULL;
4064 (void) pidref_get_comm(main_pid, &comm);
4065
4066 r = kill_or_sigqueue(main_pid, signo, code, value);
4067 if (r < 0) {
4068 if (ret == 0) {
4069 ret = r;
4070
4071 sd_bus_error_set_errnof(
4072 error, r,
4073 "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
4074 signal_to_string(signo), main_pid->pid, strna(comm));
4075 }
4076
4077 log_unit_warning_errno(
4078 u, r,
4079 "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
4080 signal_to_string(signo), main_pid->pid, strna(comm));
4081
4082 } else {
4083 log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
4084 signal_to_string(signo), main_pid->pid, strna(comm));
4085 killed = true;
4086 }
4087 }
4088
4089 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4090 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4091 * resource, we shouldn't allow ourselves to become a target for such allocation sprees). */
4092 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path && code == SI_USER) {
4093 _cleanup_set_free_ Set *pid_set = NULL;
4094
4095 /* Exclude the main/control pids from being killed via the cgroup */
4096 pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
4097 if (!pid_set)
4098 return log_oom();
4099
4100 r = cg_kill_recursive(u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
4101 if (r < 0) {
4102 if (!IN_SET(r, -ESRCH, -ENOENT)) {
4103 if (ret == 0) {
4104 ret = r;
4105
4106 sd_bus_error_set_errnof(
4107 error, r,
4108 "Failed to send signal SIG%s to auxiliary processes: %m",
4109 signal_to_string(signo));
4110 }
4111
4112 log_unit_warning_errno(
4113 u, r,
4114 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4115 signal_to_string(signo));
4116 }
4117 } else
4118 killed = true;
4119 }
4120
4121 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4122 if (ret == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
4123 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");
4124
4125 return ret;
4126 }
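
/* Editor's note (illustrative usage, unit name hypothetical): this is the backend of
 * "systemctl kill". A request like
 *
 *   systemctl kill --kill-who=main --signal=SIGUSR1 example.service
 *
 * roughly maps to unit_kill() with who=KILL_MAIN, signo=SIGUSR1 and code=SI_USER. */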
4127
4128 int unit_following_set(Unit *u, Set **s) {
4129 assert(u);
4130 assert(s);
4131
4132 if (UNIT_VTABLE(u)->following_set)
4133 return UNIT_VTABLE(u)->following_set(u, s);
4134
4135 *s = NULL;
4136 return 0;
4137 }
4138
4139 UnitFileState unit_get_unit_file_state(Unit *u) {
4140 int r;
4141
4142 assert(u);
4143
4144 if (u->unit_file_state < 0 && u->fragment_path) {
4145 r = unit_file_get_state(
4146 u->manager->runtime_scope,
4147 NULL,
4148 u->id,
4149 &u->unit_file_state);
4150 if (r < 0)
4151 u->unit_file_state = UNIT_FILE_BAD;
4152 }
4153
4154 return u->unit_file_state;
4155 }
4156
4157 PresetAction unit_get_unit_file_preset(Unit *u) {
4158 int r;
4159
4160 assert(u);
4161
4162 if (u->unit_file_preset < 0 && u->fragment_path) {
4163 _cleanup_free_ char *bn = NULL;
4164
4165 r = path_extract_filename(u->fragment_path, &bn);
4166 if (r < 0)
4167 return (u->unit_file_preset = r);
4168
4169 if (r == O_DIRECTORY)
4170 return (u->unit_file_preset = -EISDIR);
4171
4172 u->unit_file_preset = unit_file_query_preset(
4173 u->manager->runtime_scope,
4174 NULL,
4175 bn,
4176 NULL);
4177 }
4178
4179 return u->unit_file_preset;
4180 }
4181
4182 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4183 assert(ref);
4184 assert(source);
4185 assert(target);
4186
4187 if (ref->target)
4188 unit_ref_unset(ref);
4189
4190 ref->source = source;
4191 ref->target = target;
4192 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4193 return target;
4194 }
4195
4196 void unit_ref_unset(UnitRef *ref) {
4197 assert(ref);
4198
4199 if (!ref->target)
4200 return;
4201
4202 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4203 * be unreferenced now. */
4204 unit_add_to_gc_queue(ref->target);
4205
4206 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4207 ref->source = ref->target = NULL;
4208 }
4209
4210 static int user_from_unit_name(Unit *u, char **ret) {
4211
4212 static const uint8_t hash_key[] = {
4213 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4214 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4215 };
4216
4217 _cleanup_free_ char *n = NULL;
4218 int r;
4219
4220 r = unit_name_to_prefix(u->id, &n);
4221 if (r < 0)
4222 return r;
4223
4224 if (valid_user_group_name(n, 0)) {
4225 *ret = TAKE_PTR(n);
4226 return 0;
4227 }
4228
4229 /* If we can't use the unit name as a user name, then let's hash it and use that */
4230 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4231 return -ENOMEM;
4232
4233 return 0;
4234 }
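
/* Editor's note (illustrative): for a unit prefix that is not a valid user name, the
 * asprintf() format above yields a name such as "_du89abcdef01234567", i.e. "_du" followed
 * by 16 hex digits of the siphash of the prefix (the digits here are made up). */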
4235
4236 int unit_patch_contexts(Unit *u) {
4237 CGroupContext *cc;
4238 ExecContext *ec;
4239 int r;
4240
4241 assert(u);
4242
4243 /* Patch in the manager defaults into the exec and cgroup
4244 * contexts, _after_ the rest of the settings have been
4245 * initialized */
4246
4247 ec = unit_get_exec_context(u);
4248 if (ec) {
4249 /* This only copies in the ones that need memory */
4250 for (unsigned i = 0; i < _RLIMIT_MAX; i++)
4251 if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
4252 ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
4253 if (!ec->rlimit[i])
4254 return -ENOMEM;
4255 }
4256
4257 if (MANAGER_IS_USER(u->manager) &&
4258 !ec->working_directory) {
4259
4260 r = get_home_dir(&ec->working_directory);
4261 if (r < 0)
4262 return r;
4263
4264 /* Allow user services to run, even if the
4265 * home directory is missing */
4266 ec->working_directory_missing_ok = true;
4267 }
4268
4269 if (ec->private_devices)
4270 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4271
4272 if (ec->protect_kernel_modules)
4273 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4274
4275 if (ec->protect_kernel_logs)
4276 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4277
4278 if (ec->protect_clock)
4279 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4280
4281 if (ec->dynamic_user) {
4282 if (!ec->user) {
4283 r = user_from_unit_name(u, &ec->user);
4284 if (r < 0)
4285 return r;
4286 }
4287
4288 if (!ec->group) {
4289 ec->group = strdup(ec->user);
4290 if (!ec->group)
4291 return -ENOMEM;
4292 }
4293
4294 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4295 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4296 * sandbox. */
4297
4298 ec->private_tmp = true;
4299 ec->remove_ipc = true;
4300 ec->protect_system = PROTECT_SYSTEM_STRICT;
4301 if (ec->protect_home == PROTECT_HOME_NO)
4302 ec->protect_home = PROTECT_HOME_READ_ONLY;
4303
4304 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4305 * them. */
4306 ec->no_new_privileges = true;
4307 ec->restrict_suid_sgid = true;
4308 }
4309
4310 for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
4311 exec_directory_sort(ec->directories + dt);
4312 }
4313
4314 cc = unit_get_cgroup_context(u);
4315 if (cc && ec) {
4316
4317 if (ec->private_devices &&
4318 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4319 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4320
4321 /* Only add these if needed, as they imply that everything else is blocked. */
4322 if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
4323 if (ec->root_image || ec->mount_images) {
4324
4325 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4326 FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
4327 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
4328 if (r < 0)
4329 return r;
4330 }
4331 FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
4332 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
4333 if (r < 0)
4334 return r;
4335 }
4336
4337 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4338 * Same for mapper and verity. */
4339 FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4340 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
4341 if (r < 0)
4342 return r;
4343 }
4344 }
4345
4346 if (ec->protect_clock) {
4347 r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
4348 if (r < 0)
4349 return r;
4350 }
4351
4352 /* If there are encrypted credentials we might need to access the TPM. */
4353 if (exec_context_has_encrypted_credentials(ec)) {
4354 r = cgroup_context_add_device_allow(cc, "char-tpm", CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
4355 if (r < 0)
4356 return r;
4357 }
4358 }
4359 }
4360
4361 return 0;
4362 }
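
/* Editor's note (illustrative): the DynamicUser= hardening applied above corresponds to a
 * unit file carrying, at minimum (sketch, assuming ProtectHome= was left unset):
 *
 *   [Service]
 *   DynamicUser=yes
 *   PrivateTmp=yes
 *   RemoveIPC=yes
 *   ProtectSystem=strict
 *   ProtectHome=read-only
 *   NoNewPrivileges=yes
 *   RestrictSUIDSGID=yes
 */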
4363
4364 ExecContext *unit_get_exec_context(const Unit *u) {
4365 size_t offset;
4366 assert(u);
4367
4368 if (u->type < 0)
4369 return NULL;
4370
4371 offset = UNIT_VTABLE(u)->exec_context_offset;
4372 if (offset <= 0)
4373 return NULL;
4374
4375 return (ExecContext*) ((uint8_t*) u + offset);
4376 }
4377
4378 KillContext *unit_get_kill_context(Unit *u) {
4379 size_t offset;
4380 assert(u);
4381
4382 if (u->type < 0)
4383 return NULL;
4384
4385 offset = UNIT_VTABLE(u)->kill_context_offset;
4386 if (offset <= 0)
4387 return NULL;
4388
4389 return (KillContext*) ((uint8_t*) u + offset);
4390 }
4391
4392 CGroupContext *unit_get_cgroup_context(Unit *u) {
4393 size_t offset;
4394
4395 if (u->type < 0)
4396 return NULL;
4397
4398 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4399 if (offset <= 0)
4400 return NULL;
4401
4402 return (CGroupContext*) ((uint8_t*) u + offset);
4403 }
4404
4405 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4406 size_t offset;
4407
4408 if (u->type < 0)
4409 return NULL;
4410
4411 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4412 if (offset <= 0)
4413 return NULL;
4414
4415 return *(ExecRuntime**) ((uint8_t*) u + offset);
4416 }
4417
4418 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4419 assert(u);
4420
4421 if (UNIT_WRITE_FLAGS_NOOP(flags))
4422 return NULL;
4423
4424 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4425 return u->manager->lookup_paths.transient;
4426
4427 if (flags & UNIT_PERSISTENT)
4428 return u->manager->lookup_paths.persistent_control;
4429
4430 if (flags & UNIT_RUNTIME)
4431 return u->manager->lookup_paths.runtime_control;
4432
4433 return NULL;
4434 }
4435
4436 const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4437 assert(s);
4438 assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
4439 assert(buf);
4440
4441 _cleanup_free_ char *t = NULL;
4442
4443 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4444 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4445 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4446 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4447 * allocations. */
4448
4449 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4450 t = specifier_escape(s);
4451 if (!t)
4452 return NULL;
4453
4454 s = t;
4455 }
4456
4457 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4458 * ExecStart= and friends, i.e. '$' and quotes. */
4459
4460 if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
4461 char *t2;
4462
4463 if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
4464 t2 = strreplace(s, "$", "$$");
4465 if (!t2)
4466 return NULL;
4467 free_and_replace(t, t2);
4468 }
4469
4470 t2 = shell_escape(t ?: s, "\"");
4471 if (!t2)
4472 return NULL;
4473 free_and_replace(t, t2);
4474
4475 s = t;
4476
4477 } else if (flags & UNIT_ESCAPE_C) {
4478 char *t2;
4479
4480 t2 = cescape(s);
4481 if (!t2)
4482 return NULL;
4483 free_and_replace(t, t2);
4484
4485 s = t;
4486 }
4487
4488 *buf = TAKE_PTR(t);
4489 return s;
4490 }
4491
4492 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4493 _cleanup_free_ char *result = NULL;
4494 size_t n = 0;
4495
4496 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4497 * lines in a way suitable for ExecStart= stanzas. */
4498
4499 STRV_FOREACH(i, l) {
4500 _cleanup_free_ char *buf = NULL;
4501 const char *p;
4502 size_t a;
4503 char *q;
4504
4505 p = unit_escape_setting(*i, flags, &buf);
4506 if (!p)
4507 return NULL;
4508
4509 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4510 if (!GREEDY_REALLOC(result, n + a + 1))
4511 return NULL;
4512
4513 q = result + n;
4514 if (n > 0)
4515 *(q++) = ' ';
4516
4517 *(q++) = '"';
4518 q = stpcpy(q, p);
4519 *(q++) = '"';
4520
4521 n += a;
4522 }
4523
4524 if (!GREEDY_REALLOC(result, n + 1))
4525 return NULL;
4526
4527 result[n] = 0;
4528
4529 return TAKE_PTR(result);
4530 }
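
/* Editor's note (hypothetical usage sketch, not part of the original source): */
#if 0
{
/* Escape a command line the way ExecStart= serialization needs it. */
char **argv = STRV_MAKE("echo", "a b", "$HOME");
_cleanup_free_ char *line = unit_concat_strv(argv, UNIT_ESCAPE_EXEC_SYNTAX_ENV);
/* line is now: "echo" "a b" "$$HOME"
 * i.e. every entry quoted, with '$' doubled so the parser won't expand it on load. */
}
#endif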
4531
4532 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4533 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4534 const char *dir, *wrapped;
4535 int r;
4536
4537 assert(u);
4538 assert(name);
4539 assert(data);
4540
4541 if (UNIT_WRITE_FLAGS_NOOP(flags))
4542 return 0;
4543
4544 data = unit_escape_setting(data, flags, &escaped);
4545 if (!data)
4546 return -ENOMEM;
4547
4548 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4549 * previous section header is the same */
4550
4551 if (flags & UNIT_PRIVATE) {
4552 if (!UNIT_VTABLE(u)->private_section)
4553 return -EINVAL;
4554
4555 if (!u->transient_file || u->last_section_private < 0)
4556 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4557 else if (u->last_section_private == 0)
4558 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4559 } else {
4560 if (!u->transient_file || u->last_section_private < 0)
4561 data = strjoina("[Unit]\n", data);
4562 else if (u->last_section_private > 0)
4563 data = strjoina("\n[Unit]\n", data);
4564 }
4565
4566 if (u->transient_file) {
4567 /* When this is a transient unit file that is still being created, let's not create a new drop-in but
4568 * instead write to the transient unit file itself. */
4569 fputs(data, u->transient_file);
4570
4571 if (!endswith(data, "\n"))
4572 fputc('\n', u->transient_file);
4573
4574 /* Remember which section we wrote this entry to */
4575 u->last_section_private = !!(flags & UNIT_PRIVATE);
4576 return 0;
4577 }
4578
4579 dir = unit_drop_in_dir(u, flags);
4580 if (!dir)
4581 return -EINVAL;
4582
4583 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4584 "# or an equivalent operation. Do not edit.\n",
4585 data,
4586 "\n");
4587
4588 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4589 if (r < 0)
4590 return r;
4591
4592 (void) mkdir_p_label(p, 0755);
4593
4594 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4595 * recreate the cache after every drop-in we write. */
4596 if (u->manager->unit_path_cache) {
4597 r = set_put_strdup(&u->manager->unit_path_cache, p);
4598 if (r < 0)
4599 return r;
4600 }
4601
4602 r = write_string_file_atomic_label(q, wrapped);
4603 if (r < 0)
4604 return r;
4605
4606 r = strv_push(&u->dropin_paths, q);
4607 if (r < 0)
4608 return r;
4609 q = NULL;
4610
4611 strv_uniq(u->dropin_paths);
4612
4613 u->dropin_mtime = now(CLOCK_REALTIME);
4614
4615 return 0;
4616 }
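
/* Editor's note (illustrative, paths assume a system manager and flags UNIT_RUNTIME|UNIT_PRIVATE):
 * a call like unit_write_setting(u, UNIT_RUNTIME|UNIT_PRIVATE, "MemoryMax", "MemoryMax=1G") for a
 * hypothetical example.service would end up in a drop-in roughly like:
 *
 *   /run/systemd/system.control/example.service.d/50-MemoryMax.conf:
 *     # This is a drop-in unit file extension, created via "systemctl set-property"
 *     # or an equivalent operation. Do not edit.
 *     [Service]
 *     MemoryMax=1G
 */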
4617
4618 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4619 _cleanup_free_ char *p = NULL;
4620 va_list ap;
4621 int r;
4622
4623 assert(u);
4624 assert(name);
4625 assert(format);
4626
4627 if (UNIT_WRITE_FLAGS_NOOP(flags))
4628 return 0;
4629
4630 va_start(ap, format);
4631 r = vasprintf(&p, format, ap);
4632 va_end(ap);
4633
4634 if (r < 0)
4635 return -ENOMEM;
4636
4637 return unit_write_setting(u, flags, name, p);
4638 }
4639
4640 int unit_make_transient(Unit *u) {
4641 _cleanup_free_ char *path = NULL;
4642 FILE *f;
4643
4644 assert(u);
4645
4646 if (!UNIT_VTABLE(u)->can_transient)
4647 return -EOPNOTSUPP;
4648
4649 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4650
4651 path = path_join(u->manager->lookup_paths.transient, u->id);
4652 if (!path)
4653 return -ENOMEM;
4654
4655 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4656 * creating the transient unit, and is closed in unit_load(), as soon as we start loading the file. */
4657
4658 WITH_UMASK(0022) {
4659 f = fopen(path, "we");
4660 if (!f)
4661 return -errno;
4662 }
4663
4664 safe_fclose(u->transient_file);
4665 u->transient_file = f;
4666
4667 free_and_replace(u->fragment_path, path);
4668
4669 u->source_path = mfree(u->source_path);
4670 u->dropin_paths = strv_free(u->dropin_paths);
4671 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4672
4673 u->load_state = UNIT_STUB;
4674 u->load_error = 0;
4675 u->transient = true;
4676
4677 unit_add_to_dbus_queue(u);
4678 unit_add_to_gc_queue(u);
4679
4680 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4681 u->transient_file);
4682
4683 return 0;
4684 }
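
/* Editor's note (illustrative): this is the mechanism behind systemd-run. E.g.:
 *
 *   systemd-run --unit=example /bin/true
 *
 * makes the system manager create /run/systemd/transient/example.service, keep it open
 * while the properties stream in over D-Bus, and close it once the unit gets loaded. */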
4685
4686 static int log_kill(const PidRef *pid, int sig, void *userdata) {
4687 _cleanup_free_ char *comm = NULL;
4688
4689 assert(pidref_is_set(pid));
4690
4691 (void) pidref_get_comm(pid, &comm);
4692
4693 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4694 * only, like for example systemd's own PAM stub process. */
4695 if (comm && comm[0] == '(')
4696 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4697 * here to let the manager know that a process was killed. */
4698 return 1;
4699
4700 log_unit_notice(userdata,
4701 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4702 pid->pid,
4703 strna(comm),
4704 signal_to_string(sig));
4705
4706 return 1;
4707 }
4708
4709 static int operation_to_signal(
4710 const KillContext *c,
4711 KillOperation k,
4712 bool *ret_noteworthy) {
4713
4714 assert(c);
4715
4716 switch (k) {
4717
4718 case KILL_TERMINATE:
4719 case KILL_TERMINATE_AND_LOG:
4720 *ret_noteworthy = false;
4721 return c->kill_signal;
4722
4723 case KILL_RESTART:
4724 *ret_noteworthy = false;
4725 return restart_kill_signal(c);
4726
4727 case KILL_KILL:
4728 *ret_noteworthy = true;
4729 return c->final_kill_signal;
4730
4731 case KILL_WATCHDOG:
4732 *ret_noteworthy = true;
4733 return c->watchdog_signal;
4734
4735 default:
4736 assert_not_reached();
4737 }
4738 }
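
/* Editor's note (illustrative): the KillContext fields correspond to the unit file settings
 * KillSignal= (default SIGTERM), RestartKillSignal=, FinalKillSignal= (default SIGKILL) and
 * WatchdogSignal= (default SIGABRT); the "noteworthy" flag is what makes the two escalation
 * signals get logged per killed process. */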
4739
4740 int unit_kill_context(
4741 Unit *u,
4742 KillContext *c,
4743 KillOperation k,
4744 PidRef* main_pid,
4745 PidRef* control_pid,
4746 bool main_pid_alien) {
4747
4748 bool wait_for_exit = false, send_sighup;
4749 cg_kill_log_func_t log_func = NULL;
4750 int sig, r;
4751
4752 assert(u);
4753 assert(c);
4754
4755 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4756 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill(),
4757 * which is the implementation behind user-requested killing of unit processes. */
4758
4759 if (c->kill_mode == KILL_NONE)
4760 return 0;
4761
4762 bool noteworthy;
4763 sig = operation_to_signal(c, k, &noteworthy);
4764 if (noteworthy)
4765 log_func = log_kill;
4766
4767 send_sighup =
4768 c->send_sighup &&
4769 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4770 sig != SIGHUP;
4771
4772 if (pidref_is_set(main_pid)) {
4773 if (log_func)
4774 log_func(main_pid, sig, u);
4775
4776 r = pidref_kill_and_sigcont(main_pid, sig);
4777 if (r < 0 && r != -ESRCH) {
4778 _cleanup_free_ char *comm = NULL;
4779 (void) pidref_get_comm(main_pid, &comm);
4780
4781 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid->pid, strna(comm));
4782 } else {
4783 if (!main_pid_alien)
4784 wait_for_exit = true;
4785
4786 if (r != -ESRCH && send_sighup)
4787 (void) pidref_kill(main_pid, SIGHUP);
4788 }
4789 }
4790
4791 if (pidref_is_set(control_pid)) {
4792 if (log_func)
4793 log_func(control_pid, sig, u);
4794
4795 r = pidref_kill_and_sigcont(control_pid, sig);
4796 if (r < 0 && r != -ESRCH) {
4797 _cleanup_free_ char *comm = NULL;
4798 (void) pidref_get_comm(control_pid, &comm);
4799
4800 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid->pid, strna(comm));
4801 } else {
4802 wait_for_exit = true;
4803
4804 if (r != -ESRCH && send_sighup)
4805 (void) pidref_kill(control_pid, SIGHUP);
4806 }
4807 }
4808
4809 if (u->cgroup_path &&
4810 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4811 _cleanup_set_free_ Set *pid_set = NULL;
4812
4813 /* Exclude the main/control pids from being killed via the cgroup */
4814 pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
4815 if (!pid_set)
4816 return -ENOMEM;
4817
4818 r = cg_kill_recursive(
4819 u->cgroup_path,
4820 sig,
4821 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4822 pid_set,
4823 log_func, u);
4824 if (r < 0) {
4825 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4826 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));
4827
4828 } else if (r > 0) {
4829
4830 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4831 * we are running in a container or if this is a delegation unit, simply because cgroup
4832 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4833 * of containers it can be confused easily by left-over directories in the cgroup — which
4834 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4835 * there we get proper events. Hence rely on them. */
4836
4837 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4838 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4839 wait_for_exit = true;
4840
4841 if (send_sighup) {
4842 set_free(pid_set);
4843
4844 pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
4845 if (!pid_set)
4846 return -ENOMEM;
4847
4848 (void) cg_kill_recursive(
4849 u->cgroup_path,
4850 SIGHUP,
4851 CGROUP_IGNORE_SELF,
4852 pid_set,
4853 /* kill_log= */ NULL,
4854 /* userdata= */ NULL);
4855 }
4856 }
4857 }
4858
4859 return wait_for_exit;
4860 }
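
/* Editor's note (illustrative): this is where KillMode= takes effect. With
 * KillMode=control-group the whole cgroup is signalled in every phase; with KillMode=mixed
 * only the main/control processes receive the initial SIGTERM, and the cgroup-wide sweep
 * happens solely in the final KILL_KILL phase. */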
4861
4862 int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
4863 Hashmap **unit_map, **manager_map;
4864 int r;
4865
4866 assert(u);
4867 assert(path);
4868 assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);
4869
4870 unit_map = &u->mounts_for[type];
4871 manager_map = &u->manager->units_needing_mounts_for[type];
4872
4873 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4874 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
4875 * dependency came to be). Additionally, we build a prefix table for all possible prefixes so that
4876 * newly appearing mount units can easily determine which units to make themselves a dependency of. */
4877
4878 if (!path_is_absolute(path))
4879 return -EINVAL;
4880
4881 if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
4882 return 0;
4883
4884 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4885 * only after simplification, since path_is_normalized() rejects paths with '.'.
4886 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4887 _cleanup_free_ char *p = NULL;
4888 r = path_simplify_alloc(path, &p);
4889 if (r < 0)
4890 return r;
4891 path = p;
4892
4893 if (!path_is_normalized(path))
4894 return -EPERM;
4895
4896 UnitDependencyInfo di = {
4897 .origin_mask = mask
4898 };
4899
4900 r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
4901 if (r < 0)
4902 return r;
4903 assert(r > 0);
4904 TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */
4905
4906 char prefix[strlen(path) + 1];
4907 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4908 Set *x;
4909
4910 x = hashmap_get(*manager_map, prefix);
4911 if (!x) {
4912 _cleanup_free_ char *q = NULL;
4913
4914 r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
4915 if (r < 0)
4916 return r;
4917
4918 q = strdup(prefix);
4919 if (!q)
4920 return -ENOMEM;
4921
4922 x = set_new(NULL);
4923 if (!x)
4924 return -ENOMEM;
4925
4926 r = hashmap_put(*manager_map, q, x);
4927 if (r < 0) {
4928 set_free(x);
4929 return r;
4930 }
4931 q = NULL;
4932 }
4933
4934 r = set_put(x, u);
4935 if (r < 0)
4936 return r;
4937 }
4938
4939 return 0;
4940 }
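
/* Editor's note (illustrative, path hypothetical): for RequiresMountsFor=/var/lib/example,
 * the PATH_FOREACH_PREFIX_MORE loop above registers the unit in the manager's prefix table
 * under "/", "/var", "/var/lib" and "/var/lib/example", so that a mount unit appearing for
 * any of those paths can find it. */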
4941
4942 int unit_setup_exec_runtime(Unit *u) {
4943 _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
4944 _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
4945 _cleanup_set_free_ Set *units = NULL;
4946 ExecRuntime **rt;
4947 ExecContext *ec;
4948 size_t offset;
4949 Unit *other;
4950 int r;
4951
4952 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4953 assert(offset > 0);
4954
4955 /* Check if there already is an ExecRuntime for this unit. */
4956 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4957 if (*rt)
4958 return 0;
4959
4960 ec = unit_get_exec_context(u);
4961 assert(ec);
4962
4963 r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
4964 if (r < 0)
4965 return r;
4966
4967 /* Try to get it from somebody else */
4968 SET_FOREACH(other, units) {
4969 r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
4970 if (r < 0)
4971 return r;
4972 if (r > 0)
4973 break;
4974 }
4975
4976 if (!esr) {
4977 r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
4978 if (r < 0)
4979 return r;
4980 }
4981
4982 if (ec->dynamic_user) {
4983 r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
4984 if (r < 0)
4985 return r;
4986 }
4987
4988 r = exec_runtime_make(u, ec, esr, dcreds, rt);
4989 if (r < 0)
4990 return r;
4991
4992 TAKE_PTR(esr);
4993 TAKE_PTR(dcreds);
4994
4995 return r;
4996 }
4997
4998 bool unit_type_supported(UnitType t) {
4999 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
5000 int r;
5001
5002 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5003
5004 if (cache[t] == 0) {
5005 char *e;
5006
5007 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5008
5009 r = getenv_bool(ascii_strupper(e));
5010 if (r < 0 && r != -ENXIO)
5011 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5012
5013 cache[t] = r == 0 ? -1 : 1;
5014 }
5015 if (cache[t] < 0)
5016 return false;
5017
5018 if (!unit_vtable[t]->supported)
5019 return true;
5020
5021 return unit_vtable[t]->supported();
5022 }
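
/* Editor's note (illustrative): the environment variable assembled above allows whole unit
 * types to be disabled, e.g. setting
 *
 *   SYSTEMD_SUPPORT_DEVICE=0
 *
 * in the manager's environment masks out device units entirely; the answer is cached per
 * unit type for the lifetime of the process. */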
5023
5024 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5025 int r;
5026
5027 assert(u);
5028 assert(where);
5029
5030 if (!unit_log_level_test(u, LOG_NOTICE))
5031 return;
5032
5033 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5034 if (r > 0 || r == -ENOTDIR)
5035 return;
5036 if (r < 0) {
5037 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5038 return;
5039 }
5040
5041 log_unit_struct(u, LOG_NOTICE,
5042 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5043 LOG_UNIT_INVOCATION_ID(u),
5044 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5045 "WHERE=%s", where);
5046 }
5047
5048 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5049 _cleanup_free_ char *canonical_where = NULL;
5050 int r;
5051
5052 assert(u);
5053 assert(where);
5054
5055 r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5056 if (r < 0) {
5057 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5058 return 0;
5059 }
5060
5061 /* We will happily ignore a trailing slash (or any redundant slashes) */
5062 if (path_equal(where, canonical_where))
5063 return 0;
5064
5065 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5066 log_unit_struct(u, LOG_ERR,
5067 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5068 LOG_UNIT_INVOCATION_ID(u),
5069 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5070 "WHERE=%s", where);
5071
5072 return -ELOOP;
5073 }
5074
5075 bool unit_is_pristine(Unit *u) {
5076 assert(u);
5077
5078 /* Check whether the unit is already around in one form or another, in a number of different ways. Note that to
5079 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5080 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5081 *
5082 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5083 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5084 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5085 */
5086
5087 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5088 !u->fragment_path &&
5089 !u->source_path &&
5090 !u->job &&
5091 !u->merged_into;
5092 }
5093
5094 PidRef* unit_control_pid(Unit *u) {
5095 assert(u);
5096
5097 if (UNIT_VTABLE(u)->control_pid)
5098 return UNIT_VTABLE(u)->control_pid(u);
5099
5100 return NULL;
5101 }
5102
5103 PidRef* unit_main_pid(Unit *u) {
5104 assert(u);
5105
5106 if (UNIT_VTABLE(u)->main_pid)
5107 return UNIT_VTABLE(u)->main_pid(u);
5108
5109 return NULL;
5110 }
5111
5112 static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
5113 int r;
5114
5115 assert(u);
5116
5117 if (!MANAGER_IS_SYSTEM(u->manager))
5118 return;
5119
5120 CGroupContext *c;
5121 c = unit_get_cgroup_context(u);
5122 if (!c)
5123 return;
5124
5125 if (!u->manager->fw_ctx) {
5126 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
5127 if (r < 0)
5128 return;
5129
5130 assert(u->manager->fw_ctx);
5131 }
5132
5133 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
5134 if (nft_set->source != source)
5135 continue;
5136
5137 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
5138 if (r < 0)
5139 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5140 add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5141 else
5142 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5143 add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5144 }
5145 }
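
/* Editor's note (illustrative; the setting syntax is quoted from memory of the NFTSet=
 * documentation and should be double-checked): a unit carrying e.g.
 *
 *   [Service]
 *   NFTSet=user:inet:filter:my_users
 *
 * gets its resolved UID added to the nftables set "my_users" here when it starts, and
 * removed again when it stops. */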
5146
5147 static void unit_unref_uid_internal(
5148 Unit *u,
5149 uid_t *ref_uid,
5150 bool destroy_now,
5151 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5152
5153 assert(u);
5154 assert(ref_uid);
5155 assert(_manager_unref_uid);
5156
5157 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5158 * gid_t are actually the same type, with the same validity rules.
5159 *
5160 * Drops a reference to UID/GID from a unit. */
5161
5162 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5163 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5164
5165 if (!uid_is_valid(*ref_uid))
5166 return;
5167
5168 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5169 *ref_uid = UID_INVALID;
5170 }
5171
5172 static void unit_unref_uid(Unit *u, bool destroy_now) {
5173 assert(u);
5174
5175 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);
5176
5177 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5178 }
5179
5180 static void unit_unref_gid(Unit *u, bool destroy_now) {
5181 assert(u);
5182
5183 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);
5184
5185 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5186 }
5187
5188 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5189 assert(u);
5190
5191 unit_unref_uid(u, destroy_now);
5192 unit_unref_gid(u, destroy_now);
5193 }
5194
5195 static int unit_ref_uid_internal(
5196 Unit *u,
5197 uid_t *ref_uid,
5198 uid_t uid,
5199 bool clean_ipc,
5200 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5201
5202 int r;
5203
5204 assert(u);
5205 assert(ref_uid);
5206 assert(uid_is_valid(uid));
5207 assert(_manager_ref_uid);
5208
5209 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5210 * are actually the same type, and have the same validity rules.
5211 *
5212 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5213 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5214 * drops to zero. */
5215
5216 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5217 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5218
5219 if (*ref_uid == uid)
5220 return 0;
5221
5222 if (uid_is_valid(*ref_uid)) /* Already set? */
5223 return -EBUSY;
5224
5225 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5226 if (r < 0)
5227 return r;
5228
5229 *ref_uid = uid;
5230 return 1;
5231 }
5232
5233 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5234 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5235 }
5236
5237 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5238 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5239 }
5240
5241 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5242 int r = 0, q = 0;
5243
5244 assert(u);
5245
5246 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5247
5248 if (uid_is_valid(uid)) {
5249 r = unit_ref_uid(u, uid, clean_ipc);
5250 if (r < 0)
5251 return r;
5252 }
5253
5254 if (gid_is_valid(gid)) {
5255 q = unit_ref_gid(u, gid, clean_ipc);
5256 if (q < 0) {
5257 if (r > 0)
5258 unit_unref_uid(u, false);
5259
5260 return q;
5261 }
5262 }
5263
5264 return r > 0 || q > 0;
5265 }
5266
5267 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5268 ExecContext *c;
5269 int r;
5270
5271 assert(u);
5272
5273 c = unit_get_exec_context(u);
5274
5275 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5276 if (r < 0)
5277 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5278
5279 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
5280 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);
5281
5282 return r;
5283 }
5284
5285 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5286 int r;
5287
5288 assert(u);
5289
5290 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group names
5291 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5292 * objects when no service references the UID/GID anymore. */
5293
5294 r = unit_ref_uid_gid(u, uid, gid);
5295 if (r > 0)
5296 unit_add_to_dbus_queue(u);
5297 }
5298
5299 int unit_acquire_invocation_id(Unit *u) {
5300 sd_id128_t id;
5301 int r;
5302
5303 assert(u);
5304
5305 r = sd_id128_randomize(&id);
5306 if (r < 0)
5307 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5308
5309 r = unit_set_invocation_id(u, id);
5310 if (r < 0)
5311 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5312
5313 unit_add_to_dbus_queue(u);
5314 return 0;
5315 }
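
/* Editor's note (illustrative): the invocation ID generated here is what services see as
 * $INVOCATION_ID and what journal entries carry as _SYSTEMD_INVOCATION_ID, e.g.:
 *
 *   journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show -p InvocationID --value example.service)"
 *
 * shows the log of exactly one runtime cycle of the (hypothetical) example.service. */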
5316
5317 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5318 const char *confirm_spawn;
5319 int r;
5320
5321 assert(u);
5322 assert(p);
5323
5324 /* Copy parameters from manager */
5325 r = manager_get_effective_environment(u->manager, &p->environment);
5326 if (r < 0)
5327 return r;
5328
5329 p->runtime_scope = u->manager->runtime_scope;
5330
5331 confirm_spawn = manager_get_confirm_spawn(u->manager);
5332 if (confirm_spawn) {
5333 p->confirm_spawn = strdup(confirm_spawn);
5334 if (!p->confirm_spawn)
5335 return -ENOMEM;
5336 }
5337
5338 p->cgroup_supported = u->manager->cgroup_supported;
5339 p->prefix = u->manager->prefix;
5340 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5341
5342 /* Copy parameters from unit */
5343 p->cgroup_path = u->cgroup_path;
5344 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5345
5346 p->received_credentials_directory = u->manager->received_credentials_directory;
5347 p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;
5348
5349 p->shall_confirm_spawn = u->manager->confirm_spawn;
5350
5351 p->fallback_smack_process_label = u->manager->defaults.smack_process_label;
5352
5353 if (u->manager->restrict_fs && p->bpf_outer_map_fd < 0) {
5354 int fd = lsm_bpf_map_restrict_fs_fd(u);
5355 if (fd < 0)
5356 return fd;
5357
5358 p->bpf_outer_map_fd = fd;
5359 }
5360
5361 p->user_lookup_fd = u->manager->user_lookup_fds[1];
5362
5363 p->cgroup_id = u->cgroup_id;
5364 p->invocation_id = u->invocation_id;
5365 sd_id128_to_string(p->invocation_id, p->invocation_id_string);
5366 p->unit_id = strdup(u->id);
5367 if (!p->unit_id)
5368 return -ENOMEM;
5369
5370 return 0;
5371 }
5372
5373 int unit_fork_helper_process(Unit *u, const char *name, PidRef *ret) {
5374 pid_t pid;
5375 int r;
5376
5377 assert(u);
5378 assert(ret);
5379
5380 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5381 * and > 0 in the parent, in which case the ret parameter is filled in with the child's PidRef. */
5382
5383 (void) unit_realize_cgroup(u);
5384
5385 r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
5386 if (r < 0)
5387 return r;
5388 if (r > 0) {
5389 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
5390 int q;
5391
5392 /* Parent */
5393
5394 q = pidref_set_pid(&pidref, pid);
5395 if (q < 0)
5396 return q;
5397
5398 *ret = TAKE_PIDREF(pidref);
5399 return r;
5400 }
5401
5402 /* Child */
5403
5404 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
5405 (void) ignore_signals(SIGPIPE);
5406
5407 if (u->cgroup_path) {
5408 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5409 if (r < 0) {
5410 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
5411 _exit(EXIT_CGROUP);
5412 }
5413 }
5414
5415 return 0;
5416 }
5417
5418 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
5419 _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
5420 int r;
5421
5422 assert(u);
5423 assert(ret_pid);
5424
5425 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5426 if (r < 0)
5427 return r;
5428 if (r == 0) {
5429 int ret = EXIT_SUCCESS;
5430
5431 STRV_FOREACH(i, paths) {
5432 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5433 if (r < 0) {
5434 log_error_errno(r, "Failed to remove '%s': %m", *i);
5435 ret = EXIT_FAILURE;
5436 }
5437 }
5438
5439 _exit(ret);
5440 }
5441
5442 r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
5443 if (r < 0)
5444 return r;
5445
5446 *ret_pid = TAKE_PIDREF(pid);
5447 return 0;
5448 }
5449
5450 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5451 assert(deps);
5452 assert(other);
5453
5454 if (di.origin_mask == 0 && di.destination_mask == 0)
5455 /* No bit set anymore, let's drop the whole entry */
5456 assert_se(hashmap_remove(deps, other));
5457 else
5458 /* Mask was reduced, let's update the entry */
5459 assert_se(hashmap_update(deps, other, di.data) == 0);
5460 }
5461
5462 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5463 Hashmap *deps;
5464 assert(u);
5465
5466 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5467
5468 if (mask == 0)
5469 return;
5470
5471 HASHMAP_FOREACH(deps, u->dependencies) {
5472 bool done;
5473
5474 do {
5475 UnitDependencyInfo di;
5476 Unit *other;
5477
5478 done = true;
5479
5480 HASHMAP_FOREACH_KEY(di.data, other, deps) {
5481 Hashmap *other_deps;
5482
5483 if (FLAGS_SET(~mask, di.origin_mask))
5484 continue;
5485
5486 di.origin_mask &= ~mask;
5487 unit_update_dependency_mask(deps, other, di);
5488
5489 /* We updated the dependency from our unit to the other unit now. But most
5490 * dependencies imply a reverse dependency. Hence, let's delete that one
5491 * too. For that we go through all dependency types on the other unit and
5492 * delete all those which point to us and have the right mask set. */
5493
5494 HASHMAP_FOREACH(other_deps, other->dependencies) {
5495 UnitDependencyInfo dj;
5496
5497 dj.data = hashmap_get(other_deps, u);
5498 if (FLAGS_SET(~mask, dj.destination_mask))
5499 continue;
5500
5501 dj.destination_mask &= ~mask;
5502 unit_update_dependency_mask(other_deps, u, dj);
5503 }
5504
5505 unit_add_to_gc_queue(other);
5506
5507 /* The unit 'other' may no longer be wanted by the unit 'u'. */
5508 unit_submit_to_stop_when_unneeded_queue(other);
5509
5510 done = false;
5511 break;
5512 }
5513
5514 } while (!done);
5515 }
5516 }
5517
5518 static int unit_get_invocation_path(Unit *u, char **ret) {
5519 char *p;
5520 int r;
5521
5522 assert(u);
5523 assert(ret);
5524
5525 if (MANAGER_IS_SYSTEM(u->manager))
5526 p = strjoin("/run/systemd/units/invocation:", u->id);
5527 else {
5528 _cleanup_free_ char *user_path = NULL;
5529 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5530 if (r < 0)
5531 return r;
5532 p = strjoin(user_path, u->id);
5533 }
5534
5535 if (!p)
5536 return -ENOMEM;
5537
5538 *ret = p;
5539 return 0;
5540 }
5541
5542 static int unit_export_invocation_id(Unit *u) {
5543 _cleanup_free_ char *p = NULL;
5544 int r;
5545
5546 assert(u);
5547
5548 if (u->exported_invocation_id)
5549 return 0;
5550
5551 if (sd_id128_is_null(u->invocation_id))
5552 return 0;
5553
5554 r = unit_get_invocation_path(u, &p);
5555 if (r < 0)
5556 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5557
5558 r = symlink_atomic_label(u->invocation_id_string, p);
5559 if (r < 0)
5560 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5561
5562 u->exported_invocation_id = true;
5563 return 0;
5564 }
5565
5566 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5567 const char *p;
5568 char buf[2];
5569 int r;
5570
5571 assert(u);
5572 assert(c);
5573
5574 if (u->exported_log_level_max)
5575 return 0;
5576
5577 if (c->log_level_max < 0)
5578 return 0;
5579
5580 assert(c->log_level_max <= 7);
5581
5582 buf[0] = '0' + c->log_level_max;
5583 buf[1] = 0;
5584
5585 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5586 r = symlink_atomic(buf, p);
5587 if (r < 0)
5588 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5589
5590 u->exported_log_level_max = true;
5591 return 0;
5592 }
5593
5594 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5595 _cleanup_close_ int fd = -EBADF;
5596 struct iovec *iovec;
5597 const char *p;
5598 char *pattern;
5599 le64_t *sizes;
5600 ssize_t n;
5601 int r;
5602
5603 if (u->exported_log_extra_fields)
5604 return 0;
5605
5606 if (c->n_log_extra_fields <= 0)
5607 return 0;
5608
5609 sizes = newa(le64_t, c->n_log_extra_fields);
5610 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5611
5612 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5613 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5614
5615 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5616 iovec[i*2+1] = c->log_extra_fields[i];
5617 }
5618
5619 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5620 pattern = strjoina(p, ".XXXXXX");
5621
5622 fd = mkostemp_safe(pattern);
5623 if (fd < 0)
5624 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5625
5626 n = writev(fd, iovec, c->n_log_extra_fields*2);
5627 if (n < 0) {
5628 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5629 goto fail;
5630 }
5631
5632 (void) fchmod(fd, 0644);
5633
5634 if (rename(pattern, p) < 0) {
5635 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5636 goto fail;
5637 }
5638
5639 u->exported_log_extra_fields = true;
5640 return 0;
5641
5642 fail:
5643 (void) unlink(pattern);
5644 return r;
5645 }
5646
5647 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5648 _cleanup_free_ char *buf = NULL;
5649 const char *p;
5650 int r;
5651
5652 assert(u);
5653 assert(c);
5654
5655 if (u->exported_log_ratelimit_interval)
5656 return 0;
5657
5658 if (c->log_ratelimit_interval_usec == 0)
5659 return 0;
5660
5661 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5662
5663 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5664 return log_oom();
5665
5666 r = symlink_atomic(buf, p);
5667 if (r < 0)
5668 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5669
5670 u->exported_log_ratelimit_interval = true;
5671 return 0;
5672 }
5673
5674 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5675 _cleanup_free_ char *buf = NULL;
5676 const char *p;
5677 int r;
5678
5679 assert(u);
5680 assert(c);
5681
5682 if (u->exported_log_ratelimit_burst)
5683 return 0;
5684
5685 if (c->log_ratelimit_burst == 0)
5686 return 0;
5687
5688 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5689
5690 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5691 return log_oom();
5692
5693 r = symlink_atomic(buf, p);
5694 if (r < 0)
5695 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5696
5697 u->exported_log_ratelimit_burst = true;
5698 return 0;
5699 }
5700
5701 void unit_export_state_files(Unit *u) {
5702 const ExecContext *c;
5703
5704 assert(u);
5705
5706 if (!u->id)
5707 return;
5708
5709 if (MANAGER_IS_TEST_RUN(u->manager))
5710 return;
5711
5712 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5713 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5714 * the IPC system itself and PID 1 also log to the journal.
5715 *
5716 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
5717 * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5718 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5719 * namespace at least.
5720 *
5721 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5722 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5723 * them with one. */
5724
5725 (void) unit_export_invocation_id(u);
5726
5727 if (!MANAGER_IS_SYSTEM(u->manager))
5728 return;
5729
5730 c = unit_get_exec_context(u);
5731 if (c) {
5732 (void) unit_export_log_level_max(u, c);
5733 (void) unit_export_log_extra_fields(u, c);
5734 (void) unit_export_log_ratelimit_interval(u, c);
5735 (void) unit_export_log_ratelimit_burst(u, c);
5736 }
5737 }
5738
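/* A minimal sketch of the read side of the symlink trick described above, not part of unit.c proper: the
 * value lives in the symlink target, hence a single readlink() call retrieves it. The helper name is made
 * up; it assumes only readlink(2), snprintf(3) and PATH_MAX from <limits.h>. */
static int read_exported_unit_property(const char *property, const char *unit_id, char *buf, size_t bufsize) {
        char p[PATH_MAX];
        ssize_t n;

        /* e.g. /run/systemd/units/log-level-max:foo.service */
        if ((size_t) snprintf(p, sizeof(p), "/run/systemd/units/%s:%s", property, unit_id) >= sizeof(p))
                return -ENAMETOOLONG;

        n = readlink(p, buf, bufsize - 1); /* one system call to read the value back */
        if (n < 0)
                return -errno;

        buf[n] = 0; /* readlink() does not NUL-terminate */
        return 0;
}
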
5739 void unit_unlink_state_files(Unit *u) {
5740 const char *p;
5741
5742 assert(u);
5743
5744 if (!u->id)
5745 return;
5746
5747 /* Undoes the effect of unit_export_state_files() */
5748
5749 if (u->exported_invocation_id) {
5750 _cleanup_free_ char *invocation_path = NULL;
5751 int r = unit_get_invocation_path(u, &invocation_path);
5752 if (r >= 0) {
5753 (void) unlink(invocation_path);
5754 u->exported_invocation_id = false;
5755 }
5756 }
5757
5758 if (!MANAGER_IS_SYSTEM(u->manager))
5759 return;
5760
5761 if (u->exported_log_level_max) {
5762 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5763 (void) unlink(p);
5764
5765 u->exported_log_level_max = false;
5766 }
5767
5768 if (u->exported_log_extra_fields) {
5769 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5770 (void) unlink(p);
5771
5772 u->exported_log_extra_fields = false;
5773 }
5774
5775 if (u->exported_log_ratelimit_interval) {
5776 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5777 (void) unlink(p);
5778
5779 u->exported_log_ratelimit_interval = false;
5780 }
5781
5782 if (u->exported_log_ratelimit_burst) {
5783 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5784 (void) unlink(p);
5785
5786 u->exported_log_ratelimit_burst = false;
5787 }
5788 }
5789
5790 int unit_prepare_exec(Unit *u) {
5791 int r;
5792
5793 assert(u);
5794
5795 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5796 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5797 r = bpf_firewall_load_custom(u);
5798 if (r < 0)
5799 return r;
5800
5801 /* Prepares everything so that we can fork off a process for this unit */
5802
5803 (void) unit_realize_cgroup(u);
5804
5805 if (u->reset_accounting) {
5806 (void) unit_reset_accounting(u);
5807 u->reset_accounting = false;
5808 }
5809
5810 unit_export_state_files(u);
5811
5812 r = unit_setup_exec_runtime(u);
5813 if (r < 0)
5814 return r;
5815
5816 return 0;
5817 }
5818
5819 static bool ignore_leftover_process(const char *comm) {
5820 return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
5821 }
5822
5823 int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
5824 _cleanup_free_ char *comm = NULL;
5825
5826 assert(pidref_is_set(pid));
5827
5828 (void) pidref_get_comm(pid, &comm);
5829
5830 if (ignore_leftover_process(comm))
5831 return 0;
5832
5833 /* During start we print a warning */
5834
5835 log_unit_warning(userdata,
5836 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5837 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5838 pid->pid, strna(comm));
5839
5840 return 1;
5841 }
5842
5843 int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
5844 _cleanup_free_ char *comm = NULL;
5845
5846 assert(pidref_is_set(pid));
5847
5848 (void) pidref_get_comm(pid, &comm);
5849
5850 if (ignore_leftover_process(comm))
5851 return 0;
5852
5853 /* During stop we only print an informational message */
5854
5855 log_unit_info(userdata,
5856 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5857 pid->pid, strna(comm));
5858
5859 return 1;
5860 }
5861
5862 int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
5863 assert(u);
5864
5865 (void) unit_pick_cgroup_path(u);
5866
5867 if (!u->cgroup_path)
5868 return 0;
5869
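/* Note: with sig=0 no signal is actually delivered; cg_kill_recursive() is used here purely to
 * enumerate the cgroup's remaining processes and invoke log_func on each of them. */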
5870 return cg_kill_recursive(
5871 u->cgroup_path,
5872 /* sig= */ 0,
5873 /* flags= */ 0,
5874 /* set= */ NULL,
5875 log_func,
5876 u);
5877 }
5878
5879 bool unit_needs_console(Unit *u) {
5880 ExecContext *ec;
5881 UnitActiveState state;
5882
5883 assert(u);
5884
5885 state = unit_active_state(u);
5886
5887 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5888 return false;
5889
5890 if (UNIT_VTABLE(u)->needs_console)
5891 return UNIT_VTABLE(u)->needs_console(u);
5892
5893 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5894 ec = unit_get_exec_context(u);
5895 if (!ec)
5896 return false;
5897
5898 return exec_context_may_touch_console(ec);
5899 }
5900
5901 int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
5902 int r;
5903
5904 assert(u);
5905
5906 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5907 * and not a kernel thread either */
5908
5909 /* First, a simple range check */
5910 if (!pidref_is_set(pid))
5911 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");
5912
5913 /* Some extra safety check */
5914 if (pid->pid == 1 || pidref_is_self(pid))
5915 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);
5916
5917 /* Don't even begin to bother with kernel threads */
5918 r = pidref_is_kernel_thread(pid);
5919 if (r == -ESRCH)
5920 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
5921 if (r < 0)
5922 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
5923 if (r > 0)
5924 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);
5925
5926 return 0;
5927 }
5928
5929 void unit_log_success(Unit *u) {
5930 assert(u);
5931
5932 /* Let's show the "Deactivated successfully." message at debug level (when this is a user manager) rather than
5933 * at info level. This message has low informational value for regular users and it might be a bit overwhelming
5934 * on a system with a lot of devices. */
5935 log_unit_struct(u,
5936 MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
5937 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5938 LOG_UNIT_INVOCATION_ID(u),
5939 LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
5940 }
5941
5942 void unit_log_failure(Unit *u, const char *result) {
5943 assert(u);
5944 assert(result);
5945
5946 log_unit_struct(u, LOG_WARNING,
5947 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5948 LOG_UNIT_INVOCATION_ID(u),
5949 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5950 "UNIT_RESULT=%s", result);
5951 }
5952
5953 void unit_log_skip(Unit *u, const char *result) {
5954 assert(u);
5955 assert(result);
5956
5957 log_unit_struct(u, LOG_INFO,
5958 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5959 LOG_UNIT_INVOCATION_ID(u),
5960 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5961 "UNIT_RESULT=%s", result);
5962 }
5963
5964 void unit_log_process_exit(
5965 Unit *u,
5966 const char *kind,
5967 const char *command,
5968 bool success,
5969 int code,
5970 int status) {
5971
5972 int level;
5973
5974 assert(u);
5975 assert(kind);
5976
5977 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5978 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5979 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5980 * WARNING. */
5981 if (success)
5982 level = LOG_DEBUG;
5983 else if (code == CLD_EXITED)
5984 level = LOG_NOTICE;
5985 else
5986 level = LOG_WARNING;
5987
5988 log_unit_struct(u, level,
5989 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5990 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
5991 kind,
5992 sigchld_code_to_string(code), status,
5993 strna(code == CLD_EXITED
5994 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5995 : signal_to_string(status)),
5996 success ? " (success)" : ""),
5997 "EXIT_CODE=%s", sigchld_code_to_string(code),
5998 "EXIT_STATUS=%i", status,
5999 "COMMAND=%s", strna(command),
6000 LOG_UNIT_INVOCATION_ID(u));
6001 }
6002
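/* For example, a service whose main process exits with status 1 would be logged at NOTICE level roughly as
 * "Main process exited, code=exited, status=1/FAILURE" (assuming the caller passes kind="Main process",
 * code=CLD_EXITED, status=1, as derived from the format string above). */
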
6003 int unit_exit_status(Unit *u) {
6004 assert(u);
6005
6006 /* Returns the exit status to propagate for the most recent cycle of this unit: a value in the range 0…255
6007 * if there's something to propagate, -EOPNOTSUPP if the concept does not apply to this unit type, -ENODATA
6008 * if no data is currently known (for example because the unit hasn't deactivated yet), and -EBADE if the main
6009 * service process has exited abnormally (signal/coredump). */
6010
6011 if (!UNIT_VTABLE(u)->exit_status)
6012 return -EOPNOTSUPP;
6013
6014 return UNIT_VTABLE(u)->exit_status(u);
6015 }
6016
6017 int unit_failure_action_exit_status(Unit *u) {
6018 int r;
6019
6020 assert(u);
6021
6022 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6023
6024 if (u->failure_action_exit_status >= 0)
6025 return u->failure_action_exit_status;
6026
6027 r = unit_exit_status(u);
6028 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6029 return 255;
6030
6031 return r;
6032 }
6033
6034 int unit_success_action_exit_status(Unit *u) {
6035 int r;
6036
6037 assert(u);
6038
6039 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6040
6041 if (u->success_action_exit_status >= 0)
6042 return u->success_action_exit_status;
6043
6044 r = unit_exit_status(u);
6045 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6046 return 255;
6047
6048 return r;
6049 }
6050
6051 int unit_test_trigger_loaded(Unit *u) {
6052 Unit *trigger;
6053
6054 /* Tests whether the unit to trigger is loaded */
6055
6056 trigger = UNIT_TRIGGER(u);
6057 if (!trigger)
6058 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6059 "Refusing to start, no unit to trigger.");
6060 if (trigger->load_state != UNIT_LOADED)
6061 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6062 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6063
6064 return 0;
6065 }
6066
6067 void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
6068 assert(u);
6069 assert(context);
6070
6071 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6072 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
6073 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6074
6075 exec_context_destroy_credentials(u);
6076 exec_context_destroy_mount_ns_dir(u);
6077 }
6078
6079 int unit_clean(Unit *u, ExecCleanMask mask) {
6080 UnitActiveState state;
6081
6082 assert(u);
6083
6084 /* Special return values:
6085 *
6086 * -EOPNOTSUPP → cleaning not supported for this unit type
6087 * -EUNATCH → cleaning not defined for this resource type
6088 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6089 * a job queued or similar
6090 */
6091
6092 if (!UNIT_VTABLE(u)->clean)
6093 return -EOPNOTSUPP;
6094
6095 if (mask == 0)
6096 return -EUNATCH;
6097
6098 if (u->load_state != UNIT_LOADED)
6099 return -EBUSY;
6100
6101 if (u->job)
6102 return -EBUSY;
6103
6104 state = unit_active_state(u);
6105 if (state != UNIT_INACTIVE)
6106 return -EBUSY;
6107
6108 return UNIT_VTABLE(u)->clean(u, mask);
6109 }
6110
6111 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6112 assert(u);
6113
6114 if (!UNIT_VTABLE(u)->clean ||
6115 u->load_state != UNIT_LOADED) {
6116 *ret = 0;
6117 return 0;
6118 }
6119
6120 /* When the clean() method is set, can_clean() really should be set too */
6121 assert(UNIT_VTABLE(u)->can_clean);
6122
6123 return UNIT_VTABLE(u)->can_clean(u, ret);
6124 }
6125
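/* Illustrative call sequence, not part of unit.c proper (the helper is hypothetical): first query what
 * can be cleaned, then clean all of it, mirroring the return conventions documented in unit_clean(). */
static int example_clean_everything(Unit *u) {
        ExecCleanMask mask;
        int r;

        r = unit_can_clean(u, &mask);
        if (r < 0)
                return r;
        if (mask == 0)
                return -EUNATCH; /* nothing cleanable for this unit */

        return unit_clean(u, mask);
}
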
6126 bool unit_can_start_refuse_manual(Unit *u) {
6127 return unit_can_start(u) && !u->refuse_manual_start;
6128 }
6129
6130 bool unit_can_stop_refuse_manual(Unit *u) {
6131 return unit_can_stop(u) && !u->refuse_manual_stop;
6132 }
6133
6134 bool unit_can_isolate_refuse_manual(Unit *u) {
6135 return unit_can_isolate(u) && !u->refuse_manual_start;
6136 }
6137
6138 bool unit_can_freeze(Unit *u) {
6139 assert(u);
6140
6141 if (UNIT_VTABLE(u)->can_freeze)
6142 return UNIT_VTABLE(u)->can_freeze(u);
6143
6144 return UNIT_VTABLE(u)->freeze;
6145 }
6146
6147 void unit_frozen(Unit *u) {
6148 assert(u);
6149
6150 u->freezer_state = FREEZER_FROZEN;
6151
6152 bus_unit_send_pending_freezer_message(u, false);
6153 }
6154
6155 void unit_thawed(Unit *u) {
6156 assert(u);
6157
6158 u->freezer_state = FREEZER_RUNNING;
6159
6160 bus_unit_send_pending_freezer_message(u, false);
6161 }
6162
6163 static int unit_freezer_action(Unit *u, FreezerAction action) {
6164 UnitActiveState s;
6165 int (*method)(Unit*);
6166 int r;
6167
6168 assert(u);
6169 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
6170
6171 method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
6172 if (!method || !cg_freezer_supported())
6173 return -EOPNOTSUPP;
6174
6175 if (u->job)
6176 return -EBUSY;
6177
6178 if (u->load_state != UNIT_LOADED)
6179 return -EHOSTDOWN;
6180
6181 s = unit_active_state(u);
6182 if (s != UNIT_ACTIVE)
6183 return -EHOSTDOWN;
6184
6185 if ((IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING) && action == FREEZER_FREEZE) ||
6186 (u->freezer_state == FREEZER_THAWING && action == FREEZER_THAW))
6187 return -EALREADY;
6188
6189 r = method(u);
6190 if (r <= 0)
6191 return r;
6192
6193 assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING));
6194
6195 return 1;
6196 }
6197
6198 int unit_freeze(Unit *u) {
6199 return unit_freezer_action(u, FREEZER_FREEZE);
6200 }
6201
6202 int unit_thaw(Unit *u) {
6203 return unit_freezer_action(u, FREEZER_THAW);
6204 }
6205
6206 /* Wrappers around low-level cgroup freezer operations common for service and scope units */
6207 int unit_freeze_vtable_common(Unit *u) {
6208 return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
6209 }
6210
6211 int unit_thaw_vtable_common(Unit *u) {
6212 return unit_cgroup_freezer_action(u, FREEZER_THAW);
6213 }
6214
6215 Condition *unit_find_failed_condition(Unit *u) {
6216 Condition *failed_trigger = NULL;
6217 bool has_succeeded_trigger = false;
6218
6219 if (u->condition_result)
6220 return NULL;
6221
6222 LIST_FOREACH(conditions, c, u->conditions)
6223 if (c->trigger) {
6224 if (c->result == CONDITION_SUCCEEDED)
6225 has_succeeded_trigger = true;
6226 else if (!failed_trigger)
6227 failed_trigger = c;
6228 } else if (c->result != CONDITION_SUCCEEDED)
6229 return c;
6230
6231 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6232 }
6233
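/* Illustrative example of the semantics above: given
 *
 *     ConditionPathExists=|/run/a    (triggering, failed)
 *     ConditionPathExists=|/run/b    (triggering, succeeded)
 *     ConditionPathExists=/run/c     (regular, failed)
 *
 * the regular condition on /run/c is returned: triggering ("|") conditions are OR-ed, hence the one
 * succeeded trigger outweighs the failed one, and only the regular failure remains reportable. */
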
6234 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6235 [COLLECT_INACTIVE] = "inactive",
6236 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6237 };
6238
6239 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6240
6241 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6242 Unit *i;
6243
6244 assert(u);
6245
6246 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6247 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6248 * is NULL the first entry found), or NULL if not found. */
6249
6250 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6251 if (!other || other == i)
6252 return i;
6253
6254 return NULL;
6255 }
6256
6257 int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
6258 _cleanup_free_ Unit **array = NULL;
6259 size_t n = 0;
6260 Unit *other;
6261
6262 assert(u);
6263 assert(ret_array);
6264
6265 /* Gets a list of units matching a specific atom as an array. This is useful when iterating through
6266 * dependencies while modifying them: the array is an "atomic snapshot" of sorts that can be read
6267 * while the dependency table is continuously updated. */
6268
6269 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6270 if (!GREEDY_REALLOC(array, n + 1))
6271 return -ENOMEM;
6272
6273 array[n++] = other;
6274 }
6275
6276 *ret_array = TAKE_PTR(array);
6277
6278 assert(n <= INT_MAX);
6279 return (int) n;
6280 }
6281
6282 int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
6283 _cleanup_set_free_ Set *units = NULL, *queue = NULL;
6284 Unit *other;
6285 int r;
6286
6287 assert(u);
6288 assert(ret);
6289
6290 /* Similar to unit_get_dependency_array(), but also follows the same dependency atom transitively, i.e. through the units found. */
6291
6292 do {
6293 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6294 r = set_ensure_put(&units, NULL, other);
6295 if (r < 0)
6296 return r;
6297 if (r == 0)
6298 continue;
6299 r = set_ensure_put(&queue, NULL, other);
6300 if (r < 0)
6301 return r;
6302 }
6303 } while ((u = set_steal_first(queue)));
6304
6305 *ret = TAKE_PTR(units);
6306 return 0;
6307 }
6308
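/* A minimal usage sketch, not part of unit.c proper: collect the transitive closure of everything this
 * unit is ordered after and log it. UNIT_ATOM_AFTER is just an example atom; the helper name is made up. */
static void log_transitive_after_closure(Unit *u) {
        _cleanup_set_free_ Set *deps = NULL;
        Unit *other;

        if (unit_get_transitive_dependency_set(u, UNIT_ATOM_AFTER, &deps) < 0)
                return;

        SET_FOREACH(other, deps)
                log_unit_debug(u, "Transitively ordered after: %s", other->id);
}
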
6309 int unit_arm_timer(
6310 Unit *u,
6311 sd_event_source **source,
6312 bool relative,
6313 usec_t usec,
6314 sd_event_time_handler_t handler) {
6315
6316 int r;
6317
6318 assert(u);
6319 assert(source);
6320 assert(handler);
6321
6322 if (*source) {
6323 if (usec == USEC_INFINITY)
6324 return sd_event_source_set_enabled(*source, SD_EVENT_OFF);
6325
6326 r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
6327 if (r < 0)
6328 return r;
6329
6330 return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
6331 }
6332
6333 if (usec == USEC_INFINITY)
6334 return 0;
6335
6336 r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
6337 u->manager->event,
6338 source,
6339 CLOCK_MONOTONIC,
6340 usec, 0,
6341 handler,
6342 u);
6343 if (r < 0)
6344 return r;
6345
6346 const char *d = strjoina(unit_type_to_string(u->type), "-timer");
6347 (void) sd_event_source_set_description(*source, d);
6348
6349 return 0;
6350 }
6351
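/* A minimal usage sketch, not part of unit.c proper: arm a one-shot timeout 5s from now with
 * unit_arm_timer(), reusing the same event source on re-arm, and disarm it again by passing
 * USEC_INFINITY. Handler and helper names are made up. */
static int example_on_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Unit *u = ASSERT_PTR(userdata);

        log_unit_warning(u, "Example operation timed out.");
        return 0;
}

static int example_arm_and_disarm(Unit *u, sd_event_source **timeout_source) {
        int r;

        /* Arm (or re-arm) a one-shot timer firing 5s from now... */
        r = unit_arm_timer(u, timeout_source, /* relative= */ true, 5 * USEC_PER_SEC, example_on_timeout);
        if (r < 0)
                return r;

        /* ...and disarm it again: the event source is kept around, merely disabled. */
        return unit_arm_timer(u, timeout_source, /* relative= */ true, USEC_INFINITY, example_on_timeout);
}
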
6352 static int unit_get_nice(Unit *u) {
6353 ExecContext *ec;
6354
6355 ec = unit_get_exec_context(u);
6356 return ec ? ec->nice : 0;
6357 }
6358
6359 static uint64_t unit_get_cpu_weight(Unit *u) {
6360 CGroupContext *cc;
6361
6362 cc = unit_get_cgroup_context(u);
6363 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6364 }
6365
6366 int unit_compare_priority(Unit *a, Unit *b) {
6367 int ret;
6368
6369 ret = CMP(a->type, b->type);
6370 if (ret != 0)
6371 return -ret;
6372
6373 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6374 if (ret != 0)
6375 return -ret;
6376
6377 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6378 if (ret != 0)
6379 return ret;
6380
6381 return strcmp(a->id, b->id);
6382 }
6383
6384 const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
6385 [UNIT_PATH] = &activation_details_path_vtable,
6386 [UNIT_TIMER] = &activation_details_timer_vtable,
6387 };
6388
6389 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6390 _cleanup_free_ ActivationDetails *details = NULL;
6391
6392 assert(trigger_unit);
6393 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6394 assert(trigger_unit->id);
6395
6396 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6397 if (!details)
6398 return NULL;
6399
6400 *details = (ActivationDetails) {
6401 .n_ref = 1,
6402 .trigger_unit_type = trigger_unit->type,
6403 };
6404
6405 details->trigger_unit_name = strdup(trigger_unit->id);
6406 if (!details->trigger_unit_name)
6407 return NULL;
6408
6409 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6410 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6411
6412 return TAKE_PTR(details);
6413 }
6414
6415 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6416 if (!details)
6417 return NULL;
6418
6419 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6420 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6421
6422 free(details->trigger_unit_name);
6423
6424 return mfree(details);
6425 }
6426
6427 void activation_details_serialize(ActivationDetails *details, FILE *f) {
6428 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6429 return;
6430
6431 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6432 if (details->trigger_unit_name)
6433 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6434 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6435 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6436 }
6437
6438 int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6439 int r;
6440
6441 assert(key);
6442 assert(value);
6443 assert(details);
6444
6445 if (!*details) {
6446 UnitType t;
6447
6448 if (!streq(key, "activation-details-unit-type"))
6449 return -EINVAL;
6450
6451 t = unit_type_from_string(value);
6452 if (t < 0)
6453 return t;
6454
6455 /* The activation details vtable has defined ops only for path and timer units */
6456 if (!activation_details_vtable[t])
6457 return -EINVAL;
6458
6459 *details = malloc0(activation_details_vtable[t]->object_size);
6460 if (!*details)
6461 return -ENOMEM;
6462
6463 **details = (ActivationDetails) {
6464 .n_ref = 1,
6465 .trigger_unit_type = t,
6466 };
6467
6468 return 0;
6469 }
6470
6471 if (streq(key, "activation-details-unit-name")) {
6472 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6473 if (r < 0)
6474 return r;
6475
6476 return 0;
6477 }
6478
6479 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6480 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6481
6482 return -EINVAL;
6483 }
6484
6485 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6486 int r = 0;
6487
6488 assert(strv);
6489
6490 if (!details)
6491 return 0;
6492
6493 if (!isempty(details->trigger_unit_name)) {
6494 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6495 if (!s)
6496 return -ENOMEM;
6497
6498 r = strv_consume(strv, TAKE_PTR(s));
6499 if (r < 0)
6500 return r;
6501 }
6502
6503 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6504 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6505 if (r < 0)
6506 return r;
6507 }
6508
6509 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6510 }
6511
6512 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6513 int r = 0;
6514
6515 assert(strv);
6516
6517 if (!details)
6518 return 0;
6519
6520 if (!isempty(details->trigger_unit_name)) {
6521 r = strv_extend(strv, "trigger_unit");
6522 if (r < 0)
6523 return r;
6524
6525 r = strv_extend(strv, details->trigger_unit_name);
6526 if (r < 0)
6527 return r;
6528 }
6529
6530 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6531 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6532 if (r < 0)
6533 return r;
6534 }
6535
6536 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6537 }
6538
6539 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6540
6541 static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
6542 [UNIT_MOUNT_WANTS] = "WantsMountsFor",
6543 [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
6544 };
6545
6546 DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);
6547
6548 UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
6549 switch (t) {
6550
6551 case UNIT_MOUNT_WANTS:
6552 return UNIT_WANTS;
6553
6554 case UNIT_MOUNT_REQUIRES:
6555 return UNIT_REQUIRES;
6556
6557 default:
6558 assert_not_reached();
6559 }
6560 }