]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
6dc13bcc9ad3138cca53be73338a8b77819b9fc7
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
18 #include "bus-util.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
21 #include "chase.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
24 #include "dbus.h"
25 #include "dropin.h"
26 #include "env-util.h"
27 #include "escape.h"
28 #include "exec-credential.h"
29 #include "execute.h"
30 #include "fd-util.h"
31 #include "fileio-label.h"
32 #include "fileio.h"
33 #include "format-util.h"
34 #include "id128-util.h"
35 #include "install.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
40 #include "log.h"
41 #include "logarithm.h"
42 #include "macro.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
46 #include "rm-rf.h"
47 #include "serialize.h"
48 #include "set.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
51 #include "special.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "strv.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
62 #include "unit.h"
63 #include "user-util.h"
64 #include "virt.h"
65 #if BPF_FRAMEWORK
66 #include "bpf-link.h"
67 #endif
68
69 /* Thresholds for logging at INFO level about resource consumption */
70 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
71 #define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
72 #define MENTIONWORTHY_IO_BYTES (1 * U64_MB)
73 #define MENTIONWORTHY_IP_BYTES UINT64_C(0)
74
75 /* Thresholds for logging at NOTICE level about resource consumption */
76 #define NOTICEWORTHY_CPU_NSEC (10 * NSEC_PER_MINUTE)
77 #define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB)
78 #define NOTICEWORTHY_IO_BYTES (10 * U64_MB)
79 #define NOTICEWORTHY_IP_BYTES (128 * U64_MB)
80
/* Dispatch table mapping each UnitType to its implementation vtable. Indexed by
 * the UnitType enum; every concrete type must have an entry here. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
94
/* Allocates a new Unit object of 'size' bytes (size must be at least sizeof(Unit);
 * concrete types pass their larger per-type struct size) and initializes all fields
 * that must not stay zero. Returns NULL on OOM. The unit has no name or type yet;
 * callers follow up with unit_add_name(). */
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        /* -1 marks "no inotify watch installed" for the cgroup control/memory files. */
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        /* NSEC_INFINITY means "no CPU usage sample taken yet". */
        u->cpu_usage_last = NSEC_INFINITY;

        unit_reset_memory_accounting_last(u);

        unit_reset_io_accounting_last(u);

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All fds start out closed; -EBADF is the convention for "no fd". */
        u->ip_accounting_ingress_map_fd = -EBADF;
        u->ip_accounting_egress_map_fd = -EBADF;

        u->ipv4_allow_map_fd = -EBADF;
        u->ipv6_allow_map_fd = -EBADF;
        u->ipv4_deny_map_fd = -EBADF;
        u->ipv6_deny_map_fd = -EBADF;

        u->last_section_private = -1;

        /* Start rate limiting is seeded from the manager-wide defaults. */
        u->start_ratelimit = (const RateLimit) {
                m->defaults.start_limit_interval,
                m->defaults.start_limit_burst,
        };

        u->auto_start_stop_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_SEC, .burst = 16 };

        return u;
}
146
147 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
148 _cleanup_(unit_freep) Unit *u = NULL;
149 int r;
150
151 u = unit_new(m, size);
152 if (!u)
153 return -ENOMEM;
154
155 r = unit_add_name(u, name);
156 if (r < 0)
157 return r;
158
159 *ret = TAKE_PTR(u);
160
161 return r;
162 }
163
164 bool unit_has_name(const Unit *u, const char *name) {
165 assert(u);
166 assert(name);
167
168 return streq_ptr(name, u->id) ||
169 set_contains(u->aliases, name);
170 }
171
/* One-time initialization once a unit's type is known: seeds the type-specific
 * cgroup/exec/kill contexts with manager-wide defaults and then invokes the
 * per-type init() vtable hook. Called from unit_add_name() when the first name
 * is assigned. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->defaults.cpu_accounting;
                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->blockio_accounting = u->manager->defaults.blockio_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                /* Slices don't get the default TasksMax= applied. */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
231
/* Registers 'donated_name' as an alias of the unit. On success, ownership of the
 * string transfers to u->aliases; on failure the caller still owns it. */
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->names is allocated. We may leave u->names
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0); /* r == 0 would mean the name was already present — callers must have checked. */

        return 0;
}
244
/* Adds 'text' as a name of the unit. A template name (foo@.service) is first
 * instantiated using the unit's existing instance string. The name is validated
 * (syntax, unit type, instance consistency, aliasing rules, global name limit),
 * registered in the manager's global units hashmap, and then either becomes the
 * unit's primary id (first name) or an alias (subsequent names). Returns 0 on
 * success (including the no-op case where the name is already known), negative
 * errno on failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                /* First name also determines the type; hook into the per-type list and initialize. */
                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
339
/* Promotes 'name' (which must already be one of the unit's aliases, possibly
 * given as a template to be instantiated) to be the unit's primary id. The old
 * id is demoted into the alias set. Returns 0 on success or if 'name' is
 * already the id, -ENOENT if 'name' is not a known alias. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the alias string leaves the set, the old id takes its place. */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
379
380 int unit_set_description(Unit *u, const char *description) {
381 int r;
382
383 assert(u);
384
385 r = free_and_strdup(&u->description, empty_to_null(description));
386 if (r < 0)
387 return r;
388 if (r > 0)
389 unit_add_to_dbus_queue(u);
390
391 return 0;
392 }
393
/* Returns true if any unit referenced via OnSuccess= or OnFailure= currently has
 * a job (or nop job) installed, i.e. a handler might still be triggered. Used to
 * delay garbage collection. */
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
407
/* Releases runtime resources of a unit that is down for good: destroys a
 * RuntimeDirectory= kept around with preserve-mode "restart", and calls the
 * per-type release_resources() hook. Bails out early if the unit is busy
 * (pending jobs), perpetual, still active, or about to be restarted. */
void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}
434
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        /* Bus clients still tracking this unit keep it alive. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type a veto via its may_gc() hook. */
        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
509
510 void unit_add_to_load_queue(Unit *u) {
511 assert(u);
512 assert(u->type != _UNIT_TYPE_INVALID);
513
514 if (u->load_state != UNIT_STUB || u->in_load_queue)
515 return;
516
517 LIST_PREPEND(load_queue, u->manager->load_queue, u);
518 u->in_load_queue = true;
519 }
520
521 void unit_add_to_cleanup_queue(Unit *u) {
522 assert(u);
523
524 if (u->in_cleanup_queue)
525 return;
526
527 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
528 u->in_cleanup_queue = true;
529 }
530
531 void unit_add_to_gc_queue(Unit *u) {
532 assert(u);
533
534 if (u->in_gc_queue || u->in_cleanup_queue)
535 return;
536
537 if (!unit_may_gc(u))
538 return;
539
540 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
541 u->in_gc_queue = true;
542 }
543
/* Queues the unit for sending D-Bus change/new signals. Skipped for stub units
 * and when already queued. If nobody could possibly receive the signal (no
 * subscribers, no per-unit bus tracking, no private bus connections) the signal
 * is suppressed entirely and merely marked as sent. */
void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}
562
563 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
564 assert(u);
565
566 if (u->in_stop_when_unneeded_queue)
567 return;
568
569 if (!u->stop_when_unneeded)
570 return;
571
572 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
573 return;
574
575 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
576 u->in_stop_when_unneeded_queue = true;
577 }
578
579 void unit_submit_to_start_when_upheld_queue(Unit *u) {
580 assert(u);
581
582 if (u->in_start_when_upheld_queue)
583 return;
584
585 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
586 return;
587
588 if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
589 return;
590
591 LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
592 u->in_start_when_upheld_queue = true;
593 }
594
595 void unit_submit_to_stop_when_bound_queue(Unit *u) {
596 assert(u);
597
598 if (u->in_stop_when_bound_queue)
599 return;
600
601 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
602 return;
603
604 if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
605 return;
606
607 LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
608 u->in_stop_when_bound_queue = true;
609 }
610
611 static bool unit_can_release_resources(Unit *u) {
612 ExecContext *ec;
613
614 assert(u);
615
616 if (UNIT_VTABLE(u)->release_resources)
617 return true;
618
619 ec = unit_get_exec_context(u);
620 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
621 return true;
622
623 return false;
624 }
625
626 void unit_submit_to_release_resources_queue(Unit *u) {
627 assert(u);
628
629 if (u->in_release_resources_queue)
630 return;
631
632 if (u->job || u->nop_job)
633 return;
634
635 if (u->perpetual)
636 return;
637
638 if (!unit_can_release_resources(u))
639 return;
640
641 LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
642 u->in_release_resources_queue = true;
643 }
644
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        /* Drain the outer hashmap (dependency type -> per-type hashmap) one entry at a time. */
        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                /* For every unit we depend on (in any direction), scrub us from *its* tables too. */
                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        /* Losing a reference may make 'other' collectable now. */
                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
666
/* Deletes the on-disk unit file and drop-ins of a transient unit. Drop-ins are
 * only removed when they live under the manager's transient lookup path, so
 * persistent admin-provided drop-ins survive. Non-transient units are untouched. */
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* Best effort: directory may be non-empty. */
        }
}
693
/* Releases the unit's RequiresMountsFor=/WantsMountsFor= bookkeeping: for each
 * recorded path (and every prefix of it) the unit is removed from the manager's
 * reverse index units_needing_mounts_for[], freeing index entries that become
 * empty, and the per-unit hashmaps themselves are freed. */
static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        /* We own the stolen key and free it at end of scope. */
                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        /* The reverse index is keyed by every path prefix, walk them all. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix: drop the index entry and its key. */
                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}
728
729 static void unit_done(Unit *u) {
730 ExecContext *ec;
731 CGroupContext *cc;
732
733 assert(u);
734
735 if (u->type < 0)
736 return;
737
738 if (UNIT_VTABLE(u)->done)
739 UNIT_VTABLE(u)->done(u);
740
741 ec = unit_get_exec_context(u);
742 if (ec)
743 exec_context_done(ec);
744
745 cc = unit_get_cgroup_context(u);
746 if (cc)
747 cgroup_context_done(cc);
748 }
749
/* Destroys a unit and detaches it from every manager-side data structure. The
 * teardown order matters: D-Bus signals go out before names are unregistered,
 * jobs are uninstalled before dependencies are cleared, and the slice pointer is
 * captured before dependency clearing so the cgroup hierarchy can be re-realized
 * afterwards. NULL-tolerant; always returns NULL for the p = unit_free(p) idiom. */
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        /* During reload the transient file must survive, it will be re-read. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        /* Unregister all of our names from the manager's global lookup table. */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        /* State files are kept across reloads so they can be re-read. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Detach every UnitRef pointing at us. */
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Pull the unit out of every manager work queue it might sit in. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally, free all owned strings and the object itself. */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
901
902 FreezerState unit_freezer_state(Unit *u) {
903 assert(u);
904
905 return u->freezer_state;
906 }
907
/* Returns the unit's active state as reported by its type implementation.
 * Merged units forward the query to the unit they were merged into. */
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
920
921 const char* unit_sub_state_to_string(Unit *u) {
922 assert(u);
923
924 return UNIT_VTABLE(u)->sub_state_to_string(u);
925 }
926
/* Moves all names of 'other' over to 'u': other's id becomes an alias of u, all
 * of other's aliases are moved into u's alias set, and the manager's global name
 * table is repointed at u. On success 'other' is left nameless. */
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Roll back: other->id still needs to live in other. */
                set_remove(u->aliases, other->id);
                return r;
        }

        /* Ownership of the id string moved into u->aliases above. */
        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        /* All names must already be keys in the global table, hence replace() can't fail. */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
952
/* Pre-allocates room in u's dependency hashmaps so that a subsequent merge of
 * 'other' into 'u' cannot fail with OOM mid-way. Reservations are not rolled
 * back on failure (they are not a leak, just slack capacity). */
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        /* The outer map has at most _UNIT_DEPENDENCY_MAX entries, cap the reservation accordingly. */
        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
999
1000 static bool unit_should_warn_about_dependency(UnitDependency dependency) {
1001 /* Only warn about some unit types */
1002 return IN_SET(dependency,
1003 UNIT_CONFLICTS,
1004 UNIT_CONFLICTED_BY,
1005 UNIT_BEFORE,
1006 UNIT_AFTER,
1007 UNIT_ON_SUCCESS,
1008 UNIT_ON_FAILURE,
1009 UNIT_TRIGGERS,
1010 UNIT_TRIGGERED_BY);
1011 }
1012
/* Inserts or updates the UnitDependencyInfo stored for 'other' in a per-type
 * dependency hashmap. The info struct is type-punned into the hashmap's value
 * pointer (hence the assert_cc below). Returns 1 on change, 0 if the masks were
 * already fully set (no-op), negative errno on failure. */
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
1053
/* Transfers all dependencies of 'other' to 'u' as part of a unit merge: drops
 * dependencies between the two units themselves (warning where appropriate),
 * repoints third-party units' back-references from 'other' to 'u', and moves
 * other's dependency hashmaps into u — per entry when u already has that
 * dependency type, wholesale when it doesn't. Must not fail: the caller has
 * pre-reserved hashmap capacity via unit_reserve_dependencies(). */
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                /* Cannot fail thanks to the earlier reservation. */
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}
1140
/* Merges unit 'other' into 'u', making 'other' an alias shell of 'u'. Only
 * permitted when the units agree on type and instance, the type supports
 * aliasing, and 'other' is inert (stub or not-found, no jobs, inactive).
 * On success other's load state becomes UNIT_MERGED and it is queued for
 * cleanup. Returns 0 on success, -EINVAL/-EEXIST when merging is not possible,
 * other negative errno on resource errors. */
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        /* 'other' may itself already be merged into a third unit; resolve that first. */
        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1210
1211 int unit_merge_by_name(Unit *u, const char *name) {
1212 _cleanup_free_ char *s = NULL;
1213 Unit *other;
1214 int r;
1215
1216 /* Either add name to u, or if a unit with name already exists, merge it with u.
1217 * If name is a template, do the same for name@instance, where instance is u's instance. */
1218
1219 assert(u);
1220 assert(name);
1221
1222 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1223 if (!u->instance)
1224 return -EINVAL;
1225
1226 r = unit_name_replace_instance(name, u->instance, &s);
1227 if (r < 0)
1228 return r;
1229
1230 name = s;
1231 }
1232
1233 other = manager_get_unit(u->manager, name);
1234 if (other)
1235 return unit_merge(u, other);
1236
1237 return unit_add_name(u, name);
1238 }
1239
1240 Unit* unit_follow_merge(Unit *u) {
1241 assert(u);
1242
1243 while (u->load_state == UNIT_MERGED)
1244 assert_se(u = u->merged_into);
1245
1246 return u;
1247 }
1248
/* Derives implicit unit dependencies from an ExecContext: mount dependencies for the various configured
 * directories, plus ordering against remount-fs, tmpfiles, udev and journald where the settings call for
 * it. Returns 0 on success, negative errno on failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        /* WorkingDirectory= needs its backing mounts; a "-" prefix (missing_ok) weakens the dependency. */
        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        /* StateDirectory=/CacheDirectory=/… — require mounts for each configured directory, resolved
         * against the manager's per-type prefix (e.g. /var/lib). */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below only applies to the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* PrivateTmp= needs /tmp and /var/tmp mounted, and set up by tmpfiles first. */
        if (c->private_tmp) {
                r = unit_add_mounts_for(u, "/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If neither stdout nor stderr go to the journal/kmsg and no log namespace is set, no logging
         * dependencies are needed. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                /* A namespaced journal uses per-namespace socket units, built from the namespace name. */
                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        r = unit_add_default_credential_dependencies(u, c);
        if (r < 0)
                return r;

        return 0;
}
1374
1375 const char* unit_description(Unit *u) {
1376 assert(u);
1377
1378 if (u->description)
1379 return u->description;
1380
1381 return strna(u->id);
1382 }
1383
/* Returns the string to show for this unit in status output, honouring the manager's
 * StatusUnitFormat= setting. May allocate via *ret_combined_buffer (caller frees). */
const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        /* Plain id: no description, NAME format requested, COMBINED requested but no buffer to return the
         * allocation through, or the description is identical to the id anyway. */
        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
1417
/* Common implementation for multiple backends */
/* Loads the unit's fragment (.service/.socket/… file) plus its drop-in directories. If
 * 'fragment_required' is false a missing fragment is tolerated and the unit is marked loaded anyway.
 * Also records the mtime of the source path, if any. Returns 0 on success, negative errno on failure. */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        /* Still a stub means no fragment file was found. */
        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drops-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        /* Remember the source file's mtime so later reloads can detect changes; reset to 0 if it cannot be
         * stat'ed. */
        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
1455
1456 void unit_add_to_target_deps_queue(Unit *u) {
1457 Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);
1458
1459 if (u->in_target_deps_queue)
1460 return;
1461
1462 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1463 u->in_target_deps_queue = true;
1464 }
1465
1466 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1467 assert(u);
1468 assert(target);
1469
1470 if (target->type != UNIT_TARGET)
1471 return 0;
1472
1473 /* Only add the dependency if both units are loaded, so that
1474 * that loop check below is reliable */
1475 if (u->load_state != UNIT_LOADED ||
1476 target->load_state != UNIT_LOADED)
1477 return 0;
1478
1479 /* If either side wants no automatic dependencies, then let's
1480 * skip this */
1481 if (!u->default_dependencies ||
1482 !target->default_dependencies)
1483 return 0;
1484
1485 /* Don't create loops */
1486 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1487 return 0;
1488
1489 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1490 }
1491
1492 static int unit_add_slice_dependencies(Unit *u) {
1493 Unit *slice;
1494 assert(u);
1495
1496 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1497 return 0;
1498
1499 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1500 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1501 relationship). */
1502 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1503
1504 slice = UNIT_GET_SLICE(u);
1505 if (slice)
1506 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1507
1508 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1509 return 0;
1510
1511 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1512 }
1513
/* For every path recorded in u->mounts_for[], adds dependencies on the mount units backing that path and
 * each of its prefixes. Returns > 0 if any dependency was added, 0 if none, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        /* VLA scratch buffer large enough for any prefix of 'path'. */
                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        /* Walk every prefix of the path ("/", "/a", "/a/b", …) and hook up the mount unit
                         * responsible for each. */
                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(
                                                        u->manager,
                                                        p,
                                                        /* path= */NULL,
                                                        /* e= */NULL,
                                                        &m);
                                        continue;
                                }
                                /* Never depend on ourselves. */
                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                /* Always order ourselves after the mount... */
                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                /* ...and additionally pull it in (Wants=/Requires=, per mount-dependency
                                 * type) if it has its own fragment, i.e. is user-configured. */
                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}
1587
1588 static int unit_add_oomd_dependencies(Unit *u) {
1589 CGroupContext *c;
1590 CGroupMask mask;
1591 int r;
1592
1593 assert(u);
1594
1595 if (!u->default_dependencies)
1596 return 0;
1597
1598 c = unit_get_cgroup_context(u);
1599 if (!c)
1600 return 0;
1601
1602 bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
1603 if (!wants_oomd)
1604 return 0;
1605
1606 if (!cg_all_unified())
1607 return 0;
1608
1609 r = cg_mask_supported(&mask);
1610 if (r < 0)
1611 return log_debug_errno(r, "Failed to determine supported controllers: %m");
1612
1613 if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
1614 return 0;
1615
1616 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1617 }
1618
1619 static int unit_add_startup_units(Unit *u) {
1620 if (!unit_has_startup_cgroup_constraints(u))
1621 return 0;
1622
1623 return set_ensure_put(&u->manager->startup_units, NULL, u);
1624 }
1625
1626 static int unit_validate_on_failure_job_mode(
1627 Unit *u,
1628 const char *job_mode_setting,
1629 JobMode job_mode,
1630 const char *dependency_name,
1631 UnitDependencyAtom atom) {
1632
1633 Unit *other, *found = NULL;
1634
1635 if (job_mode != JOB_ISOLATE)
1636 return 0;
1637
1638 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1639 if (!found)
1640 found = other;
1641 else if (found != other)
1642 return log_unit_error_errno(
1643 u, SYNTHETIC_ERRNO(ENOEXEC),
1644 "More than one %s dependencies specified but %sisolate set. Refusing.",
1645 dependency_name, job_mode_setting);
1646 }
1647
1648 return 0;
1649 }
1650
/* Loads the unit's configuration (via its type's vtable) and derives the implicit dependencies that
 * follow from it. On failure the unit is parked in UNIT_NOT_FOUND / UNIT_BAD_SETTING / UNIT_ERROR state
 * and a negative errno is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We're loading right now, no need to keep it queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than a stub has been (or failed to be) loaded already. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Type-specific loading (parses the fragment, drop-ins, etc.). */
        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        /* A unit is merged iff its load state says so — keep the two in sync. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1746
/* Log callback passed to condition_test_list(): logs with the unit's log fields attached when 'userdata'
 * is a unit, or plainly otherwise. Respects the unit's per-unit log level. Returns the negative errno
 * form of 'error' (like the log_* family). */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Suppressed by the unit's log level? Still return the errno so callers can propagate it. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                /* Attach UNIT=/USER_UNIT= and INVOCATION_ID= fields so journal queries can filter by unit. */
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1770
1771 static bool unit_test_condition(Unit *u) {
1772 _cleanup_strv_free_ char **env = NULL;
1773 int r;
1774
1775 assert(u);
1776
1777 dual_timestamp_now(&u->condition_timestamp);
1778
1779 r = manager_get_effective_environment(u->manager, &env);
1780 if (r < 0) {
1781 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1782 u->condition_result = true;
1783 } else
1784 u->condition_result = condition_test_list(
1785 u->conditions,
1786 env,
1787 condition_type_to_string,
1788 log_unit_internal,
1789 u);
1790
1791 unit_add_to_dbus_queue(u);
1792 return u->condition_result;
1793 }
1794
1795 static bool unit_test_assert(Unit *u) {
1796 _cleanup_strv_free_ char **env = NULL;
1797 int r;
1798
1799 assert(u);
1800
1801 dual_timestamp_now(&u->assert_timestamp);
1802
1803 r = manager_get_effective_environment(u->manager, &env);
1804 if (r < 0) {
1805 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1806 u->assert_result = CONDITION_ERROR;
1807 } else
1808 u->assert_result = condition_test_list(
1809 u->asserts,
1810 env,
1811 assert_type_to_string,
1812 log_unit_internal,
1813 u);
1814
1815 unit_add_to_dbus_queue(u);
1816 return u->assert_result;
1817 }
1818
/* Prints a console status line for the unit, optionally colorizing the identifier. 'format' is one of the
 * manager's fixed status format strings with a single %s slot for 'ident'. */
void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                /* In COMBINED mode 'ident' is "id - description" (hence contains a space); rebuild it so
                 * only the id part is highlighted. Otherwise highlight the whole identifier. */
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        /* 'format' is not a literal here, but it comes from our own fixed table — silence the warning. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}
1831
1832 int unit_test_start_limit(Unit *u) {
1833 const char *reason;
1834
1835 assert(u);
1836
1837 if (ratelimit_below(&u->start_ratelimit)) {
1838 u->start_limit_hit = false;
1839 return 0;
1840 }
1841
1842 log_unit_warning(u, "Start request repeated too quickly.");
1843 u->start_limit_hit = true;
1844
1845 reason = strjoina("unit ", u->id, " failed");
1846
1847 emergency_action(u->manager, u->start_limit_action,
1848 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1849 u->reboot_arg, -1, reason);
1850
1851 return -ECANCELED;
1852 }
1853
1854 static bool unit_verify_deps(Unit *u) {
1855 Unit *other;
1856
1857 assert(u);
1858
1859 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1860 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1861 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1862 * that are not used in conjunction with After= as for them any such check would make things entirely
1863 * racy. */
1864
1865 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1866
1867 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1868 continue;
1869
1870 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1871 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1872 return false;
1873 }
1874 }
1875
1876 return true;
1877 }
1878
/* Errors that aren't really errors:
 * -EALREADY: Unit is already started.
 * -ECOMM: Condition failed
 * -EAGAIN: An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 * -EBADR: This unit type does not support starting.
 * -ECANCELED: Start limit hit, too many requests for now
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
1985
1986 bool unit_can_start(Unit *u) {
1987 assert(u);
1988
1989 if (u->load_state != UNIT_LOADED)
1990 return false;
1991
1992 if (!unit_type_supported(u->type))
1993 return false;
1994
1995 /* Scope units may be started only once */
1996 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1997 return false;
1998
1999 return !!UNIT_VTABLE(u)->start;
2000 }
2001
2002 bool unit_can_isolate(Unit *u) {
2003 assert(u);
2004
2005 return unit_can_start(u) &&
2006 u->allow_isolate;
2007 }
2008
2009 /* Errors:
2010 * -EBADR: This unit type does not support stopping.
2011 * -EALREADY: Unit is already stopped.
2012 * -EAGAIN: An operation is already in progress. Retry later.
2013 * -EDEADLK: Unit is frozen
2014 */
2015 int unit_stop(Unit *u) {
2016 UnitActiveState state;
2017 Unit *following;
2018
2019 assert(u);
2020
2021 state = unit_active_state(u);
2022 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2023 return -EALREADY;
2024
2025 following = unit_following(u);
2026 if (following) {
2027 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2028 return unit_stop(following);
2029 }
2030
2031 /* Check to make sure the unit isn't frozen */
2032 if (u->freezer_state != FREEZER_RUNNING)
2033 return -EDEADLK;
2034
2035 if (!UNIT_VTABLE(u)->stop)
2036 return -EBADR;
2037
2038 unit_add_to_dbus_queue(u);
2039
2040 return UNIT_VTABLE(u)->stop(u);
2041 }
2042
2043 bool unit_can_stop(Unit *u) {
2044 assert(u);
2045
2046 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2047 * Extrinsic units follow external state and they may stop following external state changes
2048 * (hence we return true here), but an attempt to do this through the manager will fail. */
2049
2050 if (!unit_type_supported(u->type))
2051 return false;
2052
2053 if (u->perpetual)
2054 return false;
2055
2056 return !!UNIT_VTABLE(u)->stop;
2057 }
2058
/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -EDEADLK: Unit is frozen.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        /* A reload is already in flight — ask the caller to retry later. */
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        /* Only fully active units can be reloaded. */
        if (state != UNIT_ACTIVE)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");

        /* If another unit speaks for us, forward the request there. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
2104
2105 bool unit_can_reload(Unit *u) {
2106 assert(u);
2107
2108 if (UNIT_VTABLE(u)->can_reload)
2109 return UNIT_VTABLE(u)->can_reload(u);
2110
2111 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2112 return true;
2113
2114 return UNIT_VTABLE(u)->reload;
2115 }
2116
2117 bool unit_is_unneeded(Unit *u) {
2118 Unit *other;
2119 assert(u);
2120
2121 if (!u->stop_when_unneeded)
2122 return false;
2123
2124 /* Don't clean up while the unit is transitioning or is even inactive. */
2125 if (unit_active_state(u) != UNIT_ACTIVE)
2126 return false;
2127 if (u->job)
2128 return false;
2129
2130 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2131 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2132 * restart, then don't clean this one up. */
2133
2134 if (other->job)
2135 return false;
2136
2137 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2138 return false;
2139
2140 if (unit_will_restart(other))
2141 return false;
2142 }
2143
2144 return true;
2145 }
2146
2147 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2148 Unit *other;
2149
2150 assert(u);
2151
2152 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2153 * that is active declared an Uphold= dependencies on it */
2154
2155 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2156 if (ret_culprit)
2157 *ret_culprit = NULL;
2158 return false;
2159 }
2160
2161 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2162 if (other->job)
2163 continue;
2164
2165 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2166 if (ret_culprit)
2167 *ret_culprit = other;
2168 return true;
2169 }
2170 }
2171
2172 if (ret_culprit)
2173 *ret_culprit = NULL;
2174 return false;
2175 }
2176
2177 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2178 Unit *other;
2179
2180 assert(u);
2181
2182 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2183 * because the other unit is down. */
2184
2185 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2186 /* Don't clean up while the unit is transitioning or is even inactive. */
2187 if (ret_culprit)
2188 *ret_culprit = NULL;
2189 return false;
2190 }
2191
2192 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2193 if (other->job)
2194 continue;
2195
2196 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2197 if (ret_culprit)
2198 *ret_culprit = other;
2199
2200 return true;
2201 }
2202 }
2203
2204 if (ret_culprit)
2205 *ret_culprit = NULL;
2206 return false;
2207 }
2208
2209 static void check_unneeded_dependencies(Unit *u) {
2210 Unit *other;
2211 assert(u);
2212
2213 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2214
2215 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2216 unit_submit_to_stop_when_unneeded_queue(other);
2217 }
2218
2219 static void check_uphold_dependencies(Unit *u) {
2220 Unit *other;
2221 assert(u);
2222
2223 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2224
2225 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2226 unit_submit_to_start_when_upheld_queue(other);
2227 }
2228
2229 static void check_bound_by_dependencies(Unit *u) {
2230 Unit *other;
2231 assert(u);
2232
2233 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2234
2235 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2236 unit_submit_to_stop_when_bound_queue(other);
2237 }
2238
/* The unit went active without a start job having requested it: retroactively enqueue start jobs
 * for the units it pulls in, and stop jobs for the units it conflicts with. Units we are ordered
 * After= are skipped here. Note the different job modes: hard deps (Requires=/BindsTo=) use
 * JOB_REPLACE, soft deps (Wants=) use JOB_FAIL. Job enqueue failures are deliberately ignored
 * (best-effort). */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2259
2260 static void retroactively_stop_dependencies(Unit *u) {
2261 Unit *other;
2262
2263 assert(u);
2264 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2265
2266 /* Pull down units which are bound to us recursively if enabled */
2267 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2268 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2269 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2270 }
2271
/* Enqueues start jobs for every unit listed in the given OnFailure=/OnSuccess= dependency of 'u'.
 * dependency_name ("OnFailure=" or "OnSuccess=") is used for log output only; job_mode is the
 * configured OnFailureJobMode=/OnSuccessJobMode=. Individual enqueue failures are logged and
 * skipped, not propagated. */
void unit_start_on_failure(
                Unit *u,
                const char *dependency_name,
                UnitDependencyAtom atom,
                JobMode job_mode) {

        /* n_jobs doubles as a "did we log the trigger message yet" flag: it stays -1 until the
         * first dependency is seen, so the INFO/debug messages are only emitted if there is at
         * least one dependency of this kind. */
        int n_jobs = -1;
        Unit *other;
        int r;

        assert(u);
        assert(dependency_name);
        assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));

        /* Act on OnFailure= and OnSuccess= dependencies */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                if (n_jobs < 0) {
                        log_unit_info(u, "Triggering %s dependencies.", dependency_name);
                        n_jobs = 0;
                }

                r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
                if (r < 0)
                        /* Best effort: one failed enqueue shall not keep us from the others. */
                        log_unit_warning_errno(
                                        u, r, "Failed to enqueue %s job, ignoring: %s",
                                        dependency_name, bus_error_message(&error, r));
                n_jobs++;
        }

        if (n_jobs >= 0)
                log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
                               dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
}
2308
2309 void unit_trigger_notify(Unit *u) {
2310 Unit *other;
2311
2312 assert(u);
2313
2314 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2315 if (UNIT_VTABLE(other)->trigger_notify)
2316 UNIT_VTABLE(other)->trigger_notify(other, u);
2317 }
2318
/* Raises (i.e. numerically lowers) the given log level to LOG_NOTICE or LOG_INFO when the
 * corresponding condition holds. Never lowers the severity of a level that is already higher. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        int level = log_level;

        if (condition_notice)
                level = MIN(level, LOG_NOTICE);
        else if (condition_info)
                level = MIN(level, LOG_INFO);

        return level;
}
2326
/* Invoked when a unit enters a failed or dead state: logs the resources it consumed, both as
 * structured journal fields and as a condensed human-readable MESSAGE=. Returns 0 (also when
 * nothing was logged), or the result of log_oom() on allocation failure. */
static int unit_log_resources(Unit *u) {

        /* Per-metric mapping to the journal field name and the suffix used in the human-readable
         * message; a NULL message_suffix means the metric appears only as a structured field. */
        static const struct {
                const char *journal_field;
                const char *message_suffix;
        } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
                [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" },
                [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
        }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
                [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
                [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL },
                [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL },
        }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" },
                [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
                [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL },
                [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL },
        };

        struct iovec *iovec = NULL;
        size_t n_iovec = 0;
        _cleanup_free_ char *message = NULL, *t = NULL;
        nsec_t cpu_nsec = NSEC_INFINITY;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */

        assert(u);

        CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);

        /* Upper bound on the iovec count: one entry per metric plus CPU time, plus MESSAGE=,
         * MESSAGE_ID= and the unit/invocation log fields at the end. */
        iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
                    _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
        if (!iovec)
                return log_oom();

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &cpu_nsec);
        if (cpu_nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* Format the CPU time for inclusion in the human language message string */
                if (strextendf_with_separator(&message, ", ",
                                              "Consumed %s CPU time",
                                              FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        cpu_nsec > MENTIONWORTHY_CPU_NSEC,
                                        cpu_nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
                uint64_t value = UINT64_MAX;

                assert(memory_fields[metric].journal_field);
                assert(memory_fields[metric].message_suffix);

                (void) unit_get_memory_accounting(u, metric, &value);
                if (value == UINT64_MAX)
                        continue;

                if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                if (strextendf_with_separator(&message, ", ", "%s %s",
                                              FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        value > MENTIONWORTHY_MEMORY_BYTES,
                                        value > NOTICEWORTHY_MEMORY_BYTES);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k].journal_field);

                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (io_fields[k].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m].journal_field);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IP accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the packets counters) */
                if (ip_fields[m].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
                }
        }

        /* This check is here because it is the earliest point following all possible log_level assignments.
         * (If log_level is assigned anywhere after this point, move this check.) */
        if (!unit_log_level_test(u, log_level))
                return 0;

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                assert(!message);
                return 0;
        }

        t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
        if (!t)
                return log_oom();
        iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

        if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, u->manager->unit_log_field, u->id))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, u->manager->invocation_log_field, u->invocation_id_string))
                return log_oom();

        log_unit_struct_iovec(u, log_level, iovec, n_iovec);

        return 0;
}
2502
2503 static void unit_update_on_console(Unit *u) {
2504 bool b;
2505
2506 assert(u);
2507
2508 b = unit_needs_console(u);
2509 if (u->on_console == b)
2510 return;
2511
2512 u->on_console = b;
2513 if (b)
2514 manager_ref_console(u->manager);
2515 else
2516 manager_unref_console(u->manager);
2517 }
2518
2519 static void unit_emit_audit_start(Unit *u) {
2520 assert(u);
2521
2522 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2523 return;
2524
2525 /* Write audit record if we have just finished starting up */
2526 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2527 u->in_audit = true;
2528 }
2529
/* Emits the audit stop record when a unit shuts down, taking care to always produce matching
 * start/stop pairs. NOTE(review): deliberately gates on audit_start_message_type (not the stop
 * type) — presumably unit types that define a start record also define a stop record; confirm
 * against the vtable definitions. */
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                /* Only emit the matching stop record for the clean-shutdown case. */
                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}
2548
/* Reconciles the unit's new active state 'ns' with its installed job 'j': completes the job when
 * the new state satisfies it, and fails/invalidates it when the state contradicts it. Returns
 * true when the state change was NOT caused by the job ("unexpected"), so that the caller can
 * apply retroactive dependency start/stop handling. */
static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        /* Still activating is fine; anything else while the job runs is not. */
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                /* Reload finished; its success decides the job result. */
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        /* A stop job that sees the unit go anywhere but down has failed. */
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2621
/* Central state-change handler, invoked by the unit type implementations whenever a unit
 * transitions from state 'os' to 'ns'. Propagates the change to the pending job, dependencies,
 * timestamps, audit/plymouth, resource logging, emergency actions and the deferred-processing
 * queues. reload_success is only meaningful for reload jobs. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_now(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                /* A reload just happened: the needs-reload marker is satisfied. */
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }

                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= handling. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2774
/* Registers the given PID in this unit's watched-PID set and in the manager's global watch tables.
 * The manager keeps a simple PID→Unit hashmap for the common case, plus a secondary PID→Unit[]
 * hashmap for PIDs watched by multiple units. Returns 0 on success (also if already watched),
 * negative errno on failure. */
int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        /* Adds a specific PID to the set of PIDs this unit watches. */

        assert(u);
        assert(pidref_is_set(pid));

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) /* early exit if already being watched */
                return 0;

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops_free, pid, u);
        if (r != -EEXIST)
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                ;

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Make sure the hashmap is allocated */
        r = hashmap_ensure_allocated(&u->manager->watch_pids_more, &pidref_hash_ops_free);
        if (r < 0)
                return r;

        /* Add or replace the old array. Reuse the existing key PidRef if there was one. */
        r = hashmap_replace(u->manager->watch_pids_more, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */
        return 0;
}
2844
2845 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2846 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
2847 int r;
2848
2849 assert(u);
2850 assert(pid_is_valid(pid));
2851
2852 r = pidref_set_pid(&pidref, pid);
2853 if (r < 0)
2854 return r;
2855
2856 return unit_watch_pidref(u, &pidref, exclusive);
2857 }
2858
/* Drops the given PID from this unit's watched-PID set, and removes the unit from the manager's
 * global watch tables — either the simple PID→Unit hashmap, or the secondary PID→Unit[] hashmap
 * for PIDs watched by multiple units (re-keying that entry if the removed unit owned the key). */
void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
                        free(array);
                } else {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */

                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}
2907
2908 void unit_unwatch_pid(Unit *u, pid_t pid) {
2909 return unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(pid));
2910 }
2911
2912 void unit_unwatch_all_pids(Unit *u) {
2913 assert(u);
2914
2915 while (!set_isempty(u->pids))
2916 unit_unwatch_pidref(u, set_first(u->pids));
2917
2918 u->pids = set_free(u->pids);
2919 }
2920
2921 void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
2922 assert(u);
2923
2924 if (!pidref_is_set(pidref))
2925 return;
2926
2927 unit_unwatch_pidref(u, pidref);
2928 pidref_done(pidref);
2929 }
2930
2931 static void unit_tidy_watch_pids(Unit *u) {
2932 PidRef *except1, *except2, *e;
2933
2934 assert(u);
2935
2936 /* Cleans dead PIDs from our list */
2937
2938 except1 = unit_main_pid(u);
2939 except2 = unit_control_pid(u);
2940
2941 SET_FOREACH(e, u->pids) {
2942 if (pidref_equal(except1, e) || pidref_equal(except2, e))
2943 continue;
2944
2945 if (pidref_is_unwaited(e) <= 0)
2946 unit_unwatch_pidref(u, e);
2947 }
2948 }
2949
/* Deferred event callback installed by unit_enqueue_rewatch_pids(): prunes dead PIDs from the
 * watch set and then subscribes to all PIDs currently found for the unit again. */
static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
        Unit *u = ASSERT_PTR(userdata);

        assert(s);

        unit_tidy_watch_pids(u);
        unit_watch_all_pids(u);

        /* If the PID set is empty now, then let's finish this off. */
        unit_synthesize_cgroup_empty_event(u);

        return 0;
}
2963
/* Schedules a deferred re-scan of the PIDs this unit watches (see on_rewatch_pids_event()).
 * Returns -ENOENT if the unit has no cgroup, 0 if nothing needs to be done (unified cgroup
 * hierarchy with proper notifications) or on success, negative errno otherwise. */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                /* Lazily allocate the defer event source on first use; it is kept around and
                 * re-enabled on subsequent calls. */
                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, EVENT_PRIORITY_REWATCH_PIDS);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
3004
/* Cancels a rewatch request enqueued via unit_enqueue_rewatch_pids() and releases the associated
 * event source. Safe to call when nothing was enqueued. */
void unit_dequeue_rewatch_pids(Unit *u) {
        int r;
        assert(u);

        if (!u->rewatch_pids_event_source)
                return;

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
        if (r < 0)
                /* Best effort: the source is dropped below either way. */
                log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");

        u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
}
3018
/* Returns whether it makes sense to enqueue a job of type 'j' for this unit at all (independently
 * of the unit's current state). */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
        case JOB_NOP:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueuing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                /* A restart needs both halves to be possible. */
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached();
        }
}
3053
/* Looks up — or lazily creates — the inner hashmap (Unit* → UnitDependencyInfo) for the given
 * dependency type inside u->dependencies. Returns NULL on allocation failure. */
static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                /* Ownership transferred to u->dependencies; disarm the cleanup handler. */
                deps = TAKE_PTR(h);
        }

        return deps;
}
3076
/* Flags indicating which side(s) of a dependency were actually modified, so callers know which
 * unit(s) need change notification. */
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0, /* the dependency map of the unit itself changed */
        NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,   /* the dependency map of the other unit changed */
} NotifyDependencyFlags;
3081
/* Registers the dependency u →(d)→ other, and simultaneously the inverse dependency on 'other', tagging
 * both with 'mask' as the origin/destination of the request. Returns a non-negative NotifyDependencyFlags
 * value describing what changed (0 if the dependency was already fully recorded), or a negative errno. */
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        /* For each dependency type, the type to record on the *other* unit so the edge can be traversed
         * in both directions. Some types are their own inverse (symmetric). */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES]               = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE]              = UNIT_REQUISITE_OF,
                [UNIT_WANTS]                  = UNIT_WANTED_BY,
                [UNIT_BINDS_TO]               = UNIT_BOUND_BY,
                [UNIT_PART_OF]                = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS]                = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY]            = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF]           = UNIT_REQUISITE,
                [UNIT_WANTED_BY]              = UNIT_WANTS,
                [UNIT_BOUND_BY]               = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF]            = UNIT_PART_OF,
                [UNIT_UPHELD_BY]              = UNIT_UPHOLDS,
                [UNIT_CONFLICTS]              = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY]          = UNIT_CONFLICTS,
                [UNIT_BEFORE]                 = UNIT_AFTER,
                [UNIT_AFTER]                  = UNIT_BEFORE,
                [UNIT_ON_SUCCESS]             = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF]          = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE]             = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF]          = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS]               = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY]           = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO]   = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO]     = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM]   = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF]     = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES]             = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY]          = UNIT_REFERENCES,
                [UNIT_IN_SLICE]               = UNIT_SLICE_OF,
                [UNIT_SLICE_OF]               = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. NOTE(review): UnitDependencyInfo appears to be a union
         * overlaying the mask bitfields with a pointer-sized .data member, so the masks are stored
         * directly as hashmap values — confirm against the declaration in unit.h. */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info: merge the new mask bit into each direction. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info, but only if anything actually changed. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency, so the forward and inverse directions
                                 * never end up out of sync on failure. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
        }

        return flags;
}
3179
/* Public entry point for adding a dependency u →(d)→ other. Validates the request against the unit
 * types involved, installs the dependency (plus an implicit References= edge if 'add_reference'),
 * and queues bus change notifications for whichever units actually changed. Returns > 0 if something
 * changed, 0 if the dependency was a no-op (already present, self-dependency, or suppressed),
 * negative errno on refusal/failure. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        /* Operate on the merge-target units, in case either has been merged away. */
        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        /* Triggers=/TriggeredBy= only make sense for unit types that implement triggering. */
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        /* Slice membership requires a slice unit on the slice end, and a cgroup-capable unit on the
         * member end. */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        /* Announce on the bus only the side(s) whose dependency set actually changed. */
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}
3267
3268 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3269 int r = 0, s = 0;
3270
3271 assert(u);
3272 assert(d >= 0 || e >= 0);
3273
3274 if (d >= 0) {
3275 r = unit_add_dependency(u, d, other, add_reference, mask);
3276 if (r < 0)
3277 return r;
3278 }
3279
3280 if (e >= 0) {
3281 s = unit_add_dependency(u, e, other, add_reference, mask);
3282 if (s < 0)
3283 return s;
3284 }
3285
3286 return r > 0 || s > 0;
3287 }
3288
3289 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3290 int r;
3291
3292 assert(u);
3293 assert(name);
3294 assert(buf);
3295 assert(ret);
3296
3297 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3298 *buf = NULL;
3299 *ret = name;
3300 return 0;
3301 }
3302
3303 if (u->instance)
3304 r = unit_name_replace_instance(name, u->instance, buf);
3305 else {
3306 _cleanup_free_ char *i = NULL;
3307
3308 r = unit_name_to_prefix(u->id, &i);
3309 if (r < 0)
3310 return r;
3311
3312 r = unit_name_replace_instance(name, i, buf);
3313 }
3314 if (r < 0)
3315 return r;
3316
3317 *ret = *buf;
3318 return 0;
3319 }
3320
3321 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3322 _cleanup_free_ char *buf = NULL;
3323 Unit *other;
3324 int r;
3325
3326 assert(u);
3327 assert(name);
3328
3329 r = resolve_template(u, name, &buf, &name);
3330 if (r < 0)
3331 return r;
3332
3333 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3334 return 0;
3335
3336 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3337 if (r < 0)
3338 return r;
3339
3340 return unit_add_dependency(u, d, other, add_reference, mask);
3341 }
3342
3343 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3344 _cleanup_free_ char *buf = NULL;
3345 Unit *other;
3346 int r;
3347
3348 assert(u);
3349 assert(name);
3350
3351 r = resolve_template(u, name, &buf, &name);
3352 if (r < 0)
3353 return r;
3354
3355 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3356 return 0;
3357
3358 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3359 if (r < 0)
3360 return r;
3361
3362 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3363 }
3364
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes: override the unit search path via the environment. */
        if (setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ 1) < 0)
                return -errno;

        return 0;
}
3369
3370 char *unit_dbus_path(Unit *u) {
3371 assert(u);
3372
3373 if (!u->id)
3374 return NULL;
3375
3376 return unit_dbus_path_from_name(u->id);
3377 }
3378
3379 char *unit_dbus_path_invocation_id(Unit *u) {
3380 assert(u);
3381
3382 if (sd_id128_is_null(u->invocation_id))
3383 return NULL;
3384
3385 return unit_dbus_path_from_name(u->invocation_id_string);
3386 }
3387
/* Associates invocation ID 'id' with the unit, maintaining the manager's units_by_invocation_id index.
 * A null 'id' clears the invocation ID. On any failure the invocation ID is fully reset rather than
 * left half-applied. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old index entry first, so the map never points at a stale ID. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Store the ID before inserting: the hashmap keys on &u->invocation_id itself. */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Clearing requested, or something failed: wipe both the binary and string forms. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3424
/* Places unit 'u' into slice 'slice' by installing a UNIT_IN_SLICE dependency, replacing any previous
 * slice assignment. Returns 1 on change, 0 if already in that slice, negative errno on refusal. */
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Only allow re-parenting while the unit is not running. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it must stay directly under the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u) && u->cgroup_realized)
                return -EBUSY;

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}
3468
/* Assigns a default slice to the unit if none is configured: instantiated units get a per-template
 * slice, extrinsic units go into the root slice, everything else into system.slice/app.slice depending
 * on manager scope. The slice unit is loaded on demand. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Respect an explicitly configured slice. */
        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                /* strjoina() allocates on the stack of this function, so slice_name stays valid
                 * until we return. */
                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3520
3521 const char *unit_slice_name(Unit *u) {
3522 Unit *slice;
3523 assert(u);
3524
3525 slice = UNIT_GET_SLICE(u);
3526 if (!slice)
3527 return NULL;
3528
3529 return slice->id;
3530 }
3531
3532 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3533 _cleanup_free_ char *t = NULL;
3534 int r;
3535
3536 assert(u);
3537 assert(type);
3538 assert(_found);
3539
3540 r = unit_name_change_suffix(u->id, type, &t);
3541 if (r < 0)
3542 return r;
3543 if (unit_has_name(u, t))
3544 return -EINVAL;
3545
3546 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3547 assert(r < 0 || *_found != u);
3548 return r;
3549 }
3550
/* D-Bus match callback for NameOwnerChanged signals on a watched bus name: forwards the new owner
 * (NULL when the name was released) to the unit type's bus_name_owner_change() hook. */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* Signature is (name, old_owner, new_owner); we only care about the new owner. */
        r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
        if (r < 0) {
                /* Log but don't propagate — a malformed signal shouldn't fail the bus dispatch. */
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));

        return 0;
}
3569
/* Async reply handler for the initial GetNameOwner() query: reports the current owner of the watched
 * name (or NULL if nobody owns it) to the unit type's bus_name_owner_change() hook. */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* One-shot call: release the slot now that the reply arrived. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "Name has no owner" is the expected negative answer; anything else is unexpected
                 * and worth logging, but either way we treat the name as unowned. */
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                /* A successful GetNameOwner() reply always carries a non-empty owner. */
                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3603
/* Starts watching bus name 'name' on behalf of unit 'u': installs a NameOwnerChanged signal match for
 * future ownership changes, then issues an async GetNameOwner() to learn the current owner. Returns
 * -EBUSY if a watch is already installed. On failure of the async call, the signal match is rolled
 * back so the unit ends up with no watch at all. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        usec_t timeout_usec = 0;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        /* strjoina() allocates on the stack, valid for the rest of this function. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        true,
                        match,
                        signal_name_owner_changed,
                        NULL,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        /* Query the current owner once, since the signal only covers future changes. */
        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);

        if (r < 0) {
                /* Roll back the signal match so we don't keep a half-installed watch. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3672
/* Registers unit 'u' as the watcher of bus name 'name' in the manager's watch_bus map, installing the
 * actual bus match immediately if the API bus is already up. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Keep the two data structures consistent: roll back the bus match on map failure. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3699
3700 void unit_unwatch_bus_name(Unit *u, const char *name) {
3701 assert(u);
3702 assert(name);
3703
3704 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3705 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3706 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3707 }
3708
3709 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3710 _cleanup_free_ char *e = NULL;
3711 Unit *device;
3712 int r;
3713
3714 assert(u);
3715
3716 /* Adds in links to the device node that this unit is based on */
3717 if (isempty(what))
3718 return 0;
3719
3720 if (!is_device_path(what))
3721 return 0;
3722
3723 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3724 if (!unit_type_supported(UNIT_DEVICE))
3725 return 0;
3726
3727 r = unit_name_from_path(what, ".device", &e);
3728 if (r < 0)
3729 return r;
3730
3731 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3732 if (r < 0)
3733 return r;
3734
3735 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3736 dep = UNIT_BINDS_TO;
3737
3738 return unit_add_two_dependencies(u, UNIT_AFTER,
3739 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3740 device, true, mask);
3741 }
3742
3743 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3744 _cleanup_free_ char *escaped = NULL, *target = NULL;
3745 int r;
3746
3747 assert(u);
3748
3749 if (isempty(what))
3750 return 0;
3751
3752 if (!path_startswith(what, "/dev/"))
3753 return 0;
3754
3755 /* If we don't support devices, then also don't bother with blockdev@.target */
3756 if (!unit_type_supported(UNIT_DEVICE))
3757 return 0;
3758
3759 r = unit_name_path_escape(what, &escaped);
3760 if (r < 0)
3761 return r;
3762
3763 r = unit_name_build("blockdev", escaped, ".target", &target);
3764 if (r < 0)
3765 return r;
3766
3767 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3768 }
3769
/* Re-establishes runtime state after deserialization (daemon reload/reexec): bus-name refs, the unit
 * type's own coldplug hook, pending jobs, and nft sets. Collects the first error but keeps going. */
int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-register bus-track names recorded during serialization. */
        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}
3797
/* After coldplug, lets the unit type and the cgroup layer catch up with state changes that happened
 * while the manager wasn't watching. */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3806
3807 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3808 struct stat st;
3809
3810 if (!path)
3811 return false;
3812
3813 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3814 * are never out-of-date. */
3815 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3816 return false;
3817
3818 if (stat(path, &st) < 0)
3819 /* What, cannot access this anymore? */
3820 return true;
3821
3822 if (path_masked)
3823 /* For masked files check if they are still so */
3824 return !null_or_empty(&st);
3825 else
3826 /* For non-empty files check the mtime */
3827 return timespec_load(&st.st_mtim) > mtime;
3828
3829 return false;
3830 }
3831
/* Returns true if the unit's on-disk configuration (fragment, source, or drop-ins) changed since it
 * was loaded, i.e. a daemon-reload is needed for this unit. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **dropins = NULL;

        assert(u);
        assert(u->manager);

        /* Global flag: the unit file state cache is known stale. */
        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the current set of drop-in files against the set we loaded. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &dropins);
        if (!strv_equal(u->dropin_paths, dropins))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3862
/* Clears the unit's failure state: the type-specific failed marker, the start rate limiter, and the
 * start-limit-hit flag, so the unit may be started again. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3872
3873 Unit *unit_following(Unit *u) {
3874 assert(u);
3875
3876 if (UNIT_VTABLE(u)->following)
3877 return UNIT_VTABLE(u)->following(u);
3878
3879 return NULL;
3880 }
3881
/* Returns true if a stop job is queued for this unit. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3893
3894 bool unit_inactive_or_pending(Unit *u) {
3895 assert(u);
3896
3897 /* Returns true if the unit is inactive or going down */
3898
3899 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3900 return true;
3901
3902 if (unit_stop_pending(u))
3903 return true;
3904
3905 return false;
3906 }
3907
3908 bool unit_active_or_pending(Unit *u) {
3909 assert(u);
3910
3911 /* Returns true if the unit is active or going up */
3912
3913 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3914 return true;
3915
3916 if (u->job &&
3917 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3918 return true;
3919
3920 return false;
3921 }
3922
/* Default will_restart() implementation for unit types: restart is pending iff a start job is queued. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
3928
3929 bool unit_will_restart(Unit *u) {
3930 assert(u);
3931
3932 if (!UNIT_VTABLE(u)->will_restart)
3933 return false;
3934
3935 return UNIT_VTABLE(u)->will_restart(u);
3936 }
3937
/* Forwards a cgroup OOM event to the unit type's handler; 'managed_oom' distinguishes systemd-oomd
 * kills from kernel OOM-killer events. */
void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}
3944
/* Fills *pid_set (cleared first) with the unit's main and control PIDs, for use as the exclusion set
 * when signalling a whole cgroup. */
static int unit_pid_set(Unit *u, Set **pid_set) {
        int r;

        assert(u);
        assert(pid_set);

        set_clear(*pid_set); /* This updates input. */

        /* Exclude the main/control pids from being killed via the cgroup */

        PidRef *pid;
        FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
                if (pidref_is_set(pid)) {
                        r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
                        if (r < 0)
                                return r;
                }

        return 0;
}
3965
/* Per-process logging callback for cg_kill_recursive(): logs each auxiliary process about to be
 * signalled. Always returns 1 ("go ahead and kill"). */
static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = ASSERT_PTR(userdata);

        /* Best effort — the process may already be gone, in which case comm stays NULL. */
        (void) pidref_get_comm(pid, &comm);

        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid->pid, strna(comm));

        return 1;
}
3977
3978 static int kill_or_sigqueue(PidRef* pidref, int signo, int code, int value) {
3979 assert(pidref_is_set(pidref));
3980 assert(SIGNAL_VALID(signo));
3981
3982 switch (code) {
3983
3984 case SI_USER:
3985 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
3986 return pidref_kill(pidref, signo);
3987
3988 case SI_QUEUE:
3989 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
3990 return pidref_sigqueue(pidref, signo, value);
3991
3992 default:
3993 assert_not_reached();
3994 }
3995 }
3996
/* Signals a single tracked process ('type' is "main"/"control", for log/error messages) on client
 * request. Returns 1 if the signal was delivered, 0 if there was no process (or it vanished, -ESRCH),
 * negative errno on failure — in which case the error is reported both in the logs and, if ret_error
 * is non-NULL, to the bus client. */
static int unit_kill_one(
                Unit *u,
                PidRef *pidref,
                const char *type,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        int r;

        assert(u);
        assert(type);

        if (!pidref_is_set(pidref))
                return 0;

        /* Resolve the process name up front, purely for nicer log/error messages. */
        _cleanup_free_ char *comm = NULL;
        (void) pidref_get_comm(pidref, &comm);

        r = kill_or_sigqueue(pidref, signo, code, value);
        if (r == -ESRCH)
                /* Process already gone — not an error for our purposes. */
                return 0;
        if (r < 0) {
                /* Report this failure both to the logs and to the client */
                if (ret_error)
                        sd_bus_error_set_errnof(
                                        ret_error, r,
                                        "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), type, pidref->pid, strna(comm));

                return log_unit_warning_errno(
                                u, r,
                                "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
                                signal_to_string(signo), type, pidref->pid, strna(comm));
        }

        log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), type, pidref->pid, strna(comm));
        return 1; /* killed */
}
4038
/* Explicit user-requested killing of unit processes: depending on 'who', signals the main process,
 * the control process, and/or (for SI_USER only) all remaining cgroup members. Errors from the
 * individual deliveries are aggregated; only the first is reported via ret_error. The *_FAIL variants
 * additionally fail if nothing at all was killed. */
int unit_kill(
                Unit *u,
                KillWho who,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;
        int ret = 0, r;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(who >= 0);
        assert(who < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_setf(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        /* Validate the request up front, before signalling anything. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                /* Only pass ret_error through if no earlier error claimed it. */
                r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path && code == SI_USER) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                r = unit_pid_set(u, &pid_set);
                if (r < 0)
                        return log_oom();

                r = cg_kill_recursive(u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
                        if (ret >= 0)
                                sd_bus_error_set_errnof(
                                                ret_error, r,
                                                "Failed to send signal SIG%s to auxiliary processes: %m",
                                                signal_to_string(signo));

                        log_unit_warning_errno(
                                        u, r,
                                        "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                        signal_to_string(signo));

                        RET_GATHER(ret, r);
                }

                killed = killed || r >= 0;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret >= 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}
4129
/* Collects in *s the set of units following this one (per the type's following_set() hook), or sets
 * *s to NULL if the type has no such concept. */
int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (UNIT_VTABLE(u)->following_set)
                return UNIT_VTABLE(u)->following_set(u, s);

        *s = NULL;
        return 0;
}
4140
/* Returns the unit-file state (enabled/disabled/masked/…), computing and caching it on first use.
 * Lookup failures are cached as UNIT_FILE_BAD rather than retried. */
UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        /* Negative means "not determined yet"; only units with a fragment file have a file state. */
        if (u->unit_file_state < 0 && u->fragment_path) {
                r = unit_file_get_state(
                                u->manager->runtime_scope,
                                NULL,
                                u->id,
                                &u->unit_file_state);
                if (r < 0)
                        u->unit_file_state = UNIT_FILE_BAD;
        }

        return u->unit_file_state;
}
4158
/* Returns the preset policy for this unit (enable/disable/…), computing and caching it on first use.
 * Errors (negative values) are cached too, so the lookup is only attempted once. */
PresetAction unit_get_unit_file_preset(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path) {
                _cleanup_free_ char *bn = NULL;

                r = path_extract_filename(u->fragment_path, &bn);
                if (r < 0)
                        return (u->unit_file_preset = r);

                /* NOTE(review): path_extract_filename() appears to return O_DIRECTORY when the path
                 * refers to a directory — confirm against path-util.h. */
                if (r == O_DIRECTORY)
                        return (u->unit_file_preset = -EISDIR);

                u->unit_file_preset = unit_file_query_preset(
                                u->manager->runtime_scope,
                                NULL,
                                bn,
                                NULL);
        }

        return u->unit_file_preset;
}
4183
/* Points 'ref' (owned by 'source') at 'target', linking it into the target's refs_by_target list so
 * the target knows who references it. Any previous target of 'ref' is unset first. Returns 'target'. */
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        /* A UnitRef holds at most one target at a time. */
        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}
4197
/* Clears 'ref': unlinks it from its target's refs_by_target list and queues the target for GC, since
 * dropping the reference may make it collectable. Safe on an already-unset ref. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
4211
4212 static int user_from_unit_name(Unit *u, char **ret) {
4213
4214 static const uint8_t hash_key[] = {
4215 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4216 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4217 };
4218
4219 _cleanup_free_ char *n = NULL;
4220 int r;
4221
4222 r = unit_name_to_prefix(u->id, &n);
4223 if (r < 0)
4224 return r;
4225
4226 if (valid_user_group_name(n, 0)) {
4227 *ret = TAKE_PTR(n);
4228 return 0;
4229 }
4230
4231 /* If we can't use the unit name as a user name, then let's hash it and use that */
4232 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4233 return -ENOMEM;
4234
4235 return 0;
4236 }
4237
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized. Returns 0 on success, negative errno on failure. */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to the user's home, unless one was
                 * configured explicitly. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Drop capabilities from the bounding set that would allow escaping the respective
                 * sandboxing switches. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* Without an explicit User=, derive one from the unit name. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        /* Without an explicit Group=, use a group named like the user. */
                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                        exec_directory_sort(ec->directories + dt);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens an otherwise-permissive device policy. */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }
                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        /* ProtectClock= still needs read access to the RTC device node. */
                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }

                        /* If there are encrypted credentials we might need to access the TPM. */
                        if (exec_context_has_encrypted_credentials(ec)) {
                                r = cgroup_context_add_device_allow(cc, "char-tpm", CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
4365
4366 ExecContext *unit_get_exec_context(const Unit *u) {
4367 size_t offset;
4368 assert(u);
4369
4370 if (u->type < 0)
4371 return NULL;
4372
4373 offset = UNIT_VTABLE(u)->exec_context_offset;
4374 if (offset <= 0)
4375 return NULL;
4376
4377 return (ExecContext*) ((uint8_t*) u + offset);
4378 }
4379
4380 KillContext *unit_get_kill_context(Unit *u) {
4381 size_t offset;
4382 assert(u);
4383
4384 if (u->type < 0)
4385 return NULL;
4386
4387 offset = UNIT_VTABLE(u)->kill_context_offset;
4388 if (offset <= 0)
4389 return NULL;
4390
4391 return (KillContext*) ((uint8_t*) u + offset);
4392 }
4393
4394 CGroupContext *unit_get_cgroup_context(Unit *u) {
4395 size_t offset;
4396
4397 if (u->type < 0)
4398 return NULL;
4399
4400 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4401 if (offset <= 0)
4402 return NULL;
4403
4404 return (CGroupContext*) ((uint8_t*) u + offset);
4405 }
4406
4407 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4408 size_t offset;
4409
4410 if (u->type < 0)
4411 return NULL;
4412
4413 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4414 if (offset <= 0)
4415 return NULL;
4416
4417 return *(ExecRuntime**) ((uint8_t*) u + offset);
4418 }
4419
4420 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4421 assert(u);
4422
4423 if (UNIT_WRITE_FLAGS_NOOP(flags))
4424 return NULL;
4425
4426 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4427 return u->manager->lookup_paths.transient;
4428
4429 if (flags & UNIT_PERSISTENT)
4430 return u->manager->lookup_paths.persistent_control;
4431
4432 if (flags & UNIT_RUNTIME)
4433 return u->manager->lookup_paths.runtime_control;
4434
4435 return NULL;
4436 }
4437
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        /* At most one of the three escaping modes may be requested at a time. */
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL; /* OOM */

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                /* In the ENV variant also double up '$', so it survives Exec*= expansion. */
                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        free_and_replace(t, t2);
                }

                /* Escape for double-quoting; operate on the intermediate result if one exists. */
                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;
        }

        /* Hand ownership of the scratch buffer (if any) to the caller. */
        *buf = TAKE_PTR(t);
        return s;
}
4493
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0; /* number of bytes written into 'result' so far (excluding NUL) */

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. Each entry is wrapped in double quotes and
         * entries are separated by single spaces. Returns NULL on allocation failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1)) /* +1 leaves room for the final NUL */
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Also handles the empty-list case, where no byte has been written yet. */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4533
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists one setting for this unit: either appended to the transient unit file currently
         * being generated, or written as a drop-in file under the directory selected via 'flags'.
         * Returns 0 on success, negative errno on failure. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private: < 0 = nothing written yet, 0 = [Unit] was last, > 0 = private
                 * section was last. A blank line separates a new section from the previous one. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p = drop-in directory, q = drop-in file path, both allocated. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership moved into u->dropin_paths */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4619
4620 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4621 _cleanup_free_ char *p = NULL;
4622 va_list ap;
4623 int r;
4624
4625 assert(u);
4626 assert(name);
4627 assert(format);
4628
4629 if (UNIT_WRITE_FLAGS_NOOP(flags))
4630 return 0;
4631
4632 va_start(ap, format);
4633 r = vasprintf(&p, format, ap);
4634 va_end(ap);
4635
4636 if (r < 0)
4637 return -ENOMEM;
4638
4639 return unit_write_setting(u, flags, name, p);
4640 }
4641
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        /* Converts this unit into a transient one: opens a fresh unit file in the transient
         * lookup directory and resets all on-disk state (fragment, source, drop-ins) so the unit
         * will be re-loaded from the transient file. Returns 0 on success, negative errno on
         * failure. */

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any transient file already in creation. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget all previous on-disk state, so the next load starts from the transient file
         * alone. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4687
4688 static int log_kill(const PidRef *pid, int sig, void *userdata) {
4689 _cleanup_free_ char *comm = NULL;
4690
4691 assert(pidref_is_set(pid));
4692
4693 (void) pidref_get_comm(pid, &comm);
4694
4695 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4696 only, like for example systemd's own PAM stub process. */
4697 if (comm && comm[0] == '(')
4698 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4699 * here to let the manager know that a process was killed. */
4700 return 1;
4701
4702 log_unit_notice(userdata,
4703 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4704 pid->pid,
4705 strna(comm),
4706 signal_to_string(sig));
4707
4708 return 1;
4709 }
4710
4711 static int operation_to_signal(
4712 const KillContext *c,
4713 KillOperation k,
4714 bool *ret_noteworthy) {
4715
4716 assert(c);
4717
4718 switch (k) {
4719
4720 case KILL_TERMINATE:
4721 case KILL_TERMINATE_AND_LOG:
4722 *ret_noteworthy = false;
4723 return c->kill_signal;
4724
4725 case KILL_RESTART:
4726 *ret_noteworthy = false;
4727 return restart_kill_signal(c);
4728
4729 case KILL_KILL:
4730 *ret_noteworthy = true;
4731 return c->final_kill_signal;
4732
4733 case KILL_WATCHDOG:
4734 *ret_noteworthy = true;
4735 return c->watchdog_signal;
4736
4737 default:
4738 assert_not_reached();
4739 }
4740 }
4741
static int unit_kill_context_one(
                Unit *u,
                const PidRef *pidref,
                const char *type,     /* "main" or "control", for log messages only */
                bool is_alien,        /* true if the process was not forked by us */
                int sig,
                bool send_sighup,
                cg_kill_log_func_t log_func) {

        int r;

        assert(u);
        assert(type);

        /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */

        if (!pidref_is_set(pidref))
                return 0;

        if (log_func)
                log_func(pidref, sig, u);

        /* SIGCONT is sent along so a stopped process actually receives the signal. */
        r = pidref_kill_and_sigcont(pidref, sig);
        if (r == -ESRCH)
                /* Already gone: still worth waiting for SIGCHLD unless the process is alien
                 * (a non-child never delivers SIGCHLD to us). */
                return !is_alien;
        if (r < 0) {
                _cleanup_free_ char *comm = NULL;

                (void) pidref_get_comm(pidref, &comm);
                return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
        }

        if (send_sighup)
                (void) pidref_kill(pidref, SIGHUP);

        return !is_alien;
}
4779
int unit_kill_context(Unit *u, KillOperation k) {
        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        KillContext *c = unit_get_kill_context(u);
        if (!c || c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        /* SIGHUP is only sent on plain termination, and never doubled up if it is the kill signal
         * itself. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* First, signal the main and control processes directly. */
        bool is_alien;
        PidRef *main_pid = unit_main_pid_full(u, &is_alien);
        r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        /* Then, sweep the rest of the cgroup, if the kill mode asks for it. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                r = unit_pid_set(u, &pid_set);
                if (r < 0)
                        return r;

                r = cg_kill_recursive(
                                u->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Refresh the pid set, processes may have come or gone since the
                                 * first sweep. */
                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return r;

                                (void) cg_kill_recursive(
                                                u->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* kill_log= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
4863
int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
        Hashmap **unit_map, **manager_map;
        int r;

        assert(u);
        assert(path);
        assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);

        unit_map = &u->mounts_for[type];
        manager_map = &u->manager->units_needing_mounts_for[type];

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Record in the manager's prefix table that this unit cares about every prefix of the
         * path (including the path itself). */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(*manager_map, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(*manager_map, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership moved into the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4943
4944 int unit_setup_exec_runtime(Unit *u) {
4945 _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
4946 _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
4947 _cleanup_set_free_ Set *units = NULL;
4948 ExecRuntime **rt;
4949 ExecContext *ec;
4950 size_t offset;
4951 Unit *other;
4952 int r;
4953
4954 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4955 assert(offset > 0);
4956
4957 /* Check if there already is an ExecRuntime for this unit? */
4958 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4959 if (*rt)
4960 return 0;
4961
4962 ec = ASSERT_PTR(unit_get_exec_context(u));
4963
4964 r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
4965 if (r < 0)
4966 return r;
4967
4968 /* Try to get it from somebody else */
4969 SET_FOREACH(other, units) {
4970 r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
4971 if (r < 0)
4972 return r;
4973 if (r > 0)
4974 break;
4975 }
4976
4977 if (!esr) {
4978 r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
4979 if (r < 0)
4980 return r;
4981 }
4982
4983 if (ec->dynamic_user) {
4984 r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
4985 if (r < 0)
4986 return r;
4987 }
4988
4989 r = exec_runtime_make(u, ec, esr, dcreds, rt);
4990 if (r < 0)
4991 return r;
4992
4993 TAKE_PTR(esr);
4994 TAKE_PTR(dcreds);
4995
4996 return r;
4997 }
4998
4999 bool unit_type_supported(UnitType t) {
5000 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5001 int r;
5002
5003 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5004
5005 if (cache[t] == 0) {
5006 char *e;
5007
5008 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5009
5010 r = getenv_bool(ascii_strupper(e));
5011 if (r < 0 && r != -ENXIO)
5012 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5013
5014 cache[t] = r == 0 ? -1 : 1;
5015 }
5016 if (cache[t] < 0)
5017 return false;
5018
5019 if (!unit_vtable[t]->supported)
5020 return true;
5021
5022 return unit_vtable[t]->supported();
5023 }
5024
5025 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5026 int r;
5027
5028 assert(u);
5029 assert(where);
5030
5031 if (!unit_log_level_test(u, LOG_NOTICE))
5032 return;
5033
5034 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5035 if (r > 0 || r == -ENOTDIR)
5036 return;
5037 if (r < 0) {
5038 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5039 return;
5040 }
5041
5042 log_unit_struct(u, LOG_NOTICE,
5043 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5044 LOG_UNIT_INVOCATION_ID(u),
5045 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5046 "WHERE=%s", where);
5047 }
5048
5049 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5050 _cleanup_free_ char *canonical_where = NULL;
5051 int r;
5052
5053 assert(u);
5054 assert(where);
5055
5056 r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5057 if (r < 0) {
5058 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5059 return 0;
5060 }
5061
5062 /* We will happily ignore a trailing slash (or any redundant slashes) */
5063 if (path_equal(where, canonical_where))
5064 return 0;
5065
5066 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5067 log_unit_struct(u, LOG_ERR,
5068 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5069 LOG_UNIT_INVOCATION_ID(u),
5070 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5071 "WHERE=%s", where);
5072
5073 return -ELOOP;
5074 }
5075
5076 bool unit_is_pristine(Unit *u) {
5077 assert(u);
5078
5079 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5080 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5081 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5082 *
5083 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5084 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5085 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5086 */
5087
5088 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5089 !u->fragment_path &&
5090 !u->source_path &&
5091 !u->job &&
5092 !u->merged_into;
5093 }
5094
5095 PidRef* unit_control_pid(Unit *u) {
5096 assert(u);
5097
5098 if (UNIT_VTABLE(u)->control_pid)
5099 return UNIT_VTABLE(u)->control_pid(u);
5100
5101 return NULL;
5102 }
5103
5104 PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
5105 assert(u);
5106
5107 if (UNIT_VTABLE(u)->main_pid)
5108 return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);
5109
5110 if (ret_is_alien)
5111 *ret_is_alien = false;
5112 return NULL;
5113 }
5114
static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
        int r;

        assert(u);

        /* Adds or removes 'element' (a UID or GID, per 'source') in all NFT sets configured for
         * this unit's cgroup context that match 'source'. Best-effort: all failures are logged and
         * swallowed. Only applies to the system manager. */

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        CGroupContext *c;
        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        /* Lazily initialize the manager-wide firewall context on first use. */
        if (!u->manager->fw_ctx) {
                r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
                if (r < 0)
                        return;

                assert(u->manager->fw_ctx);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
                if (nft_set->source != source)
                        continue;

                r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
                if (r < 0)
                        log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
                                          add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
                else
                        log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
                                  add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
        }
}
5149
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit, and invalidates the stored value. A no-op if
         * no valid UID/GID is currently referenced. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
5174
5175 static void unit_unref_uid(Unit *u, bool destroy_now) {
5176 assert(u);
5177
5178 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);
5179
5180 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5181 }
5182
5183 static void unit_unref_gid(Unit *u, bool destroy_now) {
5184 assert(u);
5185
5186 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);
5187
5188 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5189 }
5190
5191 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5192 assert(u);
5193
5194 unit_unref_uid(u, destroy_now);
5195 unit_unref_gid(u, destroy_now);
5196 }
5197
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero.
         *
         * Returns 1 if a new reference was taken, 0 if the same UID/GID was already referenced,
         * -EBUSY if a different one is referenced, or another negative errno on failure. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
5235
/* Takes a reference on 'uid' for this unit; see unit_ref_uid_internal() for return values. */
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
5239
/* Takes a reference on 'gid' for this unit; uid_t/gid_t layout compatibility is asserted in
 * unit_ref_uid_internal(). */
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
5243
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither.
         * Invalid UID/GID arguments are silently skipped. Returns > 0 if at least one new
         * reference was taken, 0 if nothing changed, negative errno on failure. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, so the "both or neither"
                         * guarantee holds. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
5269
5270 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5271 ExecContext *c;
5272 int r;
5273
5274 assert(u);
5275
5276 c = unit_get_exec_context(u);
5277
5278 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5279 if (r < 0)
5280 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5281
5282 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
5283 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);
5284
5285 return r;
5286 }
5287
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                /* A new reference was taken: the unit's externally visible state changed, announce
                 * it on the bus. */
                unit_add_to_dbus_queue(u);
}
5301
5302 int unit_acquire_invocation_id(Unit *u) {
5303 sd_id128_t id;
5304 int r;
5305
5306 assert(u);
5307
5308 r = sd_id128_randomize(&id);
5309 if (r < 0)
5310 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5311
5312 r = unit_set_invocation_id(u, id);
5313 if (r < 0)
5314 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5315
5316 unit_add_to_dbus_queue(u);
5317 return 0;
5318 }
5319
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Fills in the ExecParameters structure handed to a forked-off process with settings taken
         * partly from the manager and partly from this unit. Returns 0 on success, negative errno
         * on failure. */

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        r = strdup_or_null(manager_get_confirm_spawn(u->manager), &p->confirm_spawn);
        if (r < 0)
                return r;

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        /* Only hand out the LSM-BPF map fd if the feature is in use and none was set yet. */
        if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
                int fd = bpf_restrict_fs_map_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_restrict_fs_map_fd = fd;
        }

        /* Write end of the manager's user lookup pipe, used by the child to report resolved
         * UIDs/GIDs. */
        p->user_lookup_fd = u->manager->user_lookup_fds[1];

        p->cgroup_id = u->cgroup_id;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        return 0;
}
5371
int unit_fork_helper_process(Unit *u, const char *name, PidRef *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0
         * in the child, and > 0 in the parent. On success in the parent, *ret is filled in with a
         * reference to the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
                int q;

                /* Parent */

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child: reset signal handling inherited from the manager */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5416
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        /* Forks off a helper in the unit's cgroup that recursively removes the specified paths, and
         * registers the child with the unit so its exit is noticed. On success, *ret_pid is filled in
         * with a reference to the child. */

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                int ret = EXIT_SUCCESS;

                /* Child: remove each path; remember failures but keep going through the rest */
                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
5448
5449 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5450 assert(deps);
5451 assert(other);
5452
5453 if (di.origin_mask == 0 && di.destination_mask == 0)
5454 /* No bit set anymore, let's drop the whole entry */
5455 assert_se(hashmap_remove(deps, other));
5456 else
5457 /* Mask was reduced, let's update the entry */
5458 assert_se(hashmap_update(deps, other, di.data) == 0);
5459 }
5460
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from 'deps', which invalidates the
                 * iterator. Hence restart the inner iteration after every modification, until one
                 * full pass makes no change. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* Skip entries whose origin bits lie entirely outside 'mask' */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5516
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        /* Computes the path of the unit's invocation ID symlink: below /run/systemd/units/ for the
         * system manager, below the user's XDG runtime directory otherwise. On success returns 0 and
         * stores the newly allocated path in *ret (caller frees). */

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;
                r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
                if (r < 0)
                        return r;
                p = strjoin(user_path, u->id);
        }

        if (!p)
                return -ENOMEM;

        *ret = p;
        return 0;
}
5540
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        /* Exports the unit's invocation ID as a symlink whose target is the ID string. A no-op if
         * already exported or if no invocation ID is set. */

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlink_atomic_label(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5564
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        /* Exports the unit's maximum log level as a symlink whose target is the single level digit.
         * A no-op if already exported or if no maximum is configured (log_level_max < 0). */

        if (u->exported_log_level_max)
                return 0;

        if (c->log_level_max < 0)
                return 0;

        /* syslog levels are 0…7, hence a single character suffices */
        assert(c->log_level_max <= 7);

        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5592
5593 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5594 _cleanup_close_ int fd = -EBADF;
5595 struct iovec *iovec;
5596 const char *p;
5597 char *pattern;
5598 le64_t *sizes;
5599 ssize_t n;
5600 int r;
5601
5602 if (u->exported_log_extra_fields)
5603 return 0;
5604
5605 if (c->n_log_extra_fields <= 0)
5606 return 0;
5607
5608 sizes = newa(le64_t, c->n_log_extra_fields);
5609 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5610
5611 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5612 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5613
5614 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5615 iovec[i*2+1] = c->log_extra_fields[i];
5616 }
5617
5618 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5619 pattern = strjoina(p, ".XXXXXX");
5620
5621 fd = mkostemp_safe(pattern);
5622 if (fd < 0)
5623 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5624
5625 n = writev(fd, iovec, c->n_log_extra_fields*2);
5626 if (n < 0) {
5627 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5628 goto fail;
5629 }
5630
5631 (void) fchmod(fd, 0644);
5632
5633 if (rename(pattern, p) < 0) {
5634 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5635 goto fail;
5636 }
5637
5638 u->exported_log_extra_fields = true;
5639 return 0;
5640
5641 fail:
5642 (void) unlink(pattern);
5643 return r;
5644 }
5645
static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        /* Exports the unit's log rate limit interval (in µs, per the _usec field) as a symlink. A
         * no-op if already exported or if no interval is configured. */

        if (u->exported_log_ratelimit_interval)
                return 0;

        if (c->log_ratelimit_interval_usec == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_ratelimit_interval = true;
        return 0;
}
5672
static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        /* Exports the unit's log rate limit burst count as a symlink. A no-op if already exported or
         * if no burst is configured. */

        if (u->exported_log_ratelimit_burst)
                return 0;

        if (c->log_ratelimit_burst == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_ratelimit_burst = true;
        return 0;
}
5699
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't
         * really apply to communication between the journal and systemd, as we assume that these two daemons live in
         * the same namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The remaining properties are only exported by the system manager */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
5737
5738 void unit_unlink_state_files(Unit *u) {
5739 const char *p;
5740
5741 assert(u);
5742
5743 if (!u->id)
5744 return;
5745
5746 /* Undoes the effect of unit_export_state() */
5747
5748 if (u->exported_invocation_id) {
5749 _cleanup_free_ char *invocation_path = NULL;
5750 int r = unit_get_invocation_path(u, &invocation_path);
5751 if (r >= 0) {
5752 (void) unlink(invocation_path);
5753 u->exported_invocation_id = false;
5754 }
5755 }
5756
5757 if (!MANAGER_IS_SYSTEM(u->manager))
5758 return;
5759
5760 if (u->exported_log_level_max) {
5761 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5762 (void) unlink(p);
5763
5764 u->exported_log_level_max = false;
5765 }
5766
5767 if (u->exported_log_extra_fields) {
5768 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5769 (void) unlink(p);
5770
5771 u->exported_log_extra_fields = false;
5772 }
5773
5774 if (u->exported_log_ratelimit_interval) {
5775 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5776 (void) unlink(p);
5777
5778 u->exported_log_ratelimit_interval = false;
5779 }
5780
5781 if (u->exported_log_ratelimit_burst) {
5782 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5783 (void) unlink(p);
5784
5785 u->exported_log_ratelimit_burst = false;
5786 }
5787 }
5788
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit: cgroup, accounting
         * reset, exported state files, and the exec runtime. */

        (void) unit_realize_cgroup(u);

        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
5817
static bool ignore_leftover_process(const char *comm) {
        /* Processes whose comm name starts with '(' are most likely our own helper processes
         * (PAM?) — those are not worth reporting as left-overs. */
        if (!comm)
                return false;

        return comm[0] == '(';
}
5821
int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        /* cg_kill_log_func_t callback used while starting a unit: 'sig' is unused, 'userdata' is the
         * Unit. Returns 1 if the left-over process was logged, 0 if it was ignored. */

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid->pid, strna(comm));

        return 1;
}
5841
int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        /* cg_kill_log_func_t callback used while stopping a unit: 'sig' is unused, 'userdata' is the
         * Unit. Returns 1 if the left-over process was logged, 0 if it was ignored. */

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(userdata,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid->pid, strna(comm));

        return 1;
}
5860
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        /* Walks all processes remaining in the unit's cgroup and invokes log_func for each one; with
         * sig=0 nothing is actually killed. Returns 0 if the unit has no cgroup. */

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return 0;

        return cg_kill_recursive(
                        u->cgroup_path,
                        /* sig= */ 0,
                        /* flags= */ 0,
                        /* set= */ NULL,
                        log_func,
                        u);
}
5877
bool unit_needs_console(Unit *u) {
        ExecContext *ec;
        UnitActiveState state;

        assert(u);

        /* Returns true if the unit, in its current state, may need access to the console. */

        state = unit_active_state(u);

        /* Inactive or failed units cannot be using the console */
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return false;

        if (UNIT_VTABLE(u)->needs_console)
                return UNIT_VTABLE(u)->needs_console(u);

        /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
        ec = unit_get_exec_context(u);
        if (!ec)
                return false;

        return exec_context_may_touch_console(ec);
}
5899
int unit_pid_attachable(Unit *u, const PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if attachable; otherwise a negative value with
         * 'error' set for the bus caller. */

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
5927
void unit_log_success(Unit *u) {
        assert(u);

        /* Emits the structured "Deactivated successfully." message for the unit.
         *
         * Let's show it in debug mode (when manager is user) rather than in info mode. This message has
         * low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */
        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}
5940
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Emits the structured failure message for the unit, with the result string attached as the
         * UNIT_RESULT= journal field. */

        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5951
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Emits the structured "skipped" message for the unit, with the result string attached as the
         * UNIT_RESULT= journal field. */

        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5962
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* Emits a structured message about a process of the unit having exited. 'kind' describes the
         * process, 'command' the command line; 'code'/'status' follow sigchld semantics (CLD_EXITED
         * etc.). */

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
6001
int unit_exit_status(Unit *u) {
        assert(u);

        /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
         * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
         * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
         * service process has exited abnormally (signal/coredump). */

        if (!UNIT_VTABLE(u)->exit_status)
                return -EOPNOTSUPP;

        /* Delegate to the unit-type-specific implementation */
        return UNIT_VTABLE(u)->exit_status(u);
}
6015
6016 int unit_failure_action_exit_status(Unit *u) {
6017 int r;
6018
6019 assert(u);
6020
6021 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6022
6023 if (u->failure_action_exit_status >= 0)
6024 return u->failure_action_exit_status;
6025
6026 r = unit_exit_status(u);
6027 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6028 return 255;
6029
6030 return r;
6031 }
6032
6033 int unit_success_action_exit_status(Unit *u) {
6034 int r;
6035
6036 assert(u);
6037
6038 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6039
6040 if (u->success_action_exit_status >= 0)
6041 return u->success_action_exit_status;
6042
6043 r = unit_exit_status(u);
6044 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6045 return 255;
6046
6047 return r;
6048 }
6049
6050 int unit_test_trigger_loaded(Unit *u) {
6051 Unit *trigger;
6052
6053 /* Tests whether the unit to trigger is loaded */
6054
6055 trigger = UNIT_TRIGGER(u);
6056 if (!trigger)
6057 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6058 "Refusing to start, no unit to trigger.");
6059 if (trigger->load_state != UNIT_LOADED)
6060 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6061 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6062
6063 return 0;
6064 }
6065
void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
        assert(u);
        assert(context);

        /* Removes the unit's runtime resources: its runtime directory (unless preservation is
         * requested), its credentials, and its mount namespace directory. */

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(u);
        exec_context_destroy_mount_ns_dir(u);
}
6077
int unit_clean(Unit *u, ExecCleanMask mask) {
        UnitActiveState state;

        assert(u);

        /* Cleans the resource types selected by 'mask' for this unit, by delegating to the unit
         * type's clean() implementation.
         *
         * Special return values:
         *
         * -EOPNOTSUPP → cleaning not supported for this unit type
         * -EUNATCH → cleaning not defined for this resource type
         * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
         * a job queued or similar
         */

        if (!UNIT_VTABLE(u)->clean)
                return -EOPNOTSUPP;

        if (mask == 0)
                return -EUNATCH;

        if (u->load_state != UNIT_LOADED)
                return -EBUSY;

        if (u->job)
                return -EBUSY;

        /* Cleaning is only allowed while the unit is fully inactive */
        state = unit_active_state(u);
        if (state != UNIT_INACTIVE)
                return -EBUSY;

        return UNIT_VTABLE(u)->clean(u, mask);
}
6109
int unit_can_clean(Unit *u, ExecCleanMask *ret) {
        assert(u);

        /* Determines which resource types may be cleaned for this unit, storing the mask in *ret
         * (0 if cleaning is unsupported for the unit type or the unit isn't loaded). */

        if (!UNIT_VTABLE(u)->clean ||
            u->load_state != UNIT_LOADED) {
                *ret = 0;
                return 0;
        }

        /* When the clean() method is set, can_clean() really should be set too */
        assert(UNIT_VTABLE(u)->can_clean);

        return UNIT_VTABLE(u)->can_clean(u, ret);
}
6124
6125 bool unit_can_start_refuse_manual(Unit *u) {
6126 return unit_can_start(u) && !u->refuse_manual_start;
6127 }
6128
6129 bool unit_can_stop_refuse_manual(Unit *u) {
6130 return unit_can_stop(u) && !u->refuse_manual_stop;
6131 }
6132
6133 bool unit_can_isolate_refuse_manual(Unit *u) {
6134 return unit_can_isolate(u) && !u->refuse_manual_start;
6135 }
6136
void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret, FreezerState *ret_target) {
        Unit *slice;
        FreezerState curr, parent, next, tgt;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_PARENT_FREEZE,
                      FREEZER_THAW, FREEZER_PARENT_THAW));
        assert(ret);
        assert(ret_target);

        /* This function determines the correct freezer state transitions for a unit
         * given the action being requested. It returns the next state, and also the "target",
         * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
         * ultimately want to achieve. */

        curr = u->freezer_state;
        slice = UNIT_GET_SLICE(u);
        if (slice)
                parent = slice->freezer_state;
        else
                /* Units without a slice behave as if their parent were running */
                parent = FREEZER_RUNNING;

        if (action == FREEZER_FREEZE) {
                /* We always "promote" a freeze initiated by parent into a normal freeze */
                if (IN_SET(curr, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = FREEZER_FROZEN;
                else
                        next = FREEZER_FREEZING;
        } else if (action == FREEZER_THAW) {
                /* Thawing is the most complicated operation here, because we can't thaw a unit
                 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
                 * initiated by parent if the parent is frozen */
                if (IN_SET(curr, FREEZER_RUNNING, FREEZER_THAWING, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                        next = curr;
                else if (curr == FREEZER_FREEZING) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FREEZING_BY_PARENT;
                } else {
                        assert(curr == FREEZER_FROZEN);
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FROZEN_BY_PARENT;
                }
        } else if (action == FREEZER_PARENT_FREEZE) {
                /* We need to avoid accidentally demoting units frozen manually */
                if (IN_SET(curr, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = curr;
                else
                        next = FREEZER_FREEZING_BY_PARENT;
        } else {
                assert(action == FREEZER_PARENT_THAW);

                /* We don't want to thaw units from a parent if they were frozen
                 * manually, so for such units this action is a no-op */
                if (IN_SET(curr, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
                        next = curr;
                else
                        next = FREEZER_THAWING;
        }

        /* Collapse the transitional next state into the final state we are driving towards */
        tgt = freezer_state_finish(next);
        if (tgt == FREEZER_FROZEN_BY_PARENT)
                tgt = FREEZER_FROZEN;
        assert(IN_SET(tgt, FREEZER_RUNNING, FREEZER_FROZEN));

        *ret = next;
        *ret_target = tgt;
}
6208
bool unit_can_freeze(Unit *u) {
        assert(u);

        /* The root slice and the init scope are never considered freezable */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
                return false;

        /* Prefer the unit type's own answer, if it implements one */
        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        /* Otherwise: freezable iff the unit type implements the freezer at all */
        return UNIT_VTABLE(u)->freezer_action;
}
6220
6221 void unit_frozen(Unit *u) {
6222 assert(u);
6223
6224 u->freezer_state = u->freezer_state == FREEZER_FREEZING_BY_PARENT
6225 ? FREEZER_FROZEN_BY_PARENT
6226 : FREEZER_FROZEN;
6227
6228 log_unit_debug(u, "Unit now %s.", freezer_state_to_string(u->freezer_state));
6229
6230 bus_unit_send_pending_freezer_message(u, false);
6231 }
6232
void unit_thawed(Unit *u) {
        assert(u);

        /* Invoked once the unit's cgroup has finished thawing: reset the freezer state and send any
         * pending D-Bus freezer message. */

        u->freezer_state = FREEZER_RUNNING;

        log_unit_debug(u, "Unit thawed.");

        bus_unit_send_pending_freezer_message(u, false);
}
6242
int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        /* Initiates a freeze or thaw of the unit. Returns 1 if an asynchronous transition was
         * started, <= 0 on failure or if the type-specific implementation did nothing. */

        if (!cg_freezer_supported() || !unit_can_freeze(u))
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        /* Already transitioning in the requested direction? */
        if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
                return -EALREADY;
        if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
                return -EALREADY;
        /* A unit frozen on behalf of its parent slice cannot be thawed individually */
        if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                return -ECHILD;

        r = UNIT_VTABLE(u)->freezer_action(u, action);
        if (r <= 0)
                return r;

        assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
        return 1;
}
6277
6278 Condition *unit_find_failed_condition(Unit *u) {
6279 Condition *failed_trigger = NULL;
6280 bool has_succeeded_trigger = false;
6281
6282 if (u->condition_result)
6283 return NULL;
6284
6285 LIST_FOREACH(conditions, c, u->conditions)
6286 if (c->trigger) {
6287 if (c->result == CONDITION_SUCCEEDED)
6288 has_succeeded_trigger = true;
6289 else if (!failed_trigger)
6290 failed_trigger = c;
6291 } else if (c->result != CONDITION_SUCCEEDED)
6292 return c;
6293
6294 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6295 }
6296
/* String representations of CollectMode values (CollectMode= setting); the macro below generates
 * the to-string/from-string lookup helpers. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6303
6304 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6305 Unit *i;
6306
6307 assert(u);
6308
6309 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6310 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6311 * is NULL the first entry found), or NULL if not found. */
6312
6313 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6314 if (!other || other == i)
6315 return i;
6316
6317 return NULL;
6318 }
6319
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated.
         *
         * Returns the number of entries (>= 0), with the newly allocated array in *ret_array (caller
         * frees), or -ENOMEM. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}
6344
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        /* Similar to unit_get_dependency_array(), but also search the same dependency in other units.
         *
         * Breadth-first traversal: 'units' collects everything reached, 'queue' holds units whose
         * dependencies still need expanding. Note that the starting unit itself is only part of the
         * result if it is reachable via the atom. */

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                /* Already seen, don't expand it again */
                                continue;
                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
6371
int unit_arm_timer(
                Unit *u,
                sd_event_source **source,
                bool relative,
                usec_t usec,
                sd_event_time_handler_t handler) {

        int r;

        assert(u);
        assert(source);
        assert(handler);

        /* Arms (or re-arms) a one-shot CLOCK_MONOTONIC timer for this unit. If *source already
         * exists it is reused; passing USEC_INFINITY disables an existing timer (or is a no-op when
         * none exists). 'relative' selects whether 'usec' is relative to now or absolute. */

        if (*source) {
                if (usec == USEC_INFINITY)
                        return sd_event_source_set_enabled(*source, SD_EVENT_OFF);

                r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
                if (r < 0)
                        return r;

                return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
        }

        if (usec == USEC_INFINITY)
                return 0;

        r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
                        u->manager->event,
                        source,
                        CLOCK_MONOTONIC,
                        usec, 0,
                        handler,
                        u);
        if (r < 0)
                return r;

        /* Name the event source after the unit type, e.g. "service-timer", to ease debugging */
        const char *d = strjoina(unit_type_to_string(u->type), "-timer");
        (void) sd_event_source_set_description(*source, d);

        return 0;
}
6414
6415 static int unit_get_nice(Unit *u) {
6416 ExecContext *ec;
6417
6418 ec = unit_get_exec_context(u);
6419 return ec ? ec->nice : 0;
6420 }
6421
6422 static uint64_t unit_get_cpu_weight(Unit *u) {
6423 CGroupContext *cc;
6424
6425 cc = unit_get_cgroup_context(u);
6426 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6427 }
6428
6429 int unit_compare_priority(Unit *a, Unit *b) {
6430 int ret;
6431
6432 ret = CMP(a->type, b->type);
6433 if (ret != 0)
6434 return -ret;
6435
6436 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6437 if (ret != 0)
6438 return -ret;
6439
6440 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6441 if (ret != 0)
6442 return ret;
6443
6444 return strcmp(a->id, b->id);
6445 }
6446
/* Per-unit-type vtables for ActivationDetails. Only path and timer units provide one; all other
 * entries are implicitly NULL, which callers use to detect unsupported trigger types. */
const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};
6451
6452 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6453 _cleanup_free_ ActivationDetails *details = NULL;
6454
6455 assert(trigger_unit);
6456 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6457 assert(trigger_unit->id);
6458
6459 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6460 if (!details)
6461 return NULL;
6462
6463 *details = (ActivationDetails) {
6464 .n_ref = 1,
6465 .trigger_unit_type = trigger_unit->type,
6466 };
6467
6468 details->trigger_unit_name = strdup(trigger_unit->id);
6469 if (!details->trigger_unit_name)
6470 return NULL;
6471
6472 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6473 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6474
6475 return TAKE_PTR(details);
6476 }
6477
6478 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6479 if (!details)
6480 return NULL;
6481
6482 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6483 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6484
6485 free(details->trigger_unit_name);
6486
6487 return mfree(details);
6488 }
6489
6490 void activation_details_serialize(ActivationDetails *details, FILE *f) {
6491 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6492 return;
6493
6494 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6495 if (details->trigger_unit_name)
6496 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6497 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6498 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6499 }
6500
/* Deserializes a single "activation-details-*" key/value pair into *details, allocating the object
 * on the first (type) key and filling in fields on subsequent keys. Returns 0 on success, -EINVAL
 * for unknown keys or if a non-type key arrives before the type key, or another negative errno on
 * failure. */
int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
        int r;

        assert(key);
        assert(value);
        assert(details);

        if (!*details) {
                UnitType t;

                /* The type key must come first: the size of the object to allocate depends on it */
                if (!streq(key, "activation-details-unit-type"))
                        return -EINVAL;

                t = unit_type_from_string(value);
                if (t < 0)
                        return t;

                /* The activation details vtable has defined ops only for path and timer units */
                if (!activation_details_vtable[t])
                        return -EINVAL;

                *details = malloc0(activation_details_vtable[t]->object_size);
                if (!*details)
                        return -ENOMEM;

                **details = (ActivationDetails) {
                        .n_ref = 1,
                        .trigger_unit_type = t,
                };

                return 0;
        }

        if (streq(key, "activation-details-unit-name")) {
                r = free_and_strdup(&(*details)->trigger_unit_name, value);
                if (r < 0)
                        return r;

                return 0;
        }

        /* Hand any remaining keys to the type-specific implementation */
        if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
                return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);

        return -EINVAL;
}
6547
6548 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6549 int r = 0;
6550
6551 assert(strv);
6552
6553 if (!details)
6554 return 0;
6555
6556 if (!isempty(details->trigger_unit_name)) {
6557 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6558 if (!s)
6559 return -ENOMEM;
6560
6561 r = strv_consume(strv, TAKE_PTR(s));
6562 if (r < 0)
6563 return r;
6564 }
6565
6566 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6567 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6568 if (r < 0)
6569 return r;
6570 }
6571
6572 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6573 }
6574
6575 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6576 int r = 0;
6577
6578 assert(strv);
6579
6580 if (!details)
6581 return 0;
6582
6583 if (!isempty(details->trigger_unit_name)) {
6584 r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
6585 if (r < 0)
6586 return r;
6587 }
6588
6589 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6590 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6591 if (r < 0)
6592 return r;
6593 }
6594
6595 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6596 }
6597
6598 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6599
/* String table for UnitMountDependencyType, using the unit-file directive names as the strings. */
static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
        [UNIT_MOUNT_WANTS] = "WantsMountsFor",
        [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
};

DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);
6606
6607 UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
6608 switch (t) {
6609
6610 case UNIT_MOUNT_WANTS:
6611 return UNIT_WANTS;
6612
6613 case UNIT_MOUNT_REQUIRES:
6614 return UNIT_REQUIRES;
6615
6616 default:
6617 assert_not_reached();
6618 }
6619 }