/* src/core/unit.c — from the systemd source tree (thirdparty/systemd.git, merge of PR #29630) */
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
18 #include "bus-util.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
21 #include "chase.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
24 #include "dbus.h"
25 #include "dropin.h"
26 #include "env-util.h"
27 #include "escape.h"
28 #include "exec-credential.h"
29 #include "execute.h"
30 #include "fd-util.h"
31 #include "fileio-label.h"
32 #include "fileio.h"
33 #include "format-util.h"
34 #include "id128-util.h"
35 #include "install.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
40 #include "log.h"
41 #include "logarithm.h"
42 #include "macro.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
46 #include "rm-rf.h"
47 #include "serialize.h"
48 #include "set.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
51 #include "special.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "strv.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
62 #include "unit.h"
63 #include "user-util.h"
64 #include "virt.h"
65 #if BPF_FRAMEWORK
66 #include "bpf-link.h"
67 #endif
68
/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
#define MENTIONWORTHY_IP_BYTES (0ULL)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
#define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
#define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
/* Dispatch table: maps each unit type to the vtable implementing its behavior.
 * Indexed by UnitType; every type below _UNIT_TYPE_MAX has an entry. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
92
/* Allocates a zero-initialized Unit object of 'size' bytes (subclasses pass the size
 * of their larger embedding struct, hence the >= sizeof(Unit) assertion) and fills in
 * all field defaults that are not simply zero. Returns NULL on allocation failure.
 * Ownership of the returned object passes to the caller (freed via unit_free()). */
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;                   /* real type is assigned in unit_add_name() */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;                       /* preset state not determined yet */
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1;              /* inotify watches not installed yet */
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;              /* no CPU accounting sample taken yet */
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* Accounting fds start out closed, counters start out "unknown" */
        u->ip_accounting_ingress_map_fd = -EBADF;
        u->ip_accounting_egress_map_fd = -EBADF;
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        u->ipv4_allow_map_fd = -EBADF;
        u->ipv6_allow_map_fd = -EBADF;
        u->ipv4_deny_map_fd = -EBADF;
        u->ipv6_deny_map_fd = -EBADF;

        u->last_section_private = -1;

        /* The start rate limit follows the manager-wide configurable defaults… */
        u->start_ratelimit = (RateLimit) {
                m->defaults.start_limit_interval,
                m->defaults.start_limit_burst
        };

        /* …while the automatic start/stop rate limit is fixed: 16 events per 10s. */
        u->auto_start_stop_ratelimit = (const RateLimit) {
                10 * USEC_PER_SEC,
                16
        };

        return u;
}
144
145 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
146 _cleanup_(unit_freep) Unit *u = NULL;
147 int r;
148
149 u = unit_new(m, size);
150 if (!u)
151 return -ENOMEM;
152
153 r = unit_add_name(u, name);
154 if (r < 0)
155 return r;
156
157 *ret = TAKE_PTR(u);
158
159 return r;
160 }
161
162 bool unit_has_name(const Unit *u, const char *name) {
163 assert(u);
164 assert(name);
165
166 return streq_ptr(name, u->id) ||
167 set_contains(u->aliases, name);
168 }
169
/* Performs per-type initialization of a freshly named unit: seeds the cgroup, exec
 * and kill contexts (where the unit type has them) with the manager-wide defaults,
 * then hands off to the type-specific init() hook. Called from unit_add_name() once
 * the unit's type is known. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->defaults.cpu_accounting;
                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->blockio_accounting = u->manager->defaults.blockio_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                /* Slices don't get the default tasks limit applied */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
229
/* Adds 'donated_name' to the unit's alias set. On success (return 0) ownership of
 * the string passes to the set; on error (negative errno) the caller still owns it.
 * The name must not already be present — set_ensure_put() returning 0 would mean a
 * duplicate, which the assert below rules out. */
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->names is allocated. We may leave u->names
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}
242
/* Registers 'text' as a name for unit 'u'. If the unit already has a primary id, the
 * new name becomes an alias; otherwise it becomes the primary id, which also fixes the
 * unit's type and instance and triggers per-type initialization via unit_init().
 * Template names are instantiated against u->instance first. Returns 0 on success
 * (including when the name is already known), negative errno on any validation or
 * allocation failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                /* A template name like "foo@.service" only makes sense if we already
                 * know which instance to fill in. */
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* All names of a unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        /* Roll back the global registration done above */
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
337
/* Promotes one of the unit's existing names (or a template resolving to one) to be
 * the primary id. The previous id is demoted to an alias. Returns 0 on success or if
 * the name is already the id, -EINVAL for a template without an instance, -ENOENT if
 * the name is not a known alias of this unit. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the old id goes into the alias set, the chosen alias comes out */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
377
378 int unit_set_description(Unit *u, const char *description) {
379 int r;
380
381 assert(u);
382
383 r = free_and_strdup(&u->description, empty_to_null(description));
384 if (r < 0)
385 return r;
386 if (r > 0)
387 unit_add_to_dbus_queue(u);
388
389 return 0;
390 }
391
/* Returns true if any unit referenced via OnSuccess= or OnFailure= still has a job
 * (or no-op job) pending. Used by GC logic to keep a unit around until its handlers
 * have run. */
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
405
406 void unit_release_resources(Unit *u) {
407 UnitActiveState state;
408 ExecContext *ec;
409
410 assert(u);
411
412 if (u->job || u->nop_job)
413 return;
414
415 if (u->perpetual)
416 return;
417
418 state = unit_active_state(u);
419 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
420 return;
421
422 if (unit_will_restart(u))
423 return;
424
425 ec = unit_get_exec_context(u);
426 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
427 exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
428
429 if (UNIT_VTABLE(u)->release_resources)
430 UNIT_VTABLE(u)->release_resources(u);
431 }
432
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        /* Bus clients may still hold a reference on the unit via its bus track */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or on Success= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0) /* on error, err on the side of keeping the unit */
                        return false;
        }

        /* Finally, give the unit type a veto */
        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
507
508 void unit_add_to_load_queue(Unit *u) {
509 assert(u);
510 assert(u->type != _UNIT_TYPE_INVALID);
511
512 if (u->load_state != UNIT_STUB || u->in_load_queue)
513 return;
514
515 LIST_PREPEND(load_queue, u->manager->load_queue, u);
516 u->in_load_queue = true;
517 }
518
519 void unit_add_to_cleanup_queue(Unit *u) {
520 assert(u);
521
522 if (u->in_cleanup_queue)
523 return;
524
525 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
526 u->in_cleanup_queue = true;
527 }
528
529 void unit_add_to_gc_queue(Unit *u) {
530 assert(u);
531
532 if (u->in_gc_queue || u->in_cleanup_queue)
533 return;
534
535 if (!unit_may_gc(u))
536 return;
537
538 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
539 u->in_gc_queue = true;
540 }
541
542 void unit_add_to_dbus_queue(Unit *u) {
543 assert(u);
544 assert(u->type != _UNIT_TYPE_INVALID);
545
546 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
547 return;
548
549 /* Shortcut things if nobody cares */
550 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
551 sd_bus_track_count(u->bus_track) <= 0 &&
552 set_isempty(u->manager->private_buses)) {
553 u->sent_dbus_new_signal = true;
554 return;
555 }
556
557 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
558 u->in_dbus_queue = true;
559 }
560
561 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
562 assert(u);
563
564 if (u->in_stop_when_unneeded_queue)
565 return;
566
567 if (!u->stop_when_unneeded)
568 return;
569
570 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
571 return;
572
573 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
574 u->in_stop_when_unneeded_queue = true;
575 }
576
577 void unit_submit_to_start_when_upheld_queue(Unit *u) {
578 assert(u);
579
580 if (u->in_start_when_upheld_queue)
581 return;
582
583 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
584 return;
585
586 if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
587 return;
588
589 LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
590 u->in_start_when_upheld_queue = true;
591 }
592
593 void unit_submit_to_stop_when_bound_queue(Unit *u) {
594 assert(u);
595
596 if (u->in_stop_when_bound_queue)
597 return;
598
599 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
600 return;
601
602 if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
603 return;
604
605 LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
606 u->in_stop_when_bound_queue = true;
607 }
608
609 static bool unit_can_release_resources(Unit *u) {
610 ExecContext *ec;
611
612 assert(u);
613
614 if (UNIT_VTABLE(u)->release_resources)
615 return true;
616
617 ec = unit_get_exec_context(u);
618 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
619 return true;
620
621 return false;
622 }
623
624 void unit_submit_to_release_resources_queue(Unit *u) {
625 assert(u);
626
627 if (u->in_release_resources_queue)
628 return;
629
630 if (u->job || u->nop_job)
631 return;
632
633 if (u->perpetual)
634 return;
635
636 if (!unit_can_release_resources(u))
637 return;
638
639 LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
640 u->in_release_resources_queue = true;
641 }
642
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        /* u->dependencies maps dependency type -> (Hashmap of Unit* -> UnitDependencyInfo).
         * Steal each per-type map, and for every referenced unit drop the back-references
         * to u from all of that unit's own per-type maps. */
        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        /* 'other' may have become eligible for GC now that u no longer references it */
                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
664
/* Deletes the on-disk configuration of a transient unit: its fragment file and any
 * drop-ins that live below the transient lookup path (drop-ins from other, persistent
 * config directories are left alone). No-op for non-transient units. */
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* best effort: fails if other drop-ins remain */
        }
}
691
/* Tears down the unit's RequiresMountsFor= bookkeeping: for every registered path,
 * removes this unit from the manager's reverse map (units_requiring_mounts_for) for
 * the path and all of its parent prefixes, freeing map entries that become empty. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path = NULL;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        /* VLA scratch buffer for the prefix walk */
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Drop the map entry (and its owned key string) once no unit needs this prefix */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
725
726 static void unit_done(Unit *u) {
727 ExecContext *ec;
728 CGroupContext *cc;
729
730 assert(u);
731
732 if (u->type < 0)
733 return;
734
735 if (UNIT_VTABLE(u)->done)
736 UNIT_VTABLE(u)->done(u);
737
738 ec = unit_get_exec_context(u);
739 if (ec)
740 exec_context_done(ec);
741
742 cc = unit_get_cgroup_context(u);
743 if (cc)
744 cgroup_context_done(cc);
745 }
746
/* Frees a unit and detaches it from all manager state: queues, hashmaps, jobs,
 * dependencies, cgroup, bus tracking and BPF objects. Transient configuration and
 * state files are removed unless the manager is reloading. Safe to call with NULL.
 * Always returns NULL, so callers can write `u = unit_free(u);`. */
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        /* During a reload the transient config must survive so it can be picked up again */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's global name table */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        /* Keep state files across reloads, the unit will be deserialized back */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove from the per-type list and from every manager work queue we might be on */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        /* Release BPF-related resources */
        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally, free the owned strings and the object itself */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
898
/* Returns the freezer state systemd is tracking for this unit. */
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}
904
905 int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
906 char *values[1] = {};
907 int r;
908
909 assert(u);
910
911 r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
912 STRV_MAKE("frozen"), values);
913 if (r < 0)
914 return r;
915
916 r = _FREEZER_STATE_INVALID;
917
918 if (values[0]) {
919 if (streq(values[0], "0"))
920 r = FREEZER_RUNNING;
921 else if (streq(values[0], "1"))
922 r = FREEZER_FROZEN;
923 }
924
925 free(values[0]);
926 *ret = r;
927
928 return 0;
929 }
930
/* Returns the unit's active state as reported by its type implementation. Merged
 * units forward to the unit they were merged into. */
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
943
/* Returns the type-specific sub-state string (e.g. a service's detailed state). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
949
/* Moves all names of 'other' over to 'u': other's id becomes an alias of u, and
 * other's alias set is merged in. On success the global units hashmap is updated so
 * all those names point at u, and other is left without any names. On failure the
 * partial alias addition is rolled back. */
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id); /* u's alias set takes ownership of other->id on success */
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Roll back: detach other->id from u again (other still owns the string) */
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        /* Repoint every name at u in the manager's global name table */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
975
/* Pre-allocates hashmap capacity in u's dependency tables so that a subsequent
 * unit_merge_dependencies(u, other) cannot fail on OOM mid-way. Returns 0 on
 * success, negative errno on allocation failure. */
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
1022
1023 static bool unit_should_warn_about_dependency(UnitDependency dependency) {
1024 /* Only warn about some unit types */
1025 return IN_SET(dependency,
1026 UNIT_CONFLICTS,
1027 UNIT_CONFLICTED_BY,
1028 UNIT_BEFORE,
1029 UNIT_AFTER,
1030 UNIT_ON_SUCCESS,
1031 UNIT_ON_FAILURE,
1032 UNIT_TRIGGERS,
1033 UNIT_TRIGGERED_BY);
1034 }
1035
/* Inserts or updates the UnitDependencyInfo for 'other' in a per-type dependency
 * hashmap. UnitDependencyInfo is pointer-sized and punned through the hashmap value
 * (hence the assert_cc), carrying the origin/destination masks that record where the
 * dependency came from. Returns 1 if something changed, 0 if the masks were already
 * set (NOP), negative errno on failure. */
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
1076
/* Transfers all dependencies of 'other' to 'u' as part of merging the two units, and
 * rewrites third-party units' back-references from 'other' to 'u'. Dependencies that
 * would point a unit at itself are dropped (with a warning for significant dependency
 * types). Assumes unit_reserve_dependencies() was called first, so the hashmap
 * updates below cannot fail — hence the assert_se() wrappers. */
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                  back_deps,
                                                  u,
                                                  di_move.origin_mask,
                                                  di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                  deps,
                                                  back,
                                                  di_back.origin_mask,
                                                  di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}
1163
/* Merges 'other' into 'u': all names, dependencies and incoming references of 'other' are moved
 * over to 'u', and 'other' is put into the UNIT_MERGED state pointing back at 'u' via
 * ->merged_into. Only units of the same type that support aliasing, that carry no configuration
 * of their own yet (UNIT_STUB/UNIT_NOT_FOUND), have no jobs pending and are inactive may be
 * merged. Returns 0 on success (also when 'other' already is 'u'), negative errno otherwise. */
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        /* If 'other' was itself merged away earlier, operate on whatever it was merged into. */
        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        /* Merging is only defined between units of the same type. */
        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        /* Refuse to merge a unit that already has configuration loaded. */
        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        /* Template instances must match (both NULL, or equal strings). */
        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        /* Refuse if 'other' has jobs queued or is not inactive/failed. */
        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* Inherit 'other's activation details, unless we already carry our own. */
        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was set to UNIT_MERGED a few lines above, so this condition is
         * always true here — presumably it was meant to test the pre-merge state; confirm. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1233
1234 int unit_merge_by_name(Unit *u, const char *name) {
1235 _cleanup_free_ char *s = NULL;
1236 Unit *other;
1237 int r;
1238
1239 /* Either add name to u, or if a unit with name already exists, merge it with u.
1240 * If name is a template, do the same for name@instance, where instance is u's instance. */
1241
1242 assert(u);
1243 assert(name);
1244
1245 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1246 if (!u->instance)
1247 return -EINVAL;
1248
1249 r = unit_name_replace_instance(name, u->instance, &s);
1250 if (r < 0)
1251 return r;
1252
1253 name = s;
1254 }
1255
1256 other = manager_get_unit(u->manager, name);
1257 if (other)
1258 return unit_merge(u, other);
1259
1260 return unit_add_name(u, name);
1261 }
1262
1263 Unit* unit_follow_merge(Unit *u) {
1264 assert(u);
1265
1266 while (u->load_state == UNIT_MERGED)
1267 assert_se(u = u->merged_into);
1268
1269 return u;
1270 }
1271
/* Adds the implicit dependencies a unit with the given ExecContext needs: mount dependencies for
 * the configured paths (working/root directory, root image, State/Cache/Logs/… directories), and
 * ordering/requirement dependencies on the system services its execution relies on (remount-fs,
 * tmp.mount, tmpfiles, udevd, journald). */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require the mounts backing every configured exec directory (StateDirectory= etc.),
         * resolved against the manager's per-type prefix. */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies reference system services, which only exist for the system
         * manager, not per-user managers. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {

                /* FIXME: for now we make a special case for /tmp and add a weak dependency on
                 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
                 * /tmp specifically and masking other mount units should be handled more
                 * gracefully too, see PR#16894. */
                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "tmp.mount", true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_require_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                /* tmpfiles sets up the private tmp directories, hence order after it. */
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If neither stdout nor stderr go to the journal/kmsg and no log namespace is set, no
         * logging dependencies are needed. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                /* A log namespace gets its own instantiated journald sockets
                 * (systemd-journald@<ns>.socket and the matching varlink socket). */
                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        r = unit_add_default_credential_dependencies(u, c);
        if (r < 0)
                return r;

        return 0;
}
1398
1399 const char* unit_description(Unit *u) {
1400 assert(u);
1401
1402 if (u->description)
1403 return u->description;
1404
1405 return strna(u->id);
1406 }
1407
1408 const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
1409 assert(u);
1410 assert(u->id);
1411
1412 /* Return u->id, u->description, or "{u->id} - {u->description}".
1413 * Versions with u->description are only used if it is set.
1414 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1415 * pointer.
1416 *
1417 * Note that *ret_combined_buffer may be set to NULL. */
1418
1419 if (!u->description ||
1420 u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
1421 (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
1422 streq(u->description, u->id)) {
1423
1424 if (ret_combined_buffer)
1425 *ret_combined_buffer = NULL;
1426 return u->id;
1427 }
1428
1429 if (ret_combined_buffer) {
1430 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
1431 *ret_combined_buffer = strjoin(u->id, " - ", u->description);
1432 if (*ret_combined_buffer)
1433 return *ret_combined_buffer;
1434 log_oom(); /* Fall back to ->description */
1435 } else
1436 *ret_combined_buffer = NULL;
1437 }
1438
1439 return u->description;
1440 }
1441
1442 /* Common implementation for multiple backends */
1443 int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
1444 int r;
1445
1446 assert(u);
1447
1448 /* Load a .{service,socket,...} file */
1449 r = unit_load_fragment(u);
1450 if (r < 0)
1451 return r;
1452
1453 if (u->load_state == UNIT_STUB) {
1454 if (fragment_required)
1455 return -ENOENT;
1456
1457 u->load_state = UNIT_LOADED;
1458 }
1459
1460 /* Load drop-in directory data. If u is an alias, we might be reloading the
1461 * target unit needlessly. But we cannot be sure which drops-ins have already
1462 * been loaded and which not, at least without doing complicated book-keeping,
1463 * so let's always reread all drop-ins. */
1464 r = unit_load_dropin(unit_follow_merge(u));
1465 if (r < 0)
1466 return r;
1467
1468 if (u->source_path) {
1469 struct stat st;
1470
1471 if (stat(u->source_path, &st) >= 0)
1472 u->source_mtime = timespec_load(&st.st_mtim);
1473 else
1474 u->source_mtime = 0;
1475 }
1476
1477 return 0;
1478 }
1479
1480 void unit_add_to_target_deps_queue(Unit *u) {
1481 Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);
1482
1483 if (u->in_target_deps_queue)
1484 return;
1485
1486 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1487 u->in_target_deps_queue = true;
1488 }
1489
1490 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1491 assert(u);
1492 assert(target);
1493
1494 if (target->type != UNIT_TARGET)
1495 return 0;
1496
1497 /* Only add the dependency if both units are loaded, so that
1498 * that loop check below is reliable */
1499 if (u->load_state != UNIT_LOADED ||
1500 target->load_state != UNIT_LOADED)
1501 return 0;
1502
1503 /* If either side wants no automatic dependencies, then let's
1504 * skip this */
1505 if (!u->default_dependencies ||
1506 !target->default_dependencies)
1507 return 0;
1508
1509 /* Don't create loops */
1510 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1511 return 0;
1512
1513 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1514 }
1515
1516 static int unit_add_slice_dependencies(Unit *u) {
1517 Unit *slice;
1518 assert(u);
1519
1520 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1521 return 0;
1522
1523 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1524 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1525 relationship). */
1526 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1527
1528 slice = UNIT_GET_SLICE(u);
1529 if (slice)
1530 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1531
1532 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1533 return 0;
1534
1535 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1536 }
1537
1538 static int unit_add_mount_dependencies(Unit *u) {
1539 UnitDependencyInfo di;
1540 const char *path;
1541 bool changed = false;
1542 int r;
1543
1544 assert(u);
1545
1546 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
1547 char prefix[strlen(path) + 1];
1548
1549 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1550 _cleanup_free_ char *p = NULL;
1551 Unit *m;
1552
1553 r = unit_name_from_path(prefix, ".mount", &p);
1554 if (r == -EINVAL)
1555 continue; /* If the path cannot be converted to a mount unit name, then it's
1556 * not manageable as a unit by systemd, and hence we don't need a
1557 * dependency on it. Let's thus silently ignore the issue. */
1558 if (r < 0)
1559 return r;
1560
1561 m = manager_get_unit(u->manager, p);
1562 if (!m) {
1563 /* Make sure to load the mount unit if it exists. If so the dependencies on
1564 * this unit will be added later during the loading of the mount unit. */
1565 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1566 continue;
1567 }
1568 if (m == u)
1569 continue;
1570
1571 if (m->load_state != UNIT_LOADED)
1572 continue;
1573
1574 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1575 if (r < 0)
1576 return r;
1577 changed = changed || r > 0;
1578
1579 if (m->fragment_path) {
1580 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1581 if (r < 0)
1582 return r;
1583 changed = changed || r > 0;
1584 }
1585 }
1586 }
1587
1588 return changed;
1589 }
1590
1591 static int unit_add_oomd_dependencies(Unit *u) {
1592 CGroupContext *c;
1593 CGroupMask mask;
1594 int r;
1595
1596 assert(u);
1597
1598 if (!u->default_dependencies)
1599 return 0;
1600
1601 c = unit_get_cgroup_context(u);
1602 if (!c)
1603 return 0;
1604
1605 bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
1606 if (!wants_oomd)
1607 return 0;
1608
1609 if (!cg_all_unified())
1610 return 0;
1611
1612 r = cg_mask_supported(&mask);
1613 if (r < 0)
1614 return log_debug_errno(r, "Failed to determine supported controllers: %m");
1615
1616 if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
1617 return 0;
1618
1619 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1620 }
1621
1622 static int unit_add_startup_units(Unit *u) {
1623 if (!unit_has_startup_cgroup_constraints(u))
1624 return 0;
1625
1626 return set_ensure_put(&u->manager->startup_units, NULL, u);
1627 }
1628
1629 static int unit_validate_on_failure_job_mode(
1630 Unit *u,
1631 const char *job_mode_setting,
1632 JobMode job_mode,
1633 const char *dependency_name,
1634 UnitDependencyAtom atom) {
1635
1636 Unit *other, *found = NULL;
1637
1638 if (job_mode != JOB_ISOLATE)
1639 return 0;
1640
1641 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1642 if (!found)
1643 found = other;
1644 else if (found != other)
1645 return log_unit_error_errno(
1646 u, SYNTHETIC_ERRNO(ENOEXEC),
1647 "More than one %s dependencies specified but %sisolate set. Refusing.",
1648 dependency_name, job_mode_setting);
1649 }
1650
1651 return 0;
1652 }
1653
/* Loads the unit's configuration via the per-type ->load() vtable callback and sets up the
 * implicit dependencies that loading entails. On failure the unit ends up in UNIT_NOT_FOUND,
 * UNIT_BAD_SETTING or UNIT_ERROR state and the error is recorded in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are being loaded right now, drop ourselves from the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything but UNIT_STUB means loading was already attempted; nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                /* Hook up the implicit dependencies that follow from the loaded configuration. */
                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* JobMode=isolate only makes sense with a single OnSuccess=/OnFailure= dependency. */
                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1749
/* Logging callback passed to condition_test_list(): logs with the unit's structured log fields
 * attached when a unit context ('userdata') is given, otherwise logs plainly. Returns the value
 * of the underlying log call, or -ERRNO_VALUE(error) when the message is filtered out. */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Suppress the message early if the unit's effective log level filters it out. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                /* Attach the manager's per-unit and invocation-id log fields for this unit. */
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1773
1774 static bool unit_test_condition(Unit *u) {
1775 _cleanup_strv_free_ char **env = NULL;
1776 int r;
1777
1778 assert(u);
1779
1780 dual_timestamp_get(&u->condition_timestamp);
1781
1782 r = manager_get_effective_environment(u->manager, &env);
1783 if (r < 0) {
1784 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1785 u->condition_result = true;
1786 } else
1787 u->condition_result = condition_test_list(
1788 u->conditions,
1789 env,
1790 condition_type_to_string,
1791 log_unit_internal,
1792 u);
1793
1794 unit_add_to_dbus_queue(u);
1795 return u->condition_result;
1796 }
1797
1798 static bool unit_test_assert(Unit *u) {
1799 _cleanup_strv_free_ char **env = NULL;
1800 int r;
1801
1802 assert(u);
1803
1804 dual_timestamp_get(&u->assert_timestamp);
1805
1806 r = manager_get_effective_environment(u->manager, &env);
1807 if (r < 0) {
1808 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1809 u->assert_result = CONDITION_ERROR;
1810 } else
1811 u->assert_result = condition_test_list(
1812 u->asserts,
1813 env,
1814 assert_type_to_string,
1815 log_unit_internal,
1816 u);
1817
1818 unit_add_to_dbus_queue(u);
1819 return u->assert_result;
1820 }
1821
1822 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
1823 if (log_get_show_color()) {
1824 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
1825 ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
1826 else
1827 ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
1828 }
1829
1830 DISABLE_WARNING_FORMAT_NONLITERAL;
1831 manager_status_printf(u->manager, status_type, status, format, ident);
1832 REENABLE_WARNING;
1833 }
1834
1835 int unit_test_start_limit(Unit *u) {
1836 const char *reason;
1837
1838 assert(u);
1839
1840 if (ratelimit_below(&u->start_ratelimit)) {
1841 u->start_limit_hit = false;
1842 return 0;
1843 }
1844
1845 log_unit_warning(u, "Start request repeated too quickly.");
1846 u->start_limit_hit = true;
1847
1848 reason = strjoina("unit ", u->id, " failed");
1849
1850 emergency_action(u->manager, u->start_limit_action,
1851 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1852 u->reboot_arg, -1, reason);
1853
1854 return -ECANCELED;
1855 }
1856
1857 static bool unit_verify_deps(Unit *u) {
1858 Unit *other;
1859
1860 assert(u);
1861
1862 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1863 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1864 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1865 * that are not used in conjunction with After= as for them any such check would make things entirely
1866 * racy. */
1867
1868 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1869
1870 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1871 continue;
1872
1873 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1874 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1875 return false;
1876 }
1877 }
1878
1879 return true;
1880 }
1881
1882 /* Errors that aren't really errors:
1883 * -EALREADY: Unit is already started.
1884 * -ECOMM: Condition failed
1885 * -EAGAIN: An operation is already in progress. Retry later.
1886 *
1887 * Errors that are real errors:
1888 * -EBADR: This unit type does not support starting.
1889 * -ECANCELED: Start limit hit, too many requests for now
1890 * -EPROTO: Assert failed
1891 * -EINVAL: Unit not loaded
1892 * -EOPNOTSUPP: Unit type not supported
1893 * -ENOLINK: The necessary dependencies are not fulfilled.
1894 * -ESTALE: This unit has been started before and can't be started a second time
1895 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1896 */
/* Starts the unit (or the unit it redirects to). Error semantics are documented in the comment
 * block right above. 'details' carries activation metadata and is only attached if the unit does
 * not already carry older details. */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
1985
1986 bool unit_can_start(Unit *u) {
1987 assert(u);
1988
1989 if (u->load_state != UNIT_LOADED)
1990 return false;
1991
1992 if (!unit_type_supported(u->type))
1993 return false;
1994
1995 /* Scope units may be started only once */
1996 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1997 return false;
1998
1999 return !!UNIT_VTABLE(u)->start;
2000 }
2001
2002 bool unit_can_isolate(Unit *u) {
2003 assert(u);
2004
2005 return unit_can_start(u) &&
2006 u->allow_isolate;
2007 }
2008
2009 /* Errors:
2010 * -EBADR: This unit type does not support stopping.
2011 * -EALREADY: Unit is already stopped.
2012 * -EAGAIN: An operation is already in progress. Retry later.
2013 */
2014 int unit_stop(Unit *u) {
2015 UnitActiveState state;
2016 Unit *following;
2017
2018 assert(u);
2019
2020 state = unit_active_state(u);
2021 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2022 return -EALREADY;
2023
2024 following = unit_following(u);
2025 if (following) {
2026 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2027 return unit_stop(following);
2028 }
2029
2030 if (!UNIT_VTABLE(u)->stop)
2031 return -EBADR;
2032
2033 unit_add_to_dbus_queue(u);
2034 unit_cgroup_freezer_action(u, FREEZER_THAW);
2035
2036 return UNIT_VTABLE(u)->stop(u);
2037 }
2038
2039 bool unit_can_stop(Unit *u) {
2040 assert(u);
2041
2042 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2043 * Extrinsic units follow external state and they may stop following external state changes
2044 * (hence we return true here), but an attempt to do this through the manager will fail. */
2045
2046 if (!unit_type_supported(u->type))
2047 return false;
2048
2049 if (u->perpetual)
2050 return false;
2051
2052 return !!UNIT_VTABLE(u)->stop;
2053 }
2054
2055 /* Errors:
2056 * -EBADR: This unit type does not support reloading.
2057 * -ENOEXEC: Unit is not started.
2058 * -EAGAIN: An operation is already in progress. Retry later.
2059 */
2060 int unit_reload(Unit *u) {
2061 UnitActiveState state;
2062 Unit *following;
2063
2064 assert(u);
2065
2066 if (u->load_state != UNIT_LOADED)
2067 return -EINVAL;
2068
2069 if (!unit_can_reload(u))
2070 return -EBADR;
2071
2072 state = unit_active_state(u);
2073 if (state == UNIT_RELOADING)
2074 return -EAGAIN;
2075
2076 if (state != UNIT_ACTIVE)
2077 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2078
2079 following = unit_following(u);
2080 if (following) {
2081 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2082 return unit_reload(following);
2083 }
2084
2085 unit_add_to_dbus_queue(u);
2086
2087 if (!UNIT_VTABLE(u)->reload) {
2088 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2089 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2090 return 0;
2091 }
2092
2093 unit_cgroup_freezer_action(u, FREEZER_THAW);
2094
2095 return UNIT_VTABLE(u)->reload(u);
2096 }
2097
2098 bool unit_can_reload(Unit *u) {
2099 assert(u);
2100
2101 if (UNIT_VTABLE(u)->can_reload)
2102 return UNIT_VTABLE(u)->can_reload(u);
2103
2104 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2105 return true;
2106
2107 return UNIT_VTABLE(u)->reload;
2108 }
2109
2110 bool unit_is_unneeded(Unit *u) {
2111 Unit *other;
2112 assert(u);
2113
2114 if (!u->stop_when_unneeded)
2115 return false;
2116
2117 /* Don't clean up while the unit is transitioning or is even inactive. */
2118 if (unit_active_state(u) != UNIT_ACTIVE)
2119 return false;
2120 if (u->job)
2121 return false;
2122
2123 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2124 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2125 * restart, then don't clean this one up. */
2126
2127 if (other->job)
2128 return false;
2129
2130 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2131 return false;
2132
2133 if (unit_will_restart(other))
2134 return false;
2135 }
2136
2137 return true;
2138 }
2139
2140 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2141 Unit *other;
2142
2143 assert(u);
2144
2145 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2146 * that is active declared an Uphold= dependencies on it */
2147
2148 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2149 if (ret_culprit)
2150 *ret_culprit = NULL;
2151 return false;
2152 }
2153
2154 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2155 if (other->job)
2156 continue;
2157
2158 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2159 if (ret_culprit)
2160 *ret_culprit = other;
2161 return true;
2162 }
2163 }
2164
2165 if (ret_culprit)
2166 *ret_culprit = NULL;
2167 return false;
2168 }
2169
2170 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2171 Unit *other;
2172
2173 assert(u);
2174
2175 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2176 * because the other unit is down. */
2177
2178 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2179 /* Don't clean up while the unit is transitioning or is even inactive. */
2180 if (ret_culprit)
2181 *ret_culprit = NULL;
2182 return false;
2183 }
2184
2185 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2186 if (other->job)
2187 continue;
2188
2189 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2190 if (ret_culprit)
2191 *ret_culprit = other;
2192
2193 return true;
2194 }
2195 }
2196
2197 if (ret_culprit)
2198 *ret_culprit = NULL;
2199 return false;
2200 }
2201
2202 static void check_unneeded_dependencies(Unit *u) {
2203 Unit *other;
2204 assert(u);
2205
2206 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2207
2208 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2209 unit_submit_to_stop_when_unneeded_queue(other);
2210 }
2211
2212 static void check_uphold_dependencies(Unit *u) {
2213 Unit *other;
2214 assert(u);
2215
2216 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2217
2218 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2219 unit_submit_to_start_when_upheld_queue(other);
2220 }
2221
2222 static void check_bound_by_dependencies(Unit *u) {
2223 Unit *other;
2224 assert(u);
2225
2226 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2227
2228 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2229 unit_submit_to_stop_when_bound_queue(other);
2230 }
2231
/* The unit became active without a start job having requested it: enqueue start jobs for its hard
 * requirement dependencies (JOB_REPLACE) and soft ones (JOB_FAIL), and stop jobs for conflicting
 * units, so the dependency closure catches up with the unexpectedly started unit. Dependencies we
 * are ordered After= are skipped — presumably because ordering implies they were already handled;
 * confirm against the dependency-atom documentation. */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2252
/* The unit went down without a stop job having requested it: enqueue stop jobs for units that are
 * bound to us, so they follow us down. */
static void retroactively_stop_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2264
2265 void unit_start_on_failure(
2266 Unit *u,
2267 const char *dependency_name,
2268 UnitDependencyAtom atom,
2269 JobMode job_mode) {
2270
2271 int n_jobs = -1;
2272 Unit *other;
2273 int r;
2274
2275 assert(u);
2276 assert(dependency_name);
2277 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2278
2279 /* Act on OnFailure= and OnSuccess= dependencies */
2280
2281 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2282 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2283
2284 if (n_jobs < 0) {
2285 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2286 n_jobs = 0;
2287 }
2288
2289 r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
2290 if (r < 0)
2291 log_unit_warning_errno(
2292 u, r, "Failed to enqueue %s job, ignoring: %s",
2293 dependency_name, bus_error_message(&error, r));
2294 n_jobs ++;
2295 }
2296
2297 if (n_jobs >= 0)
2298 log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
2299 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2300 }
2301
2302 void unit_trigger_notify(Unit *u) {
2303 Unit *other;
2304
2305 assert(u);
2306
2307 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2308 if (UNIT_VTABLE(other)->trigger_notify)
2309 UNIT_VTABLE(other)->trigger_notify(other, u);
2310 }
2311
/* Raise (i.e. numerically lower) the log level to NOTICE or INFO if the corresponding condition
 * holds; never lowers an already higher priority. NOTICE takes precedence over INFO. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice)
                return log_level > LOG_NOTICE ? LOG_NOTICE : log_level;
        if (condition_info)
                return log_level > LOG_INFO ? LOG_INFO : log_level;
        return log_level;
}
2319
static int unit_log_resources(Unit *u) {
        /* iovec slots: 1 CPU field + the IP fields + the IO fields + 4 trailing fields (MESSAGE=,
         * MESSAGE_ID=, unit log field, invocation log field). Only the first n_iovec entries own
         * heap memory; the trailing four are stack/static and must not be freed. */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
        size_t n_message_parts = 0, n_iovec = 0;
        char* message_parts[1 + 2 + 2 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };
        const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
                [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
                [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
                [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                t = strjoin("consumed ", FORMAT_TIMESPAN(nsec / NSEC_PER_USEC, USEC_PER_MSEC), " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;

                log_level = raise_level(log_level,
                                        nsec > MENTIONWORTHY_CPU_NSEC,
                                        nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k]);

                /* NOTE(review): third argument is false only for the first metric — presumably an
                 * "allow cached value" flag; confirm against unit_get_io_accounting(). */
                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                have_io_accounting = true;
                if (value > 0)
                        any_io = true;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (k == CGROUP_IO_READ_BYTES) {
                        assert(!rr);
                        rr = strjoin("read ", strna(FORMAT_BYTES(value)), " from disk");
                        if (!rr) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (k == CGROUP_IO_WRITE_BYTES) {
                        assert(!wr);
                        wr = strjoin("written ", strna(FORMAT_BYTES(value)), " to disk");
                        if (!wr) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
        }

        /* Human-readable IO summary: either the read/written strings, or an explicit "no IO". */
        if (have_io_accounting) {
                if (any_io) {
                        if (rr)
                                message_parts[n_message_parts++] = TAKE_PTR(rr);
                        if (wr)
                                message_parts[n_message_parts++] = TAKE_PTR(wr);

                } else {
                        char *k;

                        k = strdup("no IO");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
        }

        /* This check is here because it is the earliest point following all possible log_level assignments. If
         * log_level is assigned anywhere after this point, move this check. */
        if (!unit_log_level_test(u, log_level)) {
                r = 0;
                goto finish;
        }

        /* Human-readable IP summary: either the received/sent strings, or an explicit "no IP traffic". */
        if (have_ip_accounting) {
                if (any_traffic) {
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                _cleanup_free_ char *joined = NULL;

                /* NULL-terminate so the parts can be joined as a strv. */
                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_unit_struct_iovec(u, log_level, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free whatever was heap-allocated; only the first n_iovec iovec entries own memory. */
        free_many_charp(message_parts, n_message_parts);

        for (size_t i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;
}
2552
2553 static void unit_update_on_console(Unit *u) {
2554 bool b;
2555
2556 assert(u);
2557
2558 b = unit_needs_console(u);
2559 if (u->on_console == b)
2560 return;
2561
2562 u->on_console = b;
2563 if (b)
2564 manager_ref_console(u->manager);
2565 else
2566 manager_unref_console(u->manager);
2567 }
2568
2569 static void unit_emit_audit_start(Unit *u) {
2570 assert(u);
2571
2572 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2573 return;
2574
2575 /* Write audit record if we have just finished starting up */
2576 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2577 u->in_audit = true;
2578 }
2579
/* Emits the audit stop record for a unit entering 'state'. If no start record was written yet,
 * a start record is synthesized first so audit records always come in pairs. */
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        /* Note: deliberately keyed off the *start* message type — a unit type that defines no audit
         * start message gets no stop message either. */
        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}
2598
/* Reconciles the unit's new active state 'ns' with its pending job 'j': completes the job when the
 * new state satisfies it, fails or invalidates it when the state contradicts it. Returns true if
 * the state change was NOT the one the job requested (i.e. happened "unexpectedly"). */
static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        /* The unit moved somewhere other than "activating" while a start job ran:
                         * not what the job asked for. */
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                /* Reload finished; its success decides the job result. */
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        /* The unit went (back) up while we were stopping it. */
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2671
/* Central state-change handler, invoked by the unit type implementations whenever a unit's
 * low-level state changes. 'os' is the previous high-level state, 'ns' the new one. Propagates the
 * change to pending jobs, dependencies, accounting, audit, systemd-oomd, emergency actions, and
 * the deferred-processing queues. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                /* A reload in progress satisfies a pending "needs reload" marker. */
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }

                /* OnSuccess= only triggers on a clean stop of a unit that was actually up before. */
                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= handling, e.g. rebooting or powering off the system. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2824
/* Adds a specific PID to the set of PIDs this unit watches. If 'exclusive' is set, any stale
 * association of this PID with other units is dropped first. Returns 0 on success (or if the PID
 * was already being watched), negative errno on failure. */
int unit_watch_pidref(Unit *u, PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        /* Adds a specific PID to the set of PIDs this unit watches. */

        assert(u);
        assert(pidref_is_set(pid));

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) /* early exit if already being watched */
                return 0;

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u);
        if (r != -EEXIST)
                /* Either the insertion succeeded (r >= 0) or a real error occurred; only on -EEXIST
                 * do we fall back to the multi-unit table below. */
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                ;

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Make sure the hashmap is allocated */
        r = hashmap_ensure_allocated(&u->manager->watch_pids_more, &pidref_hash_ops);
        if (r < 0)
                return r;

        /* Add or replace the old array */
        r = hashmap_replace(u->manager->watch_pids_more, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */
        return 0;
}
2894
2895 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2896 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
2897 int r;
2898
2899 assert(u);
2900 assert(pid_is_valid(pid));
2901
2902 r = pidref_set_pid(&pidref, pid);
2903 if (r < 0)
2904 return r;
2905
2906 return unit_watch_pidref(u, &pidref, exclusive);
2907 }
2908
/* Stops watching the given PID for this unit: removes it from the unit's own PID set and from the
 * manager's global PID→unit lookup tables (both the simple one and the multi-unit array one). */
void unit_unwatch_pidref(Unit *u, PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu) == uu);
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array) == array);
                        free(array);
                } else {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */

                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}
2957
2958 void unit_unwatch_pid(Unit *u, pid_t pid) {
2959 return unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(pid));
2960 }
2961
2962 void unit_unwatch_all_pids(Unit *u) {
2963 assert(u);
2964
2965 while (!set_isempty(u->pids))
2966 unit_unwatch_pidref(u, set_first(u->pids));
2967
2968 u->pids = set_free(u->pids);
2969 }
2970
/* Drops watched PIDs that are no longer alive, keeping the main and control PIDs regardless. */
static void unit_tidy_watch_pids(Unit *u) {
        PidRef *except1, *except2, *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids) {
                /* Never drop the main or control PID, even if already exited. */
                if (pidref_equal(except1, e) || pidref_equal(except2, e))
                        continue;

                /* NOTE(review): unit_unwatch_pidref() removes 'e' from u->pids while we iterate;
                 * presumably SET_FOREACH tolerates removal of the current entry — confirm against
                 * the set implementation. */
                if (pidref_is_unwaited(e) <= 0)
                        unit_unwatch_pidref(u, e);
        }
}
2989
2990 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2991 Unit *u = ASSERT_PTR(userdata);
2992
2993 assert(s);
2994
2995 unit_tidy_watch_pids(u);
2996 unit_watch_all_pids(u);
2997
2998 /* If the PID set is empty now, then let's finish this off. */
2999 unit_synthesize_cgroup_empty_event(u);
3000
3001 return 0;
3002 }
3003
/* Schedules a deferred, low-priority pass that tidies and re-subscribes the unit's watched PIDs.
 * Returns 0 on success (including when not needed on the unified hierarchy), negative errno on
 * failure. */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        /* Nothing to watch without a cgroup. */
        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                /* Run at idle priority so regular dispatching is not delayed by this housekeeping. */
                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        /* (Re-)arm the defer source for a single dispatch. */
        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
3044
3045 void unit_dequeue_rewatch_pids(Unit *u) {
3046 int r;
3047 assert(u);
3048
3049 if (!u->rewatch_pids_event_source)
3050 return;
3051
3052 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
3053 if (r < 0)
3054 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
3055
3056 u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
3057 }
3058
3059 bool unit_job_is_applicable(Unit *u, JobType j) {
3060 assert(u);
3061 assert(j >= 0 && j < _JOB_TYPE_MAX);
3062
3063 switch (j) {
3064
3065 case JOB_VERIFY_ACTIVE:
3066 case JOB_START:
3067 case JOB_NOP:
3068 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3069 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3070 * jobs for it. */
3071 return true;
3072
3073 case JOB_STOP:
3074 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3075 * external events), hence it makes no sense to permit enqueuing such a request either. */
3076 return !u->perpetual;
3077
3078 case JOB_RESTART:
3079 case JOB_TRY_RESTART:
3080 return unit_can_stop(u) && unit_can_start(u);
3081
3082 case JOB_RELOAD:
3083 case JOB_TRY_RELOAD:
3084 return unit_can_reload(u);
3085
3086 case JOB_RELOAD_OR_START:
3087 return unit_can_reload(u) && unit_can_start(u);
3088
3089 default:
3090 assert_not_reached();
3091 }
3092 }
3093
/* Returns the inner hashmap (Unit* → UnitDependencyInfo) for dependency type 'd' of unit 'u',
 * creating it on demand. Returns NULL on allocation failure. */
static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                /* Ownership moved into u->dependencies; disarm the cleanup handler. */
                deps = TAKE_PTR(h);
        }

        return deps;
}
3116
/* Flags indicating which side(s) of a dependency got a new entry and hence need to be notified of
 * the change. */
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0, /* the originating unit's dependency map changed */
        NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,   /* the target (inverse) unit's map changed */
} NotifyDependencyFlags;
3121
/* Registers dependency 'd' from 'u' on 'other' with the given mask, plus the inverse dependency
 * from 'other' back on 'u'. Returns a non-negative NotifyDependencyFlags bitmask describing
 * which of the two units changed, or a negative errno. On failure the first-half update is
 * rolled back so the tables stay consistent. */
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        /* For each dependency type, the inverse type to record on the other unit. */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_IN_SLICE] = UNIT_SLICE_OF,
                [UNIT_SLICE_OF] = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. NOTE(review): UnitDependencyInfo overlays its mask
         * bitfields with a void* ('.data'), so the whole info structure travels as the hashmap
         * value pointer — confirm against unit.h. */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info. The '.data' comparison detects whether the mask update
         * actually changed anything. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
        }

        return flags;
}
3219
/* Public entry point for adding a dependency of type 'd' from 'u' on 'other'. Validates the
 * combination (self-references, device ordering, trigger/slice capabilities), records the
 * dependency (and optionally a UNIT_REFERENCES edge), and queues D-Bus change notifications
 * for whichever units were modified. Returns > 0 if something changed, 0 if not (or the
 * dependency was deliberately ignored), negative errno on refusal or failure. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        /* Operate on the merge leaders in case either unit was merged into another. */
        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        /* Trigger relationships require the respective unit type to support triggering. */
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        /* Slice membership requires a slice unit on one side and a cgroup-capable unit on the other. */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        /* Only notify the sides that actually changed. */
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}
3307
3308 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3309 int r = 0, s = 0;
3310
3311 assert(u);
3312 assert(d >= 0 || e >= 0);
3313
3314 if (d >= 0) {
3315 r = unit_add_dependency(u, d, other, add_reference, mask);
3316 if (r < 0)
3317 return r;
3318 }
3319
3320 if (e >= 0) {
3321 s = unit_add_dependency(u, e, other, add_reference, mask);
3322 if (s < 0)
3323 return s;
3324 }
3325
3326 return r > 0 || s > 0;
3327 }
3328
3329 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3330 int r;
3331
3332 assert(u);
3333 assert(name);
3334 assert(buf);
3335 assert(ret);
3336
3337 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3338 *buf = NULL;
3339 *ret = name;
3340 return 0;
3341 }
3342
3343 if (u->instance)
3344 r = unit_name_replace_instance(name, u->instance, buf);
3345 else {
3346 _cleanup_free_ char *i = NULL;
3347
3348 r = unit_name_to_prefix(u->id, &i);
3349 if (r < 0)
3350 return r;
3351
3352 r = unit_name_replace_instance(name, i, buf);
3353 }
3354 if (r < 0)
3355 return r;
3356
3357 *ret = *buf;
3358 return 0;
3359 }
3360
3361 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3362 _cleanup_free_ char *buf = NULL;
3363 Unit *other;
3364 int r;
3365
3366 assert(u);
3367 assert(name);
3368
3369 r = resolve_template(u, name, &buf, &name);
3370 if (r < 0)
3371 return r;
3372
3373 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3374 return 0;
3375
3376 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3377 if (r < 0)
3378 return r;
3379
3380 return unit_add_dependency(u, d, other, add_reference, mask);
3381 }
3382
3383 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3384 _cleanup_free_ char *buf = NULL;
3385 Unit *other;
3386 int r;
3387
3388 assert(u);
3389 assert(name);
3390
3391 r = resolve_template(u, name, &buf, &name);
3392 if (r < 0)
3393 return r;
3394
3395 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3396 return 0;
3397
3398 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3399 if (r < 0)
3400 return r;
3401
3402 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3403 }
3404
/* Override the unit search path via the environment. This is mostly for debug purposes.
 * Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3409
3410 char *unit_dbus_path(Unit *u) {
3411 assert(u);
3412
3413 if (!u->id)
3414 return NULL;
3415
3416 return unit_dbus_path_from_name(u->id);
3417 }
3418
3419 char *unit_dbus_path_invocation_id(Unit *u) {
3420 assert(u);
3421
3422 if (sd_id128_is_null(u->invocation_id))
3423 return NULL;
3424
3425 return unit_dbus_path_from_name(u->invocation_id_string);
3426 }
3427
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old registration first; the hashmap key points into the unit itself. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear": take the reset path with success. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Store the ID before registering, since the hashmap key is &u->invocation_id. */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Either deliberate clearing or failure: wipe both the binary and string form. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3464
/* Assign 'slice' as the slice of 'u' via a UNIT_IN_SLICE dependency. Returns 1 if the slice was
 * (re)assigned, 0 if it was already set to this slice, negative errno if the assignment is not
 * permitted. */
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Re-parenting a live unit would require cgroup migration; refuse. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is pinned directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u) && u->cgroup_realized)
                return -EBUSY;

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}
3508
/* Pick and assign a default slice for 'u' if none was configured: a per-template slice for
 * instantiated units, the root slice for extrinsic units, otherwise system.slice or app.slice
 * depending on the manager scope. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Never override an explicitly configured slice. */
        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                /* strjoina() allocates on the stack with function-wide lifetime, so slice_name
                 * remains valid past this block. */
                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3560
3561 const char *unit_slice_name(Unit *u) {
3562 Unit *slice;
3563 assert(u);
3564
3565 slice = UNIT_GET_SLICE(u);
3566 if (!slice)
3567 return NULL;
3568
3569 return slice->id;
3570 }
3571
3572 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3573 _cleanup_free_ char *t = NULL;
3574 int r;
3575
3576 assert(u);
3577 assert(type);
3578 assert(_found);
3579
3580 r = unit_name_change_suffix(u->id, type, &t);
3581 if (r < 0)
3582 return r;
3583 if (unit_has_name(u, t))
3584 return -EINVAL;
3585
3586 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3587 assert(r < 0 || *_found != u);
3588 return r;
3589 }
3590
3591 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3592 const char *new_owner;
3593 Unit *u = ASSERT_PTR(userdata);
3594 int r;
3595
3596 assert(message);
3597
3598 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3599 if (r < 0) {
3600 bus_log_parse_error(r);
3601 return 0;
3602 }
3603
3604 if (UNIT_VTABLE(u)->bus_name_owner_change)
3605 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3606
3607 return 0;
3608 }
3609
/* Reply callback for the asynchronous GetNameOwner() call issued in unit_install_bus_match():
 * translates the reply (or error) into a bus_name_owner_change notification for the unit. */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* One-shot call: release the slot now that the reply arrived. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "Name has no owner" is the expected negative answer; anything else is logged. */
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3643
/* Install a NameOwnerChanged signal match for 'name' on 'bus' and kick off an asynchronous
 * GetNameOwner() query to learn the current owner. Returns -EBUSY if a watch is already
 * installed for this unit. On failure of the async call the installed match is removed again. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        /* 0 here means the bus default timeout is used unless the unit type supplies a start
         * timeout below — NOTE(review): the comment below mentions "the default value defined
         * above", which is not visible in this chunk; confirm. */
        usec_t timeout_usec = 0;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        true,
                        match,
                        signal_name_owner_changed,
                        NULL,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);

        if (r < 0) {
                /* Roll back the signal match installed above. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3712
3713 int unit_watch_bus_name(Unit *u, const char *name) {
3714 int r;
3715
3716 assert(u);
3717 assert(name);
3718
3719 /* Watch a specific name on the bus. We only support one unit
3720 * watching each name for now. */
3721
3722 if (u->manager->api_bus) {
3723 /* If the bus is already available, install the match directly.
3724 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3725 r = unit_install_bus_match(u, u->manager->api_bus, name);
3726 if (r < 0)
3727 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3728 }
3729
3730 r = hashmap_put(u->manager->watch_bus, name, u);
3731 if (r < 0) {
3732 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3733 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3734 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3735 }
3736
3737 return 0;
3738 }
3739
3740 void unit_unwatch_bus_name(Unit *u, const char *name) {
3741 assert(u);
3742 assert(name);
3743
3744 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3745 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3746 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3747 }
3748
3749 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3750 _cleanup_free_ char *e = NULL;
3751 Unit *device;
3752 int r;
3753
3754 assert(u);
3755
3756 /* Adds in links to the device node that this unit is based on */
3757 if (isempty(what))
3758 return 0;
3759
3760 if (!is_device_path(what))
3761 return 0;
3762
3763 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3764 if (!unit_type_supported(UNIT_DEVICE))
3765 return 0;
3766
3767 r = unit_name_from_path(what, ".device", &e);
3768 if (r < 0)
3769 return r;
3770
3771 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3772 if (r < 0)
3773 return r;
3774
3775 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3776 dep = UNIT_BINDS_TO;
3777
3778 return unit_add_two_dependencies(u, UNIT_AFTER,
3779 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3780 device, true, mask);
3781 }
3782
3783 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3784 _cleanup_free_ char *escaped = NULL, *target = NULL;
3785 int r;
3786
3787 assert(u);
3788
3789 if (isempty(what))
3790 return 0;
3791
3792 if (!path_startswith(what, "/dev/"))
3793 return 0;
3794
3795 /* If we don't support devices, then also don't bother with blockdev@.target */
3796 if (!unit_type_supported(UNIT_DEVICE))
3797 return 0;
3798
3799 r = unit_name_path_escape(what, &escaped);
3800 if (r < 0)
3801 return r;
3802
3803 r = unit_name_build("blockdev", escaped, ".target", &target);
3804 if (r < 0)
3805 return r;
3806
3807 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3808 }
3809
/* After deserialization: re-establish runtime state (bus name tracking, per-type state, queued
 * jobs, nft sets) from the serialized data. Errors are collected but do not abort the remaining
 * steps; the first error encountered is returned. */
int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-add bus name tracking references recorded during serialization. */
        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}
3837
/* Let the unit type and the cgroup layer catch up with external state changes that may have
 * happened while the manager was not watching (e.g. across re-execution). */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3846
3847 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3848 struct stat st;
3849
3850 if (!path)
3851 return false;
3852
3853 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3854 * are never out-of-date. */
3855 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3856 return false;
3857
3858 if (stat(path, &st) < 0)
3859 /* What, cannot access this anymore? */
3860 return true;
3861
3862 if (path_masked)
3863 /* For masked files check if they are still so */
3864 return !null_or_empty(&st);
3865 else
3866 /* For non-empty files check the mtime */
3867 return timespec_load(&st.st_mtim) > mtime;
3868
3869 return false;
3870 }
3871
/* Returns true if the on-disk configuration backing this unit changed since it was loaded,
 * i.e. a daemon-reload would pick up different data: fragment/source file mtime or mask state,
 * the set of drop-in files, or any drop-in's mtime. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **dropins = NULL;

        assert(u);
        assert(u->manager);

        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the current drop-in file set against the one we loaded. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &dropins);
        if (!strv_equal(u->dropin_paths, dropins))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3902
3903 void unit_reset_failed(Unit *u) {
3904 assert(u);
3905
3906 if (UNIT_VTABLE(u)->reset_failed)
3907 UNIT_VTABLE(u)->reset_failed(u);
3908
3909 ratelimit_reset(&u->start_ratelimit);
3910 u->start_limit_hit = false;
3911 }
3912
3913 Unit *unit_following(Unit *u) {
3914 assert(u);
3915
3916 if (UNIT_VTABLE(u)->following)
3917 return UNIT_VTABLE(u)->following(u);
3918
3919 return NULL;
3920 }
3921
3922 bool unit_stop_pending(Unit *u) {
3923 assert(u);
3924
3925 /* This call does check the current state of the unit. It's
3926 * hence useful to be called from state change calls of the
3927 * unit itself, where the state isn't updated yet. This is
3928 * different from unit_inactive_or_pending() which checks both
3929 * the current state and for a queued job. */
3930
3931 return unit_has_job_type(u, JOB_STOP);
3932 }
3933
3934 bool unit_inactive_or_pending(Unit *u) {
3935 assert(u);
3936
3937 /* Returns true if the unit is inactive or going down */
3938
3939 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3940 return true;
3941
3942 if (unit_stop_pending(u))
3943 return true;
3944
3945 return false;
3946 }
3947
3948 bool unit_active_or_pending(Unit *u) {
3949 assert(u);
3950
3951 /* Returns true if the unit is active or going up */
3952
3953 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3954 return true;
3955
3956 if (u->job &&
3957 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3958 return true;
3959
3960 return false;
3961 }
3962
/* Default implementation of the will_restart() vtable hook: consider the unit about to restart
 * iff a start job is queued for it. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
3968
3969 bool unit_will_restart(Unit *u) {
3970 assert(u);
3971
3972 if (!UNIT_VTABLE(u)->will_restart)
3973 return false;
3974
3975 return UNIT_VTABLE(u)->will_restart(u);
3976 }
3977
3978 void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
3979 assert(u);
3980
3981 if (UNIT_VTABLE(u)->notify_cgroup_oom)
3982 UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
3983 }
3984
3985 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3986 _cleanup_set_free_ Set *pid_set = NULL;
3987 int r;
3988
3989 pid_set = set_new(NULL);
3990 if (!pid_set)
3991 return NULL;
3992
3993 /* Exclude the main/control pids from being killed via the cgroup */
3994 if (main_pid > 0) {
3995 r = set_put(pid_set, PID_TO_PTR(main_pid));
3996 if (r < 0)
3997 return NULL;
3998 }
3999
4000 if (control_pid > 0) {
4001 r = set_put(pid_set, PID_TO_PTR(control_pid));
4002 if (r < 0)
4003 return NULL;
4004 }
4005
4006 return TAKE_PTR(pid_set);
4007 }
4008
4009 static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
4010 _cleanup_free_ char *comm = NULL;
4011 Unit *u = ASSERT_PTR(userdata);
4012
4013 (void) pidref_get_comm(pid, &comm);
4014
4015 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
4016 signal_to_string(signo), pid->pid, strna(comm));
4017
4018 return 1;
4019 }
4020
4021 static int kill_or_sigqueue(PidRef* pidref, int signo, int code, int value) {
4022 assert(pidref_is_set(pidref));
4023 assert(SIGNAL_VALID(signo));
4024
4025 switch (code) {
4026
4027 case SI_USER:
4028 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
4029 return pidref_kill(pidref, signo);
4030
4031 case SI_QUEUE:
4032 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
4033 return pidref_sigqueue(pidref, signo, value);
4034
4035 default:
4036 assert_not_reached();
4037 }
4038 }
4039
/* Send 'signo' (optionally with a queued 'value' when code == SI_QUEUE) to the processes of
 * this unit selected by 'who': main process, control process, and/or the whole cgroup. The
 * first error encountered is kept as the return value and reported via 'error', but the
 * remaining targets are still attempted. The *_FAIL variants additionally fail when no process
 * was signalled at all. */
int unit_kill(
                Unit *u,
                KillWho who,
                int signo,
                int code,
                int value,
                sd_bus_error *error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;      /* whether at least one process got the signal */
        int ret = 0, r;           /* ret: first error, sticky; r: per-step result */

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(who >= 0);
        assert(who < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_setf(error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        /* Validate up-front that the requested targets exist, before sending anything. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (pidref_is_set(control_pid) &&
            IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                _cleanup_free_ char *comm = NULL;
                (void) pidref_get_comm(control_pid, &comm);

                r = kill_or_sigqueue(control_pid, signo, code, value);
                if (r < 0) {
                        ret = r;

                        /* Report this failure both to the logs and to the client */
                        sd_bus_error_set_errnof(
                                        error, r,
                                        "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), control_pid->pid, strna(comm));
                        log_unit_warning_errno(
                                        u, r,
                                        "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
                                        signal_to_string(signo), control_pid->pid, strna(comm));
                } else {
                        log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
                                      signal_to_string(signo), control_pid->pid, strna(comm));
                        killed = true;
                }
        }

        if (pidref_is_set(main_pid) &&
            IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                _cleanup_free_ char *comm = NULL;
                (void) pidref_get_comm(main_pid, &comm);

                r = kill_or_sigqueue(main_pid, signo, code, value);
                if (r < 0) {
                        /* Only the first error is reported to the client; later ones are logged only. */
                        if (ret == 0) {
                                ret = r;

                                sd_bus_error_set_errnof(
                                                error, r,
                                                "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
                                                signal_to_string(signo), main_pid->pid, strna(comm));
                        }

                        log_unit_warning_errno(
                                        u, r,
                                        "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
                                        signal_to_string(signo), main_pid->pid, strna(comm));

                } else {
                        log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
                                      signal_to_string(signo), main_pid->pid, strna(comm));
                        killed = true;
                }
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path && code == SI_USER) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                if (!pid_set)
                        return log_oom();

                r = cg_kill_recursive(u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (r < 0) {
                        /* An empty or vanished cgroup is not an error here. */
                        if (!IN_SET(r, -ESRCH, -ENOENT)) {
                                if (ret == 0) {
                                        ret = r;

                                        sd_bus_error_set_errnof(
                                                        error, r,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));
                                }

                                log_unit_warning_errno(
                                                u, r,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));
                        }
                } else
                        killed = true;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}
4173
4174 int unit_following_set(Unit *u, Set **s) {
4175 assert(u);
4176 assert(s);
4177
4178 if (UNIT_VTABLE(u)->following_set)
4179 return UNIT_VTABLE(u)->following_set(u, s);
4180
4181 *s = NULL;
4182 return 0;
4183 }
4184
4185 UnitFileState unit_get_unit_file_state(Unit *u) {
4186 int r;
4187
4188 assert(u);
4189
4190 if (u->unit_file_state < 0 && u->fragment_path) {
4191 r = unit_file_get_state(
4192 u->manager->runtime_scope,
4193 NULL,
4194 u->id,
4195 &u->unit_file_state);
4196 if (r < 0)
4197 u->unit_file_state = UNIT_FILE_BAD;
4198 }
4199
4200 return u->unit_file_state;
4201 }
4202
/* Determine (and cache) the preset policy verdict for this unit's fragment file. Negative
 * errno values are cached as well, so the lookup happens at most once. */
PresetAction unit_get_unit_file_preset(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path) {
                _cleanup_free_ char *bn = NULL;

                r = path_extract_filename(u->fragment_path, &bn);
                if (r < 0)
                        return (u->unit_file_preset = r);

                /* NOTE(review): this relies on path_extract_filename() returning O_DIRECTORY (a
                 * positive value) for paths that refer to a directory — confirm against
                 * path-util.h. */
                if (r == O_DIRECTORY)
                        return (u->unit_file_preset = -EISDIR);

                u->unit_file_preset = unit_file_query_preset(
                                u->manager->runtime_scope,
                                NULL,
                                bn,
                                NULL);
        }

        return u->unit_file_preset;
}
4227
4228 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4229 assert(ref);
4230 assert(source);
4231 assert(target);
4232
4233 if (ref->target)
4234 unit_ref_unset(ref);
4235
4236 ref->source = source;
4237 ref->target = target;
4238 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4239 return target;
4240 }
4241
4242 void unit_ref_unset(UnitRef *ref) {
4243 assert(ref);
4244
4245 if (!ref->target)
4246 return;
4247
4248 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4249 * be unreferenced now. */
4250 unit_add_to_gc_queue(ref->target);
4251
4252 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4253 ref->source = ref->target = NULL;
4254 }
4255
4256 static int user_from_unit_name(Unit *u, char **ret) {
4257
4258 static const uint8_t hash_key[] = {
4259 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4260 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4261 };
4262
4263 _cleanup_free_ char *n = NULL;
4264 int r;
4265
4266 r = unit_name_to_prefix(u->id, &n);
4267 if (r < 0)
4268 return r;
4269
4270 if (valid_user_group_name(n, 0)) {
4271 *ret = TAKE_PTR(n);
4272 return 0;
4273 }
4274
4275 /* If we can't use the unit name as a user name, then let's hash it and use that */
4276 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4277 return -ENOMEM;
4278
4279 return 0;
4280 }
4281
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized. Returns 0 on success, negative errno on failure. */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to the user's home directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Various sandboxing switches imply dropping the related capabilities from the bounding
                 * set, since the capability would be useless (or dangerous) given the restriction. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* Fill in user/group from the unit name if unset. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                        exec_directory_sort(ec->directories + dt);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens the default device policy. */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }
                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        /* ProtectClock= still needs read access to the RTC device. */
                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }

                        /* If there are encrypted credentials we might need to access the TPM. */
                        if (exec_context_has_encrypted_credentials(ec)) {
                                r = cgroup_context_add_device_allow(cc, "char-tpm", CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
4409
4410 ExecContext *unit_get_exec_context(const Unit *u) {
4411 size_t offset;
4412 assert(u);
4413
4414 if (u->type < 0)
4415 return NULL;
4416
4417 offset = UNIT_VTABLE(u)->exec_context_offset;
4418 if (offset <= 0)
4419 return NULL;
4420
4421 return (ExecContext*) ((uint8_t*) u + offset);
4422 }
4423
4424 KillContext *unit_get_kill_context(Unit *u) {
4425 size_t offset;
4426 assert(u);
4427
4428 if (u->type < 0)
4429 return NULL;
4430
4431 offset = UNIT_VTABLE(u)->kill_context_offset;
4432 if (offset <= 0)
4433 return NULL;
4434
4435 return (KillContext*) ((uint8_t*) u + offset);
4436 }
4437
4438 CGroupContext *unit_get_cgroup_context(Unit *u) {
4439 size_t offset;
4440
4441 if (u->type < 0)
4442 return NULL;
4443
4444 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4445 if (offset <= 0)
4446 return NULL;
4447
4448 return (CGroupContext*) ((uint8_t*) u + offset);
4449 }
4450
4451 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4452 size_t offset;
4453
4454 if (u->type < 0)
4455 return NULL;
4456
4457 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4458 if (offset <= 0)
4459 return NULL;
4460
4461 return *(ExecRuntime**) ((uint8_t*) u + offset);
4462 }
4463
4464 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4465 assert(u);
4466
4467 if (UNIT_WRITE_FLAGS_NOOP(flags))
4468 return NULL;
4469
4470 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4471 return u->manager->lookup_paths.transient;
4472
4473 if (flags & UNIT_PERSISTENT)
4474 return u->manager->lookup_paths.persistent_control;
4475
4476 if (flags & UNIT_RUNTIME)
4477 return u->manager->lookup_paths.runtime_control;
4478
4479 return NULL;
4480 }
4481
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. NULL is returned on allocation failure. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL;

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                /* First double up '$' so it survives environment-style expansion on re-parse… */
                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        /* t takes ownership of each intermediate buffer; the previous one is freed. */
                        free_and_replace(t, t2);
                }

                /* …then escape double quotes so the result may be embedded in a quoted word. Note the
                 * "t ?: s" input: use the intermediate buffer if one exists, the (possibly original)
                 * input otherwise. */
                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;
        }

        /* Transfer ownership of the scratch buffer (if any) to the caller; s aliases it in that case. */
        *buf = TAKE_PTR(t);
        return s;
}
4537
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. Each entry is wrapped in double quotes and the
         * entries are separated by spaces. Returns NULL on allocation failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                /* Append in place at offset n; q walks the write position. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure there's room for the terminating NUL even if the list was empty. */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4577
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        /* Persists a single unit setting: while a transient unit is being created the setting is appended
         * to its transient unit file, otherwise it is written to a drop-in (directory selected by flags).
         * Returns 0 on success, negative errno on failure. */

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same. Note strjoina() allocates on the stack, hence "data" stays
         * valid for the rest of this function without heap management. */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p is the drop-in directory path, q the drop-in file path. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* On success the dropin_paths strv takes ownership of q. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4663
4664 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4665 _cleanup_free_ char *p = NULL;
4666 va_list ap;
4667 int r;
4668
4669 assert(u);
4670 assert(name);
4671 assert(format);
4672
4673 if (UNIT_WRITE_FLAGS_NOOP(flags))
4674 return 0;
4675
4676 va_start(ap, format);
4677 r = vasprintf(&p, format, ap);
4678 va_end(ap);
4679
4680 if (r < 0)
4681 return -ENOMEM;
4682
4683 return unit_write_setting(u, flags, name, p);
4684 }
4685
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        /* Converts the unit into a transient one: opens a fresh unit file in the transient directory and
         * resets all on-disk state (fragment, source, drop-ins) so the unit is reloaded from it. */

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previously open transient file. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* The transient file becomes the unit's fragment. */
        free_and_replace(u->fragment_path, path);

        /* Forget any other on-disk configuration sources. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Reset load state so the unit is reloaded from the new file. */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4731
4732 static int log_kill(const PidRef *pid, int sig, void *userdata) {
4733 _cleanup_free_ char *comm = NULL;
4734
4735 assert(pidref_is_set(pid));
4736
4737 (void) pidref_get_comm(pid, &comm);
4738
4739 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4740 only, like for example systemd's own PAM stub process. */
4741 if (comm && comm[0] == '(')
4742 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4743 * here to let the manager know that a process was killed. */
4744 return 1;
4745
4746 log_unit_notice(userdata,
4747 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4748 pid->pid,
4749 strna(comm),
4750 signal_to_string(sig));
4751
4752 return 1;
4753 }
4754
4755 static int operation_to_signal(
4756 const KillContext *c,
4757 KillOperation k,
4758 bool *ret_noteworthy) {
4759
4760 assert(c);
4761
4762 switch (k) {
4763
4764 case KILL_TERMINATE:
4765 case KILL_TERMINATE_AND_LOG:
4766 *ret_noteworthy = false;
4767 return c->kill_signal;
4768
4769 case KILL_RESTART:
4770 *ret_noteworthy = false;
4771 return restart_kill_signal(c);
4772
4773 case KILL_KILL:
4774 *ret_noteworthy = true;
4775 return c->final_kill_signal;
4776
4777 case KILL_WATCHDOG:
4778 *ret_noteworthy = true;
4779 return c->watchdog_signal;
4780
4781 default:
4782 assert_not_reached();
4783 }
4784 }
4785
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                PidRef* main_pid,
                PidRef* control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        /* SIGHUP is sent in addition on plain termination if configured — unless the kill signal already
         * is SIGHUP, in which case sending it twice is pointless. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* First: the main process, if there is one. */
        if (pidref_is_set(main_pid)) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = pidref_kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) pidref_get_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid->pid, strna(comm));
                } else {
                        /* Only wait for the main process if we forked it ourselves. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) pidref_kill(main_pid, SIGHUP);
                }
        }

        /* Second: the control process, if there is one. */
        if (pidref_is_set(control_pid)) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = pidref_kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) pidref_get_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid->pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) pidref_kill(control_pid, SIGHUP);
                }
        }

        /* Third: sweep the rest of the cgroup, if the kill mode asks for it. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(
                                u->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set for the second sweep. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                                if (!pid_set)
                                        return -ENOMEM;

                                (void) cg_kill_recursive(
                                                u->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* kill_log= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
4907
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(u->requires_mounts_for, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of the key p. */
        r = hashmap_ensure_put(&u->requires_mounts_for, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Also register this unit in the manager's per-prefix table, so a newly appearing mount unit for
         * any prefix of the path can find us. */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        /* On success the hashmap takes ownership of both key q and set x. */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4982
int unit_setup_exec_runtime(Unit *u) {
        _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
        _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
        _cleanup_set_free_ Set *units = NULL;
        ExecRuntime **rt;
        ExecContext *ec;
        size_t offset;
        Unit *other;
        int r;

        /* Sets up the unit's ExecRuntime, sharing the ExecSharedRuntime with units we join the namespace
         * of where possible, and preparing dynamic credentials if DynamicUser= is on. No-op if the
         * runtime already exists. */

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        ec = unit_get_exec_context(u);
        assert(ec);

        r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
        if (r < 0)
                return r;

        /* Try to get it from somebody else */
        SET_FOREACH(other, units) {
                r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
                if (r < 0)
                        return r;
                if (r > 0)
                        break;
        }

        /* Nobody to share with? Then create a fresh shared runtime under our own id. */
        if (!esr) {
                r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
                if (r < 0)
                        return r;
        }

        if (ec->dynamic_user) {
                r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
                if (r < 0)
                        return r;
        }

        r = exec_runtime_make(u, ec, esr, dcreds, rt);
        if (r < 0)
                return r;

        /* Ownership of esr and dcreds was transferred to the new ExecRuntime, don't unref them here. */
        TAKE_PTR(esr);
        TAKE_PTR(dcreds);

        return r;
}
5038
5039 bool unit_type_supported(UnitType t) {
5040 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5041 int r;
5042
5043 if (_unlikely_(t < 0))
5044 return false;
5045 if (_unlikely_(t >= _UNIT_TYPE_MAX))
5046 return false;
5047
5048 if (cache[t] == 0) {
5049 char *e;
5050
5051 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5052
5053 r = getenv_bool(ascii_strupper(e));
5054 if (r < 0 && r != -ENXIO)
5055 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5056
5057 cache[t] = r == 0 ? -1 : 1;
5058 }
5059 if (cache[t] < 0)
5060 return false;
5061
5062 if (!unit_vtable[t]->supported)
5063 return true;
5064
5065 return unit_vtable[t]->supported();
5066 }
5067
5068 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5069 int r;
5070
5071 assert(u);
5072 assert(where);
5073
5074 if (!unit_log_level_test(u, LOG_NOTICE))
5075 return;
5076
5077 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5078 if (r > 0 || r == -ENOTDIR)
5079 return;
5080 if (r < 0) {
5081 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5082 return;
5083 }
5084
5085 log_unit_struct(u, LOG_NOTICE,
5086 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5087 LOG_UNIT_INVOCATION_ID(u),
5088 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5089 "WHERE=%s", where);
5090 }
5091
5092 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5093 _cleanup_free_ char *canonical_where = NULL;
5094 int r;
5095
5096 assert(u);
5097 assert(where);
5098
5099 r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5100 if (r < 0) {
5101 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5102 return 0;
5103 }
5104
5105 /* We will happily ignore a trailing slash (or any redundant slashes) */
5106 if (path_equal(where, canonical_where))
5107 return 0;
5108
5109 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5110 log_unit_struct(u, LOG_ERR,
5111 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5112 LOG_UNIT_INVOCATION_ID(u),
5113 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5114 "WHERE=%s", where);
5115
5116 return -ELOOP;
5117 }
5118
5119 bool unit_is_pristine(Unit *u) {
5120 assert(u);
5121
5122 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5123 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5124 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5125 *
5126 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5127 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5128 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5129 */
5130
5131 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5132 !u->fragment_path &&
5133 !u->source_path &&
5134 !u->job &&
5135 !u->merged_into;
5136 }
5137
5138 PidRef* unit_control_pid(Unit *u) {
5139 assert(u);
5140
5141 if (UNIT_VTABLE(u)->control_pid)
5142 return UNIT_VTABLE(u)->control_pid(u);
5143
5144 return NULL;
5145 }
5146
5147 PidRef* unit_main_pid(Unit *u) {
5148 assert(u);
5149
5150 if (UNIT_VTABLE(u)->main_pid)
5151 return UNIT_VTABLE(u)->main_pid(u);
5152
5153 return NULL;
5154 }
5155
5156 static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
5157 int r;
5158
5159 assert(u);
5160
5161 if (!MANAGER_IS_SYSTEM(u->manager))
5162 return;
5163
5164 CGroupContext *c;
5165 c = unit_get_cgroup_context(u);
5166 if (!c)
5167 return;
5168
5169 if (!u->manager->fw_ctx) {
5170 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
5171 if (r < 0)
5172 return;
5173
5174 assert(u->manager->fw_ctx);
5175 }
5176
5177 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
5178 if (nft_set->source != source)
5179 continue;
5180
5181 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
5182 if (r < 0)
5183 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5184 add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5185 else
5186 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5187 add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5188 }
5189 }
5190
5191 static void unit_unref_uid_internal(
5192 Unit *u,
5193 uid_t *ref_uid,
5194 bool destroy_now,
5195 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5196
5197 assert(u);
5198 assert(ref_uid);
5199 assert(_manager_unref_uid);
5200
5201 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5202 * gid_t are actually the same time, with the same validity rules.
5203 *
5204 * Drops a reference to UID/GID from a unit. */
5205
5206 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5207 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5208
5209 if (!uid_is_valid(*ref_uid))
5210 return;
5211
5212 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5213 *ref_uid = UID_INVALID;
5214 }
5215
5216 static void unit_unref_uid(Unit *u, bool destroy_now) {
5217 assert(u);
5218
5219 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);
5220
5221 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5222 }
5223
5224 static void unit_unref_gid(Unit *u, bool destroy_now) {
5225 assert(u);
5226
5227 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);
5228
5229 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5230 }
5231
5232 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5233 assert(u);
5234
5235 unit_unref_uid(u, destroy_now);
5236 unit_unref_gid(u, destroy_now);
5237 }
5238
5239 static int unit_ref_uid_internal(
5240 Unit *u,
5241 uid_t *ref_uid,
5242 uid_t uid,
5243 bool clean_ipc,
5244 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5245
5246 int r;
5247
5248 assert(u);
5249 assert(ref_uid);
5250 assert(uid_is_valid(uid));
5251 assert(_manager_ref_uid);
5252
5253 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5254 * are actually the same type, and have the same validity rules.
5255 *
5256 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5257 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5258 * drops to zero. */
5259
5260 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5261 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5262
5263 if (*ref_uid == uid)
5264 return 0;
5265
5266 if (uid_is_valid(*ref_uid)) /* Already set? */
5267 return -EBUSY;
5268
5269 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5270 if (r < 0)
5271 return r;
5272
5273 *ref_uid = uid;
5274 return 1;
5275 }
5276
/* Thin wrapper: records a reference to the given UID in u->ref_uid. */
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
5280
/* Same for GIDs; reuses the UID implementation since uid_t and gid_t share size and validity rules. */
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
5284
5285 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5286 int r = 0, q = 0;
5287
5288 assert(u);
5289
5290 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5291
5292 if (uid_is_valid(uid)) {
5293 r = unit_ref_uid(u, uid, clean_ipc);
5294 if (r < 0)
5295 return r;
5296 }
5297
5298 if (gid_is_valid(gid)) {
5299 q = unit_ref_gid(u, gid, clean_ipc);
5300 if (q < 0) {
5301 if (r > 0)
5302 unit_unref_uid(u, false);
5303
5304 return q;
5305 }
5306 }
5307
5308 return r > 0 || q > 0;
5309 }
5310
5311 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5312 ExecContext *c;
5313 int r;
5314
5315 assert(u);
5316
5317 c = unit_get_exec_context(u);
5318
5319 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5320 if (r < 0)
5321 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5322
5323 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
5324 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);
5325
5326 return r;
5327 }
5328
5329 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5330 int r;
5331
5332 assert(u);
5333
5334 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5335 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5336 * objects when no service references the UID/GID anymore. */
5337
5338 r = unit_ref_uid_gid(u, uid, gid);
5339 if (r > 0)
5340 unit_add_to_dbus_queue(u);
5341 }
5342
5343 int unit_acquire_invocation_id(Unit *u) {
5344 sd_id128_t id;
5345 int r;
5346
5347 assert(u);
5348
5349 r = sd_id128_randomize(&id);
5350 if (r < 0)
5351 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5352
5353 r = unit_set_invocation_id(u, id);
5354 if (r < 0)
5355 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5356
5357 unit_add_to_dbus_queue(u);
5358 return 0;
5359 }
5360
/* Fills in the ExecParameters structure @p that a forked-off process will receive, partly from
 * manager-wide state and partly from per-unit state. Returns 0 on success, negative errno on
 * failure (allocation errors or the BPF map fd lookup failing). */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        const char *confirm_spawn;
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        confirm_spawn = manager_get_confirm_spawn(u->manager);
        if (confirm_spawn) {
                p->confirm_spawn = strdup(confirm_spawn);
                if (!p->confirm_spawn)
                        return -ENOMEM;
        }

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = !!u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        /* Hand out the restrict-filesystems BPF map fd, but only if not already set on @p. */
        if (u->manager->restrict_fs && p->bpf_outer_map_fd < 0) {
                int fd = lsm_bpf_map_restrict_fs_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_outer_map_fd = fd;
        }

        /* Write end of the manager's user lookup pipe, for the child to report resolved UIDs/GIDs. */
        p->user_lookup_fd = u->manager->user_lookup_fds[1];

        p->cgroup_id = u->cgroup_id;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        return 0;
}
5416
/* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in
 * the child and > 0 in the parent; @ret is filled in with a reference to the child's PID (in the
 * parent). On fork failure a negative errno is returned. */
int unit_fork_helper_process(Unit *u, const char *name, PidRef *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Make sure the cgroup exists before trying to move the child into it. */
        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
                int q;

                /* Parent */

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child: reset signal dispositions inherited from the manager. */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5461
/* Forks off a helper inside the unit's cgroup that removes each entry of @paths (rm -rf style),
 * and registers the helper's PID with the unit so its exit is tracked. On success returns 0 and
 * fills in @ret_pid; negative errno on failure. */
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                int ret = EXIT_SUCCESS;

                /* Child: try all paths; remember if any of them failed, but keep going. */
                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        /* Parent: watch the helper exclusively on behalf of this unit. */
        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
5493
5494 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5495 assert(deps);
5496 assert(other);
5497
5498 if (di.origin_mask == 0 && di.destination_mask == 0)
5499 /* No bit set anymore, let's drop the whole entry */
5500 assert_se(hashmap_remove(deps, other));
5501 else
5502 /* Mask was reduced, let's update the entry */
5503 assert_se(hashmap_update(deps, other, di.data) == 0);
5504 }
5505
/* Removes all dependencies u has on other units marked for ownership by 'mask', together with the
 * implied reverse dependencies recorded on the other units. */
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* Removing entries invalidates the iterator, hence restart the inner iteration after
                 * every removal until a full pass finds nothing left to remove. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* Skip entries that have no origin bit inside 'mask'. */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                /* We mutated 'deps'; break out and restart the iteration. */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5561
5562 static int unit_get_invocation_path(Unit *u, char **ret) {
5563 char *p;
5564 int r;
5565
5566 assert(u);
5567 assert(ret);
5568
5569 if (MANAGER_IS_SYSTEM(u->manager))
5570 p = strjoin("/run/systemd/units/invocation:", u->id);
5571 else {
5572 _cleanup_free_ char *user_path = NULL;
5573 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5574 if (r < 0)
5575 return r;
5576 p = strjoin(user_path, u->id);
5577 }
5578
5579 if (!p)
5580 return -ENOMEM;
5581
5582 *ret = p;
5583 return 0;
5584 }
5585
5586 static int unit_export_invocation_id(Unit *u) {
5587 _cleanup_free_ char *p = NULL;
5588 int r;
5589
5590 assert(u);
5591
5592 if (u->exported_invocation_id)
5593 return 0;
5594
5595 if (sd_id128_is_null(u->invocation_id))
5596 return 0;
5597
5598 r = unit_get_invocation_path(u, &p);
5599 if (r < 0)
5600 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5601
5602 r = symlink_atomic_label(u->invocation_id_string, p);
5603 if (r < 0)
5604 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5605
5606 u->exported_invocation_id = true;
5607 return 0;
5608 }
5609
5610 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5611 const char *p;
5612 char buf[2];
5613 int r;
5614
5615 assert(u);
5616 assert(c);
5617
5618 if (u->exported_log_level_max)
5619 return 0;
5620
5621 if (c->log_level_max < 0)
5622 return 0;
5623
5624 assert(c->log_level_max <= 7);
5625
5626 buf[0] = '0' + c->log_level_max;
5627 buf[1] = 0;
5628
5629 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5630 r = symlink_atomic(buf, p);
5631 if (r < 0)
5632 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5633
5634 u->exported_log_level_max = true;
5635 return 0;
5636 }
5637
5638 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5639 _cleanup_close_ int fd = -EBADF;
5640 struct iovec *iovec;
5641 const char *p;
5642 char *pattern;
5643 le64_t *sizes;
5644 ssize_t n;
5645 int r;
5646
5647 if (u->exported_log_extra_fields)
5648 return 0;
5649
5650 if (c->n_log_extra_fields <= 0)
5651 return 0;
5652
5653 sizes = newa(le64_t, c->n_log_extra_fields);
5654 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5655
5656 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5657 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5658
5659 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5660 iovec[i*2+1] = c->log_extra_fields[i];
5661 }
5662
5663 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5664 pattern = strjoina(p, ".XXXXXX");
5665
5666 fd = mkostemp_safe(pattern);
5667 if (fd < 0)
5668 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5669
5670 n = writev(fd, iovec, c->n_log_extra_fields*2);
5671 if (n < 0) {
5672 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5673 goto fail;
5674 }
5675
5676 (void) fchmod(fd, 0644);
5677
5678 if (rename(pattern, p) < 0) {
5679 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5680 goto fail;
5681 }
5682
5683 u->exported_log_extra_fields = true;
5684 return 0;
5685
5686 fail:
5687 (void) unlink(pattern);
5688 return r;
5689 }
5690
5691 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5692 _cleanup_free_ char *buf = NULL;
5693 const char *p;
5694 int r;
5695
5696 assert(u);
5697 assert(c);
5698
5699 if (u->exported_log_ratelimit_interval)
5700 return 0;
5701
5702 if (c->log_ratelimit_interval_usec == 0)
5703 return 0;
5704
5705 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5706
5707 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5708 return log_oom();
5709
5710 r = symlink_atomic(buf, p);
5711 if (r < 0)
5712 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5713
5714 u->exported_log_ratelimit_interval = true;
5715 return 0;
5716 }
5717
5718 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5719 _cleanup_free_ char *buf = NULL;
5720 const char *p;
5721 int r;
5722
5723 assert(u);
5724 assert(c);
5725
5726 if (u->exported_log_ratelimit_burst)
5727 return 0;
5728
5729 if (c->log_ratelimit_burst == 0)
5730 return 0;
5731
5732 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5733
5734 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5735 return log_oom();
5736
5737 r = symlink_atomic(buf, p);
5738 if (r < 0)
5739 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5740
5741 u->exported_log_ratelimit_burst = true;
5742 return 0;
5743 }
5744
5745 void unit_export_state_files(Unit *u) {
5746 const ExecContext *c;
5747
5748 assert(u);
5749
5750 if (!u->id)
5751 return;
5752
5753 if (MANAGER_IS_TEST_RUN(u->manager))
5754 return;
5755
5756 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5757 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5758 * the IPC system itself and PID 1 also log to the journal.
5759 *
5760 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5761 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5762 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5763 * namespace at least.
5764 *
5765 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5766 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5767 * them with one. */
5768
5769 (void) unit_export_invocation_id(u);
5770
5771 if (!MANAGER_IS_SYSTEM(u->manager))
5772 return;
5773
5774 c = unit_get_exec_context(u);
5775 if (c) {
5776 (void) unit_export_log_level_max(u, c);
5777 (void) unit_export_log_extra_fields(u, c);
5778 (void) unit_export_log_ratelimit_interval(u, c);
5779 (void) unit_export_log_ratelimit_burst(u, c);
5780 }
5781 }
5782
5783 void unit_unlink_state_files(Unit *u) {
5784 const char *p;
5785
5786 assert(u);
5787
5788 if (!u->id)
5789 return;
5790
5791 /* Undoes the effect of unit_export_state() */
5792
5793 if (u->exported_invocation_id) {
5794 _cleanup_free_ char *invocation_path = NULL;
5795 int r = unit_get_invocation_path(u, &invocation_path);
5796 if (r >= 0) {
5797 (void) unlink(invocation_path);
5798 u->exported_invocation_id = false;
5799 }
5800 }
5801
5802 if (!MANAGER_IS_SYSTEM(u->manager))
5803 return;
5804
5805 if (u->exported_log_level_max) {
5806 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5807 (void) unlink(p);
5808
5809 u->exported_log_level_max = false;
5810 }
5811
5812 if (u->exported_log_extra_fields) {
5813 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5814 (void) unlink(p);
5815
5816 u->exported_log_extra_fields = false;
5817 }
5818
5819 if (u->exported_log_ratelimit_interval) {
5820 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5821 (void) unlink(p);
5822
5823 u->exported_log_ratelimit_interval = false;
5824 }
5825
5826 if (u->exported_log_ratelimit_burst) {
5827 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5828 (void) unlink(p);
5829
5830 u->exported_log_ratelimit_burst = false;
5831 }
5832 }
5833
/* Prepares everything so that we can fork off a process for this unit: verifies custom BPF
 * firewall programs, realizes the cgroup, resets accounting if requested, exports state files and
 * sets up the exec runtime. Returns 0 on success, negative errno on failure. */
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually
         * loadable. Fail here early since later errors in the call chain unit_realize_cgroup to
         * cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        (void) unit_realize_cgroup(u);

        /* Start the next runtime cycle with fresh accounting, if requested. */
        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
5862
/* Returns true when a left-over process with the given comm name should not be reported:
 * processes whose name starts with '(' are most likely our own helper processes (PAM?). */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return comm[0] == '(';
}
5866
5867 int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
5868 _cleanup_free_ char *comm = NULL;
5869
5870 assert(pidref_is_set(pid));
5871
5872 (void) pidref_get_comm(pid, &comm);
5873
5874 if (ignore_leftover_process(comm))
5875 return 0;
5876
5877 /* During start we print a warning */
5878
5879 log_unit_warning(userdata,
5880 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5881 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5882 pid->pid, strna(comm));
5883
5884 return 1;
5885 }
5886
5887 int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
5888 _cleanup_free_ char *comm = NULL;
5889
5890 assert(pidref_is_set(pid));
5891
5892 (void) pidref_get_comm(pid, &comm);
5893
5894 if (ignore_leftover_process(comm))
5895 return 0;
5896
5897 /* During stop we only print an informational message */
5898
5899 log_unit_info(userdata,
5900 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5901 pid->pid, strna(comm));
5902
5903 return 1;
5904 }
5905
5906 int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
5907 assert(u);
5908
5909 (void) unit_pick_cgroup_path(u);
5910
5911 if (!u->cgroup_path)
5912 return 0;
5913
5914 return cg_kill_recursive(
5915 u->cgroup_path,
5916 /* sig= */ 0,
5917 /* flags= */ 0,
5918 /* set= */ NULL,
5919 log_func,
5920 u);
5921 }
5922
5923 bool unit_needs_console(Unit *u) {
5924 ExecContext *ec;
5925 UnitActiveState state;
5926
5927 assert(u);
5928
5929 state = unit_active_state(u);
5930
5931 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5932 return false;
5933
5934 if (UNIT_VTABLE(u)->needs_console)
5935 return UNIT_VTABLE(u)->needs_console(u);
5936
5937 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5938 ec = unit_get_exec_context(u);
5939 if (!ec)
5940 return false;
5941
5942 return exec_context_may_touch_console(ec);
5943 }
5944
/* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our
 * manager itself, and not a kernel thread either. Returns 0 when attachable, otherwise a negative
 * errno with @error populated for the bus caller. */
int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check: refuse PID 1 and ourselves. */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
5972
/* Emits the structured "Deactivated successfully." journal message for the unit. */
void unit_log_success(Unit *u) {
        assert(u);

        /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */
        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}
5985
/* Emits the structured journal message for a unit that entered the failed state, including the
 * textual failure result (e.g. "timeout", "exit-code") as the UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5996
/* Emits the structured journal message for a unit whose start was skipped (e.g. due to a failed
 * condition), with the reason recorded as the UNIT_RESULT= field. */
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
6007
/* Logs the exit of one of the unit's processes, with @kind naming the process role (e.g. "Main
 * process"), @code/@status being the siginfo exit code and status. */
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        /* For CLD_EXITED the status is an exit code, otherwise it is the terminating signal. */
        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
6046
int unit_exit_status(Unit *u) {
        assert(u);

        /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
         * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
         * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
         * service process has exited abnormally (signal/coredump). */

        /* Delegate entirely to the unit type; without an implementation the concept doesn't apply. */
        if (!UNIT_VTABLE(u)->exit_status)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->exit_status(u);
}
6060
6061 int unit_failure_action_exit_status(Unit *u) {
6062 int r;
6063
6064 assert(u);
6065
6066 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6067
6068 if (u->failure_action_exit_status >= 0)
6069 return u->failure_action_exit_status;
6070
6071 r = unit_exit_status(u);
6072 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6073 return 255;
6074
6075 return r;
6076 }
6077
6078 int unit_success_action_exit_status(Unit *u) {
6079 int r;
6080
6081 assert(u);
6082
6083 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6084
6085 if (u->success_action_exit_status >= 0)
6086 return u->success_action_exit_status;
6087
6088 r = unit_exit_status(u);
6089 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6090 return 255;
6091
6092 return r;
6093 }
6094
6095 int unit_test_trigger_loaded(Unit *u) {
6096 Unit *trigger;
6097
6098 /* Tests whether the unit to trigger is loaded */
6099
6100 trigger = UNIT_TRIGGER(u);
6101 if (!trigger)
6102 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6103 "Refusing to start, no unit to trigger.");
6104 if (trigger->load_state != UNIT_LOADED)
6105 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6106 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6107
6108 return 0;
6109 }
6110
/* Destroys the unit's per-invocation runtime data: runtime directory (unless it shall be
 * preserved), credentials and the mount namespace directory. */
void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
        assert(u);
        assert(context);

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(u);
        exec_context_destroy_mount_ns_dir(u);
}
6122
6123 int unit_clean(Unit *u, ExecCleanMask mask) {
6124 UnitActiveState state;
6125
6126 assert(u);
6127
6128 /* Special return values:
6129 *
6130 * -EOPNOTSUPP → cleaning not supported for this unit type
6131 * -EUNATCH → cleaning not defined for this resource type
6132 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6133 * a job queued or similar
6134 */
6135
6136 if (!UNIT_VTABLE(u)->clean)
6137 return -EOPNOTSUPP;
6138
6139 if (mask == 0)
6140 return -EUNATCH;
6141
6142 if (u->load_state != UNIT_LOADED)
6143 return -EBUSY;
6144
6145 if (u->job)
6146 return -EBUSY;
6147
6148 state = unit_active_state(u);
6149 if (state != UNIT_INACTIVE)
6150 return -EBUSY;
6151
6152 return UNIT_VTABLE(u)->clean(u, mask);
6153 }
6154
6155 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6156 assert(u);
6157
6158 if (!UNIT_VTABLE(u)->clean ||
6159 u->load_state != UNIT_LOADED) {
6160 *ret = 0;
6161 return 0;
6162 }
6163
6164 /* When the clean() method is set, can_clean() really should be set too */
6165 assert(UNIT_VTABLE(u)->can_clean);
6166
6167 return UNIT_VTABLE(u)->can_clean(u, ret);
6168 }
6169
/* Returns true if the unit can be started and manual starting is not refused. */
bool unit_can_start_refuse_manual(Unit *u) {
        return unit_can_start(u) && !u->refuse_manual_start;
}
6173
/* Returns true if the unit can be stopped and manual stopping is not refused. */
bool unit_can_stop_refuse_manual(Unit *u) {
        return unit_can_stop(u) && !u->refuse_manual_stop;
}
6177
/* Returns true if the unit can be isolated and manual starting is not refused. */
bool unit_can_isolate_refuse_manual(Unit *u) {
        return unit_can_isolate(u) && !u->refuse_manual_start;
}
6181
/* Returns true if this unit may be frozen: the type-specific can_freeze() check wins if present,
 * otherwise freezing is possible iff the type implements the freeze() method at all. */
bool unit_can_freeze(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freeze;
}
6190
/* Marks the unit as fully frozen and flushes any pending freezer bus reply. NOTE(review):
 * presumably invoked once the freeze operation completes — confirm at call sites. */
void unit_frozen(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_FROZEN;

        bus_unit_send_pending_freezer_message(u, false);
}
6198
/* Marks the unit as running again (thawed) and flushes any pending freezer bus reply.
 * NOTE(review): presumably invoked once the thaw operation completes — confirm at call sites. */
void unit_thawed(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_RUNNING;

        bus_unit_send_pending_freezer_message(u, false);
}
6206
6207 static int unit_freezer_action(Unit *u, FreezerAction action) {
6208 UnitActiveState s;
6209 int (*method)(Unit*);
6210 int r;
6211
6212 assert(u);
6213 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
6214
6215 method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
6216 if (!method || !cg_freezer_supported())
6217 return -EOPNOTSUPP;
6218
6219 if (u->job)
6220 return -EBUSY;
6221
6222 if (u->load_state != UNIT_LOADED)
6223 return -EHOSTDOWN;
6224
6225 s = unit_active_state(u);
6226 if (s != UNIT_ACTIVE)
6227 return -EHOSTDOWN;
6228
6229 if ((IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING) && action == FREEZER_FREEZE) ||
6230 (u->freezer_state == FREEZER_THAWING && action == FREEZER_THAW))
6231 return -EALREADY;
6232
6233 r = method(u);
6234 if (r <= 0)
6235 return r;
6236
6237 assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING));
6238
6239 return 1;
6240 }
6241
/* Initiates freezing of the unit via the generic freezer logic. */
int unit_freeze(Unit *u) {
        return unit_freezer_action(u, FREEZER_FREEZE);
}
6245
/* Initiates thawing of the unit via the generic freezer logic. */
int unit_thaw(Unit *u) {
        return unit_freezer_action(u, FREEZER_THAW);
}
6249
/* Wrappers around low-level cgroup freezer operations common for service and scope units */
int unit_freeze_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
}

int unit_thaw_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_THAW);
}
6258
6259 Condition *unit_find_failed_condition(Unit *u) {
6260 Condition *failed_trigger = NULL;
6261 bool has_succeeded_trigger = false;
6262
6263 if (u->condition_result)
6264 return NULL;
6265
6266 LIST_FOREACH(conditions, c, u->conditions)
6267 if (c->trigger) {
6268 if (c->result == CONDITION_SUCCEEDED)
6269 has_succeeded_trigger = true;
6270 else if (!failed_trigger)
6271 failed_trigger = c;
6272 } else if (c->result != CONDITION_SUCCEEDED)
6273 return c;
6274
6275 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6276 }
6277
/* String names for CollectMode values, used by the generated from/to-string lookup functions. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6284
6285 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6286 Unit *i;
6287
6288 assert(u);
6289
6290 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6291 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6292 * is NULL the first entry found), or NULL if not found. */
6293
6294 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6295 if (!other || other == i)
6296 return i;
6297
6298 return NULL;
6299 }
6300
6301 int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
6302 _cleanup_free_ Unit **array = NULL;
6303 size_t n = 0;
6304 Unit *other;
6305
6306 assert(u);
6307 assert(ret_array);
6308
6309 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
6310 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
6311 * while the dependency table is continuously updated. */
6312
6313 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6314 if (!GREEDY_REALLOC(array, n + 1))
6315 return -ENOMEM;
6316
6317 array[n++] = other;
6318 }
6319
6320 *ret_array = TAKE_PTR(array);
6321
6322 assert(n <= INT_MAX);
6323 return (int) n;
6324 }
6325
/* Similar to unit_get_dependency_array(), but also search the same dependency in other units:
 * computes the transitive closure of @atom starting from @u via a breadth-first traversal.
 * 'units' collects every unit seen so far; 'queue' holds the ones not yet expanded. */
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        /* r == 0 means already visited, don't queue it again. */
                        if (r == 0)
                                continue;
                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
6352
6353 int unit_arm_timer(
6354 Unit *u,
6355 sd_event_source **source,
6356 bool relative,
6357 usec_t usec,
6358 sd_event_time_handler_t handler) {
6359
6360 int r;
6361
6362 assert(u);
6363 assert(source);
6364 assert(handler);
6365
6366 if (*source) {
6367 if (usec == USEC_INFINITY)
6368 return sd_event_source_set_enabled(*source, SD_EVENT_OFF);
6369
6370 r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
6371 if (r < 0)
6372 return r;
6373
6374 return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
6375 }
6376
6377 if (usec == USEC_INFINITY)
6378 return 0;
6379
6380 r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
6381 u->manager->event,
6382 source,
6383 CLOCK_MONOTONIC,
6384 usec, 0,
6385 handler,
6386 u);
6387 if (r < 0)
6388 return r;
6389
6390 const char *d = strjoina(unit_type_to_string(u->type), "-timer");
6391 (void) sd_event_source_set_description(*source, d);
6392
6393 return 0;
6394 }
6395
6396 static int unit_get_nice(Unit *u) {
6397 ExecContext *ec;
6398
6399 ec = unit_get_exec_context(u);
6400 return ec ? ec->nice : 0;
6401 }
6402
6403 static uint64_t unit_get_cpu_weight(Unit *u) {
6404 CGroupContext *cc;
6405
6406 cc = unit_get_cgroup_context(u);
6407 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6408 }
6409
/* Defines a deterministic total order over units, by type, CPU weight, nice level and finally
 * unit name. */
int unit_compare_priority(Unit *a, Unit *b) {
        int ret;

        ret = CMP(a->type, b->type);
        if (ret != 0)
                return -ret; /* inverted: the larger type value sorts first */

        ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
        if (ret != 0)
                return -ret; /* inverted: higher CPU weight sorts first */

        ret = CMP(unit_get_nice(a), unit_get_nice(b));
        if (ret != 0)
                return ret; /* not inverted: lower nice value sorts first */

        /* Fall back to the unit name to keep the order stable. */
        return strcmp(a->id, b->id);
}
6427
/* Per-unit-type vtables for ActivationDetails; only path and timer units provide one, all other
 * entries stay NULL. */
const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};
6432
/* Allocates a new ActivationDetails object (with refcount 1) describing that @trigger_unit caused
 * an activation. Returns NULL on allocation failure. NOTE(review): indexes
 * activation_details_vtable[] without a NULL check, so callers must only pass unit types that
 * have a vtable entry (path, timer) — confirm at call sites. */
ActivationDetails *activation_details_new(Unit *trigger_unit) {
        _cleanup_free_ ActivationDetails *details = NULL;

        assert(trigger_unit);
        assert(trigger_unit->type != _UNIT_TYPE_INVALID);
        assert(trigger_unit->id);

        details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
        if (!details)
                return NULL;

        *details = (ActivationDetails) {
                .n_ref = 1,
                .trigger_unit_type = trigger_unit->type,
        };

        /* On failure below, the cleanup handler frees 'details' (trigger_unit_name is still NULL). */
        details->trigger_unit_name = strdup(trigger_unit->id);
        if (!details->trigger_unit_name)
                return NULL;

        if (ACTIVATION_DETAILS_VTABLE(details)->init)
                ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);

        /* Transfer ownership to the caller, disarming the cleanup handler. */
        return TAKE_PTR(details);
}
6458
6459 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6460 if (!details)
6461 return NULL;
6462
6463 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6464 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6465
6466 free(details->trigger_unit_name);
6467
6468 return mfree(details);
6469 }
6470
/* Serializes the ActivationDetails to @f (NULL-safe): the trigger unit's type and name, plus any
 * type-specific fields via the vtable hook. */
void activation_details_serialize(ActivationDetails *details, FILE *f) {
        if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
                return;

        (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
        if (details->trigger_unit_name)
                (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
        if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
                (void) ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
}
6481
/* Consumes one "activation-details-*" key/value pair produced by
 * activation_details_serialize(). On the first call *details must be NULL and
 * the key must be the unit-type item, which determines the object size to
 * allocate; subsequent calls fill in the allocated object. Returns 0 on
 * success, -EINVAL for unknown/out-of-order keys, -ENOMEM on allocation
 * failure, or whatever the type-specific deserializer returns. */
int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
        int r;

        assert(key);
        assert(value);
        assert(details);

        if (!*details) {
                UnitType t;

                /* The type item must be the very first one we see, since it
                 * tells us how large an object to allocate. */
                if (!streq(key, "activation-details-unit-type"))
                        return -EINVAL;

                t = unit_type_from_string(value);
                if (t < 0)
                        return t;

                /* The activation details vtable has defined ops only for path and timer units */
                if (!activation_details_vtable[t])
                        return -EINVAL;

                *details = malloc0(activation_details_vtable[t]->object_size);
                if (!*details)
                        return -ENOMEM;

                **details = (ActivationDetails) {
                        .n_ref = 1,
                        .trigger_unit_type = t,
                };

                return 0;
        }

        if (streq(key, "activation-details-unit-name")) {
                r = free_and_strdup(&(*details)->trigger_unit_name, value);
                if (r < 0)
                        return r;

                return 0;
        }

        /* Hand anything else to the type-specific deserializer, if any. */
        if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
                return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);

        return -EINVAL;
}
6528
6529 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6530 int r = 0;
6531
6532 assert(strv);
6533
6534 if (!details)
6535 return 0;
6536
6537 if (!isempty(details->trigger_unit_name)) {
6538 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6539 if (!s)
6540 return -ENOMEM;
6541
6542 r = strv_consume(strv, TAKE_PTR(s));
6543 if (r < 0)
6544 return r;
6545 }
6546
6547 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6548 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6549 if (r < 0)
6550 return r;
6551 }
6552
6553 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6554 }
6555
6556 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6557 int r = 0;
6558
6559 assert(strv);
6560
6561 if (!details)
6562 return 0;
6563
6564 if (!isempty(details->trigger_unit_name)) {
6565 r = strv_extend(strv, "trigger_unit");
6566 if (r < 0)
6567 return r;
6568
6569 r = strv_extend(strv, details->trigger_unit_name);
6570 if (r < 0)
6571 return r;
6572 }
6573
6574 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6575 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6576 if (r < 0)
6577 return r;
6578 }
6579
6580 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6581 }
6582
6583 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);