]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
core: add unit_reset_{memory,io}_accounting_last
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
18 #include "bus-util.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
21 #include "chase.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
24 #include "dbus.h"
25 #include "dropin.h"
26 #include "env-util.h"
27 #include "escape.h"
28 #include "exec-credential.h"
29 #include "execute.h"
30 #include "fd-util.h"
31 #include "fileio-label.h"
32 #include "fileio.h"
33 #include "format-util.h"
34 #include "id128-util.h"
35 #include "install.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
40 #include "log.h"
41 #include "logarithm.h"
42 #include "macro.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
46 #include "rm-rf.h"
47 #include "serialize.h"
48 #include "set.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
51 #include "special.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "strv.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
62 #include "unit.h"
63 #include "user-util.h"
64 #include "virt.h"
65 #if BPF_FRAMEWORK
66 #include "bpf-link.h"
67 #endif
68
69 /* Thresholds for logging at INFO level about resource consumption */
70 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
71 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
72 #define MENTIONWORTHY_IP_BYTES (0ULL)
73
/* Thresholds for logging at NOTICE level about resource consumption */
75 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
76 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
77 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
78
/* Dispatch table: maps each concrete unit type to its implementation vtable.
 * Generic Unit code reaches the type-specific implementation through
 * UNIT_VTABLE(u), which indexes this array by u->type. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
92
/* Allocates and zero-initializes a new Unit object of at least 'size' bytes
 * (type-specific code passes the size of its extended struct) and fills in
 * the per-unit defaults. Returns NULL on allocation failure. The unit is
 * not registered under any name yet — unit_add_name() does that. */
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1; /* no inotify watches established yet */
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY; /* "no sample taken yet" marker */

        /* Mark the memory/IO accounting baselines as unset, too */
        unit_reset_memory_accounting_last(u);

        unit_reset_io_accounting_last(u);

        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All BPF map/program fds start out closed */
        u->ip_accounting_ingress_map_fd = -EBADF;
        u->ip_accounting_egress_map_fd = -EBADF;

        u->ipv4_allow_map_fd = -EBADF;
        u->ipv6_allow_map_fd = -EBADF;
        u->ipv4_deny_map_fd = -EBADF;
        u->ipv6_deny_map_fd = -EBADF;

        u->last_section_private = -1;

        /* Start rate limiting is initialized from the manager-wide defaults */
        u->start_ratelimit = (RateLimit) {
                m->defaults.start_limit_interval,
                m->defaults.start_limit_burst
        };

        /* Automatic start/stop is limited to 16 operations per 10 seconds */
        u->auto_start_stop_ratelimit = (const RateLimit) {
                10 * USEC_PER_SEC,
                16
        };

        return u;
}
147
148 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
149 _cleanup_(unit_freep) Unit *u = NULL;
150 int r;
151
152 u = unit_new(m, size);
153 if (!u)
154 return -ENOMEM;
155
156 r = unit_add_name(u, name);
157 if (r < 0)
158 return r;
159
160 *ret = TAKE_PTR(u);
161
162 return r;
163 }
164
165 bool unit_has_name(const Unit *u, const char *name) {
166 assert(u);
167 assert(name);
168
169 return streq_ptr(name, u->id) ||
170 set_contains(u->aliases, name);
171 }
172
/* Per-type initialization of a freshly named unit: copies the manager-wide
 * defaults into the unit's cgroup/exec/kill contexts (deliberately before
 * any unit file settings are parsed, so the file can override them), then
 * invokes the type-specific init() vtable hook. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->defaults.cpu_accounting;
                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->blockio_accounting = u->manager->defaults.blockio_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                /* Slices are exempt from the default tasks limit */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
232
233 static int unit_add_alias(Unit *u, char *donated_name) {
234 int r;
235
236 /* Make sure that u->names is allocated. We may leave u->names
237 * empty if we fail later, but this is not a problem. */
238 r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
239 if (r < 0)
240 return r;
241 assert(r > 0);
242
243 return 0;
244 }
245
/* Registers an additional name for the unit. 'text' may be a template name,
 * in which case the unit's instance string is spliced in first. The first
 * name added determines the unit's type and instance; subsequent names
 * become aliases. Returns 0 on success (also if the name was already
 * present), negative errno on failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already known under this name? Nothing to do then. */
        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* All names of one unit must share the same unit type suffix */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        /* Roll back the global registration done above */
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name); /* ownership now rests with u->aliases */

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
340
/* Promotes 'name' — which must already be one of the unit's aliases
 * (template names are instantiated first) — to be the unit's primary id,
 * demoting the previous id to an alias. Returns 0 on success, -EINVAL for
 * a template name without instance, -ENOENT if the name is unknown. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the old id goes into the alias set, the chosen name leaves it */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
380
381 int unit_set_description(Unit *u, const char *description) {
382 int r;
383
384 assert(u);
385
386 r = free_and_strdup(&u->description, empty_to_null(description));
387 if (r < 0)
388 return r;
389 if (r > 0)
390 unit_add_to_dbus_queue(u);
391
392 return 0;
393 }
394
/* Returns true if any of the unit's OnSuccess=/OnFailure= handler units
 * currently has a job (or nop job) queued — used to keep the unit from
 * being garbage collected while handlers may still run. */
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
408
409 void unit_release_resources(Unit *u) {
410 UnitActiveState state;
411 ExecContext *ec;
412
413 assert(u);
414
415 if (u->job || u->nop_job)
416 return;
417
418 if (u->perpetual)
419 return;
420
421 state = unit_active_state(u);
422 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
423 return;
424
425 if (unit_will_restart(u))
426 return;
427
428 ec = unit_get_exec_context(u);
429 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
430 exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
431
432 if (UNIT_VTABLE(u)->release_resources)
433 UNIT_VTABLE(u)->release_resources(u);
434 }
435
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        /* Pending jobs keep the unit alive */
        if (u->job || u->nop_job)
                return false;

        /* Perpetual units are never collected */
        if (u->perpetual)
                return false;

        /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        /* Bus clients explicitly referencing the unit keep it pinned */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0) /* not empty, or we couldn't tell — keep the unit */
                        return false;
        }

        /* Finally, give the unit type a veto */
        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
510
511 void unit_add_to_load_queue(Unit *u) {
512 assert(u);
513 assert(u->type != _UNIT_TYPE_INVALID);
514
515 if (u->load_state != UNIT_STUB || u->in_load_queue)
516 return;
517
518 LIST_PREPEND(load_queue, u->manager->load_queue, u);
519 u->in_load_queue = true;
520 }
521
522 void unit_add_to_cleanup_queue(Unit *u) {
523 assert(u);
524
525 if (u->in_cleanup_queue)
526 return;
527
528 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
529 u->in_cleanup_queue = true;
530 }
531
532 void unit_add_to_gc_queue(Unit *u) {
533 assert(u);
534
535 if (u->in_gc_queue || u->in_cleanup_queue)
536 return;
537
538 if (!unit_may_gc(u))
539 return;
540
541 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
542 u->in_gc_queue = true;
543 }
544
545 void unit_add_to_dbus_queue(Unit *u) {
546 assert(u);
547 assert(u->type != _UNIT_TYPE_INVALID);
548
549 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
550 return;
551
552 /* Shortcut things if nobody cares */
553 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
554 sd_bus_track_count(u->bus_track) <= 0 &&
555 set_isempty(u->manager->private_buses)) {
556 u->sent_dbus_new_signal = true;
557 return;
558 }
559
560 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
561 u->in_dbus_queue = true;
562 }
563
564 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
565 assert(u);
566
567 if (u->in_stop_when_unneeded_queue)
568 return;
569
570 if (!u->stop_when_unneeded)
571 return;
572
573 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
574 return;
575
576 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
577 u->in_stop_when_unneeded_queue = true;
578 }
579
580 void unit_submit_to_start_when_upheld_queue(Unit *u) {
581 assert(u);
582
583 if (u->in_start_when_upheld_queue)
584 return;
585
586 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
587 return;
588
589 if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
590 return;
591
592 LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
593 u->in_start_when_upheld_queue = true;
594 }
595
596 void unit_submit_to_stop_when_bound_queue(Unit *u) {
597 assert(u);
598
599 if (u->in_stop_when_bound_queue)
600 return;
601
602 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
603 return;
604
605 if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
606 return;
607
608 LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
609 u->in_stop_when_bound_queue = true;
610 }
611
612 static bool unit_can_release_resources(Unit *u) {
613 ExecContext *ec;
614
615 assert(u);
616
617 if (UNIT_VTABLE(u)->release_resources)
618 return true;
619
620 ec = unit_get_exec_context(u);
621 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
622 return true;
623
624 return false;
625 }
626
627 void unit_submit_to_release_resources_queue(Unit *u) {
628 assert(u);
629
630 if (u->in_release_resources_queue)
631 return;
632
633 if (u->job || u->nop_job)
634 return;
635
636 if (u->perpetual)
637 return;
638
639 if (!unit_can_release_resources(u))
640 return;
641
642 LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
643 u->in_release_resources_queue = true;
644 }
645
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        /* Outer loop: one per-dependency-type hashmap at a time */
        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                /* Inner loop: each unit we depend on in this type */
                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        /* Drop the reverse edges pointing back at u */
                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        /* 'other' may now be unreferenced — let GC decide */
                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
667
/* Deletes the on-disk configuration of a transient unit: its fragment file
 * and any drop-ins that live below the transient lookup path. No-op for
 * non-transient units. */
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* only succeeds once the directory is empty */
        }
}
694
/* Unregisters the unit from the manager's units_requiring_mounts_for index:
 * for every path the unit recorded, walk all its path prefixes and remove
 * the unit from the per-prefix set, freeing empty sets (and their key,
 * which the hashmap owns) as we go. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path = NULL;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1]; /* scratch buffer for the prefix walk */

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y receives the hashmap's own copy of the key, so we can free it below */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix? Drop the whole entry. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
728
729 static void unit_done(Unit *u) {
730 ExecContext *ec;
731 CGroupContext *cc;
732
733 assert(u);
734
735 if (u->type < 0)
736 return;
737
738 if (UNIT_VTABLE(u)->done)
739 UNIT_VTABLE(u)->done(u);
740
741 ec = unit_get_exec_context(u);
742 if (ec)
743 exec_context_done(ec);
744
745 cc = unit_get_cgroup_context(u);
746 if (cc)
747 cgroup_context_done(cc);
748 }
749
/* Destroys a unit: detaches it from every manager-side data structure
 * (name tables, queues, dependency graph, cgroup hierarchy, bus tracking),
 * cancels its jobs, releases BPF resources and frees all owned memory.
 * Accepts NULL; always returns NULL, so callers can write
 * 'u = unit_free(u);'. The teardown order below is deliberate. */
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        /* During a reload the transient config files must survive */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_requires_mounts_for(u);

        /* Drop all names of the unit from the manager's lookup table */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Cancel and free any pending jobs */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        /* Keep state files across daemon reloads, too */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Clear references other units hold on us */
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Remove the unit from every manager work queue it is enqueued in */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        /* Release BPF resources */
        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Free owned strings and string lists */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
901
/* Returns systemd's notion of the unit's freezer state (as tracked in the
 * Unit object, not read from the kernel). */
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}
907
908 int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
909 char *values[1] = {};
910 int r;
911
912 assert(u);
913
914 r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
915 STRV_MAKE("frozen"), values);
916 if (r < 0)
917 return r;
918
919 r = _FREEZER_STATE_INVALID;
920
921 if (values[0]) {
922 if (streq(values[0], "0"))
923 r = FREEZER_RUNNING;
924 else if (streq(values[0], "1"))
925 r = FREEZER_FROZEN;
926 }
927
928 free(values[0]);
929 *ret = r;
930
931 return 0;
932 }
933
/* Returns the unit's active state, as reported by the type-specific
 * implementation. Merged units delegate to the unit they were merged
 * into. */
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
946
/* Returns the type-specific sub-state of the unit as a string (e.g. a
 * service's "running", "start-pre", …). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
952
/* Moves all names of 'other' (its id and aliases) over to 'u' and points
 * the manager's name table entries at 'u'. On failure everything stays
 * where it was. */
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        /* On success this takes ownership of other->id's string */
        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Roll back the alias registration from above */
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id); /* the string now lives in u->aliases */
        other->aliases = set_free_free(other->aliases);

        /* Re-point every moved name at u in the global name table */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
978
/* Pre-allocates space in u's dependency hashmaps so that a subsequent
 * unit_merge_dependencies(u, other) cannot fail with OOM halfway through. */
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        /* Never reserve beyond the number of distinct dependency types */
        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
1025
1026 static bool unit_should_warn_about_dependency(UnitDependency dependency) {
1027 /* Only warn about some unit types */
1028 return IN_SET(dependency,
1029 UNIT_CONFLICTS,
1030 UNIT_CONFLICTED_BY,
1031 UNIT_BEFORE,
1032 UNIT_AFTER,
1033 UNIT_ON_SUCCESS,
1034 UNIT_ON_FAILURE,
1035 UNIT_TRIGGERS,
1036 UNIT_TRIGGERED_BY);
1037 }
1038
/* Inserts or updates the UnitDependencyInfo for 'other' in a per-type
 * dependency hashmap. The info struct is punned into the hashmap's value
 * pointer (see the assert_cc below). Returns 1 when something changed,
 * 0 when the masks were already fully set, negative errno on OOM. */
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info)); /* the struct must fit in a value pointer */

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
1079
/* Moves all dependency edges of 'other' over to 'u' as part of a unit
 * merge, rewiring third-party units' reverse edges from 'other' to 'u'.
 * Must only be called after unit_reserve_dependencies() so the hashmap
 * operations below cannot fail (hence the assert_se()s). */
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                /* Cannot fail thanks to the reservation made earlier */
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}
1166
/* Merges unit 'other' into unit 'u': transfers all references, dependencies and names of
 * 'other' to 'u' and puts 'other' into the UNIT_MERGED state, pointing back at 'u' via
 * ->merged_into. Only units that carry no configuration of their own (UNIT_STUB or
 * UNIT_NOT_FOUND), have no jobs and are inactive/failed may be merged away.
 *
 * Returns 0 on success; -EINVAL if type or instance mismatch; -EEXIST if the type doesn't
 * support aliases or 'other' is busy; other negative errno from the reservation step. */
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        /* If 'other' was itself already merged away, operate on its merge target instead. */
        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        /* Only units without real configuration of their own may be merged away. */
        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        /* Instantiated units may only be merged with units of the same instance. */
        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        /* Refuse if 'other' is busy: any queued job, nop job, or non-inactive state blocks the merge. */
        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* Keep the older activation details: if 'u' has none yet, inherit those of 'other'. */
        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): other->load_state was set to UNIT_MERGED just above, so this condition
         * is always true at this point — TODO confirm whether the UNIT_STUB check is vestigial. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1236
1237 int unit_merge_by_name(Unit *u, const char *name) {
1238 _cleanup_free_ char *s = NULL;
1239 Unit *other;
1240 int r;
1241
1242 /* Either add name to u, or if a unit with name already exists, merge it with u.
1243 * If name is a template, do the same for name@instance, where instance is u's instance. */
1244
1245 assert(u);
1246 assert(name);
1247
1248 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1249 if (!u->instance)
1250 return -EINVAL;
1251
1252 r = unit_name_replace_instance(name, u->instance, &s);
1253 if (r < 0)
1254 return r;
1255
1256 name = s;
1257 }
1258
1259 other = manager_get_unit(u->manager, name);
1260 if (other)
1261 return unit_merge(u, other);
1262
1263 return unit_add_name(u, name);
1264 }
1265
1266 Unit* unit_follow_merge(Unit *u) {
1267 assert(u);
1268
1269 while (u->load_state == UNIT_MERGED)
1270 assert_se(u = u->merged_into);
1271
1272 return u;
1273 }
1274
/* Adds the implicit dependencies that follow from an ExecContext: mount dependencies for the
 * directories the unit needs, orderings against remount-fs/tmpfiles/udev/journald, and
 * credential dependencies. System-manager-only dependencies are skipped for user managers. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        /* The unit needs its WorkingDirectory= mounted, unless a missing one is tolerated. */
        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require mounts for each configured RuntimeDirectory=/StateDirectory=/… path,
         * resolved against the manager's per-type prefix (e.g. /run, /var/lib). */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below only applies to the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {

                /* FIXME: for now we make a special case for /tmp and add a weak dependency on
                 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
                 * /tmp specifically and masking other mount units should be handled more
                 * gracefully too, see PR#16894. */
                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "tmp.mount", true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_require_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If neither stdout nor stderr go to the journal/kmsg and no log namespace is set, no
         * logging dependencies are needed. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                /* Namespaced logging: require the per-namespace journald socket units. */
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else {
                /* Default namespace: a weak ordering against the main journald socket suffices. */
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        r = unit_add_default_credential_dependencies(u, c);
        if (r < 0)
                return r;

        return 0;
}
1401
1402 const char* unit_description(Unit *u) {
1403 assert(u);
1404
1405 if (u->description)
1406 return u->description;
1407
1408 return strna(u->id);
1409 }
1410
const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        /* Fall back to the plain id if there's no description, the format says so, or the
         * description would just repeat the id. */
        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        /* The caller owns the allocated combined string. */
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
1444
/* Common implementation for multiple backends */
/* Loads the unit's main fragment file and then its drop-in snippets. If 'fragment_required'
 * is false, a missing fragment is tolerated and the unit is marked loaded anyway. Also
 * records the mtime of SourcePath=, if one is set. Returns 0 or a negative errno. */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        /* Still a stub after loading means no fragment file was found. */
        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drops-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        /* Remember when the source file was last modified, so changes can be detected later.
         * A failed stat() resets the timestamp to 0. */
        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
1482
1483 void unit_add_to_target_deps_queue(Unit *u) {
1484 Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);
1485
1486 if (u->in_target_deps_queue)
1487 return;
1488
1489 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1490 u->in_target_deps_queue = true;
1491 }
1492
1493 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1494 assert(u);
1495 assert(target);
1496
1497 if (target->type != UNIT_TARGET)
1498 return 0;
1499
1500 /* Only add the dependency if both units are loaded, so that
1501 * that loop check below is reliable */
1502 if (u->load_state != UNIT_LOADED ||
1503 target->load_state != UNIT_LOADED)
1504 return 0;
1505
1506 /* If either side wants no automatic dependencies, then let's
1507 * skip this */
1508 if (!u->default_dependencies ||
1509 !target->default_dependencies)
1510 return 0;
1511
1512 /* Don't create loops */
1513 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1514 return 0;
1515
1516 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1517 }
1518
1519 static int unit_add_slice_dependencies(Unit *u) {
1520 Unit *slice;
1521 assert(u);
1522
1523 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1524 return 0;
1525
1526 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1527 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1528 relationship). */
1529 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1530
1531 slice = UNIT_GET_SLICE(u);
1532 if (slice)
1533 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1534
1535 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1536 return 0;
1537
1538 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1539 }
1540
/* For every path in the unit's RequiresMountsFor= table, adds After= (and, for mount units
 * with a fragment, Requires=) dependencies on the mount units covering that path and each of
 * its parent directories. Returns > 0 if any dependency was added, 0 if none, < 0 on error. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        bool changed = false;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
                /* VLA scratch buffer large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Walk all parent directories of 'path', from shortest prefix to the full path. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r == -EINVAL)
                                continue; /* If the path cannot be converted to a mount unit name, then it's
                                           * not manageable as a unit by systemd, and hence we don't need a
                                           * dependency on it. Let's thus silently ignore the issue. */
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if it exists. If so the dependencies on
                                 * this unit will be added later during the loading of the mount unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Always order after the mount unit; the origin mask is inherited from the
                         * RequiresMountsFor= entry that triggered this. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;
                        changed = changed || r > 0;

                        /* Only require the mount if it is backed by an actual unit file. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;
                        }
                }
        }

        return changed;
}
1593
1594 static int unit_add_oomd_dependencies(Unit *u) {
1595 CGroupContext *c;
1596 CGroupMask mask;
1597 int r;
1598
1599 assert(u);
1600
1601 if (!u->default_dependencies)
1602 return 0;
1603
1604 c = unit_get_cgroup_context(u);
1605 if (!c)
1606 return 0;
1607
1608 bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
1609 if (!wants_oomd)
1610 return 0;
1611
1612 if (!cg_all_unified())
1613 return 0;
1614
1615 r = cg_mask_supported(&mask);
1616 if (r < 0)
1617 return log_debug_errno(r, "Failed to determine supported controllers: %m");
1618
1619 if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
1620 return 0;
1621
1622 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1623 }
1624
1625 static int unit_add_startup_units(Unit *u) {
1626 if (!unit_has_startup_cgroup_constraints(u))
1627 return 0;
1628
1629 return set_ensure_put(&u->manager->startup_units, NULL, u);
1630 }
1631
1632 static int unit_validate_on_failure_job_mode(
1633 Unit *u,
1634 const char *job_mode_setting,
1635 JobMode job_mode,
1636 const char *dependency_name,
1637 UnitDependencyAtom atom) {
1638
1639 Unit *other, *found = NULL;
1640
1641 if (job_mode != JOB_ISOLATE)
1642 return 0;
1643
1644 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1645 if (!found)
1646 found = other;
1647 else if (found != other)
1648 return log_unit_error_errno(
1649 u, SYNTHETIC_ERRNO(ENOEXEC),
1650 "More than one %s dependencies specified but %sisolate set. Refusing.",
1651 dependency_name, job_mode_setting);
1652 }
1653
1654 return 0;
1655 }
1656
/* Loads a unit's configuration: finalizes any pending transient file, dispatches to the
 * per-type load routine, then sets up the implicit dependencies common to all unit types.
 * On failure the unit ends up in UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR state. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are handling the unit now, so take it off the load queue if it is on it. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than a stub has been processed before; nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Dispatch to the type-specific loader (service, socket, ...). */
        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        /* The loader must have moved the unit out of the stub state, one way or another. */
        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                                         r == -ENOEXEC ? UNIT_BAD_SETTING :
                                                         UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1752
_printf_(7, 8)
/* Logging callback handed to condition_test_list(): logs with the unit's log fields attached
 * when a unit is given, otherwise falls back to plain logging. Returns the negative errno
 * corresponding to 'error' (or 0), matching the log_* function convention. */
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Honor the unit's own log level filter, if set. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1776
/* Evaluates the unit's Condition*= checks and caches the outcome in u->condition_result.
 * Note the deliberate asymmetry with unit_test_assert(): if the effective environment cannot
 * be determined, conditions "fail open" (treated as met) rather than blocking the start. */
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true; /* fail open: treat the condition as met */
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}
1800
/* Evaluates the unit's Assert*= checks and caches the outcome in u->assert_result.
 * Unlike unit_test_condition(), a failure to determine the environment fails closed
 * (CONDITION_ERROR), since asserts are meant to be hard requirements. */
static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR; /* fail closed */
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}
1824
/* Prints a console status line (e.g. "[  OK  ] Started foo") for the unit, highlighting the
 * unit identifier when colors are enabled. 'ident' is expected to be the unit's display
 * string; it is re-derived with highlighting here when colors are shown. */
void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                /* In COMBINED mode an 'ident' containing a space is assumed to be the
                 * "id - description" form, so rebuild it with only the id highlighted. */
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        /* 'format' comes from internal callers, not user input; silence the non-literal warning. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}
1837
1838 int unit_test_start_limit(Unit *u) {
1839 const char *reason;
1840
1841 assert(u);
1842
1843 if (ratelimit_below(&u->start_ratelimit)) {
1844 u->start_limit_hit = false;
1845 return 0;
1846 }
1847
1848 log_unit_warning(u, "Start request repeated too quickly.");
1849 u->start_limit_hit = true;
1850
1851 reason = strjoina("unit ", u->id, " failed");
1852
1853 emergency_action(u->manager, u->start_limit_action,
1854 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1855 u->reboot_arg, -1, reason);
1856
1857 return -ECANCELED;
1858 }
1859
1860 static bool unit_verify_deps(Unit *u) {
1861 Unit *other;
1862
1863 assert(u);
1864
1865 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1866 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1867 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1868 * that are not used in conjunction with After= as for them any such check would make things entirely
1869 * racy. */
1870
1871 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1872
1873 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1874 continue;
1875
1876 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1877 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1878 return false;
1879 }
1880 }
1881
1882 return true;
1883 }
1884
/* Errors that aren't really errors:
 *         -EALREADY:   Unit is already started.
 *         -ECOMM:      Condition failed
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:      This unit type does not support starting.
 *         -ECANCELED:  Start limit hit, too many requests for now
 *         -EPROTO:     Assert failed
 *         -EINVAL:     Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 *         -ESTALE:     This unit has been started before and can't be started a second time
 *         -ENOENT:     This is a triggering unit and unit to trigger is not loaded
 *
 * Note: the order of the checks below is significant — e.g. condition checks must run before
 * the "type supported" check so condition failures (usually benign) take precedence.
 */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
1988
1989 bool unit_can_start(Unit *u) {
1990 assert(u);
1991
1992 if (u->load_state != UNIT_LOADED)
1993 return false;
1994
1995 if (!unit_type_supported(u->type))
1996 return false;
1997
1998 /* Scope units may be started only once */
1999 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
2000 return false;
2001
2002 return !!UNIT_VTABLE(u)->start;
2003 }
2004
2005 bool unit_can_isolate(Unit *u) {
2006 assert(u);
2007
2008 return unit_can_start(u) &&
2009 u->allow_isolate;
2010 }
2011
2012 /* Errors:
2013 * -EBADR: This unit type does not support stopping.
2014 * -EALREADY: Unit is already stopped.
2015 * -EAGAIN: An operation is already in progress. Retry later.
2016 */
2017 int unit_stop(Unit *u) {
2018 UnitActiveState state;
2019 Unit *following;
2020
2021 assert(u);
2022
2023 state = unit_active_state(u);
2024 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2025 return -EALREADY;
2026
2027 following = unit_following(u);
2028 if (following) {
2029 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2030 return unit_stop(following);
2031 }
2032
2033 if (!UNIT_VTABLE(u)->stop)
2034 return -EBADR;
2035
2036 unit_add_to_dbus_queue(u);
2037 unit_cgroup_freezer_action(u, FREEZER_THAW);
2038
2039 return UNIT_VTABLE(u)->stop(u);
2040 }
2041
2042 bool unit_can_stop(Unit *u) {
2043 assert(u);
2044
2045 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2046 * Extrinsic units follow external state and they may stop following external state changes
2047 * (hence we return true here), but an attempt to do this through the manager will fail. */
2048
2049 if (!unit_type_supported(u->type))
2050 return false;
2051
2052 if (u->perpetual)
2053 return false;
2054
2055 return !!UNIT_VTABLE(u)->stop;
2056 }
2057
2058 /* Errors:
2059 * -EBADR: This unit type does not support reloading.
2060 * -ENOEXEC: Unit is not started.
2061 * -EAGAIN: An operation is already in progress. Retry later.
2062 */
2063 int unit_reload(Unit *u) {
2064 UnitActiveState state;
2065 Unit *following;
2066
2067 assert(u);
2068
2069 if (u->load_state != UNIT_LOADED)
2070 return -EINVAL;
2071
2072 if (!unit_can_reload(u))
2073 return -EBADR;
2074
2075 state = unit_active_state(u);
2076 if (state == UNIT_RELOADING)
2077 return -EAGAIN;
2078
2079 if (state != UNIT_ACTIVE)
2080 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2081
2082 following = unit_following(u);
2083 if (following) {
2084 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2085 return unit_reload(following);
2086 }
2087
2088 unit_add_to_dbus_queue(u);
2089
2090 if (!UNIT_VTABLE(u)->reload) {
2091 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2092 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2093 return 0;
2094 }
2095
2096 unit_cgroup_freezer_action(u, FREEZER_THAW);
2097
2098 return UNIT_VTABLE(u)->reload(u);
2099 }
2100
2101 bool unit_can_reload(Unit *u) {
2102 assert(u);
2103
2104 if (UNIT_VTABLE(u)->can_reload)
2105 return UNIT_VTABLE(u)->can_reload(u);
2106
2107 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2108 return true;
2109
2110 return UNIT_VTABLE(u)->reload;
2111 }
2112
2113 bool unit_is_unneeded(Unit *u) {
2114 Unit *other;
2115 assert(u);
2116
2117 if (!u->stop_when_unneeded)
2118 return false;
2119
2120 /* Don't clean up while the unit is transitioning or is even inactive. */
2121 if (unit_active_state(u) != UNIT_ACTIVE)
2122 return false;
2123 if (u->job)
2124 return false;
2125
2126 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2127 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2128 * restart, then don't clean this one up. */
2129
2130 if (other->job)
2131 return false;
2132
2133 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2134 return false;
2135
2136 if (unit_will_restart(other))
2137 return false;
2138 }
2139
2140 return true;
2141 }
2142
2143 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2144 Unit *other;
2145
2146 assert(u);
2147
2148 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2149 * that is active declared an Uphold= dependencies on it */
2150
2151 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2152 if (ret_culprit)
2153 *ret_culprit = NULL;
2154 return false;
2155 }
2156
2157 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2158 if (other->job)
2159 continue;
2160
2161 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2162 if (ret_culprit)
2163 *ret_culprit = other;
2164 return true;
2165 }
2166 }
2167
2168 if (ret_culprit)
2169 *ret_culprit = NULL;
2170 return false;
2171 }
2172
2173 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2174 Unit *other;
2175
2176 assert(u);
2177
2178 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2179 * because the other unit is down. */
2180
2181 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2182 /* Don't clean up while the unit is transitioning or is even inactive. */
2183 if (ret_culprit)
2184 *ret_culprit = NULL;
2185 return false;
2186 }
2187
2188 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2189 if (other->job)
2190 continue;
2191
2192 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2193 if (ret_culprit)
2194 *ret_culprit = other;
2195
2196 return true;
2197 }
2198 }
2199
2200 if (ret_culprit)
2201 *ret_culprit = NULL;
2202 return false;
2203 }
2204
2205 static void check_unneeded_dependencies(Unit *u) {
2206 Unit *other;
2207 assert(u);
2208
2209 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2210
2211 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2212 unit_submit_to_stop_when_unneeded_queue(other);
2213 }
2214
2215 static void check_uphold_dependencies(Unit *u) {
2216 Unit *other;
2217 assert(u);
2218
2219 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2220
2221 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2222 unit_submit_to_start_when_upheld_queue(other);
2223 }
2224
2225 static void check_bound_by_dependencies(Unit *u) {
2226 Unit *other;
2227 assert(u);
2228
2229 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2230
2231 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2232 unit_submit_to_stop_when_bound_queue(other);
2233 }
2234
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* The unit became active without a start job pulling in its dependencies; enqueue start (and
         * conflicting stop) jobs retroactively, mirroring what a regular start job would have done.
         * Units ordered After= us are skipped for the start jobs, since we obviously didn't wait for
         * them. Return values of manager_add_job() are ignored here (best-effort). */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2255
2256 static void retroactively_stop_dependencies(Unit *u) {
2257 Unit *other;
2258
2259 assert(u);
2260 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2261
2262 /* Pull down units which are bound to us recursively if enabled */
2263 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2264 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2265 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2266 }
2267
void unit_start_on_failure(
                Unit *u,
                const char *dependency_name,
                UnitDependencyAtom atom,
                JobMode job_mode) {

        /* n_jobs doubles as a "did we log the intro line yet" flag: -1 means no dependency was seen at
         * all, >= 0 counts the enqueue attempts (including failed ones). */
        int n_jobs = -1;
        Unit *other;
        int r;

        assert(u);
        assert(dependency_name);
        assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));

        /* Act on OnFailure= and OnSuccess= dependencies: enqueue a start job (with the given job mode)
         * for each unit listed for the given atom. 'dependency_name' is only used for log output. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                if (n_jobs < 0) {
                        /* Log the headline only once, and only if there's at least one dependency. */
                        log_unit_info(u, "Triggering %s dependencies.", dependency_name);
                        n_jobs = 0;
                }

                r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(
                                        u, r, "Failed to enqueue %s job, ignoring: %s",
                                        dependency_name, bus_error_message(&error, r));
                n_jobs ++;
        }

        if (n_jobs >= 0)
                log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
                               dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
}
2304
2305 void unit_trigger_notify(Unit *u) {
2306 Unit *other;
2307
2308 assert(u);
2309
2310 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2311 if (UNIT_VTABLE(other)->trigger_notify)
2312 UNIT_VTABLE(other)->trigger_notify(other, u);
2313 }
2314
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        /* Possibly raise the priority of a log message. Note that syslog priorities are inverted:
         * a numerically *smaller* level is *more* important, hence "raising" means clamping the
         * numeric value down. NOTICE takes precedence over INFO; we never lower the priority. */
        if (condition_notice && log_level > LOG_NOTICE)
                log_level = LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                log_level = LOG_INFO;
        return log_level;
}
2322
static int unit_log_resources(Unit *u) {
        /* iovec: 1 CPU field + 2 memory peak fields + all IP fields + all IO fields + 4 fixed trailer
         * fields (MESSAGE=, MESSAGE_ID=, unit field, invocation field). */
        struct iovec iovec[1 + 2 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
        size_t n_message_parts = 0, n_iovec = 0;
        /* message_parts: 1 CPU + 2 memory + 2 IO + 2 IP human-readable fragments + 1 slot for the
         * trailing NULL terminator used by strv_join(). */
        char* message_parts[1 + 2 + 2 + 2 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        uint64_t memory_peak = UINT64_MAX, memory_swap_peak = UINT64_MAX;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };
        const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
                [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
                [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
                [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                t = strjoin("consumed ", FORMAT_TIMESPAN(nsec / NSEC_PER_USEC, USEC_PER_MSEC), " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;

                log_level = raise_level(log_level,
                                        nsec > MENTIONWORTHY_CPU_NSEC,
                                        nsec > NOTICEWORTHY_CPU_NSEC);
        }

        (void) unit_get_memory_accounting(u, CGROUP_MEMORY_PEAK, &memory_peak);
        if (memory_peak != UINT64_MAX) {
                /* Format peak memory for inclusion in the structured log message */
                if (asprintf(&t, "MEMORY_PEAK=%" PRIu64, memory_peak) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format peak memory for inclusion in the human language message string */
                t = strjoin(FORMAT_BYTES(memory_peak), " memory peak");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }
                message_parts[n_message_parts++] = t;
        }

        (void) unit_get_memory_accounting(u, CGROUP_MEMORY_SWAP_PEAK, &memory_swap_peak);
        if (memory_swap_peak != UINT64_MAX) {
                /* Format peak swap memory for inclusion in the structured log message */
                if (asprintf(&t, "MEMORY_SWAP_PEAK=%" PRIu64, memory_swap_peak) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format peak swap memory for inclusion in the human language message string */
                t = strjoin(FORMAT_BYTES(memory_swap_peak), " memory swap peak");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }
                message_parts[n_message_parts++] = t;
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k]);

                /* NOTE(review): the third argument (k > 0) presumably toggles use of cached values for
                 * all but the first metric — confirm against unit_get_io_accounting(). */
                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                have_io_accounting = true;
                if (value > 0)
                        any_io = true;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (k == CGROUP_IO_READ_BYTES) {
                        assert(!rr);
                        rr = strjoin("read ", strna(FORMAT_BYTES(value)), " from disk");
                        if (!rr) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (k == CGROUP_IO_WRITE_BYTES) {
                        assert(!wr);
                        wr = strjoin("written ", strna(FORMAT_BYTES(value)), " to disk");
                        if (!wr) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
        }

        if (have_io_accounting) {
                if (any_io) {
                        if (rr)
                                message_parts[n_message_parts++] = TAKE_PTR(rr);
                        if (wr)
                                message_parts[n_message_parts++] = TAKE_PTR(wr);

                } else {
                        char *k;

                        k = strdup("no IO");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
        }

        /* This check is here because it is the earliest point following all possible log_level assignments. If
         * log_level is assigned anywhere after this point, move this check. */
        if (!unit_log_level_test(u, log_level)) {
                r = 0;
                goto finish;
        }

        if (have_ip_accounting) {
                if (any_traffic) {
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                _cleanup_free_ char *joined = NULL;

                /* NULL-terminate so that the parts collected so far form a valid strv. */
                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_unit_struct_iovec(u, log_level, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free both the human-readable fragments and the heap-allocated structured fields; the four
         * trailer iovec entries were stack/static and are deliberately not covered by n_iovec. */
        free_many_charp(message_parts, n_message_parts);

        for (size_t i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2592
2593 static void unit_update_on_console(Unit *u) {
2594 bool b;
2595
2596 assert(u);
2597
2598 b = unit_needs_console(u);
2599 if (u->on_console == b)
2600 return;
2601
2602 u->on_console = b;
2603 if (b)
2604 manager_ref_console(u->manager);
2605 else
2606 manager_unref_console(u->manager);
2607 }
2608
2609 static void unit_emit_audit_start(Unit *u) {
2610 assert(u);
2611
2612 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2613 return;
2614
2615 /* Write audit record if we have just finished starting up */
2616 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2617 u->in_audit = true;
2618 }
2619
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        /* Emit an audit "stop" record matching the "start" record sent by unit_emit_audit_start().
         * 'state' is the state the unit ended up in; only UNIT_INACTIVE counts as success. Skipped for
         * unit types that declare no audit start message type. */

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}
2638
static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        /* Reconciles the unit's new low-level state 'ns' with the installed job 'j': completes or fails
         * the job if the new state means it is finished, and returns true if the state change was NOT
         * brought about by the job itself ("unexpected"), so that the caller may propagate it
         * retroactively to dependencies. 'reload_success' is consulted for reload-type jobs only. */

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        /* We were running a start job but the unit went somewhere other than
                         * "activating" — the change didn't come from us. */
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                /* Neither activating nor reloading — this state change wasn't ours. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        /* A stop job was running but the unit did something other than deactivate. */
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2711
/* Central state-change hook: invoked by the per-type state machines whenever a unit's low-level state
 * changed from 'os' to 'ns' ('reload_success' reports the outcome of a completed reload). Propagates
 * the change to the installed job, dependency handling, timestamps, audit/plymouth/oomd notifications,
 * emergency actions, and the deferred work queues. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_now(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                /* A reload satisfies any pending "needs reload" marker. */
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }

                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= handling: escalate to the configured emergency action. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2864
int unit_watch_pidref(Unit *u, PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        /* Adds a specific PID to the set of PIDs this unit watches. With 'exclusive' set, any existing
         * watches of this PID by other units are dropped first. Returns 0 on success (including if the
         * PID was already watched), negative errno on failure. */

        assert(u);
        assert(pidref_is_set(pid));

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) /* early exit if already being watched */
                return 0;

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops_free, pid, u);
        if (r != -EEXIST)
                /* Either success, or a real error — in both cases we are done. */
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                ;

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Make sure the hashmap is allocated */
        r = hashmap_ensure_allocated(&u->manager->watch_pids_more, &pidref_hash_ops_free);
        if (r < 0)
                return r;

        /* Add or replace the old array */
        r = hashmap_replace(u->manager->watch_pids_more, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */
        return 0;
}
2934
2935 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2936 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
2937 int r;
2938
2939 assert(u);
2940 assert(pid_is_valid(pid));
2941
2942 r = pidref_set_pid(&pidref, pid);
2943 if (r < 0)
2944 return r;
2945
2946 return unit_watch_pidref(u, &pidref, exclusive);
2947 }
2948
void unit_unwatch_pidref(Unit *u, PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Stops watching the given PID for this unit: drops it from the unit's own set as well as from
         * the manager's two lookup tables (the simple one-unit-per-PID table and the fallback table
         * mapping a PID to an array of units). */

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
                        free(array);
                } else {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */

                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}
2997
2998 void unit_unwatch_pid(Unit *u, pid_t pid) {
2999 return unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(pid));
3000 }
3001
3002 void unit_unwatch_all_pids(Unit *u) {
3003 assert(u);
3004
3005 while (!set_isempty(u->pids))
3006 unit_unwatch_pidref(u, set_first(u->pids));
3007
3008 u->pids = set_free(u->pids);
3009 }
3010
static void unit_tidy_watch_pids(Unit *u) {
        PidRef *except1, *except2, *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        /* The main and control PID are never dropped here, even if gone. */
        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids) {
                if (pidref_equal(except1, e) || pidref_equal(except2, e))
                        continue;

                /* NOTE(review): unit_unwatch_pidref() removes 'e' from u->pids while we iterate —
                 * assumes SET_FOREACH tolerates removal of the current entry; confirm against the
                 * set/hashmap iterator guarantees. */
                if (pidref_is_unwaited(e) <= 0)
                        unit_unwatch_pidref(u, e);
        }
}
3029
3030 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
3031 Unit *u = ASSERT_PTR(userdata);
3032
3033 assert(s);
3034
3035 unit_tidy_watch_pids(u);
3036 unit_watch_all_pids(u);
3037
3038 /* If the PID set is empty now, then let's finish this off. */
3039 unit_synthesize_cgroup_empty_event(u);
3040
3041 return 0;
3042 }
3043
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        /* Schedules a deferred refresh of the unit's watched-PID set. Only needed on legacy (non-unified)
         * cgroup setups; on the unified hierarchy proper cgroup-empty notifications make this
         * unnecessary. Returns 0 on success (including the no-op unified case), negative errno
         * otherwise. */

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                /* Lazily allocate the deferred event source on first use; it is kept around (disabled)
                 * afterwards and merely re-enabled below. */
                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
3084
void unit_dequeue_rewatch_pids(Unit *u) {
        int r;
        assert(u);

        /* Undoes unit_enqueue_rewatch_pids(): disables and releases the deferred PID-tidying event
         * source, if one was ever set up. Failure to disable is logged and otherwise ignored, since the
         * source is unreferenced right after anyway. */

        if (!u->rewatch_pids_event_source)
                return;

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");

        u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
}
3098
3099 bool unit_job_is_applicable(Unit *u, JobType j) {
3100 assert(u);
3101 assert(j >= 0 && j < _JOB_TYPE_MAX);
3102
3103 switch (j) {
3104
3105 case JOB_VERIFY_ACTIVE:
3106 case JOB_START:
3107 case JOB_NOP:
3108 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3109 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3110 * jobs for it. */
3111 return true;
3112
3113 case JOB_STOP:
3114 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3115 * external events), hence it makes no sense to permit enqueuing such a request either. */
3116 return !u->perpetual;
3117
3118 case JOB_RESTART:
3119 case JOB_TRY_RESTART:
3120 return unit_can_stop(u) && unit_can_start(u);
3121
3122 case JOB_RELOAD:
3123 case JOB_TRY_RELOAD:
3124 return unit_can_reload(u);
3125
3126 case JOB_RELOAD_OR_START:
3127 return unit_can_reload(u) && unit_can_start(u);
3128
3129 default:
3130 assert_not_reached();
3131 }
3132 }
3133
static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        /* Returns the per-dependency-type hashmap stored under u->dependencies, lazily allocating and
         * registering an empty one on first use. Returns NULL on allocation failure. */

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                /* If insertion fails, 'h' is released by the cleanup handler; on success ownership
                 * passes to u->dependencies, hence the TAKE_PTR() below. */
                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                deps = TAKE_PTR(h);
        }

        return deps;
}
3156
/* Flags returned by unit_add_dependency_impl() indicating which side(s) of the dependency edge
 * actually changed, so that bus property-change notifications are only queued where needed. */
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0,
        NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,
} NotifyDependencyFlags;
3161
/* Records a dependency of type 'd' from 'u' on 'other' with source bit 'mask', and simultaneously
 * records the inverse dependency type on 'other'. Returns a non-negative combination of
 * NotifyDependencyFlags describing which of the two units actually changed (0 if the dependency
 * with this mask existed already), or a negative errno. Sending out notifications is left to the
 * caller. */
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        /* For each dependency type, the type to record on the target unit so the edge can be
         * traversed from either side. */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_IN_SLICE] = UNIT_SLICE_OF,
                [UNIT_SLICE_OF] = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. Note that UnitDependencyInfo overlays the hashmap
         * value pointer with the origin/destination mask bits — hence the .data accesses below. */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info: merge in the new mask bit on each side. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info. An unchanged .data means the mask bit was set already and
         * there is nothing to store or notify about. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency, so that both sides stay consistent
                                 * on failure. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
        }

        return flags;
}
3259
/* Registers a dependency of type 'd' between 'u' and 'other' (plus the implied inverse edge on
 * 'other'), after validating that the combination makes sense. If 'add_reference' is true, a
 * UNIT_REFERENCES edge is recorded as well, pinning 'other' against GC. Returns > 0 if anything
 * changed, 0 if the dependency existed already or was deliberately ignored, negative errno on
 * refusal. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        /* Always operate on the merge-target units. */
        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        /* OnFailure= only makes sense for unit types that can actually enter a failed state. */
        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        /* Triggers=/TriggeredBy= require trigger support on the respective side. */
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        /* Slice membership requires a slice unit on one side... */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        /* ...and a cgroup-managing unit on the other. */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        /* Queue D-Bus change notifications only for the side(s) that actually changed. */
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}
3347
3348 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3349 int r = 0, s = 0;
3350
3351 assert(u);
3352 assert(d >= 0 || e >= 0);
3353
3354 if (d >= 0) {
3355 r = unit_add_dependency(u, d, other, add_reference, mask);
3356 if (r < 0)
3357 return r;
3358 }
3359
3360 if (e >= 0) {
3361 s = unit_add_dependency(u, e, other, add_reference, mask);
3362 if (s < 0)
3363 return s;
3364 }
3365
3366 return r > 0 || s > 0;
3367 }
3368
3369 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3370 int r;
3371
3372 assert(u);
3373 assert(name);
3374 assert(buf);
3375 assert(ret);
3376
3377 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3378 *buf = NULL;
3379 *ret = name;
3380 return 0;
3381 }
3382
3383 if (u->instance)
3384 r = unit_name_replace_instance(name, u->instance, buf);
3385 else {
3386 _cleanup_free_ char *i = NULL;
3387
3388 r = unit_name_to_prefix(u->id, &i);
3389 if (r < 0)
3390 return r;
3391
3392 r = unit_name_replace_instance(name, i, buf);
3393 }
3394 if (r < 0)
3395 return r;
3396
3397 *ret = *buf;
3398 return 0;
3399 }
3400
3401 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3402 _cleanup_free_ char *buf = NULL;
3403 Unit *other;
3404 int r;
3405
3406 assert(u);
3407 assert(name);
3408
3409 r = resolve_template(u, name, &buf, &name);
3410 if (r < 0)
3411 return r;
3412
3413 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3414 return 0;
3415
3416 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3417 if (r < 0)
3418 return r;
3419
3420 return unit_add_dependency(u, d, other, add_reference, mask);
3421 }
3422
3423 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3424 _cleanup_free_ char *buf = NULL;
3425 Unit *other;
3426 int r;
3427
3428 assert(u);
3429 assert(name);
3430
3431 r = resolve_template(u, name, &buf, &name);
3432 if (r < 0)
3433 return r;
3434
3435 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3436 return 0;
3437
3438 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3439 if (r < 0)
3440 return r;
3441
3442 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3443 }
3444
/* This is mostly for debug purposes: redirects unit file lookups via the environment.
 * Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3449
3450 char *unit_dbus_path(Unit *u) {
3451 assert(u);
3452
3453 if (!u->id)
3454 return NULL;
3455
3456 return unit_dbus_path_from_name(u->id);
3457 }
3458
3459 char *unit_dbus_path_invocation_id(Unit *u) {
3460 assert(u);
3461
3462 if (sd_id128_is_null(u->invocation_id))
3463 return NULL;
3464
3465 return unit_dbus_path_from_name(u->invocation_id_string);
3466 }
3467
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0; /* Already set, nothing to do. */

        /* Deregister the old ID from the manager's lookup table first, if there was one. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "unset": just clear our fields and report success. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Register the new ID so that the unit can be looked up by invocation ID. */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Error path (and explicit unset): leave the unit without any invocation ID. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3504
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice.
         *
         * Returns 1 if the slice was changed, 0 if this slice was already set, negative errno on refusal. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Only inactive units may be (re-)assigned a slice. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special and must stay directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u) && u->cgroup_realized)
                return -EBUSY;

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}
3548
/* Assigns the standard slice for this unit if none is set yet: a per-template slice for
 * instantiated units, the root slice for extrinsic units, and otherwise system.slice (system
 * manager) or app.slice (user manager). Returns what unit_set_slice() returns, or 0 if there was
 * nothing to do. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Respect any slice that was configured or assigned already. */
        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3600
3601 const char *unit_slice_name(Unit *u) {
3602 Unit *slice;
3603 assert(u);
3604
3605 slice = UNIT_GET_SLICE(u);
3606 if (!slice)
3607 return NULL;
3608
3609 return slice->id;
3610 }
3611
3612 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3613 _cleanup_free_ char *t = NULL;
3614 int r;
3615
3616 assert(u);
3617 assert(type);
3618 assert(_found);
3619
3620 r = unit_name_change_suffix(u->id, type, &t);
3621 if (r < 0)
3622 return r;
3623 if (unit_has_name(u, t))
3624 return -EINVAL;
3625
3626 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3627 assert(r < 0 || *_found != u);
3628 return r;
3629 }
3630
3631 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3632 const char *new_owner;
3633 Unit *u = ASSERT_PTR(userdata);
3634 int r;
3635
3636 assert(message);
3637
3638 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3639 if (r < 0) {
3640 bus_log_parse_error(r);
3641 return 0;
3642 }
3643
3644 if (UNIT_VTABLE(u)->bus_name_owner_change)
3645 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3646
3647 return 0;
3648 }
3649
/* Reply handler for the GetNameOwner() call issued by unit_install_bus_match(): informs the unit
 * type about the current owner of the watched bus name (NULL if the name is unowned). */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* This is a one-shot call; release the slot right away. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "Name has no owner" is an entirely expected answer; anything else deserves a log
                 * entry, but we still proceed as if the name were unowned. */
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3683
/* Installs a NameOwnerChanged signal match for 'name' on 'bus' and asynchronously queries the
 * name's current owner via GetNameOwner(). Returns -EBUSY if a watch is already installed. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        usec_t timeout_usec = 0; /* NOTE(review): presumably 0 selects the default call timeout — confirm in bus_add_match_full() */
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        /* Only owner changes of exactly this name (arg0) interest us. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        true,
                        match,
                        signal_name_owner_changed,
                        NULL,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        /* Additionally ask for the current owner, since the signal only reports future changes. */
        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);

        if (r < 0) {
                /* Roll back the signal match installed above. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3752
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match and the pending GetNameOwner call installed above. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3779
/* Stops watching a bus name: drops the hashmap registration (if it points at us) and releases
 * both the signal match and any pending GetNameOwner call. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}
3788
3789 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3790 _cleanup_free_ char *e = NULL;
3791 Unit *device;
3792 int r;
3793
3794 assert(u);
3795
3796 /* Adds in links to the device node that this unit is based on */
3797 if (isempty(what))
3798 return 0;
3799
3800 if (!is_device_path(what))
3801 return 0;
3802
3803 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3804 if (!unit_type_supported(UNIT_DEVICE))
3805 return 0;
3806
3807 r = unit_name_from_path(what, ".device", &e);
3808 if (r < 0)
3809 return r;
3810
3811 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3812 if (r < 0)
3813 return r;
3814
3815 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3816 dep = UNIT_BINDS_TO;
3817
3818 return unit_add_two_dependencies(u, UNIT_AFTER,
3819 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3820 device, true, mask);
3821 }
3822
3823 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3824 _cleanup_free_ char *escaped = NULL, *target = NULL;
3825 int r;
3826
3827 assert(u);
3828
3829 if (isempty(what))
3830 return 0;
3831
3832 if (!path_startswith(what, "/dev/"))
3833 return 0;
3834
3835 /* If we don't support devices, then also don't bother with blockdev@.target */
3836 if (!unit_type_supported(UNIT_DEVICE))
3837 return 0;
3838
3839 r = unit_name_path_escape(what, &escaped);
3840 if (r < 0)
3841 return r;
3842
3843 r = unit_name_build("blockdev", escaped, ".target", &target);
3844 if (r < 0)
3845 return r;
3846
3847 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3848 }
3849
/* Brings the unit back to life after deserialization. Errors are gathered (RET_GATHER) rather
 * than aborting, so as much state as possible is restored; the first error is returned. */
int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-establish the bus-name tracking that was recorded before the reload. */
        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        /* Let the unit type restore its own deserialized state... */
        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        /* ...and do the same for any pending jobs. */
        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}
3877
/* Lets the unit type (and the cgroup logic) catch up with any external state changes that
 * happened while we were not watching. */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3886
3887 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3888 struct stat st;
3889
3890 if (!path)
3891 return false;
3892
3893 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3894 * are never out-of-date. */
3895 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3896 return false;
3897
3898 if (stat(path, &st) < 0)
3899 /* What, cannot access this anymore? */
3900 return true;
3901
3902 if (path_masked)
3903 /* For masked files check if they are still so */
3904 return !null_or_empty(&st);
3905 else
3906 /* For non-empty files check the mtime */
3907 return timespec_load(&st.st_mtim) > mtime;
3908
3909 return false;
3910 }
3911
/* Returns true if the on-disk configuration backing this unit changed since it was loaded, i.e. a
 * daemon-reload is required for the unit to be up-to-date. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **dropins = NULL;

        assert(u);
        assert(u->manager);

        /* The manager may already know the unit file database is stale as a whole. */
        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the current set of drop-in files against the set seen at load time. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &dropins);
        if (!strv_equal(u->dropin_paths, dropins))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3942
/* Clears the unit's failure state: type-specific failure data first, then the start rate limit
 * counters. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        /* Also forget about earlier start rate limiting. */
        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3952
3953 Unit *unit_following(Unit *u) {
3954 assert(u);
3955
3956 if (UNIT_VTABLE(u)->following)
3957 return UNIT_VTABLE(u)->following(u);
3958
3959 return NULL;
3960 }
3961
/* Returns true if a stop job is queued for the unit. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3973
3974 bool unit_inactive_or_pending(Unit *u) {
3975 assert(u);
3976
3977 /* Returns true if the unit is inactive or going down */
3978
3979 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3980 return true;
3981
3982 if (unit_stop_pending(u))
3983 return true;
3984
3985 return false;
3986 }
3987
3988 bool unit_active_or_pending(Unit *u) {
3989 assert(u);
3990
3991 /* Returns true if the unit is active or going up */
3992
3993 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3994 return true;
3995
3996 if (u->job &&
3997 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3998 return true;
3999
4000 return false;
4001 }
4002
/* Default implementation of the will_restart vtable hook: the unit will restart iff a start job
 * is queued for it. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
4008
4009 bool unit_will_restart(Unit *u) {
4010 assert(u);
4011
4012 if (!UNIT_VTABLE(u)->will_restart)
4013 return false;
4014
4015 return UNIT_VTABLE(u)->will_restart(u);
4016 }
4017
/* Forwards a cgroup OOM event to the unit type, if it cares. 'managed_oom' distinguishes
 * systemd-oomd kills from kernel OOM-killer events. */
void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}
4024
4025 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4026 _cleanup_set_free_ Set *pid_set = NULL;
4027 int r;
4028
4029 pid_set = set_new(NULL);
4030 if (!pid_set)
4031 return NULL;
4032
4033 /* Exclude the main/control pids from being killed via the cgroup */
4034 if (main_pid > 0) {
4035 r = set_put(pid_set, PID_TO_PTR(main_pid));
4036 if (r < 0)
4037 return NULL;
4038 }
4039
4040 if (control_pid > 0) {
4041 r = set_put(pid_set, PID_TO_PTR(control_pid));
4042 if (r < 0)
4043 return NULL;
4044 }
4045
4046 return TAKE_PTR(pid_set);
4047 }
4048
4049 static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
4050 _cleanup_free_ char *comm = NULL;
4051 Unit *u = ASSERT_PTR(userdata);
4052
4053 (void) pidref_get_comm(pid, &comm);
4054
4055 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
4056 signal_to_string(signo), pid->pid, strna(comm));
4057
4058 return 1;
4059 }
4060
4061 static int kill_or_sigqueue(PidRef* pidref, int signo, int code, int value) {
4062 assert(pidref_is_set(pidref));
4063 assert(SIGNAL_VALID(signo));
4064
4065 switch (code) {
4066
4067 case SI_USER:
4068 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
4069 return pidref_kill(pidref, signo);
4070
4071 case SI_QUEUE:
4072 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
4073 return pidref_sigqueue(pidref, signo, value);
4074
4075 default:
4076 assert_not_reached();
4077 }
4078 }
4079
/* Explicit, client-requested killing of unit processes. Depending on 'who', signals the main
 * process, the control process and/or every other process in the unit's cgroup; 'code' selects
 * kill() (SI_USER) vs. sigqueue() with 'value' (SI_QUEUE). The first failure is recorded in the
 * return value and 'error'; subsequent failures are only logged. The *_FAIL variants additionally
 * fail when no process was signalled at all. */
int unit_kill(
                Unit *u,
                KillWho who,
                int signo,
                int code,
                int value,
                sd_bus_error *error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;
        int ret = 0, r;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(who >= 0);
        assert(who < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        /* Refuse for unit types that track neither a cgroup nor any distinguished PIDs. */
        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_setf(error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        /* Up-front validation: a NULL PidRef means the unit type has no such process concept at
         * all, an unset one means there is none right now. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        /* First, the control process, if requested. */
        if (pidref_is_set(control_pid) &&
            IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                _cleanup_free_ char *comm = NULL;
                (void) pidref_get_comm(control_pid, &comm);

                r = kill_or_sigqueue(control_pid, signo, code, value);
                if (r < 0) {
                        ret = r;

                        /* Report this failure both to the logs and to the client */
                        sd_bus_error_set_errnof(
                                        error, r,
                                        "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), control_pid->pid, strna(comm));
                        log_unit_warning_errno(
                                        u, r,
                                        "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
                                        signal_to_string(signo), control_pid->pid, strna(comm));
                } else {
                        log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
                                      signal_to_string(signo), control_pid->pid, strna(comm));
                        killed = true;
                }
        }

        /* Then, the main process, if requested. Only the first error is reported to the client. */
        if (pidref_is_set(main_pid) &&
            IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                _cleanup_free_ char *comm = NULL;
                (void) pidref_get_comm(main_pid, &comm);

                r = kill_or_sigqueue(main_pid, signo, code, value);
                if (r < 0) {
                        if (ret == 0) {
                                ret = r;

                                sd_bus_error_set_errnof(
                                                error, r,
                                                "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
                                                signal_to_string(signo), main_pid->pid, strna(comm));
                        }

                        log_unit_warning_errno(
                                        u, r,
                                        "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
                                        signal_to_string(signo), main_pid->pid, strna(comm));

                } else {
                        log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
                                      signal_to_string(signo), main_pid->pid, strna(comm));
                        killed = true;
                }
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path && code == SI_USER) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                if (!pid_set)
                        return log_oom();

                r = cg_kill_recursive(u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (r < 0) {
                        /* An empty or already-removed cgroup is not an error here. */
                        if (!IN_SET(r, -ESRCH, -ENOENT)) {
                                if (ret == 0) {
                                        ret = r;

                                        sd_bus_error_set_errnof(
                                                        error, r,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));
                                }

                                log_unit_warning_errno(
                                                u, r,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));
                        }
                } else
                        killed = true;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}
4213
4214 int unit_following_set(Unit *u, Set **s) {
4215 assert(u);
4216 assert(s);
4217
4218 if (UNIT_VTABLE(u)->following_set)
4219 return UNIT_VTABLE(u)->following_set(u, s);
4220
4221 *s = NULL;
4222 return 0;
4223 }
4224
4225 UnitFileState unit_get_unit_file_state(Unit *u) {
4226 int r;
4227
4228 assert(u);
4229
4230 if (u->unit_file_state < 0 && u->fragment_path) {
4231 r = unit_file_get_state(
4232 u->manager->runtime_scope,
4233 NULL,
4234 u->id,
4235 &u->unit_file_state);
4236 if (r < 0)
4237 u->unit_file_state = UNIT_FILE_BAD;
4238 }
4239
4240 return u->unit_file_state;
4241 }
4242
4243 PresetAction unit_get_unit_file_preset(Unit *u) {
4244 int r;
4245
4246 assert(u);
4247
4248 if (u->unit_file_preset < 0 && u->fragment_path) {
4249 _cleanup_free_ char *bn = NULL;
4250
4251 r = path_extract_filename(u->fragment_path, &bn);
4252 if (r < 0)
4253 return (u->unit_file_preset = r);
4254
4255 if (r == O_DIRECTORY)
4256 return (u->unit_file_preset = -EISDIR);
4257
4258 u->unit_file_preset = unit_file_query_preset(
4259 u->manager->runtime_scope,
4260 NULL,
4261 bn,
4262 NULL);
4263 }
4264
4265 return u->unit_file_preset;
4266 }
4267
4268 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4269 assert(ref);
4270 assert(source);
4271 assert(target);
4272
4273 if (ref->target)
4274 unit_ref_unset(ref);
4275
4276 ref->source = source;
4277 ref->target = target;
4278 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4279 return target;
4280 }
4281
4282 void unit_ref_unset(UnitRef *ref) {
4283 assert(ref);
4284
4285 if (!ref->target)
4286 return;
4287
4288 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4289 * be unreferenced now. */
4290 unit_add_to_gc_queue(ref->target);
4291
4292 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4293 ref->source = ref->target = NULL;
4294 }
4295
4296 static int user_from_unit_name(Unit *u, char **ret) {
4297
4298 static const uint8_t hash_key[] = {
4299 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4300 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4301 };
4302
4303 _cleanup_free_ char *n = NULL;
4304 int r;
4305
4306 r = unit_name_to_prefix(u->id, &n);
4307 if (r < 0)
4308 return r;
4309
4310 if (valid_user_group_name(n, 0)) {
4311 *ret = TAKE_PTR(n);
4312 return 0;
4313 }
4314
4315 /* If we can't use the unit name as a user name, then let's hash it and use that */
4316 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4317 return -ENOMEM;
4318
4319 return 0;
4320 }
4321
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized. Returns 0 on success, negative errno on OOM or
         * on failure to register implied dependencies. */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to the user's home. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* The various Protect*= and PrivateDevices= switches imply dropping the
                 * capabilities that would let the service bypass them. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* DynamicUser= without explicit User=/Group= derives both from the unit name. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                        exec_directory_sort(ec->directories + dt);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }
                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }

                        /* If there are encrypted credentials we might need to access the TPM. */
                        if (exec_context_has_encrypted_credentials(ec)) {
                                r = cgroup_context_add_device_allow(cc, "char-tpm", CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
4449
4450 ExecContext *unit_get_exec_context(const Unit *u) {
4451 size_t offset;
4452 assert(u);
4453
4454 if (u->type < 0)
4455 return NULL;
4456
4457 offset = UNIT_VTABLE(u)->exec_context_offset;
4458 if (offset <= 0)
4459 return NULL;
4460
4461 return (ExecContext*) ((uint8_t*) u + offset);
4462 }
4463
4464 KillContext *unit_get_kill_context(Unit *u) {
4465 size_t offset;
4466 assert(u);
4467
4468 if (u->type < 0)
4469 return NULL;
4470
4471 offset = UNIT_VTABLE(u)->kill_context_offset;
4472 if (offset <= 0)
4473 return NULL;
4474
4475 return (KillContext*) ((uint8_t*) u + offset);
4476 }
4477
4478 CGroupContext *unit_get_cgroup_context(Unit *u) {
4479 size_t offset;
4480
4481 if (u->type < 0)
4482 return NULL;
4483
4484 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4485 if (offset <= 0)
4486 return NULL;
4487
4488 return (CGroupContext*) ((uint8_t*) u + offset);
4489 }
4490
4491 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4492 size_t offset;
4493
4494 if (u->type < 0)
4495 return NULL;
4496
4497 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4498 if (offset <= 0)
4499 return NULL;
4500
4501 return *(ExecRuntime**) ((uint8_t*) u + offset);
4502 }
4503
4504 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4505 assert(u);
4506
4507 if (UNIT_WRITE_FLAGS_NOOP(flags))
4508 return NULL;
4509
4510 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4511 return u->manager->lookup_paths.transient;
4512
4513 if (flags & UNIT_PERSISTENT)
4514 return u->manager->lookup_paths.persistent_control;
4515
4516 if (flags & UNIT_RUNTIME)
4517 return u->manager->lookup_paths.runtime_control;
4518
4519 return NULL;
4520 }
4521
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. Returns NULL on allocation failure. */

        /* Step 1: optionally escape '%' specifiers. */
        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL;

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                /* In the ENV variant, additionally double '$' so it survives environment expansion. */
                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        free_and_replace(t, t2);
                }

                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;
        }

        /* Hand ownership of the working buffer (if any) to the caller. */
        *buf = TAKE_PTR(t);
        return s;
}
4577
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. Each entry is wrapped in double quotes; entries
         * are separated by single spaces. Returns NULL on allocation (or escaping) failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                /* Append (optionally a space plus) the quoted entry at offset n. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure room for the trailing NUL — this also covers the empty-list case, where the loop
         * above never allocated anything. */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4617
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists one unit setting, either by appending it to the transient unit file currently being
         * generated, or by writing a drop-in file named after 'name'. Returns 0 on success (including the
         * no-op flag combination), negative errno on failure. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        /* Not a transient unit: write the setting as a drop-in file instead. */
        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership of the path string passed to u->dropin_paths */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4703
4704 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4705 _cleanup_free_ char *p = NULL;
4706 va_list ap;
4707 int r;
4708
4709 assert(u);
4710 assert(name);
4711 assert(format);
4712
4713 if (UNIT_WRITE_FLAGS_NOOP(flags))
4714 return 0;
4715
4716 va_start(ap, format);
4717 r = vasprintf(&p, format, ap);
4718 va_end(ap);
4719
4720 if (r < 0)
4721 return -ENOMEM;
4722
4723 return unit_write_setting(u, flags, name, p);
4724 }
4725
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        /* Turns this unit into a transient unit: opens a fresh transient unit file for it and discards all
         * previously loaded configuration state. Returns 0 on success, -EOPNOTSUPP if the unit type cannot
         * be transient, negative errno on file system failure. */

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any transient file we were previously writing to. */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget any previously loaded configuration sources; the transient file is authoritative now. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4771
4772 static int log_kill(const PidRef *pid, int sig, void *userdata) {
4773 _cleanup_free_ char *comm = NULL;
4774
4775 assert(pidref_is_set(pid));
4776
4777 (void) pidref_get_comm(pid, &comm);
4778
4779 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4780 only, like for example systemd's own PAM stub process. */
4781 if (comm && comm[0] == '(')
4782 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4783 * here to let the manager know that a process was killed. */
4784 return 1;
4785
4786 log_unit_notice(userdata,
4787 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4788 pid->pid,
4789 strna(comm),
4790 signal_to_string(sig));
4791
4792 return 1;
4793 }
4794
4795 static int operation_to_signal(
4796 const KillContext *c,
4797 KillOperation k,
4798 bool *ret_noteworthy) {
4799
4800 assert(c);
4801
4802 switch (k) {
4803
4804 case KILL_TERMINATE:
4805 case KILL_TERMINATE_AND_LOG:
4806 *ret_noteworthy = false;
4807 return c->kill_signal;
4808
4809 case KILL_RESTART:
4810 *ret_noteworthy = false;
4811 return restart_kill_signal(c);
4812
4813 case KILL_KILL:
4814 *ret_noteworthy = true;
4815 return c->final_kill_signal;
4816
4817 case KILL_WATCHDOG:
4818 *ret_noteworthy = true;
4819 return c->watchdog_signal;
4820
4821 default:
4822 assert_not_reached();
4823 }
4824 }
4825
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                PidRef* main_pid,
                PidRef* control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        /* SIGHUP follow-ups are only sent for terminate-style operations, and only when the main
         * signal isn't SIGHUP itself already. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* First, the main process, if we track one. */
        if (pidref_is_set(main_pid)) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = pidref_kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) pidref_get_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid->pid, strna(comm));
                } else {
                        /* Only wait for the main process if it is actually ours (i.e. not "alien"). */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) pidref_kill(main_pid, SIGHUP);
                }
        }

        /* Second, the control process, if any. */
        if (pidref_is_set(control_pid)) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = pidref_kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) pidref_get_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid->pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) pidref_kill(control_pid, SIGHUP);
                }
        }

        /* Finally, everything else in the cgroup, if the kill mode asks for it. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(
                                u->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Build a fresh exclusion set: the previous one may have been consumed. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid ? main_pid->pid : 0, control_pid ? control_pid->pid : 0);
                                if (!pid_set)
                                        return -ENOMEM;

                                (void) cg_kill_recursive(
                                                u->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* kill_log= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
4947
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of.
         * Returns 0 on success (including the already-registered case), -EINVAL for relative paths,
         * -EPERM for non-normalized paths, or another negative errno on allocation failure. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(u->requires_mounts_for, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        r = hashmap_ensure_put(&u->requires_mounts_for, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Now register this unit in the manager-global prefix table, once per path prefix. */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership passed to the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
5022
int unit_setup_exec_runtime(Unit *u) {
        _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
        _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
        _cleanup_set_free_ Set *units = NULL;
        ExecRuntime **rt;
        ExecContext *ec;
        size_t offset;
        Unit *other;
        int r;

        /* Ensures this unit has an ExecRuntime object, reusing the shared runtime of a unit we join
         * namespaces with if one already exists. Returns 0 if a runtime was already present or was set up
         * successfully, negative errno on failure. */

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        ec = unit_get_exec_context(u);
        assert(ec);

        r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
        if (r < 0)
                return r;

        /* Try to get it from somebody else */
        SET_FOREACH(other, units) {
                r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
                if (r < 0)
                        return r;
                if (r > 0)
                        break;
        }

        if (!esr) {
                /* Nobody else had one — allocate a fresh shared runtime keyed by our own id. */
                r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
                if (r < 0)
                        return r;
        }

        if (ec->dynamic_user) {
                /* DynamicUser= needs dynamic credentials for the runtime UID/GID allocation. */
                r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
                if (r < 0)
                        return r;
        }

        r = exec_runtime_make(u, ec, esr, dcreds, rt);
        if (r < 0)
                return r;

        /* Ownership of both objects has been transferred to the new ExecRuntime. */
        TAKE_PTR(esr);
        TAKE_PTR(dcreds);

        return r;
}
5078
5079 bool unit_type_supported(UnitType t) {
5080 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5081 int r;
5082
5083 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5084
5085 if (cache[t] == 0) {
5086 char *e;
5087
5088 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5089
5090 r = getenv_bool(ascii_strupper(e));
5091 if (r < 0 && r != -ENXIO)
5092 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5093
5094 cache[t] = r == 0 ? -1 : 1;
5095 }
5096 if (cache[t] < 0)
5097 return false;
5098
5099 if (!unit_vtable[t]->supported)
5100 return true;
5101
5102 return unit_vtable[t]->supported();
5103 }
5104
5105 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5106 int r;
5107
5108 assert(u);
5109 assert(where);
5110
5111 if (!unit_log_level_test(u, LOG_NOTICE))
5112 return;
5113
5114 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5115 if (r > 0 || r == -ENOTDIR)
5116 return;
5117 if (r < 0) {
5118 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5119 return;
5120 }
5121
5122 log_unit_struct(u, LOG_NOTICE,
5123 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5124 LOG_UNIT_INVOCATION_ID(u),
5125 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5126 "WHERE=%s", where);
5127 }
5128
5129 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5130 _cleanup_free_ char *canonical_where = NULL;
5131 int r;
5132
5133 assert(u);
5134 assert(where);
5135
5136 r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5137 if (r < 0) {
5138 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5139 return 0;
5140 }
5141
5142 /* We will happily ignore a trailing slash (or any redundant slashes) */
5143 if (path_equal(where, canonical_where))
5144 return 0;
5145
5146 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5147 log_unit_struct(u, LOG_ERR,
5148 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5149 LOG_UNIT_INVOCATION_ID(u),
5150 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5151 "WHERE=%s", where);
5152
5153 return -ELOOP;
5154 }
5155
5156 bool unit_is_pristine(Unit *u) {
5157 assert(u);
5158
5159 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5160 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5161 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5162 *
5163 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5164 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5165 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5166 */
5167
5168 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5169 !u->fragment_path &&
5170 !u->source_path &&
5171 !u->job &&
5172 !u->merged_into;
5173 }
5174
5175 PidRef* unit_control_pid(Unit *u) {
5176 assert(u);
5177
5178 if (UNIT_VTABLE(u)->control_pid)
5179 return UNIT_VTABLE(u)->control_pid(u);
5180
5181 return NULL;
5182 }
5183
5184 PidRef* unit_main_pid(Unit *u) {
5185 assert(u);
5186
5187 if (UNIT_VTABLE(u)->main_pid)
5188 return UNIT_VTABLE(u)->main_pid(u);
5189
5190 return NULL;
5191 }
5192
5193 static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
5194 int r;
5195
5196 assert(u);
5197
5198 if (!MANAGER_IS_SYSTEM(u->manager))
5199 return;
5200
5201 CGroupContext *c;
5202 c = unit_get_cgroup_context(u);
5203 if (!c)
5204 return;
5205
5206 if (!u->manager->fw_ctx) {
5207 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
5208 if (r < 0)
5209 return;
5210
5211 assert(u->manager->fw_ctx);
5212 }
5213
5214 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
5215 if (nft_set->source != source)
5216 continue;
5217
5218 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
5219 if (r < 0)
5220 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5221 add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5222 else
5223 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5224 add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5225 }
5226 }
5227
5228 static void unit_unref_uid_internal(
5229 Unit *u,
5230 uid_t *ref_uid,
5231 bool destroy_now,
5232 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5233
5234 assert(u);
5235 assert(ref_uid);
5236 assert(_manager_unref_uid);
5237
5238 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5239 * gid_t are actually the same time, with the same validity rules.
5240 *
5241 * Drops a reference to UID/GID from a unit. */
5242
5243 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5244 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5245
5246 if (!uid_is_valid(*ref_uid))
5247 return;
5248
5249 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5250 *ref_uid = UID_INVALID;
5251 }
5252
static void unit_unref_uid(Unit *u, bool destroy_now) {
        assert(u);

        /* Drop the unit's UID reference; remove it from any user-sourced NFT sets first, while
         * u->ref_uid is still valid. */
        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);

        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
5260
static void unit_unref_gid(Unit *u, bool destroy_now) {
        assert(u);

        /* Drop the unit's GID reference; remove it from any group-sourced NFT sets first, while
         * u->ref_gid is still valid. */
        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);

        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
5268
5269 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5270 assert(u);
5271
5272 unit_unref_uid(u, destroy_now);
5273 unit_unref_gid(u, destroy_now);
5274 }
5275
5276 static int unit_ref_uid_internal(
5277 Unit *u,
5278 uid_t *ref_uid,
5279 uid_t uid,
5280 bool clean_ipc,
5281 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5282
5283 int r;
5284
5285 assert(u);
5286 assert(ref_uid);
5287 assert(uid_is_valid(uid));
5288 assert(_manager_ref_uid);
5289
5290 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5291 * are actually the same type, and have the same validity rules.
5292 *
5293 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5294 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5295 * drops to zero. */
5296
5297 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5298 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5299
5300 if (*ref_uid == uid)
5301 return 0;
5302
5303 if (uid_is_valid(*ref_uid)) /* Already set? */
5304 return -EBUSY;
5305
5306 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5307 if (r < 0)
5308 return r;
5309
5310 *ref_uid = uid;
5311 return 1;
5312 }
5313
/* Takes a reference on the given UID for this unit; see unit_ref_uid_internal(). */
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
5317
/* Takes a reference on the given GID for this unit; see unit_ref_uid_internal(). */
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
5321
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. Returns > 0 if at
         * least one new reference was taken, 0 if nothing changed, negative errno on failure. Invalid
         * UID/GID arguments are silently skipped. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference taken above, so that we keep the all-or-nothing
                         * invariant. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
5347
/* Public entry point: references UID+GID for this unit and updates the per-user/per-group nft sets.
 * Failure to take the references is logged but deliberately non-fatal (best-effort). */
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        c = unit_get_exec_context(u);

        /* clean_ipc follows the unit's RemoveIPC= setting, if it has an exec context at all */
        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);

        return r;
}
5365
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                /* A new reference was actually taken → the unit's externally visible state changed */
                unit_add_to_dbus_queue(u);
}
5379
/* Generates a fresh random 128-bit invocation ID for this unit and installs it. Emitted to D-Bus
 * listeners via the dbus queue. Returns 0 on success, negative errno (logged) on failure. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        unit_add_to_dbus_queue(u);
        return 0;
}
5397
/* Populates the ExecParameters structure handed to the executor, copying settings first from the
 * manager and then from the unit itself. Note that several fields (cgroup_path, prefix, fd arrays)
 * are shared pointers into manager/unit state, not duplicated — the parameters must not outlive
 * the unit. Returns 0 on success, -ENOMEM or other negative errno on failure. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        const char *confirm_spawn;
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        confirm_spawn = manager_get_confirm_spawn(u->manager);
        if (confirm_spawn) {
                p->confirm_spawn = strdup(confirm_spawn);
                if (!p->confirm_spawn)
                        return -ENOMEM;
        }

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        /* Lazily allocate the LSM-BPF outer map fd only once, the first time it is needed */
        if (u->manager->restrict_fs && p->bpf_outer_map_fd < 0) {
                int fd = lsm_bpf_map_restrict_fs_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_outer_map_fd = fd;
        }

        /* Write end of the manager's user lookup socket pair, for forked children to report into */
        p->user_lookup_fd = u->manager->user_lookup_fds[1];

        p->cgroup_id = u->cgroup_id;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        return 0;
}
5453
int unit_fork_helper_process(Unit *u, const char *name, PidRef *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup exists before we try to attach the child to it below */
        (void) unit_realize_cgroup(u);

        /* FORK_DEATHSIG_SIGTERM: the child gets SIGTERM when the manager dies */
        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
                int q;

                /* Parent */

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child */

        /* Reset crash-handler signals to their defaults, and ignore SIGPIPE for the helper's lifetime */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
                        /* Exit the child with a dedicated code, never return into the manager's code path */
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5498
/* Forks off a helper (inside the unit's cgroup) that recursively removes the listed paths, and
 * registers the child with the unit's PID watch machinery. On success *ret_pid owns the child
 * reference. The child exits with EXIT_FAILURE if any removal failed, EXIT_SUCCESS otherwise. */
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* Child: remove everything, remember whether anything failed, never return */
                int ret = EXIT_SUCCESS;

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        /* Parent: watch the helper exclusively, then hand the reference to the caller */
        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
5530
5531 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5532 assert(deps);
5533 assert(other);
5534
5535 if (di.origin_mask == 0 && di.destination_mask == 0)
5536 /* No bit set anymore, let's drop the whole entry */
5537 assert_se(hashmap_remove(deps, other));
5538 else
5539 /* Mask was reduced, let's update the entry */
5540 assert_se(hashmap_update(deps, other, di.data) == 0);
5541 }
5542
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* We mutate the hashmap while walking it, which invalidates the iterator. Hence:
                 * after each modification we break out of the inner FOREACH and restart the pass,
                 * until a full pass makes no change ('done' stays true). */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* Skip entries whose origin bits lie entirely outside 'mask' */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5598
/* Computes the filesystem path of this unit's invocation-ID symlink: under /run/systemd/units/ for
 * the system manager, or the XDG user runtime dir for user managers. On success stores a freshly
 * allocated path in *ret (caller owns it). */
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;
                r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
                if (r < 0)
                        return r;
                p = strjoin(user_path, u->id);
        }

        if (!p)
                return -ENOMEM;

        *ret = p;
        return 0;
}
5622
/* Exports the unit's invocation ID as a symlink (link target = ID string) so journald can pick it
 * up cheaply. Idempotent: a successful export is remembered in u->exported_invocation_id. */
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlink_atomic_label(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5646
/* Exports the unit's LogLevelMax= setting as a symlink whose target is the single digit 0…7.
 * No-op if already exported or if no maximum level is configured (< 0). */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_level_max)
                return 0;

        if (c->log_level_max < 0)
                return 0;

        /* syslog levels are 0…7, hence a single decimal digit suffices */
        assert(c->log_level_max <= 7);

        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5674
/* Exports the unit's LogExtraFields= entries as a regular file (unlike the other exports, which are
 * symlinks): each field is written as a little-endian 64-bit length followed by the raw field data.
 * The file is created via a mkostemp temp file and atomically renamed into place. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -EBADF;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        int r;

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Two iovecs per field: one for the length prefix, one for the payload */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (size_t i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        /* World-readable, so journald (and others) can read it */
        (void) fchmod(fd, 0644);

        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
5727
/* Exports LogRateLimitIntervalSec= as a symlink whose target is the interval in µs (decimal).
 * No-op if already exported or unset (0). */
static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_interval)
                return 0;

        if (c->log_ratelimit_interval_usec == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_ratelimit_interval = true;
        return 0;
}
5754
/* Exports LogRateLimitBurst= as a symlink whose target is the burst count (decimal).
 * No-op if already exported or unset (0). */
static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_burst)
                return 0;

        if (c->log_ratelimit_burst == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_ratelimit_burst = true;
        return 0;
}
5781
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The log-related exports only exist for the system manager */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
5819
5820 void unit_unlink_state_files(Unit *u) {
5821 const char *p;
5822
5823 assert(u);
5824
5825 if (!u->id)
5826 return;
5827
5828 /* Undoes the effect of unit_export_state() */
5829
5830 if (u->exported_invocation_id) {
5831 _cleanup_free_ char *invocation_path = NULL;
5832 int r = unit_get_invocation_path(u, &invocation_path);
5833 if (r >= 0) {
5834 (void) unlink(invocation_path);
5835 u->exported_invocation_id = false;
5836 }
5837 }
5838
5839 if (!MANAGER_IS_SYSTEM(u->manager))
5840 return;
5841
5842 if (u->exported_log_level_max) {
5843 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5844 (void) unlink(p);
5845
5846 u->exported_log_level_max = false;
5847 }
5848
5849 if (u->exported_log_extra_fields) {
5850 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5851 (void) unlink(p);
5852
5853 u->exported_log_extra_fields = false;
5854 }
5855
5856 if (u->exported_log_ratelimit_interval) {
5857 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5858 (void) unlink(p);
5859
5860 u->exported_log_ratelimit_interval = false;
5861 }
5862
5863 if (u->exported_log_ratelimit_burst) {
5864 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5865 (void) unlink(p);
5866
5867 u->exported_log_ratelimit_burst = false;
5868 }
5869 }
5870
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Deferred accounting reset, requested earlier via u->reset_accounting */
        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
5899
/* Returns true for processes whose comm name begins with '(' — by our convention these are our own
 * short-lived helper processes (e.g. "(sd-pam)"), which shall not be reported as leftovers. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return comm[0] == '(';
}
5903
/* cg_kill_log_func_t callback used while *starting* a unit: warns about each left-over process found
 * in the unit's cgroup (helpers with '('-prefixed comm are skipped). 'userdata' is the Unit. */
int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid->pid, strna(comm));

        return 1;
}
5923
/* cg_kill_log_func_t callback used while *stopping* a unit: like the start variant above, but logs
 * at informational level only, since leftovers after stop are less alarming. */
int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(userdata,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid->pid, strna(comm));

        return 1;
}
5942
/* Walks the unit's cgroup and reports any processes still present via log_func (one of the two
 * callbacks above). Note sig == 0: nothing is actually killed, the recursion is used for logging only. */
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return 0;

        return cg_kill_recursive(
                        u->cgroup_path,
                        /* sig= */ 0,
                        /* flags= */ 0,
                        /* set= */ NULL,
                        log_func,
                        u);
}
5959
/* Returns true if this unit currently needs access to the console. Inactive/failed units never do;
 * unit types may supply their own needs_console() hook, otherwise we fall back to checking whether
 * the exec context may touch the console. */
bool unit_needs_console(Unit *u) {
        ExecContext *ec;
        UnitActiveState state;

        assert(u);

        state = unit_active_state(u);

        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return false;

        if (UNIT_VTABLE(u)->needs_console)
                return UNIT_VTABLE(u)->needs_console(u);

        /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
        ec = unit_get_exec_context(u);
        if (!ec)
                return false;

        return exec_context_may_touch_console(ec);
}
5981
int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check: refuse PID 1 and the manager's own process */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
6009
/* Emits the structured "Deactivated successfully." journal entry for this unit. */
void unit_log_success(Unit *u) {
        assert(u);

        /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */
        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}
6022
/* Emits the structured failure journal entry, recording the result string (e.g. "timeout",
 * "exit-code") in the UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
6033
/* Emits the structured "skipped" journal entry, recording why the unit was not run. */
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
6044
/* Logs the exit of one of the unit's processes ('kind' names its role, e.g. the process type;
 * 'code'/'status' are the siginfo-style CLD_* code and exit status/signal number). */
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
6083
6084 int unit_exit_status(Unit *u) {
6085 assert(u);
6086
6087 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
6088 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
6089 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
6090 * service process has exited abnormally (signal/coredump). */
6091
6092 if (!UNIT_VTABLE(u)->exit_status)
6093 return -EOPNOTSUPP;
6094
6095 return UNIT_VTABLE(u)->exit_status(u);
6096 }
6097
6098 int unit_failure_action_exit_status(Unit *u) {
6099 int r;
6100
6101 assert(u);
6102
6103 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6104
6105 if (u->failure_action_exit_status >= 0)
6106 return u->failure_action_exit_status;
6107
6108 r = unit_exit_status(u);
6109 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6110 return 255;
6111
6112 return r;
6113 }
6114
6115 int unit_success_action_exit_status(Unit *u) {
6116 int r;
6117
6118 assert(u);
6119
6120 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6121
6122 if (u->success_action_exit_status >= 0)
6123 return u->success_action_exit_status;
6124
6125 r = unit_exit_status(u);
6126 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6127 return 255;
6128
6129 return r;
6130 }
6131
6132 int unit_test_trigger_loaded(Unit *u) {
6133 Unit *trigger;
6134
6135 /* Tests whether the unit to trigger is loaded */
6136
6137 trigger = UNIT_TRIGGER(u);
6138 if (!trigger)
6139 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6140 "Refusing to start, no unit to trigger.");
6141 if (trigger->load_state != UNIT_LOADED)
6142 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6143 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6144
6145 return 0;
6146 }
6147
/* Tears down the unit's per-run resources: the runtime directory (unless it is to be preserved),
 * credentials and the mount namespace directory. */
void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
        assert(u);
        assert(context);

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(u);
        exec_context_destroy_mount_ns_dir(u);
}
6159
int unit_clean(Unit *u, ExecCleanMask mask) {
        UnitActiveState state;

        assert(u);

        /* Special return values:
         *
         * -EOPNOTSUPP → cleaning not supported for this unit type
         * -EUNATCH    → cleaning not defined for this resource type
         * -EBUSY      → unit currently can't be cleaned since it's running or not properly loaded, or has
         *               a job queued or similar
         */

        if (!UNIT_VTABLE(u)->clean)
                return -EOPNOTSUPP;

        if (mask == 0)
                return -EUNATCH;

        if (u->load_state != UNIT_LOADED)
                return -EBUSY;

        if (u->job)
                return -EBUSY;

        /* Only fully inactive units may be cleaned */
        state = unit_active_state(u);
        if (state != UNIT_INACTIVE)
                return -EBUSY;

        return UNIT_VTABLE(u)->clean(u, mask);
}
6191
6192 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6193 assert(u);
6194
6195 if (!UNIT_VTABLE(u)->clean ||
6196 u->load_state != UNIT_LOADED) {
6197 *ret = 0;
6198 return 0;
6199 }
6200
6201 /* When the clean() method is set, can_clean() really should be set too */
6202 assert(UNIT_VTABLE(u)->can_clean);
6203
6204 return UNIT_VTABLE(u)->can_clean(u, ret);
6205 }
6206
6207 bool unit_can_start_refuse_manual(Unit *u) {
6208 return unit_can_start(u) && !u->refuse_manual_start;
6209 }
6210
6211 bool unit_can_stop_refuse_manual(Unit *u) {
6212 return unit_can_stop(u) && !u->refuse_manual_stop;
6213 }
6214
6215 bool unit_can_isolate_refuse_manual(Unit *u) {
6216 return unit_can_isolate(u) && !u->refuse_manual_start;
6217 }
6218
/* Whether this unit supports the freezer: defer to the type's can_freeze() hook if it has one,
 * otherwise assume freezability iff the type implements freeze() at all. */
bool unit_can_freeze(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freeze;
}
6227
/* Invoked when the unit has finished freezing: records the state and flushes any pending
 * freezer reply on the bus. */
void unit_frozen(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_FROZEN;

        bus_unit_send_pending_freezer_message(u, false);
}
6235
/* Invoked when the unit has finished thawing: records the state and flushes any pending
 * freezer reply on the bus. */
void unit_thawed(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_RUNNING;

        bus_unit_send_pending_freezer_message(u, false);
}
6243
/* Common driver for unit_freeze()/unit_thaw(). Returns 1 if the operation was started (state is now
 * FREEZING/THAWING), 0 or negative if the type-specific method declined, -EOPNOTSUPP if unsupported,
 * -EBUSY if a job is pending, -EHOSTDOWN if the unit isn't loaded/active, -EALREADY if in progress. */
static int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int (*method)(Unit*);
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
        if (!method || !cg_freezer_supported())
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        /* Refuse redundant requests: a freeze while freezing/thawing, or a thaw while thawing */
        if ((IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING) && action == FREEZER_FREEZE) ||
            (u->freezer_state == FREEZER_THAWING && action == FREEZER_THAW))
                return -EALREADY;

        r = method(u);
        if (r <= 0)
                return r;

        /* The method must have put us into a transitional freezer state */
        assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING));

        return 1;
}
6278
/* Public freeze entry point; see unit_freezer_action() for return values. */
int unit_freeze(Unit *u) {
        return unit_freezer_action(u, FREEZER_FREEZE);
}

/* Public thaw entry point; see unit_freezer_action() for return values. */
int unit_thaw(Unit *u) {
        return unit_freezer_action(u, FREEZER_THAW);
}

/* Wrappers around low-level cgroup freezer operations common for service and scope units */
int unit_freeze_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
}

int unit_thaw_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_THAW);
}
6295
/* Picks the condition to blame for the unit's condition check failing: the first failed
 * non-trigger condition wins outright; a failed trigger condition is only reported when no
 * trigger condition succeeded at all. Returns NULL when the overall check succeeded. */
Condition *unit_find_failed_condition(Unit *u) {
        Condition *failed_trigger = NULL;
        bool has_succeeded_trigger = false;

        if (u->condition_result)
                return NULL;

        LIST_FOREACH(conditions, c, u->conditions)
                if (c->trigger) {
                        if (c->result == CONDITION_SUCCEEDED)
                                has_succeeded_trigger = true;
                        else if (!failed_trigger)
                                failed_trigger = c;
                } else if (c->result != CONDITION_SUCCEEDED)
                        return c;

        return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
}
6314
/* String names for CollectMode (CollectMode= unit setting), plus the generated
 * collect_mode_to_string()/collect_mode_from_string() helpers. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6321
6322 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6323 Unit *i;
6324
6325 assert(u);
6326
6327 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6328 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6329 * is NULL the first entry found), or NULL if not found. */
6330
6331 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6332 if (!other || other == i)
6333 return i;
6334
6335 return NULL;
6336 }
6337
/* Returns the number of matching units (≤ INT_MAX) and a caller-owned array in *ret_array
 * (may be NULL when the count is 0). */
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}
6362
/* Computes the transitive closure of the given dependency atom, breadth-first: 'units' collects the
 * result, 'queue' holds units whose dependencies still need to be visited. The set_ensure_put()
 * return of 0 (already present) prevents revisiting and hence loops. */
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;
                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
6389
/* Arms (or disarms) a one-shot CLOCK_MONOTONIC timer for this unit. If *source already exists it is
 * re-armed in place; USEC_INFINITY disables an existing timer (or is a no-op if none exists yet).
 * 'relative' selects relative-to-now vs. absolute timestamps. */
int unit_arm_timer(
                Unit *u,
                sd_event_source **source,
                bool relative,
                usec_t usec,
                sd_event_time_handler_t handler) {

        int r;

        assert(u);
        assert(source);
        assert(handler);

        if (*source) {
                if (usec == USEC_INFINITY)
                        return sd_event_source_set_enabled(*source, SD_EVENT_OFF);

                r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
                if (r < 0)
                        return r;

                return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
        }

        if (usec == USEC_INFINITY)
                return 0;

        r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
                        u->manager->event,
                        source,
                        CLOCK_MONOTONIC,
                        usec, 0,
                        handler,
                        u);
        if (r < 0)
                return r;

        /* e.g. "service-timer" — purely for debugging output */
        const char *d = strjoina(unit_type_to_string(u->type), "-timer");
        (void) sd_event_source_set_description(*source, d);

        return 0;
}
6432
6433 static int unit_get_nice(Unit *u) {
6434 ExecContext *ec;
6435
6436 ec = unit_get_exec_context(u);
6437 return ec ? ec->nice : 0;
6438 }
6439
6440 static uint64_t unit_get_cpu_weight(Unit *u) {
6441 CGroupContext *cc;
6442
6443 cc = unit_get_cgroup_context(u);
6444 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6445 }
6446
6447 int unit_compare_priority(Unit *a, Unit *b) {
6448 int ret;
6449
6450 ret = CMP(a->type, b->type);
6451 if (ret != 0)
6452 return -ret;
6453
6454 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6455 if (ret != 0)
6456 return -ret;
6457
6458 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6459 if (ret != 0)
6460 return ret;
6461
6462 return strcmp(a->id, b->id);
6463 }
6464
/* Per-unit-type vtables for ActivationDetails; only path and timer units record extra
 * activation details, all other slots stay NULL. */
const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};
6469
6470 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6471 _cleanup_free_ ActivationDetails *details = NULL;
6472
6473 assert(trigger_unit);
6474 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6475 assert(trigger_unit->id);
6476
6477 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6478 if (!details)
6479 return NULL;
6480
6481 *details = (ActivationDetails) {
6482 .n_ref = 1,
6483 .trigger_unit_type = trigger_unit->type,
6484 };
6485
6486 details->trigger_unit_name = strdup(trigger_unit->id);
6487 if (!details->trigger_unit_name)
6488 return NULL;
6489
6490 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6491 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6492
6493 return TAKE_PTR(details);
6494 }
6495
6496 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6497 if (!details)
6498 return NULL;
6499
6500 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6501 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6502
6503 free(details->trigger_unit_name);
6504
6505 return mfree(details);
6506 }
6507
6508 void activation_details_serialize(ActivationDetails *details, FILE *f) {
6509 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6510 return;
6511
6512 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6513 if (details->trigger_unit_name)
6514 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6515 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6516 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6517 }
6518
6519 int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6520 int r;
6521
6522 assert(key);
6523 assert(value);
6524 assert(details);
6525
6526 if (!*details) {
6527 UnitType t;
6528
6529 if (!streq(key, "activation-details-unit-type"))
6530 return -EINVAL;
6531
6532 t = unit_type_from_string(value);
6533 if (t < 0)
6534 return t;
6535
6536 /* The activation details vtable has defined ops only for path and timer units */
6537 if (!activation_details_vtable[t])
6538 return -EINVAL;
6539
6540 *details = malloc0(activation_details_vtable[t]->object_size);
6541 if (!*details)
6542 return -ENOMEM;
6543
6544 **details = (ActivationDetails) {
6545 .n_ref = 1,
6546 .trigger_unit_type = t,
6547 };
6548
6549 return 0;
6550 }
6551
6552 if (streq(key, "activation-details-unit-name")) {
6553 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6554 if (r < 0)
6555 return r;
6556
6557 return 0;
6558 }
6559
6560 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6561 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6562
6563 return -EINVAL;
6564 }
6565
6566 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6567 int r = 0;
6568
6569 assert(strv);
6570
6571 if (!details)
6572 return 0;
6573
6574 if (!isempty(details->trigger_unit_name)) {
6575 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6576 if (!s)
6577 return -ENOMEM;
6578
6579 r = strv_consume(strv, TAKE_PTR(s));
6580 if (r < 0)
6581 return r;
6582 }
6583
6584 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6585 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6586 if (r < 0)
6587 return r;
6588 }
6589
6590 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6591 }
6592
6593 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6594 int r = 0;
6595
6596 assert(strv);
6597
6598 if (!details)
6599 return 0;
6600
6601 if (!isempty(details->trigger_unit_name)) {
6602 r = strv_extend(strv, "trigger_unit");
6603 if (r < 0)
6604 return r;
6605
6606 r = strv_extend(strv, details->trigger_unit_name);
6607 if (r < 0)
6608 return r;
6609 }
6610
6611 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6612 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6613 if (r < 0)
6614 return r;
6615 }
6616
6617 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6618 }
6619
/* Generates activation_details_ref()/activation_details_unref(): refcounting over n_ref,
 * calling activation_details_free() above when the last reference is dropped. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);