/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bpf-foreign.h"
#include "bpf-socket-bind.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "chase.h"
#include "core-varlink.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "env-util.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "id128-util.h"
#include "install.h"
#include "io-util.h"
#include "label.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "logarithm.h"
#include "macro.h"
#include "missing_audit.h"
#include "mkdir-label.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"
#if BPF_FRAMEWORK
#include "bpf-link.h"
#endif

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
#define MENTIONWORTHY_IP_BYTES (0ULL)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
#define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL)  /* 10 MB */
#define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */

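/* Dispatch table mapping each unit type to its type-specific vtable */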
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

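/* Allocates a new Unit object of the given size (which must be at least sizeof(Unit)) and initializes all
 * fields that have non-zero defaults. The unit is not registered with the manager yet; that happens once a
 * name is added via unit_add_name(). */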
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -EBADF;
        u->ip_accounting_egress_map_fd = -EBADF;
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        u->ipv4_allow_map_fd = -EBADF;
        u->ipv6_allow_map_fd = -EBADF;
        u->ipv4_deny_map_fd = -EBADF;
        u->ipv6_deny_map_fd = -EBADF;

        u->last_section_private = -1;

        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_start_stop_ratelimit = (const RateLimit) { 10 * USEC_PER_SEC, 16 };

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;

                cc->memory_pressure_watch = u->manager->default_memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->default_memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->default_oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->default_oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(getpid_cached(), &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exists when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}

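/* Releases runtime resources of a unit that is inactive and will not be restarted, without unloading the
 * unit object itself. */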
void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0)
                        return false;
        }

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

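/* The unit_submit_to_*_queue() helpers below enqueue a unit on one of the manager's deferred-work queues.
 * Each is a NOP if the unit is already queued or if the queue's precondition does not hold. */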
void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}

void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}

static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}

void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}

static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}

static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

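/* Drops the unit from the manager's units_requiring_mounts_for index, freeing index entries that become
 * empty in the process. */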
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path = NULL;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

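/* Frees a unit and detaches it from the manager: cancels its jobs, removes it from all hash tables, queues
 * and dependency links, and releases all resources it holds. */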
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from the slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}

FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}

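/* Queries the kernel for the unit's effective freezer state, via the "frozen" key of its cgroup.events
 * file. */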
int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
        char *values[1] = {};
        int r;

        assert(u);

        r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                   STRV_MAKE("frozen"), values);
        if (r < 0)
                return r;

        r = _FREEZER_STATE_INVALID;

        if (values[0]) {
                if (streq(values[0], "0"))
                        r = FREEZER_RUNNING;
                else if (streq(values[0], "1"))
                        r = FREEZER_FROZEN;
        }

        free(values[0]);
        *ret = r;

        return 0;
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

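/* Moves all names of 'other' over to 'u', registering them as aliases of 'u' in the manager's unit table. */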
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}

static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for it. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}

static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some dependency types */
        return IN_SET(dependency,
                      UNIT_CONFLICTS,
                      UNIT_CONFLICTED_BY,
                      UNIT_BEFORE,
                      UNIT_AFTER,
                      UNIT_ON_SUCCESS,
                      UNIT_ON_FAILURE,
                      UNIT_TRIGGERS,
                      UNIT_TRIGGERED_BY);
}

static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}

static int unit_add_dependency_hashmap(
                Hashmap **dependencies,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        Hashmap *per_type;
        int r;

        assert(dependencies);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        /* Ensure the top-level dependency hashmap exists that maps UnitDependency → Hashmap(Unit* →
         * UnitDependencyInfo) */
        r = hashmap_ensure_allocated(dependencies, NULL);
        if (r < 0)
                return r;

        /* Acquire the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency
         * type, and if it's missing allocate it and insert it. */
        per_type = hashmap_get(*dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!per_type) {
                per_type = hashmap_new(NULL);
                if (!per_type)
                        return -ENOMEM;

                r = hashmap_put(*dependencies, UNIT_DEPENDENCY_TO_PTR(d), per_type);
                if (r < 0) {
                        hashmap_free(per_type);
                        return r;
                }
        }

        return unit_per_dependency_type_hashmap_update(per_type, other, origin_mask, destination_mask);
}

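/* Moves all dependencies of 'other' over to 'u' in preparation for a merge, fixing up the reverse
 * references held by third-party units so that they point to 'u' instead. */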
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_(hashmap_freep) Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}

int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't roll back reservations if we
         * fail, since we don't have a way to undo them. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

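/* Adds the implicit dependencies that follow from a unit's ExecContext: mount dependencies for the
 * configured directories, ordering against the logging sockets, and the per-unit credentials mount. */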
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {

                /* FIXME: for now we make a special case for /tmp and add a weak dependency on
                 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
                 * /tmp specifically and masking other mount units should be handled more
                 * gracefully too, see PR#16894. */
                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "tmp.mount", true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_require_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        if (exec_context_has_credentials(c) && u->manager->prefix[EXEC_DIRECTORY_RUNTIME]) {
                _cleanup_free_ char *p = NULL, *m = NULL;

                /* Let's make sure the credentials directory of this service is unmounted *after* the service
                 * itself shuts down. This only matters if mount namespacing is not used for the service, and
                 * hence the credentials mount appears on the host. */

                p = path_join(u->manager->prefix[EXEC_DIRECTORY_RUNTIME], "credentials", u->id);
                if (!p)
                        return -ENOMEM;

                r = unit_name_from_path(p, ".mount", &m);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, m, /* add_reference= */ true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        return 0;
}

const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice)
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        bool changed = false;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r == -EINVAL)
                                continue; /* If the path cannot be converted to a mount unit name, then it's
                                           * not manageable as a unit by systemd, and hence we don't need a
                                           * dependency on it. Let's thus silently ignore the issue. */
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if it exists. If so the dependencies on
                                 * this unit will be added later during the loading of the mount unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;
                        changed = changed || r > 0;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;
                        }
                }
        }

        return changed;
}

static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        if (!cg_all_unified())
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}

static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}

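/* Refuses configurations that combine JobMode=isolate with more than one OnFailure=/OnSuccess= dependency,
 * since an isolate job can only be queued for a single unit. */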
static int unit_validate_on_failure_job_mode(
                Unit *u,
                const char *job_mode_setting,
                JobMode job_mode,
                const char *dependency_name,
                UnitDependencyAtom atom) {

        Unit *other, *found = NULL;

        if (job_mode != JOB_ISOLATE)
                return 0;

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!found)
                        found = other;
                else if (found != other)
                        return log_unit_error_errno(
                                        u, SYNTHETIC_ERRNO(ENOEXEC),
                                        "More than one %s dependency specified but %sisolate set. Refusing.",
                                        dependency_name, job_mode_setting);
        }

        return 0;
}

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}

void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}

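/* Checks the unit's start rate limit. Returns 0 if starting is allowed, and -ECANCELED (after triggering
 * the configured StartLimitAction=) if the limit has been hit. */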
int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason, units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
         * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
         * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
         * that are not used in conjunction with After= as for them any such check would make things entirely
         * racy. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {

                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 *         -EALREADY:   Unit is already started.
 *         -ECOMM:      Condition failed.
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:      This unit type does not support starting.
 *         -ECANCELED:  Start limit hit, too many requests for now.
 *         -EPROTO:     Assert failed.
 *         -EINVAL:     Unit not loaded.
 *         -EOPNOTSUPP: Unit type not supported.
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 *         -ESTALE:     This unit has been started before and can't be started a second time.
 *         -ENOENT:     This is a triggering unit and the unit to trigger is not loaded.
 */
1947 int unit_start(Unit *u, ActivationDetails *details) {
1948 UnitActiveState state;
1949 Unit *following;
1950 int r;
1951
1952 assert(u);
1953
1954 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is rate limited. */
1955 if (u->type == UNIT_MOUNT && sd_event_source_is_ratelimited(u->manager->mount_event_source))
1956 return -EAGAIN;
1957
1958 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1959 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1960 * waiting is finished. */
1961 state = unit_active_state(u);
1962 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1963 return -EALREADY;
1964 if (state == UNIT_MAINTENANCE)
1965 return -EAGAIN;
1966
1967 /* Units that aren't loaded cannot be started */
1968 if (u->load_state != UNIT_LOADED)
1969 return -EINVAL;
1970
1971 /* Refuse starting scope units more than once */
1972 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1973 return -ESTALE;
1974
1975 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1976 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1977 * recheck the condition in that case. */
1978 if (state != UNIT_ACTIVATING &&
1979 !unit_test_condition(u))
1980 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");
1981
1982 /* If the asserts failed, fail the entire job */
1983 if (state != UNIT_ACTIVATING &&
1984 !unit_test_assert(u))
1985 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1986
1987 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1988 * condition checks, so that we return condition check errors (which are usually not considered a
1989 * true failure) rather than "not supported" errors (which are considered a failure).
1990 */
1991 if (!unit_type_supported(u->type))
1992 return -EOPNOTSUPP;
1993
1994 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1995 * should have taken care of this already, but let's check this here again. After all, our
1996 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1997 if (!unit_verify_deps(u))
1998 return -ENOLINK;
1999
2000 /* Forward to the main object, if we aren't it. */
2001 following = unit_following(u);
2002 if (following) {
2003 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
2004 return unit_start(following, details);
2005 }
2006
2007 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
2008 if (UNIT_VTABLE(u)->can_start) {
2009 r = UNIT_VTABLE(u)->can_start(u);
2010 if (r < 0)
2011 return r;
2012 }
2013
2014 /* If it is stopped, but we cannot start it, then fail */
2015 if (!UNIT_VTABLE(u)->start)
2016 return -EBADR;
2017
2018 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
2019 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
2020 * waits for a holdoff timer to elapse before it will start again. */
2021
2022 unit_add_to_dbus_queue(u);
2023 unit_cgroup_freezer_action(u, FREEZER_THAW);
2024
2025 if (!u->activation_details) /* Older details object wins */
2026 u->activation_details = activation_details_ref(details);
2027
2028 return UNIT_VTABLE(u)->start(u);
2029 }
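/* A minimal caller-side sketch of the error contract documented above. start_unit_lenient() is a
 * hypothetical helper for illustration only and is not used anywhere in this file: */
#if 0
static int start_unit_lenient(Unit *u, ActivationDetails *details) {
        int r;

        r = unit_start(u, details);
        if (IN_SET(r, -EALREADY, -ECOMM))
                return 0; /* Not really errors: already active, or condition not met */
        if (r == -EAGAIN)
                return 0; /* An operation is already in progress, retry later */

        return r; /* Real errors such as -EBADR, -ECANCELED or -ENOLINK are propagated */
}
#endif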
2030
2031 bool unit_can_start(Unit *u) {
2032 assert(u);
2033
2034 if (u->load_state != UNIT_LOADED)
2035 return false;
2036
2037 if (!unit_type_supported(u->type))
2038 return false;
2039
2040 /* Scope units may be started only once */
2041 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
2042 return false;
2043
2044 return !!UNIT_VTABLE(u)->start;
2045 }
2046
2047 bool unit_can_isolate(Unit *u) {
2048 assert(u);
2049
2050 return unit_can_start(u) &&
2051 u->allow_isolate;
2052 }
2053
2054 /* Errors:
2055 * -EBADR: This unit type does not support stopping.
2056 * -EALREADY: Unit is already stopped.
2057 * -EAGAIN: An operation is already in progress. Retry later.
2058 */
2059 int unit_stop(Unit *u) {
2060 UnitActiveState state;
2061 Unit *following;
2062
2063 assert(u);
2064
2065 state = unit_active_state(u);
2066 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2067 return -EALREADY;
2068
2069 following = unit_following(u);
2070 if (following) {
2071 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2072 return unit_stop(following);
2073 }
2074
2075 if (!UNIT_VTABLE(u)->stop)
2076 return -EBADR;
2077
2078 unit_add_to_dbus_queue(u);
2079 unit_cgroup_freezer_action(u, FREEZER_THAW);
2080
2081 return UNIT_VTABLE(u)->stop(u);
2082 }
2083
2084 bool unit_can_stop(Unit *u) {
2085 assert(u);
2086
2087 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2088 * Extrinsic units follow external state and they may stop following external state changes
2089 * (hence we return true here), but an attempt to do this through the manager will fail. */
2090
2091 if (!unit_type_supported(u->type))
2092 return false;
2093
2094 if (u->perpetual)
2095 return false;
2096
2097 return !!UNIT_VTABLE(u)->stop;
2098 }
2099
2100 /* Errors:
2101 * -EBADR: This unit type does not support reloading.
2102 * -ENOEXEC: Unit is not started.
2103 * -EAGAIN: An operation is already in progress. Retry later.
2104 */
2105 int unit_reload(Unit *u) {
2106 UnitActiveState state;
2107 Unit *following;
2108
2109 assert(u);
2110
2111 if (u->load_state != UNIT_LOADED)
2112 return -EINVAL;
2113
2114 if (!unit_can_reload(u))
2115 return -EBADR;
2116
2117 state = unit_active_state(u);
2118 if (state == UNIT_RELOADING)
2119 return -EAGAIN;
2120
2121 if (state != UNIT_ACTIVE)
2122 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2123
2124 following = unit_following(u);
2125 if (following) {
2126 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2127 return unit_reload(following);
2128 }
2129
2130 unit_add_to_dbus_queue(u);
2131
2132 if (!UNIT_VTABLE(u)->reload) {
2133 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2134 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2135 return 0;
2136 }
2137
2138 unit_cgroup_freezer_action(u, FREEZER_THAW);
2139
2140 return UNIT_VTABLE(u)->reload(u);
2141 }
2142
2143 bool unit_can_reload(Unit *u) {
2144 assert(u);
2145
2146 if (UNIT_VTABLE(u)->can_reload)
2147 return UNIT_VTABLE(u)->can_reload(u);
2148
2149 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2150 return true;
2151
2152 return UNIT_VTABLE(u)->reload;
2153 }
2154
2155 bool unit_is_unneeded(Unit *u) {
2156 Unit *other;
2157 assert(u);
2158
2159 if (!u->stop_when_unneeded)
2160 return false;
2161
2162 /* Don't clean up while the unit is transitioning or is even inactive. */
2163 if (unit_active_state(u) != UNIT_ACTIVE)
2164 return false;
2165 if (u->job)
2166 return false;
2167
2168 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2169 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2170 * restart, then don't clean this one up. */
2171
2172 if (other->job)
2173 return false;
2174
2175 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2176 return false;
2177
2178 if (unit_will_restart(other))
2179 return false;
2180 }
2181
2182 return true;
2183 }
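/* For illustration, the behaviour checked here is opted into per unit:
 *
 *     [Unit]
 *     StopWhenUnneeded=yes
 *
 * Such a unit is stopped again once no started unit pins it anymore, e.g. via Requires= or Wants=. */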
2184
2185 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2186 Unit *other;
2187
2188 assert(u);
2189
2190 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2191 * that is active has declared an Uphold= dependency on it */
2192
2193 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2194 if (ret_culprit)
2195 *ret_culprit = NULL;
2196 return false;
2197 }
2198
2199 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2200 if (other->job)
2201 continue;
2202
2203 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2204 if (ret_culprit)
2205 *ret_culprit = other;
2206 return true;
2207 }
2208 }
2209
2210 if (ret_culprit)
2211 *ret_culprit = NULL;
2212 return false;
2213 }
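/* For illustration: if b.service sets Uphold=a.service, then for as long as b is active, a is
 * queued for start whenever it is found inactive or failed; in that case this function returns
 * true and reports b as the culprit. */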
2214
2215 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2216 Unit *other;
2217
2218 assert(u);
2219
2220 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2221 * because the other unit is down. */
2222
2223 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2224 /* Don't clean up while the unit is transitioning or is even inactive. */
2225 if (ret_culprit)
2226 *ret_culprit = NULL;
2227 return false;
2228 }
2229
2230 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2231 if (other->job)
2232 continue;
2233
2234 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2235 if (ret_culprit)
2236 *ret_culprit = other;
2237
2238 return true;
2239 }
2240 }
2241
2242 if (ret_culprit)
2243 *ret_culprit = NULL;
2244 return false;
2245 }
2246
2247 static void check_unneeded_dependencies(Unit *u) {
2248 Unit *other;
2249 assert(u);
2250
2251 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2252
2253 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2254 unit_submit_to_stop_when_unneeded_queue(other);
2255 }
2256
2257 static void check_uphold_dependencies(Unit *u) {
2258 Unit *other;
2259 assert(u);
2260
2261 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2262
2263 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2264 unit_submit_to_start_when_upheld_queue(other);
2265 }
2266
2267 static void check_bound_by_dependencies(Unit *u) {
2268 Unit *other;
2269 assert(u);
2270
2271 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2272
2273 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2274 unit_submit_to_stop_when_bound_queue(other);
2275 }
2276
2277 static void retroactively_start_dependencies(Unit *u) {
2278 Unit *other;
2279
2280 assert(u);
2281 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2282
2283 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
2284 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2285 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2286 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2287
2288 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
2289 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2290 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2291 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2292
2293 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
2294 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2295 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2296 }
2297
2298 static void retroactively_stop_dependencies(Unit *u) {
2299 Unit *other;
2300
2301 assert(u);
2302 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2303
2304 /* Pull down units which are bound to us recursively if enabled */
2305 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2306 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2307 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2308 }
2309
2310 void unit_start_on_failure(
2311 Unit *u,
2312 const char *dependency_name,
2313 UnitDependencyAtom atom,
2314 JobMode job_mode) {
2315
2316 int n_jobs = -1;
2317 Unit *other;
2318 int r;
2319
2320 assert(u);
2321 assert(dependency_name);
2322 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2323
2324 /* Act on OnFailure= and OnSuccess= dependencies */
2325
2326 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2327 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2328
2329 if (n_jobs < 0) {
2330 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2331 n_jobs = 0;
2332 }
2333
2334 r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
2335 if (r < 0)
2336 log_unit_warning_errno(
2337 u, r, "Failed to enqueue %s job, ignoring: %s",
2338 dependency_name, bus_error_message(&error, r));
2339 n_jobs++;
2340 }
2341
2342 if (n_jobs >= 0)
2343 log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
2344 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2345 }
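/* For illustration, the dependencies acted on here typically originate from unit file settings
 * such as the following (the handler template name is hypothetical):
 *
 *     [Unit]
 *     OnFailure=failure-handler@%n.service
 *     OnFailureJobMode=replace
 */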
2346
2347 void unit_trigger_notify(Unit *u) {
2348 Unit *other;
2349
2350 assert(u);
2351
2352 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2353 if (UNIT_VTABLE(other)->trigger_notify)
2354 UNIT_VTABLE(other)->trigger_notify(other, u);
2355 }
2356
2357 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2358 if (condition_notice && log_level > LOG_NOTICE)
2359 return LOG_NOTICE;
2360 if (condition_info && log_level > LOG_INFO)
2361 return LOG_INFO;
2362 return log_level;
2363 }
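/* Worked example: 2s of CPU time exceeds MENTIONWORTHY_CPU_NSEC (1s) but not NOTICEWORTHY_CPU_NSEC
 * (10min), hence raise_level(LOG_DEBUG, true, false) returns LOG_INFO. Note that syslog levels are
 * inverted, i.e. numerically smaller means more important. */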
2364
2365 static int unit_log_resources(Unit *u) {
2366 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2367 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2368 _cleanup_free_ char *ingress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2369 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2370 size_t n_message_parts = 0, n_iovec = 0;
2371 char* message_parts[1 + 2 + 2 + 1], *t;
2372 nsec_t nsec = NSEC_INFINITY;
2373 int r;
2374 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2375 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2376 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2377 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2378 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2379 };
2380 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2381 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2382 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2383 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2384 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2385 };
2386
2387 assert(u);
2388
2389 /* Invoked whenever a unit enters the failed or dead state. Logs information about consumed resources
2390 * if resource accounting was enabled for the unit. It does this in two ways: a friendly human-readable
2391 * string with reduced information and the complete data in structured fields. */
2392
2393 (void) unit_get_cpu_usage(u, &nsec);
2394 if (nsec != NSEC_INFINITY) {
2395 /* Format the CPU time for inclusion in the structured log message */
2396 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2397 r = log_oom();
2398 goto finish;
2399 }
2400 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2401
2402 /* Format the CPU time for inclusion in the human language message string */
2403 t = strjoin("consumed ", FORMAT_TIMESPAN(nsec / NSEC_PER_USEC, USEC_PER_MSEC), " CPU time");
2404 if (!t) {
2405 r = log_oom();
2406 goto finish;
2407 }
2408
2409 message_parts[n_message_parts++] = t;
2410
2411 log_level = raise_level(log_level,
2412 nsec > MENTIONWORTHY_CPU_NSEC,
2413 nsec > NOTICEWORTHY_CPU_NSEC);
2414 }
2415
2416 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2417 uint64_t value = UINT64_MAX;
2418
2419 assert(io_fields[k]);
2420
2421 (void) unit_get_io_accounting(u, k, k > 0, &value);
2422 if (value == UINT64_MAX)
2423 continue;
2424
2425 have_io_accounting = true;
2426 if (value > 0)
2427 any_io = true;
2428
2429 /* Format IO accounting data for inclusion in the structured log message */
2430 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2431 r = log_oom();
2432 goto finish;
2433 }
2434 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2435
2436 /* Format the IO accounting data for inclusion in the human language message string, but only
2437 * for the bytes counters (and not for the operations counters) */
2438 if (k == CGROUP_IO_READ_BYTES) {
2439 assert(!rr);
2440 rr = strjoin("read ", strna(FORMAT_BYTES(value)), " from disk");
2441 if (!rr) {
2442 r = log_oom();
2443 goto finish;
2444 }
2445 } else if (k == CGROUP_IO_WRITE_BYTES) {
2446 assert(!wr);
2447 wr = strjoin("written ", strna(FORMAT_BYTES(value)), " to disk");
2448 if (!wr) {
2449 r = log_oom();
2450 goto finish;
2451 }
2452 }
2453
2454 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2455 log_level = raise_level(log_level,
2456 value > MENTIONWORTHY_IO_BYTES,
2457 value > NOTICEWORTHY_IO_BYTES);
2458 }
2459
2460 if (have_io_accounting) {
2461 if (any_io) {
2462 if (rr)
2463 message_parts[n_message_parts++] = TAKE_PTR(rr);
2464 if (wr)
2465 message_parts[n_message_parts++] = TAKE_PTR(wr);
2466
2467 } else {
2468 char *k;
2469
2470 k = strdup("no IO");
2471 if (!k) {
2472 r = log_oom();
2473 goto finish;
2474 }
2475
2476 message_parts[n_message_parts++] = k;
2477 }
2478 }
2479
2480 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2481 uint64_t value = UINT64_MAX;
2482
2483 assert(ip_fields[m]);
2484
2485 (void) unit_get_ip_accounting(u, m, &value);
2486 if (value == UINT64_MAX)
2487 continue;
2488
2489 have_ip_accounting = true;
2490 if (value > 0)
2491 any_traffic = true;
2492
2493 /* Format IP accounting data for inclusion in the structured log message */
2494 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2495 r = log_oom();
2496 goto finish;
2497 }
2498 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2499
2500 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2501 * bytes counters (and not for the packets counters) */
2502 if (m == CGROUP_IP_INGRESS_BYTES) {
2503 assert(!ingress);
2504 ingress = strjoin("received ", strna(FORMAT_BYTES(value)), " IP traffic");
2505 if (!ingress) {
2506 r = log_oom();
2507 goto finish;
2508 }
2509 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2510 assert(!egress);
2511 egress = strjoin("sent ", strna(FORMAT_BYTES(value)), " IP traffic");
2512 if (!egress) {
2513 r = log_oom();
2514 goto finish;
2515 }
2516 }
2517
2518 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2519 log_level = raise_level(log_level,
2520 value > MENTIONWORTHY_IP_BYTES,
2521 value > NOTICEWORTHY_IP_BYTES);
2522 }
2523
2524 /* This check is here because it is the earliest point following all possible log_level assignments. If
2525 * log_level is assigned anywhere after this point, move this check. */
2526 if (!unit_log_level_test(u, log_level)) {
2527 r = 0;
2528 goto finish;
2529 }
2530
2531 if (have_ip_accounting) {
2532 if (any_traffic) {
2533 if (ingress)
2534 message_parts[n_message_parts++] = TAKE_PTR(ingress);
2535 if (egress)
2536 message_parts[n_message_parts++] = TAKE_PTR(egress);
2537
2538 } else {
2539 char *k;
2540
2541 k = strdup("no IP traffic");
2542 if (!k) {
2543 r = log_oom();
2544 goto finish;
2545 }
2546
2547 message_parts[n_message_parts++] = k;
2548 }
2549 }
2550
2551 /* Is there any accounting data available at all? */
2552 if (n_iovec == 0) {
2553 r = 0;
2554 goto finish;
2555 }
2556
2557 if (n_message_parts == 0)
2558 t = strjoina("MESSAGE=", u->id, ": Completed.");
2559 else {
2560 _cleanup_free_ char *joined = NULL;
2561
2562 message_parts[n_message_parts] = NULL;
2563
2564 joined = strv_join(message_parts, ", ");
2565 if (!joined) {
2566 r = log_oom();
2567 goto finish;
2568 }
2569
2570 joined[0] = ascii_toupper(joined[0]);
2571 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2572 }
2573
2574 /* The following four fields are allocated on the stack or are static strings; hence we don't want to
2575 * free them, and hence don't increase n_iovec for them. */
2576 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2577 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2578
2579 t = strjoina(u->manager->unit_log_field, u->id);
2580 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2581
2582 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2583 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2584
2585 log_unit_struct_iovec(u, log_level, iovec, n_iovec + 4);
2586 r = 0;
2587
2588 finish:
2589 for (size_t i = 0; i < n_message_parts; i++)
2590 free(message_parts[i]);
2591
2592 for (size_t i = 0; i < n_iovec; i++)
2593 free(iovec[i].iov_base);
2594
2595 return r;
2597 }
2598
2599 static void unit_update_on_console(Unit *u) {
2600 bool b;
2601
2602 assert(u);
2603
2604 b = unit_needs_console(u);
2605 if (u->on_console == b)
2606 return;
2607
2608 u->on_console = b;
2609 if (b)
2610 manager_ref_console(u->manager);
2611 else
2612 manager_unref_console(u->manager);
2613 }
2614
2615 static void unit_emit_audit_start(Unit *u) {
2616 assert(u);
2617
2618 if (u->type != UNIT_SERVICE)
2619 return;
2620
2621 /* Write audit record if we have just finished starting up */
2622 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2623 u->in_audit = true;
2624 }
2625
2626 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2627 assert(u);
2628
2629 if (u->type != UNIT_SERVICE)
2630 return;
2631
2632 if (u->in_audit) {
2633 /* Write audit record if we have just finished shutting down */
2634 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2635 u->in_audit = false;
2636 } else {
2637 /* Hmm, if there was no start record written, write it now so that we always have a nice pair */
2638 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2639
2640 if (state == UNIT_INACTIVE)
2641 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2642 }
2643 }
2644
2645 static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
2646 bool unexpected = false;
2647 JobResult result;
2648
2649 assert(j);
2650
2651 if (j->state == JOB_WAITING)
2652 /* So we reached a different state for this job. Let's see if we can run it now, in case it previously
2653 * failed due to EAGAIN. */
2654 job_add_to_run_queue(j);
2655
2656 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2657 * hence needs to invalidate jobs. */
2658
2659 switch (j->type) {
2660
2661 case JOB_START:
2662 case JOB_VERIFY_ACTIVE:
2663
2664 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2665 job_finish_and_invalidate(j, JOB_DONE, true, false);
2666 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2667 unexpected = true;
2668
2669 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2670 if (ns == UNIT_FAILED)
2671 result = JOB_FAILED;
2672 else
2673 result = JOB_DONE;
2674
2675 job_finish_and_invalidate(j, result, true, false);
2676 }
2677 }
2678
2679 break;
2680
2681 case JOB_RELOAD:
2682 case JOB_RELOAD_OR_START:
2683 case JOB_TRY_RELOAD:
2684
2685 if (j->state == JOB_RUNNING) {
2686 if (ns == UNIT_ACTIVE)
2687 job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2688 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2689 unexpected = true;
2690
2691 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2692 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2693 }
2694 }
2695
2696 break;
2697
2698 case JOB_STOP:
2699 case JOB_RESTART:
2700 case JOB_TRY_RESTART:
2701
2702 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2703 job_finish_and_invalidate(j, JOB_DONE, true, false);
2704 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2705 unexpected = true;
2706 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2707 }
2708
2709 break;
2710
2711 default:
2712 assert_not_reached();
2713 }
2714
2715 return unexpected;
2716 }
2717
2718 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2719 const char *reason;
2720 Manager *m;
2721
2722 assert(u);
2723 assert(os < _UNIT_ACTIVE_STATE_MAX);
2724 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2725
2726 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2727 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2728 * remounted this function will be called too! */
2729
2730 m = u->manager;
2731
2732 /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to
2733 * be in the bus queue, so that any job change signal queued will force out the unit change signal first. */
2734 unit_add_to_dbus_queue(u);
2735
2736 /* Update systemd-oomd on the property/state change */
2737 if (os != ns) {
2738 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2739 * monitoring.
2740 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2741 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2742 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2743 * have the information on the property. Thus, indiscriminately send an update. */
2744 if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
2745 (void) manager_varlink_send_managed_oom_update(u);
2746 }
2747
2748 /* Update timestamps for state changes */
2749 if (!MANAGER_IS_RELOADING(m)) {
2750 dual_timestamp_get(&u->state_change_timestamp);
2751
2752 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2753 u->inactive_exit_timestamp = u->state_change_timestamp;
2754 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2755 u->inactive_enter_timestamp = u->state_change_timestamp;
2756
2757 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2758 u->active_enter_timestamp = u->state_change_timestamp;
2759 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2760 u->active_exit_timestamp = u->state_change_timestamp;
2761 }
2762
2763 /* Keep track of failed units */
2764 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2765
2766 /* Make sure the cgroup and state files are always removed when we become inactive */
2767 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2768 SET_FLAG(u->markers,
2769 (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
2770 false);
2771 unit_prune_cgroup(u);
2772 unit_unlink_state_files(u);
2773 } else if (ns != os && ns == UNIT_RELOADING)
2774 SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);
2775
2776 unit_update_on_console(u);
2777
2778 if (!MANAGER_IS_RELOADING(m)) {
2779 bool unexpected;
2780
2781 /* Let's propagate state changes to the job */
2782 if (u->job)
2783 unexpected = unit_process_job(u->job, ns, reload_success);
2784 else
2785 unexpected = true;
2786
2787 /* If this state change happened without being requested by a job, then let's retroactively start or
2788 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2789 * additional jobs just because something is already activated. */
2790
2791 if (unexpected) {
2792 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2793 retroactively_start_dependencies(u);
2794 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2795 retroactively_stop_dependencies(u);
2796 }
2797
2798 if (ns != os && ns == UNIT_FAILED) {
2799 log_unit_debug(u, "Unit entered failed state.");
2800 unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
2801 }
2802
2803 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2804 /* This unit just finished starting up */
2805
2806 unit_emit_audit_start(u);
2807 manager_send_unit_plymouth(m, u);
2808 }
2809
2810 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2811 /* This unit just stopped/failed. */
2812
2813 unit_emit_audit_stop(u, ns);
2814 unit_log_resources(u);
2815 }
2816
2817 if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
2818 unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
2819 }
2820
2821 manager_recheck_journal(m);
2822 manager_recheck_dbus(m);
2823
2824 unit_trigger_notify(u);
2825
2826 if (!MANAGER_IS_RELOADING(m)) {
2827 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2828 reason = strjoina("unit ", u->id, " failed");
2829 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2830 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2831 reason = strjoina("unit ", u->id, " succeeded");
2832 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2833 }
2834 }
2835
2836 /* And now, add the unit or depending units to various queues that will act on the new situation if
2837 * needed. These queues generally check for continuous state changes rather than events (like most of
2838 * the state propagation above), and do work deferred instead of instantly, since they typically
2839 * don't want to run during reloading, and usually involve checking combined state of multiple units
2840 * at once. */
2841
2842 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2843 /* Stop unneeded units and bound-by units regardless of whether going down was expected or not */
2844 check_unneeded_dependencies(u);
2845 check_bound_by_dependencies(u);
2846
2847 /* Maybe someone wants us to remain up? */
2848 unit_submit_to_start_when_upheld_queue(u);
2849
2850 /* Maybe the unit should be GC'ed now? */
2851 unit_add_to_gc_queue(u);
2852
2853 /* Maybe we can release some resources now? */
2854 unit_submit_to_release_resources_queue(u);
2855 }
2856
2857 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2858 /* Start uphold units regardless of whether going up was expected or not */
2859 check_uphold_dependencies(u);
2860
2861 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2862 unit_submit_to_stop_when_unneeded_queue(u);
2863
2864 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2865 * when something binds via BindsTo= to a Type=oneshot unit, as these units go directly from starting
2866 * to inactive, without ever entering started.) */
2867 unit_submit_to_stop_when_bound_queue(u);
2868 }
2869 }
2870
2871 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2872 int r;
2873
2874 assert(u);
2875 assert(pid_is_valid(pid));
2876
2877 /* Watch a specific PID */
2878
2879 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2880 * opportunity to remove any stale references to this PID as they can be created
2881 * easily (when watching a process which is not our direct child). */
2882 if (exclusive)
2883 manager_unwatch_pid(u->manager, pid);
2884
2885 r = set_ensure_allocated(&u->pids, NULL);
2886 if (r < 0)
2887 return r;
2888
2889 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2890 if (r < 0)
2891 return r;
2892
2893 /* First, let's try to add the unit keyed by "pid". */
2894 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2895 if (r == -EEXIST) {
2896 Unit **array;
2897 bool found = false;
2898 size_t n = 0;
2899
2900 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which
2901 * points to an array of Units rather than just a Unit) already lists us. */
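/* Illustrative layout, assuming PID 42 is watched by three units:
 *
 *     watch_pids[PID_TO_PTR(42)]  = u1                 (single-unit key)
 *     watch_pids[PID_TO_PTR(-42)] = { u2, u3, NULL }   (NULL-terminated Unit* array)
 */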
2902
2903 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2904 if (array)
2905 for (; array[n]; n++)
2906 if (array[n] == u)
2907 found = true;
2908
2909 if (found) /* Found it already? If so, do nothing. */
2910 r = 0;
2911 else {
2912 Unit **new_array;
2913
2914 /* Allocate a new array */
2915 new_array = new(Unit*, n + 2);
2916 if (!new_array)
2917 return -ENOMEM;
2918
2919 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2920 new_array[n] = u;
2921 new_array[n+1] = NULL;
2922
2923 /* Add or replace the old array */
2924 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2925 if (r < 0) {
2926 free(new_array);
2927 return r;
2928 }
2929
2930 free(array);
2931 }
2932 } else if (r < 0)
2933 return r;
2934
2935 r = set_put(u->pids, PID_TO_PTR(pid));
2936 if (r < 0)
2937 return r;
2938
2939 return 0;
2940 }
2941
2942 void unit_unwatch_pid(Unit *u, pid_t pid) {
2943 Unit **array;
2944
2945 assert(u);
2946 assert(pid_is_valid(pid));
2947
2948 /* First let's drop the unit in case it's keyed as "pid". */
2949 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2950
2951 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2952 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2953 if (array) {
2954 /* Let's iterate through the array, dropping our own entry */
2955
2956 size_t m = 0;
2957 for (size_t n = 0; array[n]; n++)
2958 if (array[n] != u)
2959 array[m++] = array[n];
2960 array[m] = NULL;
2961
2962 if (m == 0) {
2963 /* The array is now empty, remove the entire entry */
2964 assert_se(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2965 free(array);
2966 }
2967 }
2968
2969 (void) set_remove(u->pids, PID_TO_PTR(pid));
2970 }
2971
2972 void unit_unwatch_all_pids(Unit *u) {
2973 assert(u);
2974
2975 while (!set_isempty(u->pids))
2976 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2977
2978 u->pids = set_free(u->pids);
2979 }
2980
2981 static void unit_tidy_watch_pids(Unit *u) {
2982 pid_t except1, except2;
2983 void *e;
2984
2985 assert(u);
2986
2987 /* Cleans dead PIDs from our list */
2988
2989 except1 = unit_main_pid(u);
2990 except2 = unit_control_pid(u);
2991
2992 SET_FOREACH(e, u->pids) {
2993 pid_t pid = PTR_TO_PID(e);
2994
2995 if (pid == except1 || pid == except2)
2996 continue;
2997
2998 if (!pid_is_unwaited(pid))
2999 unit_unwatch_pid(u, pid);
3000 }
3001 }
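/* A rough sketch of the liveness probe behind pid_is_unwaited() above (the real helper lives in
 * process-util.c; this stand-in is merely illustrative): kill() with signal 0 delivers nothing but
 * still reports whether the PID is around: */
#if 0
static bool pid_still_around(pid_t pid) {
        return kill(pid, 0) >= 0 || errno != ESRCH; /* EPERM etc. imply the process exists */
}
#endif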
3002
3003 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
3004 Unit *u = ASSERT_PTR(userdata);
3005
3006 assert(s);
3007
3008 unit_tidy_watch_pids(u);
3009 unit_watch_all_pids(u);
3010
3011 /* If the PID set is empty now, then let's finish this off. */
3012 unit_synthesize_cgroup_empty_event(u);
3013
3014 return 0;
3015 }
3016
3017 int unit_enqueue_rewatch_pids(Unit *u) {
3018 int r;
3019
3020 assert(u);
3021
3022 if (!u->cgroup_path)
3023 return -ENOENT;
3024
3025 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
3026 if (r < 0)
3027 return r;
3028 if (r > 0) /* On unified we can use proper notifications */
3029 return 0;
3030
3031 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
3032 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
3033 * involves issuing kill(pid, 0) on all processes we watch. */
3034
3035 if (!u->rewatch_pids_event_source) {
3036 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
3037
3038 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
3039 if (r < 0)
3040 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
3041
3042 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
3043 if (r < 0)
3044 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
3045
3046 (void) sd_event_source_set_description(s, "tidy-watch-pids");
3047
3048 u->rewatch_pids_event_source = TAKE_PTR(s);
3049 }
3050
3051 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
3052 if (r < 0)
3053 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
3054
3055 return 0;
3056 }
3057
3058 void unit_dequeue_rewatch_pids(Unit *u) {
3059 int r;
3060 assert(u);
3061
3062 if (!u->rewatch_pids_event_source)
3063 return;
3064
3065 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
3066 if (r < 0)
3067 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
3068
3069 u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
3070 }
3071
3072 bool unit_job_is_applicable(Unit *u, JobType j) {
3073 assert(u);
3074 assert(j >= 0 && j < _JOB_TYPE_MAX);
3075
3076 switch (j) {
3077
3078 case JOB_VERIFY_ACTIVE:
3079 case JOB_START:
3080 case JOB_NOP:
3081 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3082 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3083 * jobs for them. */
3084 return true;
3085
3086 case JOB_STOP:
3087 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
3088 * external events), hence it makes no sense to permit enqueuing such a request either. */
3089 return !u->perpetual;
3090
3091 case JOB_RESTART:
3092 case JOB_TRY_RESTART:
3093 return unit_can_stop(u) && unit_can_start(u);
3094
3095 case JOB_RELOAD:
3096 case JOB_TRY_RELOAD:
3097 return unit_can_reload(u);
3098
3099 case JOB_RELOAD_OR_START:
3100 return unit_can_reload(u) && unit_can_start(u);
3101
3102 default:
3103 assert_not_reached();
3104 }
3105 }
3106
3107 int unit_add_dependency(
3108 Unit *u,
3109 UnitDependency d,
3110 Unit *other,
3111 bool add_reference,
3112 UnitDependencyMask mask) {
3113
3114 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
3115 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
3116 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
3117 [UNIT_WANTS] = UNIT_WANTED_BY,
3118 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
3119 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
3120 [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
3121 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
3122 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
3123 [UNIT_WANTED_BY] = UNIT_WANTS,
3124 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
3125 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
3126 [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
3127 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
3128 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
3129 [UNIT_BEFORE] = UNIT_AFTER,
3130 [UNIT_AFTER] = UNIT_BEFORE,
3131 [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
3132 [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
3133 [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
3134 [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
3135 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
3136 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
3137 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
3138 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
3139 [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
3140 [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
3141 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
3142 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
3143 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
3144 [UNIT_IN_SLICE] = UNIT_SLICE_OF,
3145 [UNIT_SLICE_OF] = UNIT_IN_SLICE,
3146 };
3147 UnitDependencyAtom a;
3148 int r;
3149
3150 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3151 * there, no need to notify! */
3152 bool notify, notify_other = false;
3153
3154 assert(u);
3155 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3156 assert(other);
3157
3158 u = unit_follow_merge(u);
3159 other = unit_follow_merge(other);
3160 a = unit_dependency_to_atom(d);
3161 assert(a >= 0);
3162
3163 /* We won't allow dependencies on ourselves. We will not consider them an error, however. */
3164 if (u == other) {
3165 if (unit_should_warn_about_dependency(d))
3166 log_unit_warning(u, "Dependency %s=%s is dropped.",
3167 unit_dependency_to_string(d), u->id);
3168 return 0;
3169 }
3170
3171 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3172 return 0;
3173
3174 /* Note that ordering a device unit after another unit is permitted, since it allows its job running
3175 * timeout to be started at a specific time. */
3176 if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
3177 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed).", other->id);
3178 return 0;
3179 }
3180
3181 if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
3182 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
3183 return 0;
3184 }
3185
3186 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
3187 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3188 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
3189 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
3190 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3191 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
3192
3193 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
3194 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3195 "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
3196 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
3197 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3198 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);
3199
3200 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
3201 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3202 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);
3203
3204 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
3205 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3206 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);
3207
3208 r = unit_add_dependency_hashmap(&u->dependencies, d, other, mask, 0);
3209 if (r < 0)
3210 return r;
3211 notify = r > 0;
3212
3213 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
3214 r = unit_add_dependency_hashmap(&other->dependencies, inverse_table[d], u, 0, mask);
3215 if (r < 0)
3216 return r;
3217 notify_other = r > 0;
3218 }
3219
3220 if (add_reference) {
3221 r = unit_add_dependency_hashmap(&u->dependencies, UNIT_REFERENCES, other, mask, 0);
3222 if (r < 0)
3223 return r;
3224 notify = notify || r > 0;
3225
3226 r = unit_add_dependency_hashmap(&other->dependencies, UNIT_REFERENCED_BY, u, 0, mask);
3227 if (r < 0)
3228 return r;
3229 notify_other = notify_other || r > 0;
3230 }
3231
3232 if (notify)
3233 unit_add_to_dbus_queue(u);
3234 if (notify_other)
3235 unit_add_to_dbus_queue(other);
3236
3237 return notify || notify_other;
3238 }
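/* For example, unit_add_dependency(a, UNIT_WANTS, b, false, mask) records Wants=b on a and, via
 * inverse_table[], WantedBy=a on b. The return value is positive if (and only if) either side
 * actually gained a new dependency entry. */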
3239
3240 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3241 int r, s;
3242
3243 assert(u);
3244
3245 r = unit_add_dependency(u, d, other, add_reference, mask);
3246 if (r < 0)
3247 return r;
3248
3249 s = unit_add_dependency(u, e, other, add_reference, mask);
3250 if (s < 0)
3251 return s;
3252
3253 return r > 0 || s > 0;
3254 }
3255
3256 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3257 int r;
3258
3259 assert(u);
3260 assert(name);
3261 assert(buf);
3262 assert(ret);
3263
3264 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3265 *buf = NULL;
3266 *ret = name;
3267 return 0;
3268 }
3269
3270 if (u->instance)
3271 r = unit_name_replace_instance(name, u->instance, buf);
3272 else {
3273 _cleanup_free_ char *i = NULL;
3274
3275 r = unit_name_to_prefix(u->id, &i);
3276 if (r < 0)
3277 return r;
3278
3279 r = unit_name_replace_instance(name, i, buf);
3280 }
3281 if (r < 0)
3282 return r;
3283
3284 *ret = *buf;
3285 return 0;
3286 }
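/* Worked example (unit names hypothetical): for a unit "foo@bar.service", calling
 * resolve_template(u, "helper@.service", &buf, &name) sets name to "helper@bar.service" (allocated
 * into buf); a non-template name such as "helper.service" is passed through unchanged, with *buf
 * remaining NULL. */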
3287
3288 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3289 _cleanup_free_ char *buf = NULL;
3290 Unit *other;
3291 int r;
3292
3293 assert(u);
3294 assert(name);
3295
3296 r = resolve_template(u, name, &buf, &name);
3297 if (r < 0)
3298 return r;
3299
3300 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3301 return 0;
3302
3303 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3304 if (r < 0)
3305 return r;
3306
3307 return unit_add_dependency(u, d, other, add_reference, mask);
3308 }
3309
3310 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3311 _cleanup_free_ char *buf = NULL;
3312 Unit *other;
3313 int r;
3314
3315 assert(u);
3316 assert(name);
3317
3318 r = resolve_template(u, name, &buf, &name);
3319 if (r < 0)
3320 return r;
3321
3322 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3323 return 0;
3324
3325 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3326 if (r < 0)
3327 return r;
3328
3329 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3330 }
3331
3332 int set_unit_path(const char *p) {
3333 /* This is mostly for debugging purposes */
3334 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, 1));
3335 }
3336
3337 char *unit_dbus_path(Unit *u) {
3338 assert(u);
3339
3340 if (!u->id)
3341 return NULL;
3342
3343 return unit_dbus_path_from_name(u->id);
3344 }
3345
3346 char *unit_dbus_path_invocation_id(Unit *u) {
3347 assert(u);
3348
3349 if (sd_id128_is_null(u->invocation_id))
3350 return NULL;
3351
3352 return unit_dbus_path_from_name(u->invocation_id_string);
3353 }
3354
3355 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
3356 int r;
3357
3358 assert(u);
3359
3360 /* Set the invocation ID for this unit. If we cannot set it, we don't roll back, but reset the whole thing instead. */
3361
3362 if (sd_id128_equal(u->invocation_id, id))
3363 return 0;
3364
3365 if (!sd_id128_is_null(u->invocation_id))
3366 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
3367
3368 if (sd_id128_is_null(id)) {
3369 r = 0;
3370 goto reset;
3371 }
3372
3373 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
3374 if (r < 0)
3375 goto reset;
3376
3377 u->invocation_id = id;
3378 sd_id128_to_string(id, u->invocation_id_string);
3379
3380 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
3381 if (r < 0)
3382 goto reset;
3383
3384 return 0;
3385
3386 reset:
3387 u->invocation_id = SD_ID128_NULL;
3388 u->invocation_id_string[0] = 0;
3389 return r;
3390 }
3391
3392 int unit_set_slice(Unit *u, Unit *slice) {
3393 int r;
3394
3395 assert(u);
3396 assert(slice);
3397
3398 /* Sets the unit slice if it has not been set before. We are extra careful to only allow this for
3399 * units that actually have a cgroup context. Also, we don't allow setting this for slices (since the
3400 * parent slice is derived from the name). Make sure the unit we set is actually a slice. */
3401
3402 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3403 return -EOPNOTSUPP;
3404
3405 if (u->type == UNIT_SLICE)
3406 return -EINVAL;
3407
3408 if (unit_active_state(u) != UNIT_INACTIVE)
3409 return -EBUSY;
3410
3411 if (slice->type != UNIT_SLICE)
3412 return -EINVAL;
3413
3414 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3415 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3416 return -EPERM;
3417
3418 if (UNIT_GET_SLICE(u) == slice)
3419 return 0;
3420
3421 /* Disallow slice changes if @u is already bound to cgroups */
3422 if (UNIT_GET_SLICE(u) && u->cgroup_realized)
3423 return -EBUSY;
3424
3425 /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
3426 if (UNIT_GET_SLICE(u))
3427 unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);
3428
3429 r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
3430 if (r < 0)
3431 return r;
3432
3433 return 1;
3434 }
3435
3436 int unit_set_default_slice(Unit *u) {
3437 const char *slice_name;
3438 Unit *slice;
3439 int r;
3440
3441 assert(u);
3442
3443 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3444 return 0;
3445
3446 if (UNIT_GET_SLICE(u))
3447 return 0;
3448
3449 if (u->instance) {
3450 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3451
3452 /* Implicitly place all instantiated units in their
3453 * own per-template slice */
3454
3455 r = unit_name_to_prefix(u->id, &prefix);
3456 if (r < 0)
3457 return r;
3458
3459 /* The prefix is already escaped, but it might include
3460 * "-", which has a special meaning for slice units,
3461 * hence escape it here once more. */
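/* For example, "systemd-nspawn@foo.service" has the prefix "systemd-nspawn", which escapes to
 * "systemd\x2dnspawn", so the instance is placed in "system-systemd\x2dnspawn.slice" below. */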
3462 escaped = unit_name_escape(prefix);
3463 if (!escaped)
3464 return -ENOMEM;
3465
3466 if (MANAGER_IS_SYSTEM(u->manager))
3467 slice_name = strjoina("system-", escaped, ".slice");
3468 else
3469 slice_name = strjoina("app-", escaped, ".slice");
3470
3471 } else if (unit_is_extrinsic(u))
3472 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3473 * the root slice. They don't really belong in one of the subslices. */
3474 slice_name = SPECIAL_ROOT_SLICE;
3475
3476 else if (MANAGER_IS_SYSTEM(u->manager))
3477 slice_name = SPECIAL_SYSTEM_SLICE;
3478 else
3479 slice_name = SPECIAL_APP_SLICE;
3480
3481 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3482 if (r < 0)
3483 return r;
3484
3485 return unit_set_slice(u, slice);
3486 }
3487
3488 const char *unit_slice_name(Unit *u) {
3489 Unit *slice;
3490 assert(u);
3491
3492 slice = UNIT_GET_SLICE(u);
3493 if (!slice)
3494 return NULL;
3495
3496 return slice->id;
3497 }
3498
3499 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3500 _cleanup_free_ char *t = NULL;
3501 int r;
3502
3503 assert(u);
3504 assert(type);
3505 assert(_found);
3506
3507 r = unit_name_change_suffix(u->id, type, &t);
3508 if (r < 0)
3509 return r;
3510 if (unit_has_name(u, t))
3511 return -EINVAL;
3512
3513 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3514 assert(r < 0 || *_found != u);
3515 return r;
3516 }
3517
3518 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3519 const char *new_owner;
3520 Unit *u = ASSERT_PTR(userdata);
3521 int r;
3522
3523 assert(message);
3524
3525 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3526 if (r < 0) {
3527 bus_log_parse_error(r);
3528 return 0;
3529 }
3530
3531 if (UNIT_VTABLE(u)->bus_name_owner_change)
3532 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3533
3534 return 0;
3535 }
3536
3537 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3538 const sd_bus_error *e;
3539 const char *new_owner;
3540 Unit *u = ASSERT_PTR(userdata);
3541 int r;
3542
3543 assert(message);
3544
3545 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3546
3547 e = sd_bus_message_get_error(message);
3548 if (e) {
3549 if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
3550 r = sd_bus_error_get_errno(e);
3551 log_unit_error_errno(u, r,
3552 "Unexpected error response from GetNameOwner(): %s",
3553 bus_error_message(e, r));
3554 }
3555
3556 new_owner = NULL;
3557 } else {
3558 r = sd_bus_message_read(message, "s", &new_owner);
3559 if (r < 0)
3560 return bus_log_parse_error(r);
3561
3562 assert(!isempty(new_owner));
3563 }
3564
3565 if (UNIT_VTABLE(u)->bus_name_owner_change)
3566 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3567
3568 return 0;
3569 }
3570
3571 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3572 const char *match;
3573 int r;
3574
3575 assert(u);
3576 assert(bus);
3577 assert(name);
3578
3579 if (u->match_bus_slot || u->get_name_owner_slot)
3580 return -EBUSY;
3581
3582 match = strjoina("type='signal',"
3583 "sender='org.freedesktop.DBus',"
3584 "path='/org/freedesktop/DBus',"
3585 "interface='org.freedesktop.DBus',"
3586 "member='NameOwnerChanged',"
3587 "arg0='", name, "'");
3588
3589 r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3590 if (r < 0)
3591 return r;
3592
3593 r = sd_bus_call_method_async(
3594 bus,
3595 &u->get_name_owner_slot,
3596 "org.freedesktop.DBus",
3597 "/org/freedesktop/DBus",
3598 "org.freedesktop.DBus",
3599 "GetNameOwner",
3600 get_name_owner_handler,
3601 u,
3602 "s", name);
3603 if (r < 0) {
3604 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3605 return r;
3606 }
3607
3608 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3609 return 0;
3610 }
3611
3612 int unit_watch_bus_name(Unit *u, const char *name) {
3613 int r;
3614
3615 assert(u);
3616 assert(name);
3617
3618 /* Watch a specific name on the bus. We only support one unit
3619 * watching each name for now. */
3620
3621 if (u->manager->api_bus) {
3622 /* If the bus is already available, install the match directly.
3623 * Otherwise, just put the name in the list. bus_setup_api() will take care of it later. */
3624 r = unit_install_bus_match(u, u->manager->api_bus, name);
3625 if (r < 0)
3626 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3627 }
3628
3629 r = hashmap_put(u->manager->watch_bus, name, u);
3630 if (r < 0) {
3631 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3632 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3633 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3634 }
3635
3636 return 0;
3637 }
3638
3639 void unit_unwatch_bus_name(Unit *u, const char *name) {
3640 assert(u);
3641 assert(name);
3642
3643 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3644 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3645 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3646 }
3647
3648 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3649 _cleanup_free_ char *e = NULL;
3650 Unit *device;
3651 int r;
3652
3653 assert(u);
3654
3655 /* Adds links to the device node that this unit is based on */
3656 if (isempty(what))
3657 return 0;
3658
3659 if (!is_device_path(what))
3660 return 0;
3661
3662 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3663 if (!unit_type_supported(UNIT_DEVICE))
3664 return 0;
3665
3666 r = unit_name_from_path(what, ".device", &e);
3667 if (r < 0)
3668 return r;
3669
3670 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3671 if (r < 0)
3672 return r;
3673
3674 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3675 dep = UNIT_BINDS_TO;
3676
3677 return unit_add_two_dependencies(u, UNIT_AFTER,
3678 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3679 device, true, mask);
3680 }
3681
3682 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3683 _cleanup_free_ char *escaped = NULL, *target = NULL;
3684 int r;
3685
3686 assert(u);
3687
3688 if (isempty(what))
3689 return 0;
3690
3691 if (!path_startswith(what, "/dev/"))
3692 return 0;
3693
3694 /* If we don't support devices, then also don't bother with blockdev@.target */
3695 if (!unit_type_supported(UNIT_DEVICE))
3696 return 0;
3697
3698 r = unit_name_path_escape(what, &escaped);
3699 if (r < 0)
3700 return r;
3701
3702 r = unit_name_build("blockdev", escaped, ".target", &target);
3703 if (r < 0)
3704 return r;
3705
3706 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3707 }
3708
3709 int unit_coldplug(Unit *u) {
3710 int r = 0, q;
3711
3712 assert(u);
3713
3714 /* Make sure we don't enter a loop when coldplugging recursively. */
3715 if (u->coldplugged)
3716 return 0;
3717
3718 u->coldplugged = true;
3719
3720 STRV_FOREACH(i, u->deserialized_refs) {
3721 q = bus_unit_track_add_name(u, *i);
3722 if (q < 0 && r >= 0)
3723 r = q;
3724 }
3725 u->deserialized_refs = strv_free(u->deserialized_refs);
3726
3727 if (UNIT_VTABLE(u)->coldplug) {
3728 q = UNIT_VTABLE(u)->coldplug(u);
3729 if (q < 0 && r >= 0)
3730 r = q;
3731 }
3732
3733 if (u->job) {
3734 q = job_coldplug(u->job);
3735 if (q < 0 && r >= 0)
3736 r = q;
3737 }
3738 if (u->nop_job) {
3739 q = job_coldplug(u->nop_job);
3740 if (q < 0 && r >= 0)
3741 r = q;
3742 }
3743
3744 return r;
3745 }
3746
3747 void unit_catchup(Unit *u) {
3748 assert(u);
3749
3750 if (UNIT_VTABLE(u)->catchup)
3751 UNIT_VTABLE(u)->catchup(u);
3752
3753 unit_cgroup_catchup(u);
3754 }
3755
3756 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3757 struct stat st;
3758
3759 if (!path)
3760 return false;
3761
3762 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3763 * are never out-of-date. */
3764 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3765 return false;
3766
3767 if (stat(path, &st) < 0)
3768 /* What, cannot access this anymore? */
3769 return true;
3770
3771 if (path_masked)
3772 /* For masked files check if they are still so */
3773 return !null_or_empty(&st);
3774 else
3775 /* For non-masked files check the mtime */
3776 return timespec_load(&st.st_mtim) > mtime;
3779 }
3780
3781 bool unit_need_daemon_reload(Unit *u) {
3782 _cleanup_strv_free_ char **t = NULL;
3783
3784 assert(u);
3785
3786 /* For unit files, we allow masking… */
3787 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3788 u->load_state == UNIT_MASKED))
3789 return true;
3790
3791 /* Source paths should not be masked… */
3792 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3793 return true;
3794
3795 if (u->load_state == UNIT_LOADED)
3796 (void) unit_find_dropin_paths(u, &t);
3797 if (!strv_equal(u->dropin_paths, t))
3798 return true;
3799
3800 /* … any drop-ins that are masked are simply omitted from the list. */
3801 STRV_FOREACH(path, u->dropin_paths)
3802 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3803 return true;
3804
3805 return false;
3806 }
3807
3808 void unit_reset_failed(Unit *u) {
3809 assert(u);
3810
3811 if (UNIT_VTABLE(u)->reset_failed)
3812 UNIT_VTABLE(u)->reset_failed(u);
3813
3814 ratelimit_reset(&u->start_ratelimit);
3815 u->start_limit_hit = false;
3816 }
3817
3818 Unit *unit_following(Unit *u) {
3819 assert(u);
3820
3821 if (UNIT_VTABLE(u)->following)
3822 return UNIT_VTABLE(u)->following(u);
3823
3824 return NULL;
3825 }
3826
3827 bool unit_stop_pending(Unit *u) {
3828 assert(u);
3829
3830 /* This call does not check the current state of the unit. It's
3831 * hence useful to be called from state change calls of the
3832 * unit itself, where the state isn't updated yet. This is
3833 * different from unit_inactive_or_pending() which checks both
3834 * the current state and for a queued job. */
3835
3836 return unit_has_job_type(u, JOB_STOP);
3837 }
3838
3839 bool unit_inactive_or_pending(Unit *u) {
3840 assert(u);
3841
3842 /* Returns true if the unit is inactive or going down */
3843
3844 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3845 return true;
3846
3847 if (unit_stop_pending(u))
3848 return true;
3849
3850 return false;
3851 }
3852
3853 bool unit_active_or_pending(Unit *u) {
3854 assert(u);
3855
3856 /* Returns true if the unit is active or going up */
3857
3858 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3859 return true;
3860
3861 if (u->job &&
3862 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3863 return true;
3864
3865 return false;
3866 }
3867
3868 bool unit_will_restart_default(Unit *u) {
3869 assert(u);
3870
3871 return unit_has_job_type(u, JOB_START);
3872 }
3873
3874 bool unit_will_restart(Unit *u) {
3875 assert(u);
3876
3877 if (!UNIT_VTABLE(u)->will_restart)
3878 return false;
3879
3880 return UNIT_VTABLE(u)->will_restart(u);
3881 }
3882
3883 int unit_kill(Unit *u, KillWho w, int signo, int code, int value, sd_bus_error *error) {
3884 assert(u);
3885 assert(w >= 0 && w < _KILL_WHO_MAX);
3886 assert(SIGNAL_VALID(signo));
3887 assert(IN_SET(code, SI_USER, SI_QUEUE));
3888
3889 if (!UNIT_VTABLE(u)->kill)
3890 return -EOPNOTSUPP;
3891
3892 return UNIT_VTABLE(u)->kill(u, w, signo, code, value, error);
3893 }
3894
3895 void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
3896 assert(u);
3897
3898 if (UNIT_VTABLE(u)->notify_cgroup_oom)
3899 UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
3900 }
3901
3902 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3903 _cleanup_set_free_ Set *pid_set = NULL;
3904 int r;
3905
3906 pid_set = set_new(NULL);
3907 if (!pid_set)
3908 return NULL;
3909
3910 /* Exclude the main/control pids from being killed via the cgroup */
3911 if (main_pid > 0) {
3912 r = set_put(pid_set, PID_TO_PTR(main_pid));
3913 if (r < 0)
3914 return NULL;
3915 }
3916
3917 if (control_pid > 0) {
3918 r = set_put(pid_set, PID_TO_PTR(control_pid));
3919 if (r < 0)
3920 return NULL;
3921 }
3922
3923 return TAKE_PTR(pid_set);
3924 }
3925
3926 static int kill_common_log(pid_t pid, int signo, void *userdata) {
3927 _cleanup_free_ char *comm = NULL;
3928 Unit *u = ASSERT_PTR(userdata);
3929
3930 (void) get_process_comm(pid, &comm);
3931 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
3932 signal_to_string(signo), pid, strna(comm));
3933
3934 return 1;
3935 }
3936
3937 static int kill_or_sigqueue(pid_t pid, int signo, int code, int value) {
3938 assert(pid > 0);
3939 assert(SIGNAL_VALID(signo));
3940
3941 switch (code) {
3942
3943 case SI_USER:
3944 log_debug("Killing " PID_FMT " with signal SIG%s.", pid, signal_to_string(signo));
3945 return RET_NERRNO(kill(pid, signo));
3946
3947 case SI_QUEUE:
3948 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pid, signal_to_string(signo));
3949 return RET_NERRNO(sigqueue(pid, signo, (const union sigval) { .sival_int = value }));
3950
3951 default:
3952 assert_not_reached();
3953 }
3954 }
3955
3956 int unit_kill_common(
3957 Unit *u,
3958 KillWho who,
3959 int signo,
3960 int code,
3961 int value,
3962 pid_t main_pid,
3963 pid_t control_pid,
3964 sd_bus_error *error) {
3965
3966 bool killed = false;
3967 int ret = 0, r;
3968
3969 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
3970 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
3971 * stop a service ourselves. */
3972
3973 assert(u);
3974 assert(who >= 0);
3975 assert(who < _KILL_WHO_MAX);
3976 assert(SIGNAL_VALID(signo));
3977 assert(IN_SET(code, SI_USER, SI_QUEUE));
3978
3979 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3980 if (main_pid < 0)
3981 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3982 if (main_pid == 0)
3983 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3984 }
3985
3986 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3987 if (control_pid < 0)
3988 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3989 if (control_pid == 0)
3990 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3991 }
3992
3993 if (control_pid > 0 &&
3994 IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
3995 _cleanup_free_ char *comm = NULL;
3996 (void) get_process_comm(control_pid, &comm);
3997
3998 r = kill_or_sigqueue(control_pid, signo, code, value);
3999 if (r < 0) {
4000 ret = r;
4001
4002 /* Report this failure both to the logs and to the client */
4003 sd_bus_error_set_errnof(
4004 error, r,
4005 "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
4006 signal_to_string(signo), control_pid, strna(comm));
4007 log_unit_warning_errno(
4008 u, r,
4009 "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
4010 signal_to_string(signo), control_pid, strna(comm));
4011 } else {
4012 log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
4013 signal_to_string(signo), control_pid, strna(comm));
4014 killed = true;
4015 }
4016 }
4017
4018 if (main_pid > 0 &&
4019 IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4020
4021 _cleanup_free_ char *comm = NULL;
4022 (void) get_process_comm(main_pid, &comm);
4023
4024 r = kill_or_sigqueue(main_pid, signo, code, value);
4025 if (r < 0) {
4026 if (ret == 0) {
4027 ret = r;
4028
4029 sd_bus_error_set_errnof(
4030 error, r,
4031 "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
4032 signal_to_string(signo), main_pid, strna(comm));
4033 }
4034
4035 log_unit_warning_errno(
4036 u, r,
4037 "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
4038 signal_to_string(signo), main_pid, strna(comm));
4039
4040 } else {
4041 log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
4042 signal_to_string(signo), main_pid, strna(comm));
4043 killed = true;
4044 }
4045 }
4046
4047 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4048 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4049 * resource, and we shouldn't let ourselves become the target of such allocation sprees) */
4050 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path && code == SI_USER) {
4051 _cleanup_set_free_ Set *pid_set = NULL;
4052
4053 /* Exclude the main/control pids from being killed via the cgroup */
4054 pid_set = unit_pid_set(main_pid, control_pid);
4055 if (!pid_set)
4056 return log_oom();
4057
4058 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
4059 if (r < 0) {
4060 if (!IN_SET(r, -ESRCH, -ENOENT)) {
4061 if (ret == 0) {
4062 ret = r;
4063
4064 sd_bus_error_set_errnof(
4065 error, r,
4066 "Failed to send signal SIG%s to auxiliary processes: %m",
4067 signal_to_string(signo));
4068 }
4069
4070 log_unit_warning_errno(
4071 u, r,
4072 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4073 signal_to_string(signo));
4074 }
4075 } else
4076 killed = true;
4077 }
4078
4079 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4080 if (ret == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
4081 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");
4082
4083 return ret;
4084 }
4085
4086 int unit_following_set(Unit *u, Set **s) {
4087 assert(u);
4088 assert(s);
4089
4090 if (UNIT_VTABLE(u)->following_set)
4091 return UNIT_VTABLE(u)->following_set(u, s);
4092
4093 *s = NULL;
4094 return 0;
4095 }
4096
4097 UnitFileState unit_get_unit_file_state(Unit *u) {
4098 int r;
4099
4100 assert(u);
4101
4102 if (u->unit_file_state < 0 && u->fragment_path) {
4103 r = unit_file_get_state(
4104 u->manager->runtime_scope,
4105 NULL,
4106 u->id,
4107 &u->unit_file_state);
4108 if (r < 0)
4109 u->unit_file_state = UNIT_FILE_BAD;
4110 }
4111
4112 return u->unit_file_state;
4113 }
4114
4115 PresetAction unit_get_unit_file_preset(Unit *u) {
4116 int r;
4117
4118 assert(u);
4119
4120 if (u->unit_file_preset < 0 && u->fragment_path) {
4121 _cleanup_free_ char *bn = NULL;
4122
4123 r = path_extract_filename(u->fragment_path, &bn);
4124 if (r < 0)
4125 return (u->unit_file_preset = r);
4126
4127 if (r == O_DIRECTORY)
4128 return (u->unit_file_preset = -EISDIR);
4129
4130 u->unit_file_preset = unit_file_query_preset(
4131 u->manager->runtime_scope,
4132 NULL,
4133 bn,
4134 NULL);
4135 }
4136
4137 return u->unit_file_preset;
4138 }
4139
4140 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4141 assert(ref);
4142 assert(source);
4143 assert(target);
4144
4145 if (ref->target)
4146 unit_ref_unset(ref);
4147
4148 ref->source = source;
4149 ref->target = target;
4150 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4151 return target;
4152 }
4153
4154 void unit_ref_unset(UnitRef *ref) {
4155 assert(ref);
4156
4157 if (!ref->target)
4158 return;
4159
4160 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4161 * be unreferenced now. */
4162 unit_add_to_gc_queue(ref->target);
4163
4164 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4165 ref->source = ref->target = NULL;
4166 }
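/* A minimal sketch of the UnitRef life cycle (hypothetical names, not part of the original file):
 *
 *     UnitRef ref = {};
 *
 *     unit_ref_set(&ref, holder, target);    // links ref into target->refs_by_target
 *     ...
 *     unit_ref_unset(&ref);                  // unlinks it again and queues target for GC
 *
 * As long as the ref is set, target shows up as referenced when the GC queue is processed. */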
4167
4168 static int user_from_unit_name(Unit *u, char **ret) {
4169
4170 static const uint8_t hash_key[] = {
4171 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4172 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4173 };
4174
4175 _cleanup_free_ char *n = NULL;
4176 int r;
4177
4178 r = unit_name_to_prefix(u->id, &n);
4179 if (r < 0)
4180 return r;
4181
4182 if (valid_user_group_name(n, 0)) {
4183 *ret = TAKE_PTR(n);
4184 return 0;
4185 }
4186
4187 /* If we can't use the unit name as a user name, then let's hash it and use that */
4188 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4189 return -ENOMEM;
4190
4191 return 0;
4192 }
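/* Illustrative examples (hypothetical unit names): for "worker.service" the prefix "worker" is a
 * valid user name and is returned as-is. For something like "1password.service" the prefix starts
 * with a digit and fails valid_user_group_name(), so a stable hashed name of the form "_du" plus
 * 16 hex digits (e.g. "_du1a2b3c4d5e6f7081") is generated instead. */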
4193
4194 int unit_patch_contexts(Unit *u) {
4195 CGroupContext *cc;
4196 ExecContext *ec;
4197 int r;
4198
4199 assert(u);
4200
4201 /* Patch in the manager defaults into the exec and cgroup
4202 * contexts, _after_ the rest of the settings have been
4203 * initialized */
4204
4205 ec = unit_get_exec_context(u);
4206 if (ec) {
4207 /* This only copies in the ones that need memory */
4208 for (unsigned i = 0; i < _RLIMIT_MAX; i++)
4209 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4210 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4211 if (!ec->rlimit[i])
4212 return -ENOMEM;
4213 }
4214
4215 if (MANAGER_IS_USER(u->manager) &&
4216 !ec->working_directory) {
4217
4218 r = get_home_dir(&ec->working_directory);
4219 if (r < 0)
4220 return r;
4221
4222 /* Allow user services to run, even if the
4223 * home directory is missing */
4224 ec->working_directory_missing_ok = true;
4225 }
4226
4227 if (ec->private_devices)
4228 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4229
4230 if (ec->protect_kernel_modules)
4231 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4232
4233 if (ec->protect_kernel_logs)
4234 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4235
4236 if (ec->protect_clock)
4237 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4238
4239 if (ec->dynamic_user) {
4240 if (!ec->user) {
4241 r = user_from_unit_name(u, &ec->user);
4242 if (r < 0)
4243 return r;
4244 }
4245
4246 if (!ec->group) {
4247 ec->group = strdup(ec->user);
4248 if (!ec->group)
4249 return -ENOMEM;
4250 }
4251
4252 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4253 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4254 * sandbox. */
4255
4256 ec->private_tmp = true;
4257 ec->remove_ipc = true;
4258 ec->protect_system = PROTECT_SYSTEM_STRICT;
4259 if (ec->protect_home == PROTECT_HOME_NO)
4260 ec->protect_home = PROTECT_HOME_READ_ONLY;
4261
4262 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4263 * them. */
4264 ec->no_new_privileges = true;
4265 ec->restrict_suid_sgid = true;
4266 }
4267
4268 for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
4269 exec_directory_sort(ec->directories + dt);
4270 }
4271
4272 cc = unit_get_cgroup_context(u);
4273 if (cc && ec) {
4274
4275 if (ec->private_devices &&
4276 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4277 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4278
4279 /* Only add these if needed, as they imply that everything else is blocked. */
4280 if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
4281 if (ec->root_image || ec->mount_images) {
4282
4283 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4284 FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
4285 r = cgroup_add_device_allow(cc, p, "rw");
4286 if (r < 0)
4287 return r;
4288 }
4289 FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
4290 r = cgroup_add_device_allow(cc, p, "rwm");
4291 if (r < 0)
4292 return r;
4293 }
4294
4295 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4296 * Same for mapper and verity. */
4297 FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4298 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
4299 if (r < 0)
4300 return r;
4301 }
4302 }
4303
4304 if (ec->protect_clock) {
4305 r = cgroup_add_device_allow(cc, "char-rtc", "r");
4306 if (r < 0)
4307 return r;
4308 }
4309
4310 /* If there are encrypted credentials we might need to access the TPM. */
4311 if (exec_context_has_encrypted_credentials(ec)) {
4312 r = cgroup_add_device_allow(cc, "char-tpm", "rw");
4313 if (r < 0)
4314 return r;
4315 }
4316 }
4317 }
4318
4319 return 0;
4320 }
4321
4322 ExecContext *unit_get_exec_context(const Unit *u) {
4323 size_t offset;
4324 assert(u);
4325
4326 if (u->type < 0)
4327 return NULL;
4328
4329 offset = UNIT_VTABLE(u)->exec_context_offset;
4330 if (offset <= 0)
4331 return NULL;
4332
4333 return (ExecContext*) ((uint8_t*) u + offset);
4334 }
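/* Sketch of how the offsets used by this and the following accessors get populated (assumption,
 * mirroring how unit types define their vtables elsewhere in the tree):
 *
 *     const UnitVTable service_vtable = {
 *             .object_size = sizeof(Service),
 *             .exec_context_offset = offsetof(Service, exec_context),
 *             .cgroup_context_offset = offsetof(Service, cgroup_context),
 *             .kill_context_offset = offsetof(Service, kill_context),
 *             ...
 *     };
 *
 * A zero offset means the type has no such context, hence the NULL returns above. */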
4335
4336 KillContext *unit_get_kill_context(Unit *u) {
4337 size_t offset;
4338 assert(u);
4339
4340 if (u->type < 0)
4341 return NULL;
4342
4343 offset = UNIT_VTABLE(u)->kill_context_offset;
4344 if (offset <= 0)
4345 return NULL;
4346
4347 return (KillContext*) ((uint8_t*) u + offset);
4348 }
4349
4350 CGroupContext *unit_get_cgroup_context(Unit *u) {
4351 size_t offset;
4352
4353 if (u->type < 0)
4354 return NULL;
4355
4356 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4357 if (offset <= 0)
4358 return NULL;
4359
4360 return (CGroupContext*) ((uint8_t*) u + offset);
4361 }
4362
4363 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4364 size_t offset;
4365
4366 if (u->type < 0)
4367 return NULL;
4368
4369 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4370 if (offset <= 0)
4371 return NULL;
4372
4373 return *(ExecRuntime**) ((uint8_t*) u + offset);
4374 }
4375
4376 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4377 assert(u);
4378
4379 if (UNIT_WRITE_FLAGS_NOOP(flags))
4380 return NULL;
4381
4382 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4383 return u->manager->lookup_paths.transient;
4384
4385 if (flags & UNIT_PERSISTENT)
4386 return u->manager->lookup_paths.persistent_control;
4387
4388 if (flags & UNIT_RUNTIME)
4389 return u->manager->lookup_paths.runtime_control;
4390
4391 return NULL;
4392 }
4393
4394 const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4395 assert(s);
4396 assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
4397 assert(buf);
4398
4399 _cleanup_free_ char *t = NULL;
4400
4401 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4402 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4403 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4404 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4405 * allocations. */
4406
4407 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4408 t = specifier_escape(s);
4409 if (!t)
4410 return NULL;
4411
4412 s = t;
4413 }
4414
4415 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4416 * ExecStart= and friends, i.e. '$' and quotes. */
4417
4418 if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
4419 char *t2;
4420
4421 if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
4422 t2 = strreplace(s, "$", "$$");
4423 if (!t2)
4424 return NULL;
4425 free_and_replace(t, t2);
4426 }
4427
4428 t2 = shell_escape(t ?: s, "\"");
4429 if (!t2)
4430 return NULL;
4431 free_and_replace(t, t2);
4432
4433 s = t;
4434
4435 } else if (flags & UNIT_ESCAPE_C) {
4436 char *t2;
4437
4438 t2 = cescape(s);
4439 if (!t2)
4440 return NULL;
4441 free_and_replace(t, t2);
4442
4443 s = t;
4444 }
4445
4446 *buf = TAKE_PTR(t);
4447 return s;
4448 }
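/* Illustrative usage (hypothetical input string): with UNIT_ESCAPE_EXEC_SYNTAX_ENV the value
 *
 *     echo "$HOME"
 *
 * comes back as
 *
 *     echo \"$$HOME\"
 *
 * i.e. '$' is doubled so it survives environment expansion and inner double quotes are escaped.
 * A typical call site looks like:
 *
 *     _cleanup_free_ char *buf = NULL;
 *     const char *escaped = unit_escape_setting(value, UNIT_ESCAPE_EXEC_SYNTAX_ENV, &buf);
 *     if (!escaped)
 *             return -ENOMEM;
 */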
4449
4450 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4451 _cleanup_free_ char *result = NULL;
4452 size_t n = 0;
4453
4454 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4455 * lines in a way suitable for ExecStart= stanzas. */
4456
4457 STRV_FOREACH(i, l) {
4458 _cleanup_free_ char *buf = NULL;
4459 const char *p;
4460 size_t a;
4461 char *q;
4462
4463 p = unit_escape_setting(*i, flags, &buf);
4464 if (!p)
4465 return NULL;
4466
4467 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4468 if (!GREEDY_REALLOC(result, n + a + 1))
4469 return NULL;
4470
4471 q = result + n;
4472 if (n > 0)
4473 *(q++) = ' ';
4474
4475 *(q++) = '"';
4476 q = stpcpy(q, p);
4477 *(q++) = '"';
4478
4479 n += a;
4480 }
4481
4482 if (!GREEDY_REALLOC(result, n + 1))
4483 return NULL;
4484
4485 result[n] = 0;
4486
4487 return TAKE_PTR(result);
4488 }
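/* Illustrative example (hypothetical argv): for l = { "/usr/bin/echo", "hello world" } this
 * produces the single string
 *
 *     "/usr/bin/echo" "hello world"
 *
 * with each entry escaped and quoted, suitable as the right-hand side of an ExecStart= line. */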
4489
4490 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4491 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4492 const char *dir, *wrapped;
4493 int r;
4494
4495 assert(u);
4496 assert(name);
4497 assert(data);
4498
4499 if (UNIT_WRITE_FLAGS_NOOP(flags))
4500 return 0;
4501
4502 data = unit_escape_setting(data, flags, &escaped);
4503 if (!data)
4504 return -ENOMEM;
4505
4506 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4507 * previous section header is the same */
4508
4509 if (flags & UNIT_PRIVATE) {
4510 if (!UNIT_VTABLE(u)->private_section)
4511 return -EINVAL;
4512
4513 if (!u->transient_file || u->last_section_private < 0)
4514 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4515 else if (u->last_section_private == 0)
4516 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4517 } else {
4518 if (!u->transient_file || u->last_section_private < 0)
4519 data = strjoina("[Unit]\n", data);
4520 else if (u->last_section_private > 0)
4521 data = strjoina("\n[Unit]\n", data);
4522 }
4523
4524 if (u->transient_file) {
4525 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4526 * write to the transient unit file. */
4527 fputs(data, u->transient_file);
4528
4529 if (!endswith(data, "\n"))
4530 fputc('\n', u->transient_file);
4531
4532 /* Remember which section we wrote this entry to */
4533 u->last_section_private = !!(flags & UNIT_PRIVATE);
4534 return 0;
4535 }
4536
4537 dir = unit_drop_in_dir(u, flags);
4538 if (!dir)
4539 return -EINVAL;
4540
4541 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4542 "# or an equivalent operation. Do not edit.\n",
4543 data,
4544 "\n");
4545
4546 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4547 if (r < 0)
4548 return r;
4549
4550 (void) mkdir_p_label(p, 0755);
4551
4552 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4553 * recreate the cache after every drop-in we write. */
4554 if (u->manager->unit_path_cache) {
4555 r = set_put_strdup(&u->manager->unit_path_cache, p);
4556 if (r < 0)
4557 return r;
4558 }
4559
4560 r = write_string_file_atomic_label(q, wrapped);
4561 if (r < 0)
4562 return r;
4563
4564 r = strv_push(&u->dropin_paths, q);
4565 if (r < 0)
4566 return r;
4567 q = NULL;
4568
4569 strv_uniq(u->dropin_paths);
4570
4571 u->dropin_mtime = now(CLOCK_REALTIME);
4572
4573 return 0;
4574 }
4575
4576 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4577 _cleanup_free_ char *p = NULL;
4578 va_list ap;
4579 int r;
4580
4581 assert(u);
4582 assert(name);
4583 assert(format);
4584
4585 if (UNIT_WRITE_FLAGS_NOOP(flags))
4586 return 0;
4587
4588 va_start(ap, format);
4589 r = vasprintf(&p, format, ap);
4590 va_end(ap);
4591
4592 if (r < 0)
4593 return -ENOMEM;
4594
4595 return unit_write_setting(u, flags, name, p);
4596 }
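/* Illustrative usage (hypothetical caller, mirroring how the D-Bus property handlers persist
 * changed settings):
 *
 *     r = unit_write_settingf(u, flags, "MemoryMax", "MemoryMax=%" PRIu64, new_limit);
 *     if (r < 0)
 *             return r;
 *
 * The formatted string is then routed through unit_write_setting() above, either into the
 * transient unit file or into a drop-in. */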
4597
4598 int unit_make_transient(Unit *u) {
4599 _cleanup_free_ char *path = NULL;
4600 FILE *f;
4601
4602 assert(u);
4603
4604 if (!UNIT_VTABLE(u)->can_transient)
4605 return -EOPNOTSUPP;
4606
4607 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4608
4609 path = path_join(u->manager->lookup_paths.transient, u->id);
4610 if (!path)
4611 return -ENOMEM;
4612
4613 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4614 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4615
4616 WITH_UMASK(0022) {
4617 f = fopen(path, "we");
4618 if (!f)
4619 return -errno;
4620 }
4621
4622 safe_fclose(u->transient_file);
4623 u->transient_file = f;
4624
4625 free_and_replace(u->fragment_path, path);
4626
4627 u->source_path = mfree(u->source_path);
4628 u->dropin_paths = strv_free(u->dropin_paths);
4629 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4630
4631 u->load_state = UNIT_STUB;
4632 u->load_error = 0;
4633 u->transient = true;
4634
4635 unit_add_to_dbus_queue(u);
4636 unit_add_to_gc_queue(u);
4637
4638 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4639 u->transient_file);
4640
4641 return 0;
4642 }
4643
4644 static int log_kill(pid_t pid, int sig, void *userdata) {
4645 _cleanup_free_ char *comm = NULL;
4646
4647 (void) get_process_comm(pid, &comm);
4648
4649 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4650 only, like for example systemd's own PAM stub process. */
4651 if (comm && comm[0] == '(')
4652 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4653 * here to let the manager know that a process was killed. */
4654 return 1;
4655
4656 log_unit_notice(userdata,
4657 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4658 pid,
4659 strna(comm),
4660 signal_to_string(sig));
4661
4662 return 1;
4663 }
4664
4665 static int operation_to_signal(
4666 const KillContext *c,
4667 KillOperation k,
4668 bool *ret_noteworthy) {
4669
4670 assert(c);
4671
4672 switch (k) {
4673
4674 case KILL_TERMINATE:
4675 case KILL_TERMINATE_AND_LOG:
4676 *ret_noteworthy = false;
4677 return c->kill_signal;
4678
4679 case KILL_RESTART:
4680 *ret_noteworthy = false;
4681 return restart_kill_signal(c);
4682
4683 case KILL_KILL:
4684 *ret_noteworthy = true;
4685 return c->final_kill_signal;
4686
4687 case KILL_WATCHDOG:
4688 *ret_noteworthy = true;
4689 return c->watchdog_signal;
4690
4691 default:
4692 assert_not_reached();
4693 }
4694 }
4695
4696 int unit_kill_context(
4697 Unit *u,
4698 KillContext *c,
4699 KillOperation k,
4700 pid_t main_pid,
4701 pid_t control_pid,
4702 bool main_pid_alien) {
4703
4704 bool wait_for_exit = false, send_sighup;
4705 cg_kill_log_func_t log_func = NULL;
4706 int sig, r;
4707
4708 assert(u);
4709 assert(c);
4710
4711 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4712 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
4713 * which is used for user-requested killing of unit processes. */
4714
4715 if (c->kill_mode == KILL_NONE)
4716 return 0;
4717
4718 bool noteworthy;
4719 sig = operation_to_signal(c, k, &noteworthy);
4720 if (noteworthy)
4721 log_func = log_kill;
4722
4723 send_sighup =
4724 c->send_sighup &&
4725 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4726 sig != SIGHUP;
4727
4728 if (main_pid > 0) {
4729 if (log_func)
4730 log_func(main_pid, sig, u);
4731
4732 r = kill_and_sigcont(main_pid, sig);
4733 if (r < 0 && r != -ESRCH) {
4734 _cleanup_free_ char *comm = NULL;
4735 (void) get_process_comm(main_pid, &comm);
4736
4737 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4738 } else {
4739 if (!main_pid_alien)
4740 wait_for_exit = true;
4741
4742 if (r != -ESRCH && send_sighup)
4743 (void) kill(main_pid, SIGHUP);
4744 }
4745 }
4746
4747 if (control_pid > 0) {
4748 if (log_func)
4749 log_func(control_pid, sig, u);
4750
4751 r = kill_and_sigcont(control_pid, sig);
4752 if (r < 0 && r != -ESRCH) {
4753 _cleanup_free_ char *comm = NULL;
4754 (void) get_process_comm(control_pid, &comm);
4755
4756 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4757 } else {
4758 wait_for_exit = true;
4759
4760 if (r != -ESRCH && send_sighup)
4761 (void) kill(control_pid, SIGHUP);
4762 }
4763 }
4764
4765 if (u->cgroup_path &&
4766 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
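/* With KillMode=control-group the whole cgroup is swept for every kill operation; with
 * KillMode=mixed only the final SIGKILL escalation (k == KILL_KILL) reaches the full cgroup,
 * while the initial SIGTERM above went to the main/control processes only. */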
4767 _cleanup_set_free_ Set *pid_set = NULL;
4768
4769 /* Exclude the main/control pids from being killed via the cgroup */
4770 pid_set = unit_pid_set(main_pid, control_pid);
4771 if (!pid_set)
4772 return -ENOMEM;
4773
4774 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4775 sig,
4776 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4777 pid_set,
4778 log_func, u);
4779 if (r < 0) {
4780 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4781 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));
4782
4783 } else if (r > 0) {
4784
4785 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4786 * we are running in a container or if this is a delegation unit, simply because cgroup
4787 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4788 * of containers it can be confused easily by left-over directories in the cgroup — which
4789 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4790 * there we get proper events. Hence rely on them. */
4791
4792 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4793 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4794 wait_for_exit = true;
4795
4796 if (send_sighup) {
4797 set_free(pid_set);
4798
4799 pid_set = unit_pid_set(main_pid, control_pid);
4800 if (!pid_set)
4801 return -ENOMEM;
4802
4803 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4804 SIGHUP,
4805 CGROUP_IGNORE_SELF,
4806 pid_set,
4807 NULL, NULL);
4808 }
4809 }
4810 }
4811
4812 return wait_for_exit;
4813 }
4814
4815 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4816 int r;
4817
4818 assert(u);
4819 assert(path);
4820
4821 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4822 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
4823 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4824 * appearing mount units can easily determine which units to make themselves a dependency of. */
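/* For example (hypothetical path): registering "/var/lib/foo" stores that path for this unit and
 * additionally indexes the unit in the manager-wide prefix table under "/var", "/var/lib" and
 * "/var/lib/foo" itself, so that a mount unit appearing for any of these paths can locate all
 * interested units with a single hashmap lookup. */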
4825
4826 if (!path_is_absolute(path))
4827 return -EINVAL;
4828
4829 if (hashmap_contains(u->requires_mounts_for, path)) /* Exit quickly if the path is already covered. */
4830 return 0;
4831
4832 _cleanup_free_ char *p = strdup(path);
4833 if (!p)
4834 return -ENOMEM;
4835
4836 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4837 * only after simplification, since path_is_normalized() rejects paths with '.'.
4838 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4839 path = path_simplify(p);
4840
4841 if (!path_is_normalized(path))
4842 return -EPERM;
4843
4844 UnitDependencyInfo di = {
4845 .origin_mask = mask
4846 };
4847
4848 r = hashmap_ensure_put(&u->requires_mounts_for, &path_hash_ops, p, di.data);
4849 if (r < 0)
4850 return r;
4851 assert(r > 0);
4852 TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */
4853
4854 char prefix[strlen(path) + 1];
4855 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4856 Set *x;
4857
4858 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4859 if (!x) {
4860 _cleanup_free_ char *q = NULL;
4861
4862 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4863 if (r < 0)
4864 return r;
4865
4866 q = strdup(prefix);
4867 if (!q)
4868 return -ENOMEM;
4869
4870 x = set_new(NULL);
4871 if (!x)
4872 return -ENOMEM;
4873
4874 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4875 if (r < 0) {
4876 set_free(x);
4877 return r;
4878 }
4879 q = NULL;
4880 }
4881
4882 r = set_put(x, u);
4883 if (r < 0)
4884 return r;
4885 }
4886
4887 return 0;
4888 }
4889
4890 int unit_setup_exec_runtime(Unit *u) {
4891 _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
4892 _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
4893 ExecRuntime **rt;
4894 ExecContext *ec;
4895 size_t offset;
4896 Unit *other;
4897 int r;
4898
4899 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4900 assert(offset > 0);
4901
4902 /* Check if there already is an ExecRuntime for this unit */
4903 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4904 if (*rt)
4905 return 0;
4906
4907 ec = unit_get_exec_context(u);
4908 assert(ec);
4909
4910 /* Try to get it from somebody else */
4911 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_JOINS_NAMESPACE_OF) {
4912 r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
4913 if (r < 0)
4914 return r;
4915 if (r > 0)
4916 break;
4917 }
4918
4919 if (!esr) {
4920 r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
4921 if (r < 0)
4922 return r;
4923 }
4924
4925 if (ec->dynamic_user) {
4926 r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
4927 if (r < 0)
4928 return r;
4929 }
4930
4931 r = exec_runtime_make(esr, dcreds, rt);
4932 if (r < 0)
4933 return r;
4934
4935 TAKE_PTR(esr);
4936 TAKE_PTR(dcreds);
4937
4938 return r;
4939 }
4940
4941 bool unit_type_supported(UnitType t) {
4942 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
4943 int r;
4944
4945 if (_unlikely_(t < 0))
4946 return false;
4947 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4948 return false;
4949
4950 if (cache[t] == 0) {
4951 char *e;
4952
4953 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
4954
4955 r = getenv_bool(ascii_strupper(e));
4956 if (r < 0 && r != -ENXIO)
4957 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
4958
4959 cache[t] = r == 0 ? -1 : 1;
4960 }
4961 if (cache[t] < 0)
4962 return false;
4963
4964 if (!unit_vtable[t]->supported)
4965 return true;
4966
4967 return unit_vtable[t]->supported();
4968 }
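/* Illustrative override (derived from the env var construction above): running the manager with
 *
 *     SYSTEMD_SUPPORT_DEVICE=0
 *
 * forces unit_type_supported(UNIT_DEVICE) to return false, which is what e.g.
 * unit_add_node_dependency() and unit_add_blockdev_dependency() above key off. Note that the
 * variable can only disable a type; leaving it unset falls through to the vtable check. */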
4969
4970 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4971 int r;
4972
4973 assert(u);
4974 assert(where);
4975
4976 if (!unit_log_level_test(u, LOG_NOTICE))
4977 return;
4978
4979 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
4980 if (r > 0 || r == -ENOTDIR)
4981 return;
4982 if (r < 0) {
4983 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4984 return;
4985 }
4986
4987 log_unit_struct(u, LOG_NOTICE,
4988 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4989 LOG_UNIT_INVOCATION_ID(u),
4990 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4991 "WHERE=%s", where);
4992 }
4993
4994 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4995 _cleanup_free_ char *canonical_where = NULL;
4996 int r;
4997
4998 assert(u);
4999 assert(where);
5000
5001 r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5002 if (r < 0) {
5003 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5004 return 0;
5005 }
5006
5007 /* We will happily ignore a trailing slash (or any redundant slashes) */
5008 if (path_equal(where, canonical_where))
5009 return 0;
5010
5011 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5012 log_unit_struct(u, LOG_ERR,
5013 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5014 LOG_UNIT_INVOCATION_ID(u),
5015 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5016 "WHERE=%s", where);
5017
5018 return -ELOOP;
5019 }
5020
5021 bool unit_is_pristine(Unit *u) {
5022 assert(u);
5023
5024 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5025 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5026 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5027 *
5028 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5029 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5030 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5031 */
5032
5033 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5034 !u->fragment_path &&
5035 !u->source_path &&
5036 !u->job &&
5037 !u->merged_into;
5038 }
5039
5040 pid_t unit_control_pid(Unit *u) {
5041 assert(u);
5042
5043 if (UNIT_VTABLE(u)->control_pid)
5044 return UNIT_VTABLE(u)->control_pid(u);
5045
5046 return 0;
5047 }
5048
5049 pid_t unit_main_pid(Unit *u) {
5050 assert(u);
5051
5052 if (UNIT_VTABLE(u)->main_pid)
5053 return UNIT_VTABLE(u)->main_pid(u);
5054
5055 return 0;
5056 }
5057
5058 static void unit_unref_uid_internal(
5059 Unit *u,
5060 uid_t *ref_uid,
5061 bool destroy_now,
5062 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5063
5064 assert(u);
5065 assert(ref_uid);
5066 assert(_manager_unref_uid);
5067
5068 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5069 * gid_t are actually the same type, with the same validity rules.
5070 *
5071 * Drops a reference to UID/GID from a unit. */
5072
5073 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5074 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5075
5076 if (!uid_is_valid(*ref_uid))
5077 return;
5078
5079 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5080 *ref_uid = UID_INVALID;
5081 }
5082
5083 static void unit_unref_uid(Unit *u, bool destroy_now) {
5084 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5085 }
5086
5087 static void unit_unref_gid(Unit *u, bool destroy_now) {
5088 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5089 }
5090
5091 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5092 assert(u);
5093
5094 unit_unref_uid(u, destroy_now);
5095 unit_unref_gid(u, destroy_now);
5096 }
5097
5098 static int unit_ref_uid_internal(
5099 Unit *u,
5100 uid_t *ref_uid,
5101 uid_t uid,
5102 bool clean_ipc,
5103 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5104
5105 int r;
5106
5107 assert(u);
5108 assert(ref_uid);
5109 assert(uid_is_valid(uid));
5110 assert(_manager_ref_uid);
5111
5112 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5113 * are actually the same type, and have the same validity rules.
5114 *
5115 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5116 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5117 * drops to zero. */
5118
5119 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5120 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5121
5122 if (*ref_uid == uid)
5123 return 0;
5124
5125 if (uid_is_valid(*ref_uid)) /* Already set? */
5126 return -EBUSY;
5127
5128 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5129 if (r < 0)
5130 return r;
5131
5132 *ref_uid = uid;
5133 return 1;
5134 }
5135
5136 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5137 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5138 }
5139
5140 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5141 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5142 }
5143
5144 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5145 int r = 0, q = 0;
5146
5147 assert(u);
5148
5149 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5150
5151 if (uid_is_valid(uid)) {
5152 r = unit_ref_uid(u, uid, clean_ipc);
5153 if (r < 0)
5154 return r;
5155 }
5156
5157 if (gid_is_valid(gid)) {
5158 q = unit_ref_gid(u, gid, clean_ipc);
5159 if (q < 0) {
5160 if (r > 0)
5161 unit_unref_uid(u, false);
5162
5163 return q;
5164 }
5165 }
5166
5167 return r > 0 || q > 0;
5168 }
5169
5170 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5171 ExecContext *c;
5172 int r;
5173
5174 assert(u);
5175
5176 c = unit_get_exec_context(u);
5177
5178 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5179 if (r < 0)
5180 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5181
5182 return r;
5183 }
5184
5185 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5186 int r;
5187
5188 assert(u);
5189
5190 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group name
5191 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5192 * objects when no service references the UID/GID anymore. */
5193
5194 r = unit_ref_uid_gid(u, uid, gid);
5195 if (r > 0)
5196 unit_add_to_dbus_queue(u);
5197 }
5198
5199 int unit_acquire_invocation_id(Unit *u) {
5200 sd_id128_t id;
5201 int r;
5202
5203 assert(u);
5204
5205 r = sd_id128_randomize(&id);
5206 if (r < 0)
5207 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5208
5209 r = unit_set_invocation_id(u, id);
5210 if (r < 0)
5211 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5212
5213 unit_add_to_dbus_queue(u);
5214 return 0;
5215 }
5216
5217 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5218 int r;
5219
5220 assert(u);
5221 assert(p);
5222
5223 /* Copy parameters from manager */
5224 r = manager_get_effective_environment(u->manager, &p->environment);
5225 if (r < 0)
5226 return r;
5227
5228 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5229 p->cgroup_supported = u->manager->cgroup_supported;
5230 p->prefix = u->manager->prefix;
5231 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5232
5233 /* Copy parameters from unit */
5234 p->cgroup_path = u->cgroup_path;
5235 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5236
5237 p->received_credentials_directory = u->manager->received_credentials_directory;
5238 p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;
5239
5240 return 0;
5241 }
5242
5243 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5244 int r;
5245
5246 assert(u);
5247 assert(ret);
5248
5249 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5250 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5251
5252 (void) unit_realize_cgroup(u);
5253
5254 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5255 if (r != 0)
5256 return r;
5257
5258 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
5259 (void) ignore_signals(SIGPIPE);
5260
5261 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5262
5263 if (u->cgroup_path) {
5264 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5265 if (r < 0) {
5266 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
5267 _exit(EXIT_CGROUP);
5268 }
5269 }
5270
5271 return 0;
5272 }
5273
5274 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5275 pid_t pid;
5276 int r;
5277
5278 assert(u);
5279 assert(ret_pid);
5280
5281 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5282 if (r < 0)
5283 return r;
5284 if (r == 0) {
5285 int ret = EXIT_SUCCESS;
5286
5287 STRV_FOREACH(i, paths) {
5288 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5289 if (r < 0) {
5290 log_error_errno(r, "Failed to remove '%s': %m", *i);
5291 ret = EXIT_FAILURE;
5292 }
5293 }
5294
5295 _exit(ret);
5296 }
5297
5298 r = unit_watch_pid(u, pid, true);
5299 if (r < 0)
5300 return r;
5301
5302 *ret_pid = pid;
5303 return 0;
5304 }
5305
5306 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5307 assert(deps);
5308 assert(other);
5309
5310 if (di.origin_mask == 0 && di.destination_mask == 0)
5311 /* No bit set anymore, let's drop the whole entry */
5312 assert_se(hashmap_remove(deps, other));
5313 else
5314 /* Mask was reduced, let's update the entry */
5315 assert_se(hashmap_update(deps, other, di.data) == 0);
5316 }
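/* Context (assumption, mirroring the UnitDependencyInfo definition in unit.h): no separate value
 * objects are allocated for the dependency hashmaps; the two masks are packed into the void*
 * value slot via a union, which is why the code above can round-trip through di.data:
 *
 *     typedef union UnitDependencyInfo {
 *             void *data;
 *             struct {
 *                     UnitDependencyMask origin_mask:16;
 *                     UnitDependencyMask destination_mask:16;
 *             } _packed_;
 *     } UnitDependencyInfo;
 */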
5317
5318 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5319 Hashmap *deps;
5320 assert(u);
5321
5322 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5323
5324 if (mask == 0)
5325 return;
5326
5327 HASHMAP_FOREACH(deps, u->dependencies) {
5328 bool done;
5329
5330 do {
5331 UnitDependencyInfo di;
5332 Unit *other;
5333
5334 done = true;
5335
5336 HASHMAP_FOREACH_KEY(di.data, other, deps) {
5337 Hashmap *other_deps;
5338
5339 if (FLAGS_SET(~mask, di.origin_mask))
5340 continue;
5341
5342 di.origin_mask &= ~mask;
5343 unit_update_dependency_mask(deps, other, di);
5344
5345 /* We updated the dependency from our unit to the other unit now. But most
5346 * dependencies imply a reverse dependency. Hence, let's delete that one
5347 * too. For that we go through all dependency types on the other unit and
5348 * delete all those which point to us and have the right mask set. */
5349
5350 HASHMAP_FOREACH(other_deps, other->dependencies) {
5351 UnitDependencyInfo dj;
5352
5353 dj.data = hashmap_get(other_deps, u);
5354 if (FLAGS_SET(~mask, dj.destination_mask))
5355 continue;
5356
5357 dj.destination_mask &= ~mask;
5358 unit_update_dependency_mask(other_deps, u, dj);
5359 }
5360
5361 unit_add_to_gc_queue(other);
5362
5363 /* The unit 'other' may not be wanted by the unit 'u'. */
5364 unit_submit_to_stop_when_unneeded_queue(other);
5365
5366 done = false;
5367 break;
5368 }
5369
5370 } while (!done);
5371 }
5372 }
5373
5374 static int unit_get_invocation_path(Unit *u, char **ret) {
5375 char *p;
5376 int r;
5377
5378 assert(u);
5379 assert(ret);
5380
5381 if (MANAGER_IS_SYSTEM(u->manager))
5382 p = strjoin("/run/systemd/units/invocation:", u->id);
5383 else {
5384 _cleanup_free_ char *user_path = NULL;
5385 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5386 if (r < 0)
5387 return r;
5388 p = strjoin(user_path, u->id);
5389 }
5390
5391 if (!p)
5392 return -ENOMEM;
5393
5394 *ret = p;
5395 return 0;
5396 }
5397
5398 static int unit_export_invocation_id(Unit *u) {
5399 _cleanup_free_ char *p = NULL;
5400 int r;
5401
5402 assert(u);
5403
5404 if (u->exported_invocation_id)
5405 return 0;
5406
5407 if (sd_id128_is_null(u->invocation_id))
5408 return 0;
5409
5410 r = unit_get_invocation_path(u, &p);
5411 if (r < 0)
5412 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5413
5414 r = symlink_atomic_label(u->invocation_id_string, p);
5415 if (r < 0)
5416 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5417
5418 u->exported_invocation_id = true;
5419 return 0;
5420 }
5421
5422 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5423 const char *p;
5424 char buf[2];
5425 int r;
5426
5427 assert(u);
5428 assert(c);
5429
5430 if (u->exported_log_level_max)
5431 return 0;
5432
5433 if (c->log_level_max < 0)
5434 return 0;
5435
5436 assert(c->log_level_max <= 7);
5437
5438 buf[0] = '0' + c->log_level_max;
5439 buf[1] = 0;
5440
5441 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5442 r = symlink_atomic(buf, p);
5443 if (r < 0)
5444 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5445
5446 u->exported_log_level_max = true;
5447 return 0;
5448 }
5449
5450 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5451 _cleanup_close_ int fd = -EBADF;
5452 struct iovec *iovec;
5453 const char *p;
5454 char *pattern;
5455 le64_t *sizes;
5456 ssize_t n;
5457 int r;
5458
5459 if (u->exported_log_extra_fields)
5460 return 0;
5461
5462 if (c->n_log_extra_fields <= 0)
5463 return 0;
5464
5465 sizes = newa(le64_t, c->n_log_extra_fields);
5466 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5467
5468 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5469 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5470
5471 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5472 iovec[i*2+1] = c->log_extra_fields[i];
5473 }
5474
5475 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5476 pattern = strjoina(p, ".XXXXXX");
5477
5478 fd = mkostemp_safe(pattern);
5479 if (fd < 0)
5480 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5481
5482 n = writev(fd, iovec, c->n_log_extra_fields*2);
5483 if (n < 0) {
5484 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5485 goto fail;
5486 }
5487
5488 (void) fchmod(fd, 0644);
5489
5490 if (rename(pattern, p) < 0) {
5491 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5492 goto fail;
5493 }
5494
5495 u->exported_log_extra_fields = true;
5496 return 0;
5497
5498 fail:
5499 (void) unlink(pattern);
5500 return r;
5501 }
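/* The file written above is a flat sequence of (little-endian 64-bit length, payload) pairs, one
 * per extra log field. A minimal reader sketch (illustrative only, error handling elided):
 *
 *     le64_t sz;
 *     while (read(fd, &sz, sizeof(sz)) == (ssize_t) sizeof(sz)) {
 *             uint64_t n = le64toh(sz);
 *             _cleanup_free_ char *field = malloc(n + 1);
 *             if (!field)
 *                     return -ENOMEM;
 *             if (read(fd, field, n) != (ssize_t) n)
 *                     break;            // truncated file
 *             field[n] = 0;             // each field is a "NAME=value" string
 *             ...
 *     }
 */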
5502
5503 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5504 _cleanup_free_ char *buf = NULL;
5505 const char *p;
5506 int r;
5507
5508 assert(u);
5509 assert(c);
5510
5511 if (u->exported_log_ratelimit_interval)
5512 return 0;
5513
5514 if (c->log_ratelimit_interval_usec == 0)
5515 return 0;
5516
5517 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5518
5519 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5520 return log_oom();
5521
5522 r = symlink_atomic(buf, p);
5523 if (r < 0)
5524 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5525
5526 u->exported_log_ratelimit_interval = true;
5527 return 0;
5528 }
5529
5530 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5531 _cleanup_free_ char *buf = NULL;
5532 const char *p;
5533 int r;
5534
5535 assert(u);
5536 assert(c);
5537
5538 if (u->exported_log_ratelimit_burst)
5539 return 0;
5540
5541 if (c->log_ratelimit_burst == 0)
5542 return 0;
5543
5544 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5545
5546 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5547 return log_oom();
5548
5549 r = symlink_atomic(buf, p);
5550 if (r < 0)
5551 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5552
5553 u->exported_log_ratelimit_burst = true;
5554 return 0;
5555 }
5556
5557 void unit_export_state_files(Unit *u) {
5558 const ExecContext *c;
5559
5560 assert(u);
5561
5562 if (!u->id)
5563 return;
5564
5565 if (MANAGER_IS_TEST_RUN(u->manager))
5566 return;
5567
5568 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5569 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5570 * the IPC system itself and PID 1 also log to the journal.
5571 *
5572 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as an
5573 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5574 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5575 * namespace at least.
5576 *
5577 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5578 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5579 * them with one. */
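/* For instance (hypothetical unit name): after export, the invocation ID of foo.service can be
 * read back without any IPC via a single readlink() on
 * /run/systemd/units/invocation:foo.service, since the ID string is stored as the symlink
 * target itself rather than as file content. */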
5580
5581 (void) unit_export_invocation_id(u);
5582
5583 if (!MANAGER_IS_SYSTEM(u->manager))
5584 return;
5585
5586 c = unit_get_exec_context(u);
5587 if (c) {
5588 (void) unit_export_log_level_max(u, c);
5589 (void) unit_export_log_extra_fields(u, c);
5590 (void) unit_export_log_ratelimit_interval(u, c);
5591 (void) unit_export_log_ratelimit_burst(u, c);
5592 }
5593 }
5594
5595 void unit_unlink_state_files(Unit *u) {
5596 const char *p;
5597
5598 assert(u);
5599
5600 if (!u->id)
5601 return;
5602
5603 /* Undoes the effect of unit_export_state() */
5604
5605 if (u->exported_invocation_id) {
5606 _cleanup_free_ char *invocation_path = NULL;
5607 int r = unit_get_invocation_path(u, &invocation_path);
5608 if (r >= 0) {
5609 (void) unlink(invocation_path);
5610 u->exported_invocation_id = false;
5611 }
5612 }
5613
5614 if (!MANAGER_IS_SYSTEM(u->manager))
5615 return;
5616
5617 if (u->exported_log_level_max) {
5618 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5619 (void) unlink(p);
5620
5621 u->exported_log_level_max = false;
5622 }
5623
5624 if (u->exported_log_extra_fields) {
5625 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5626 (void) unlink(p);
5627
5628 u->exported_log_extra_fields = false;
5629 }
5630
5631 if (u->exported_log_ratelimit_interval) {
5632 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5633 (void) unlink(p);
5634
5635 u->exported_log_ratelimit_interval = false;
5636 }
5637
5638 if (u->exported_log_ratelimit_burst) {
5639 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5640 (void) unlink(p);
5641
5642 u->exported_log_ratelimit_burst = false;
5643 }
5644 }
5645
5646 int unit_prepare_exec(Unit *u) {
5647 int r;
5648
5649 assert(u);
5650
5651 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5652 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5653 r = bpf_firewall_load_custom(u);
5654 if (r < 0)
5655 return r;
5656
5657 /* Prepares everything so that we can fork off a process for this unit */
5658
5659 (void) unit_realize_cgroup(u);
5660
5661 if (u->reset_accounting) {
5662 (void) unit_reset_accounting(u);
5663 u->reset_accounting = false;
5664 }
5665
5666 unit_export_state_files(u);
5667
5668 r = unit_setup_exec_runtime(u);
5669 if (r < 0)
5670 return r;
5671
5672 return 0;
5673 }
5674
5675 static bool ignore_leftover_process(const char *comm) {
5676 return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
5677 }
5678
5679 int unit_log_leftover_process_start(pid_t pid, int sig, void *userdata) {
5680 _cleanup_free_ char *comm = NULL;
5681
5682 (void) get_process_comm(pid, &comm);
5683
5684 if (ignore_leftover_process(comm))
5685 return 0;
5686
5687 /* During start we print a warning */
5688
5689 log_unit_warning(userdata,
5690 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5691 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5692 pid, strna(comm));
5693
5694 return 1;
5695 }
5696
5697 int unit_log_leftover_process_stop(pid_t pid, int sig, void *userdata) {
5698 _cleanup_free_ char *comm = NULL;
5699
5700 (void) get_process_comm(pid, &comm);
5701
5702 if (ignore_leftover_process(comm))
5703 return 0;
5704
5705 /* During stop we only print an informational message */
5706
5707 log_unit_info(userdata,
5708 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5709 pid, strna(comm));
5710
5711 return 1;
5712 }
5713
5714 int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
5715 assert(u);
5716
5717 (void) unit_pick_cgroup_path(u);
5718
5719 if (!u->cgroup_path)
5720 return 0;
5721
5722 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_func, u);
5723 }
5724
5725 bool unit_needs_console(Unit *u) {
5726 ExecContext *ec;
5727 UnitActiveState state;
5728
5729 assert(u);
5730
5731 state = unit_active_state(u);
5732
5733 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5734 return false;
5735
5736 if (UNIT_VTABLE(u)->needs_console)
5737 return UNIT_VTABLE(u)->needs_console(u);
5738
5739 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5740 ec = unit_get_exec_context(u);
5741 if (!ec)
5742 return false;
5743
5744 return exec_context_may_touch_console(ec);
5745 }
5746
5747 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5748 int r;
5749
5750 assert(u);
5751
5752 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5753 * and not a kernel thread either */
5754
5755 /* First, a simple range check */
5756 if (!pid_is_valid(pid))
5757 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5758
5759 /* Some extra safety check */
5760 if (pid == 1 || pid == getpid_cached())
5761 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5762
5763 /* Don't even begin to bother with kernel threads */
5764 r = is_kernel_thread(pid);
5765 if (r == -ESRCH)
5766 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5767 if (r < 0)
5768 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5769 if (r > 0)
5770 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5771
5772 return 0;
5773 }
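
/* Illustrative sketch, not part of the build: a hypothetical bus method handler would validate a
 * client-supplied PID with unit_pid_attachable() before touching it, and simply propagate the
 * already-populated sd_bus_error on failure:
 *
 *     r = unit_pid_attachable(u, pid, error);
 *     if (r < 0)
 *             return r;  // 'error' already carries a descriptive message
 */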
5774
5775 void unit_log_success(Unit *u) {
5776 assert(u);
5777
5778 /* Let's show the "Deactivated successfully" message at debug level (when the manager runs as a user
5779 * instance) rather than at info level. The message has low information value for regular users and it
5780 * might be a bit overwhelming on a system with a lot of devices. */
5781 log_unit_struct(u,
5782 MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
5783 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5784 LOG_UNIT_INVOCATION_ID(u),
5785 LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
5786 }
5787
5788 void unit_log_failure(Unit *u, const char *result) {
5789 assert(u);
5790 assert(result);
5791
5792 log_unit_struct(u, LOG_WARNING,
5793 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5794 LOG_UNIT_INVOCATION_ID(u),
5795 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5796 "UNIT_RESULT=%s", result);
5797 }
5798
5799 void unit_log_skip(Unit *u, const char *result) {
5800 assert(u);
5801 assert(result);
5802
5803 log_unit_struct(u, LOG_INFO,
5804 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5805 LOG_UNIT_INVOCATION_ID(u),
5806 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5807 "UNIT_RESULT=%s", result);
5808 }
5809
5810 void unit_log_process_exit(
5811 Unit *u,
5812 const char *kind,
5813 const char *command,
5814 bool success,
5815 int code,
5816 int status) {
5817
5818 int level;
5819
5820 assert(u);
5821 assert(kind);
5822
5823 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5824 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5825 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5826 * WARNING. */
5827 if (success)
5828 level = LOG_DEBUG;
5829 else if (code == CLD_EXITED)
5830 level = LOG_NOTICE;
5831 else
5832 level = LOG_WARNING;
5833
5834 log_unit_struct(u, level,
5835 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5836 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
5837 kind,
5838 sigchld_code_to_string(code), status,
5839 strna(code == CLD_EXITED
5840 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5841 : signal_to_string(status)),
5842 success ? " (success)" : ""),
5843 "EXIT_CODE=%s", sigchld_code_to_string(code),
5844 "EXIT_STATUS=%i", status,
5845 "COMMAND=%s", strna(command),
5846 LOG_UNIT_INVOCATION_ID(u));
5847 }
5848
5849 int unit_exit_status(Unit *u) {
5850 assert(u);
5851
5852 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the
5853 * range 0…255 if there's something to propagate, -EOPNOTSUPP if the concept does not apply to this unit
5854 * type, -ENODATA if no data is currently known (for example because the unit hasn't deactivated yet), and
5855 * -EBADE if the main service process has exited abnormally (signal/coredump). */
5856
5857 if (!UNIT_VTABLE(u)->exit_status)
5858 return -EOPNOTSUPP;
5859
5860 return UNIT_VTABLE(u)->exit_status(u);
5861 }
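
/* Illustrative sketch, not part of the build: decoding the return conventions documented above in a
 * hypothetical caller:
 *
 *     r = unit_exit_status(u);
 *     if (r == -EOPNOTSUPP)
 *             ;  // unit type has no concept of a main exit status
 *     else if (r == -ENODATA)
 *             ;  // nothing known yet, e.g. the unit hasn't deactivated
 *     else if (r == -EBADE)
 *             ;  // abnormal exit: signal or coredump
 *     else if (r >= 0)
 *             ;  // plain exit status in the range 0…255, safe to propagate
 */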
5862
5863 int unit_failure_action_exit_status(Unit *u) {
5864 int r;
5865
5866 assert(u);
5867
5868 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5869
5870 if (u->failure_action_exit_status >= 0)
5871 return u->failure_action_exit_status;
5872
5873 r = unit_exit_status(u);
5874 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5875 return 255;
5876
5877 return r;
5878 }
5879
5880 int unit_success_action_exit_status(Unit *u) {
5881 int r;
5882
5883 assert(u);
5884
5885 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5886
5887 if (u->success_action_exit_status >= 0)
5888 return u->success_action_exit_status;
5889
5890 r = unit_exit_status(u);
5891 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5892 return 255;
5893
5894 return r;
5895 }
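
/* Illustrative sketch, not part of the build: both helpers above degrade gracefully, so a
 * hypothetical caller that needs a concrete status can fall back like this:
 *
 *     r = unit_failure_action_exit_status(u);
 *     if (r < 0)
 *             r = EXIT_FAILURE;  // nothing configured and nothing known, pick a generic failure code
 */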
5896
5897 int unit_test_trigger_loaded(Unit *u) {
5898 Unit *trigger;
5899
5900 /* Tests whether the unit to trigger is loaded */
5901
5902 trigger = UNIT_TRIGGER(u);
5903 if (!trigger)
5904 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5905 "Refusing to start, no unit to trigger.");
5906 if (trigger->load_state != UNIT_LOADED)
5907 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5908 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5909
5910 return 0;
5911 }
5912
5913 void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
5914 assert(u);
5915 assert(context);
5916
5917 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
5918 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
5919 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
5920
5921 exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
5922 exec_context_destroy_mount_ns_dir(u);
5923 }
5924
5925 int unit_clean(Unit *u, ExecCleanMask mask) {
5926 UnitActiveState state;
5927
5928 assert(u);
5929
5930 /* Special return values:
5931 *
5932 * -EOPNOTSUPP → cleaning not supported for this unit type
5933 * -EUNATCH → cleaning not defined for this resource type
5934 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5935 * a job queued or similar
5936 */
5937
5938 if (!UNIT_VTABLE(u)->clean)
5939 return -EOPNOTSUPP;
5940
5941 if (mask == 0)
5942 return -EUNATCH;
5943
5944 if (u->load_state != UNIT_LOADED)
5945 return -EBUSY;
5946
5947 if (u->job)
5948 return -EBUSY;
5949
5950 state = unit_active_state(u);
5951 if (state != UNIT_INACTIVE)
5952 return -EBUSY;
5953
5954 return UNIT_VTABLE(u)->clean(u, mask);
5955 }
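
/* Illustrative sketch, not part of the build: a hypothetical caller mapping the special return
 * values documented above to diagnostics (EXEC_CLEAN_CACHE chosen arbitrarily here):
 *
 *     r = unit_clean(u, EXEC_CLEAN_CACHE);
 *     if (r == -EOPNOTSUPP)
 *             log_unit_debug(u, "Unit type does not support cleaning.");
 *     else if (r == -EUNATCH)
 *             log_unit_debug(u, "Requested resource type not defined for this unit.");
 *     else if (r == -EBUSY)
 *             log_unit_debug(u, "Unit is running, not fully loaded, or has a job queued.");
 *     else if (r < 0)
 *             log_unit_warning_errno(u, r, "Failed to clean unit: %m");
 */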
5956
5957 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5958 assert(u);
5959
5960 if (!UNIT_VTABLE(u)->clean ||
5961 u->load_state != UNIT_LOADED) {
5962 *ret = 0;
5963 return 0;
5964 }
5965
5966 /* When the clean() method is set, can_clean() really should be set too */
5967 assert(UNIT_VTABLE(u)->can_clean);
5968
5969 return UNIT_VTABLE(u)->can_clean(u, ret);
5970 }
5971
5972 bool unit_can_freeze(Unit *u) {
5973 assert(u);
5974
5975 if (UNIT_VTABLE(u)->can_freeze)
5976 return UNIT_VTABLE(u)->can_freeze(u);
5977
5978 return UNIT_VTABLE(u)->freeze;
5979 }
5980
5981 void unit_frozen(Unit *u) {
5982 assert(u);
5983
5984 u->freezer_state = FREEZER_FROZEN;
5985
5986 bus_unit_send_pending_freezer_message(u, false);
5987 }
5988
5989 void unit_thawed(Unit *u) {
5990 assert(u);
5991
5992 u->freezer_state = FREEZER_RUNNING;
5993
5994 bus_unit_send_pending_freezer_message(u, false);
5995 }
5996
5997 static int unit_freezer_action(Unit *u, FreezerAction action) {
5998 UnitActiveState s;
5999 int (*method)(Unit*);
6000 int r;
6001
6002 assert(u);
6003 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
6004
6005 method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
6006 if (!method || !cg_freezer_supported())
6007 return -EOPNOTSUPP;
6008
6009 if (u->job)
6010 return -EBUSY;
6011
6012 if (u->load_state != UNIT_LOADED)
6013 return -EHOSTDOWN;
6014
6015 s = unit_active_state(u);
6016 if (s != UNIT_ACTIVE)
6017 return -EHOSTDOWN;
6018
6019 if ((IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING) && action == FREEZER_FREEZE) ||
6020 (u->freezer_state == FREEZER_THAWING && action == FREEZER_THAW))
6021 return -EALREADY;
6022
6023 r = method(u);
6024 if (r <= 0)
6025 return r;
6026
6027 assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING));
6028
6029 return 1;
6030 }
6031
6032 int unit_freeze(Unit *u) {
6033 return unit_freezer_action(u, FREEZER_FREEZE);
6034 }
6035
6036 int unit_thaw(Unit *u) {
6037 return unit_freezer_action(u, FREEZER_THAW);
6038 }
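
/* Illustrative sketch, not part of the build: unit_freeze() and unit_thaw() share their error
 * conventions via unit_freezer_action(), so a hypothetical caller handles both the same way:
 *
 *     r = unit_freeze(u);
 *     if (r == -EOPNOTSUPP)
 *             ;  // no cgroup freezer support, or unit type implements no freeze method
 *     else if (r == -EBUSY)
 *             ;  // a job is pending for this unit
 *     else if (r == -EHOSTDOWN)
 *             ;  // unit not loaded or not active
 *     else if (r == -EALREADY)
 *             ;  // a freeze/thaw operation is already in progress
 */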
6039
6040 /* Wrappers around low-level cgroup freezer operations common for service and scope units */
6041 int unit_freeze_vtable_common(Unit *u) {
6042 return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
6043 }
6044
6045 int unit_thaw_vtable_common(Unit *u) {
6046 return unit_cgroup_freezer_action(u, FREEZER_THAW);
6047 }
6048
6049 Condition *unit_find_failed_condition(Unit *u) {
6050 Condition *failed_trigger = NULL;
6051 bool has_succeeded_trigger = false;
6052
6053 if (u->condition_result)
6054 return NULL;
6055
6056 LIST_FOREACH(conditions, c, u->conditions)
6057 if (c->trigger) {
6058 if (c->result == CONDITION_SUCCEEDED)
6059 has_succeeded_trigger = true;
6060 else if (!failed_trigger)
6061 failed_trigger = c;
6062 } else if (c->result != CONDITION_SUCCEEDED)
6063 return c;
6064
6065 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6066 }
6067
6068 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6069 [COLLECT_INACTIVE] = "inactive",
6070 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6071 };
6072
6073 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
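
/* DEFINE_STRING_TABLE_LOOKUP() generates collect_mode_to_string() and collect_mode_from_string()
 * from the table above. Illustrative sketch, not part of the build:
 *
 *     CollectMode m = collect_mode_from_string("inactive-or-failed");
 *     assert(m == COLLECT_INACTIVE_OR_FAILED);
 *     assert(streq(collect_mode_to_string(m), "inactive-or-failed"));
 */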
6074
6075 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6076 Unit *i;
6077
6078 assert(u);
6079
6080 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6081 * NULL, checks if the unit has *any* dependency of that atom. Returns 'other' if found (or, if 'other'
6082 * is NULL, the first entry found), or NULL if not found. */
6083
6084 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6085 if (!other || other == i)
6086 return i;
6087
6088 return NULL;
6089 }
6090
6091 int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
6092 _cleanup_free_ Unit **array = NULL;
6093 size_t n = 0;
6094 Unit *other;
6095
6096 assert(u);
6097 assert(ret_array);
6098
6099 /* Gets a list of units matching a specific atom as an array. This is useful when iterating through
6100 * dependencies while modifying them: the array is an "atomic snapshot" of sorts that can be read
6101 * while the dependency table is continuously updated. */
6102
6103 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6104 if (!GREEDY_REALLOC(array, n + 1))
6105 return -ENOMEM;
6106
6107 array[n++] = other;
6108 }
6109
6110 *ret_array = TAKE_PTR(array);
6111
6112 assert(n <= INT_MAX);
6113 return (int) n;
6114 }
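
/* Illustrative sketch, not part of the build: the snapshot semantics documented above make it safe
 * to modify the dependency table while walking the result, which UNIT_FOREACH_DEPENDENCY() alone
 * would not allow. UNIT_ATOM_AFTER is used here purely as an example atom:
 *
 *     _cleanup_free_ Unit **array = NULL;
 *     int n = unit_get_dependency_array(u, UNIT_ATOM_AFTER, &array);
 *     if (n < 0)
 *             return n;
 *     for (int i = 0; i < n; i++)
 *             ;  // dependencies of 'u' may be added/removed in here without invalidating 'array'
 */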
6115
6116 const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
6117 [UNIT_PATH] = &activation_details_path_vtable,
6118 [UNIT_TIMER] = &activation_details_timer_vtable,
6119 };
6120
6121 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6122 _cleanup_free_ ActivationDetails *details = NULL;
6123
6124 assert(trigger_unit);
6125 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6126 assert(trigger_unit->id);
6127
6128 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6129 if (!details)
6130 return NULL;
6131
6132 *details = (ActivationDetails) {
6133 .n_ref = 1,
6134 .trigger_unit_type = trigger_unit->type,
6135 };
6136
6137 details->trigger_unit_name = strdup(trigger_unit->id);
6138 if (!details->trigger_unit_name)
6139 return NULL;
6140
6141 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6142 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6143
6144 return TAKE_PTR(details);
6145 }
6146
6147 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6148 if (!details)
6149 return NULL;
6150
6151 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6152 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6153
6154 free(details->trigger_unit_name);
6155
6156 return mfree(details);
6157 }
6158
6159 void activation_details_serialize(ActivationDetails *details, FILE *f) {
6160 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6161 return;
6162
6163 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6164 if (details->trigger_unit_name)
6165 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6166 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6167 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6168 }
6169
6170 int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6171 int r;
6172
6173 assert(key);
6174 assert(value);
6175 assert(details);
6176
6177 if (!*details) {
6178 UnitType t;
6179
6180 if (!streq(key, "activation-details-unit-type"))
6181 return -EINVAL;
6182
6183 t = unit_type_from_string(value);
6184 if (t < 0)
6185 return t;
6186
6187 /* The activation details vtable has defined ops only for path and timer units */
6188 if (!activation_details_vtable[t])
6189 return -EINVAL;
6190
6191 *details = malloc0(activation_details_vtable[t]->object_size);
6192 if (!*details)
6193 return -ENOMEM;
6194
6195 **details = (ActivationDetails) {
6196 .n_ref = 1,
6197 .trigger_unit_type = t,
6198 };
6199
6200 return 0;
6201 }
6202
6203 if (streq(key, "activation-details-unit-name")) {
6204 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6205 if (r < 0)
6206 return r;
6207
6208 return 0;
6209 }
6210
6211 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6212 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6213
6214 return -EINVAL;
6215 }
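
/* Illustrative sketch, not part of the build: serialization and deserialization are symmetric,
 * keyed on the "activation-details-*" names, with the unit-type entry required first so that the
 * right object size can be allocated. A hypothetical round-trip over a manager reload:
 *
 *     ActivationDetails *d = NULL;
 *     (void) activation_details_deserialize("activation-details-unit-type", "timer", &d);
 *     (void) activation_details_deserialize("activation-details-unit-name", "foo.timer", &d);
 *     d = activation_details_unref(d);  // ref/unref generated by DEFINE_TRIVIAL_REF_UNREF_FUNC() below
 */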
6216
6217 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6218 int r = 0;
6219
6220 assert(strv);
6221
6222 if (!details)
6223 return 0;
6224
6225 if (!isempty(details->trigger_unit_name)) {
6226 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6227 if (!s)
6228 return -ENOMEM;
6229
6230 r = strv_consume(strv, TAKE_PTR(s));
6231 if (r < 0)
6232 return r;
6233 }
6234
6235 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6236 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6237 if (r < 0)
6238 return r;
6239 }
6240
6241 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6242 }
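
/* Illustrative sketch, not part of the build: for a timer-triggered service this fills the
 * environment block handed to the spawned process. The exact set of extra variables is
 * type-specific (timers contribute their elapse timestamps via their append_env op), while
 * TRIGGER_UNIT= is always included when a trigger unit name is known:
 *
 *     _cleanup_strv_free_ char **env = NULL;
 *     int n = activation_details_append_env(details, &env);
 *     // on success, n is the number of variables added, e.g. "TRIGGER_UNIT=foo.timer" plus
 *     // whatever the type-specific op contributed
 */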
6243
6244 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6245 int r = 0;
6246
6247 assert(strv);
6248
6249 if (!details)
6250 return 0;
6251
6252 if (!isempty(details->trigger_unit_name)) {
6253 r = strv_extend(strv, "trigger_unit");
6254 if (r < 0)
6255 return r;
6256
6257 r = strv_extend(strv, details->trigger_unit_name);
6258 if (r < 0)
6259 return r;
6260 }
6261
6262 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6263 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6264 if (r < 0)
6265 return r;
6266 }
6267
6268 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6269 }
6270
6271 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);