]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
core/unit: use FOREACH_ARRAY at one more place
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
18 #include "bus-util.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
21 #include "chase.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
24 #include "dbus.h"
25 #include "dropin.h"
26 #include "env-util.h"
27 #include "escape.h"
28 #include "exec-credential.h"
29 #include "execute.h"
30 #include "fd-util.h"
31 #include "fileio-label.h"
32 #include "fileio.h"
33 #include "format-util.h"
34 #include "id128-util.h"
35 #include "install.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
40 #include "log.h"
41 #include "logarithm.h"
42 #include "macro.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
46 #include "rm-rf.h"
47 #include "serialize.h"
48 #include "set.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
51 #include "special.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "strv.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
62 #include "unit.h"
63 #include "user-util.h"
64 #include "virt.h"
65 #if BPF_FRAMEWORK
66 #include "bpf-link.h"
67 #endif
68
69 /* Thresholds for logging at INFO level about resource consumption */
70 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
71 #define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
72 #define MENTIONWORTHY_IO_BYTES (1 * U64_MB)
73 #define MENTIONWORTHY_IP_BYTES UINT64_C(0)
74
75 /* Thresholds for logging at NOTICE level about resource consumption */
76 #define NOTICEWORTHY_CPU_NSEC (10 * NSEC_PER_MINUTE)
77 #define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB)
78 #define NOTICEWORTHY_IO_BYTES (10 * U64_MB)
79 #define NOTICEWORTHY_IP_BYTES (128 * U64_MB)
80
/* Dispatch table mapping each concrete unit type to its implementation vtable. Indexed by
 * UnitType; every valid type has an entry, so UNIT_VTABLE() lookups never yield NULL for a
 * properly typed unit. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
94
95 Unit* unit_new(Manager *m, size_t size) {
96 Unit *u;
97
98 assert(m);
99 assert(size >= sizeof(Unit));
100
101 u = malloc0(size);
102 if (!u)
103 return NULL;
104
105 u->manager = m;
106 u->type = _UNIT_TYPE_INVALID;
107 u->default_dependencies = true;
108 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
109 u->unit_file_preset = -1;
110 u->on_failure_job_mode = JOB_REPLACE;
111 u->on_success_job_mode = JOB_FAIL;
112 u->job_timeout = USEC_INFINITY;
113 u->job_running_timeout = USEC_INFINITY;
114 u->ref_uid = UID_INVALID;
115 u->ref_gid = GID_INVALID;
116
117 u->failure_action_exit_status = u->success_action_exit_status = -1;
118
119 u->last_section_private = -1;
120
121 u->start_ratelimit = (const RateLimit) {
122 m->defaults.start_limit_interval,
123 m->defaults.start_limit_burst,
124 };
125
126 u->auto_start_stop_ratelimit = (const RateLimit) {
127 .interval = 10 * USEC_PER_SEC,
128 .burst = 16
129 };
130
131 unit_reset_memory_accounting_last(u);
132 unit_reset_io_accounting_last(u);
133
134 return u;
135 }
136
137 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
138 _cleanup_(unit_freep) Unit *u = NULL;
139 int r;
140
141 u = unit_new(m, size);
142 if (!u)
143 return -ENOMEM;
144
145 r = unit_add_name(u, name);
146 if (r < 0)
147 return r;
148
149 *ret = TAKE_PTR(u);
150
151 return r;
152 }
153
154 bool unit_has_name(const Unit *u, const char *name) {
155 assert(u);
156 assert(name);
157
158 return streq_ptr(name, u->id) ||
159 set_contains(u->aliases, name);
160 }
161
/* One-time type-specific initialization of a freshly typed unit: copies the manager-wide defaults
 * into the unit's cgroup/exec/kill contexts (before any unit file settings are parsed, so the file
 * can override them), then invokes the per-type init() hook. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->defaults.cpu_accounting;
                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->blockio_accounting = u->manager->defaults.blockio_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                /* Slice units do not get the TasksMax= default applied. */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        /* Finally, give the type-specific implementation a chance to initialize its own fields. */
        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
221
222 static int unit_add_alias(Unit *u, char *donated_name) {
223 int r;
224
225 /* Make sure that u->names is allocated. We may leave u->names
226 * empty if we fail later, but this is not a problem. */
227 r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
228 if (r < 0)
229 return r;
230 assert(r > 0);
231
232 return 0;
233 }
234
/* Registers an additional name for the unit. For a fresh unit (no id yet) the name becomes the
 * primary id and fixes the unit's type/instance; otherwise it is recorded as an alias. Template
 * names are first resolved against the unit's instance. Returns 0 on success (including when the
 * name is already attached), a negative errno on failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        /* Resolve template names ("foo@.service") against our instance, if we have one. */
        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do then. */
        if (unit_has_name(u, name))
                return 0;

        /* The name must not be claimed by any other unit. */
        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        /* Roll back the global registration done above. */
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
329
/* Promotes one of the unit's existing alias names to be its primary id. Template names are first
 * resolved against the unit's instance. Returns -ENOENT if the name is not among the aliases,
 * -EINVAL for an unresolvable template, 0 on success or when the name already is the id. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: pull 's' out of the alias set, and file the old id in its place. */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
369
370 int unit_set_description(Unit *u, const char *description) {
371 int r;
372
373 assert(u);
374
375 r = free_and_strdup(&u->description, empty_to_null(description));
376 if (r < 0)
377 return r;
378 if (r > 0)
379 unit_add_to_dbus_queue(u);
380
381 return 0;
382 }
383
384 static bool unit_success_failure_handler_has_jobs(Unit *unit) {
385 Unit *other;
386
387 UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
388 if (other->job || other->nop_job)
389 return true;
390
391 UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
392 if (other->job || other->nop_job)
393 return true;
394
395 return false;
396 }
397
398 void unit_release_resources(Unit *u) {
399 UnitActiveState state;
400 ExecContext *ec;
401
402 assert(u);
403
404 if (u->job || u->nop_job)
405 return;
406
407 if (u->perpetual)
408 return;
409
410 state = unit_active_state(u);
411 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
412 return;
413
414 if (unit_will_restart(u))
415 return;
416
417 ec = unit_get_exec_context(u);
418 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
419 exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
420
421 if (UNIT_VTABLE(u)->release_resources)
422 UNIT_VTABLE(u)->release_resources(u);
423 }
424
/* Checks whether the unit is ready to be unloaded for garbage collection: no pending jobs or
 * queued events, not referenced over the bus, in a collectable state per CollectMode=, no handler
 * jobs pending, and an empty (or unrealized) cgroup. Returns true when it may be collected. */
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or on Success= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
         * around. Units with active processes should never be collected. */
        r = unit_cgroup_is_empty(u);
        if (r <= 0 && r != -ENXIO)
                return false; /* ENXIO means: currently not realized */

        /* Finally, the type-specific implementation gets a veto. */
        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
494
495 void unit_add_to_load_queue(Unit *u) {
496 assert(u);
497 assert(u->type != _UNIT_TYPE_INVALID);
498
499 if (u->load_state != UNIT_STUB || u->in_load_queue)
500 return;
501
502 LIST_PREPEND(load_queue, u->manager->load_queue, u);
503 u->in_load_queue = true;
504 }
505
506 void unit_add_to_cleanup_queue(Unit *u) {
507 assert(u);
508
509 if (u->in_cleanup_queue)
510 return;
511
512 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
513 u->in_cleanup_queue = true;
514 }
515
516 void unit_add_to_gc_queue(Unit *u) {
517 assert(u);
518
519 if (u->in_gc_queue || u->in_cleanup_queue)
520 return;
521
522 if (!unit_may_gc(u))
523 return;
524
525 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
526 u->in_gc_queue = true;
527 }
528
529 void unit_add_to_dbus_queue(Unit *u) {
530 assert(u);
531 assert(u->type != _UNIT_TYPE_INVALID);
532
533 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
534 return;
535
536 /* Shortcut things if nobody cares */
537 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
538 sd_bus_track_count(u->bus_track) <= 0 &&
539 set_isempty(u->manager->private_buses)) {
540 u->sent_dbus_new_signal = true;
541 return;
542 }
543
544 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
545 u->in_dbus_queue = true;
546 }
547
548 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
549 assert(u);
550
551 if (u->in_stop_when_unneeded_queue)
552 return;
553
554 if (!u->stop_when_unneeded)
555 return;
556
557 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
558 return;
559
560 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
561 u->in_stop_when_unneeded_queue = true;
562 }
563
564 void unit_submit_to_start_when_upheld_queue(Unit *u) {
565 assert(u);
566
567 if (u->in_start_when_upheld_queue)
568 return;
569
570 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
571 return;
572
573 if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
574 return;
575
576 LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
577 u->in_start_when_upheld_queue = true;
578 }
579
580 void unit_submit_to_stop_when_bound_queue(Unit *u) {
581 assert(u);
582
583 if (u->in_stop_when_bound_queue)
584 return;
585
586 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
587 return;
588
589 if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
590 return;
591
592 LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
593 u->in_stop_when_bound_queue = true;
594 }
595
596 static bool unit_can_release_resources(Unit *u) {
597 ExecContext *ec;
598
599 assert(u);
600
601 if (UNIT_VTABLE(u)->release_resources)
602 return true;
603
604 ec = unit_get_exec_context(u);
605 if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
606 return true;
607
608 return false;
609 }
610
611 void unit_submit_to_release_resources_queue(Unit *u) {
612 assert(u);
613
614 if (u->in_release_resources_queue)
615 return;
616
617 if (u->job || u->nop_job)
618 return;
619
620 if (u->perpetual)
621 return;
622
623 if (!unit_can_release_resources(u))
624 return;
625
626 LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
627 u->in_release_resources_queue = true;
628 }
629
/* Removes all dependencies configured on u together with their reverse edges, and queues every
 * formerly referenced unit for GC, since it may have been kept loaded only because of u. */
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                /* For each unit we point at, drop every back-reference pointing at us. */
                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
651
/* Deletes the on-disk configuration of a transient unit: the generated fragment file plus any
 * drop-ins located below the transient lookup path. No-op for non-transient units; all removals
 * are best-effort. */
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* Only succeeds once the directory is empty — intentional. */
        }
}
678
/* Drops all RequiresMountsFor=-style registrations of this unit: for every tracked path, the unit
 * is removed from the manager's units_needing_mounts_for index under the path and each of its
 * prefixes, and index entries left empty are freed. */
static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        /* Visit the path and all its prefixes ("/", "/foo", "/foo/bar", …). */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Were we the last unit interested in this prefix? Then drop the
                                 * index entry together with its owned key string. */
                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}
713
714 static void unit_done(Unit *u) {
715 ExecContext *ec;
716 CGroupContext *cc;
717
718 assert(u);
719
720 if (u->type < 0)
721 return;
722
723 if (UNIT_VTABLE(u)->done)
724 UNIT_VTABLE(u)->done(u);
725
726 ec = unit_get_exec_context(u);
727 if (ec)
728 exec_context_done(ec);
729
730 cc = unit_get_cgroup_context(u);
731 if (cc)
732 cgroup_context_done(cc);
733 }
734
/* Destroys a unit object: removes transient configuration, detaches the unit from every manager
 * index, list and work queue it may be on, releases jobs, bus, cgroup and dependency state, and
 * finally frees all owned memory. NULL-safe; always returns NULL. The teardown order matters —
 * e.g. transient files are removed before unit_done(), and dependencies are cleared before the
 * slice's cgroup hierarchy is re-realized. */
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        /* During a reload the unit is expected to come back, so keep its transient files. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        /* Release bus and deserialization state. */
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        /* Remove all of the unit's names from the manager's lookup tables. */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any pending jobs. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Drop all references other units still hold on us. */
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Detach from every manager work queue we might still be enqueued on. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        bpf_firewall_close(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally, free all owned strings and containers. */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
870
/* Trivial accessor: returns the unit's currently recorded freezer state. */
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}
876
877 UnitActiveState unit_active_state(Unit *u) {
878 assert(u);
879
880 if (u->load_state == UNIT_MERGED)
881 return unit_active_state(unit_follow_merge(u));
882
883 /* After a reload it might happen that a unit is not correctly
884 * loaded but still has a process around. That's why we won't
885 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
886
887 return UNIT_VTABLE(u)->active_state(u);
888 }
889
/* Returns the human-readable type-specific sub-state string (e.g. "running", "listening"). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
895
/* Folds all names of 'other' into 'u': other's id becomes an alias of u, other's aliases move
 * over, and the manager's name table is repointed at u for every moved name. On failure the name
 * sets are restored. */
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id); /* takes ownership of other->id on success */
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Undo: detach other->id from u again; ownership reverts to 'other'. */
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        /* Make every moved name resolve to 'u' in the manager's unit table. */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
921
/* Pre-allocates space in u's dependency hashmaps so that the subsequent merge of 'other' into 'u'
 * cannot fail with OOM mid-way. Returns 0 on success, negative errno on allocation failure. */
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        /* Clamp: u can gain at most as many new dependency-type entries as 'other' has, and never
         * more than the total number of dependency types. */
        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
968
969 static bool unit_should_warn_about_dependency(UnitDependency dependency) {
970 /* Only warn about some unit types */
971 return IN_SET(dependency,
972 UNIT_CONFLICTS,
973 UNIT_CONFLICTED_BY,
974 UNIT_BEFORE,
975 UNIT_AFTER,
976 UNIT_ON_SUCCESS,
977 UNIT_ON_FAILURE,
978 UNIT_TRIGGERS,
979 UNIT_TRIGGERED_BY);
980 }
981
/* Inserts or widens the (origin_mask, destination_mask) entry for 'other' in a per-dependency-type
 * hashmap. Returns 0 when the existing entry already covered both masks (NOP), 1 when an entry was
 * added or updated, negative errno on allocation failure. */
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info)); /* both masks are packed into the hashmap value pointer */

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
1022
/* Transfers all dependencies of 'other' to 'u' as part of a unit merge: edges between u and other
 * are dropped (with a warning for notable types), third-party units' back-references to 'other'
 * are repointed at 'u', and other's per-type dependency hashmaps are folded into u's. Must not
 * fail — hashmap space was reserved beforehand by unit_reserve_dependencies(). */
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                /* Cannot fail: space was reserved up front. */
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);
}
1109
/* Merges unit 'other' into 'u': names, dependencies and references of 'other' are transferred to
 * 'u', and 'other' becomes a UNIT_MERGED stub pointing at 'u'. Preconditions: same manager, same
 * type and instance, 'other' unloaded (stub/not-found), inactive and without jobs. Returns 0 on
 * success (also when other == u), -EINVAL/-EEXIST when the units cannot be merged. */
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1179
1180 int unit_merge_by_name(Unit *u, const char *name) {
1181 _cleanup_free_ char *s = NULL;
1182 Unit *other;
1183 int r;
1184
1185 /* Either add name to u, or if a unit with name already exists, merge it with u.
1186 * If name is a template, do the same for name@instance, where instance is u's instance. */
1187
1188 assert(u);
1189 assert(name);
1190
1191 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1192 if (!u->instance)
1193 return -EINVAL;
1194
1195 r = unit_name_replace_instance(name, u->instance, &s);
1196 if (r < 0)
1197 return r;
1198
1199 name = s;
1200 }
1201
1202 other = manager_get_unit(u->manager, name);
1203 if (other)
1204 return unit_merge(u, other);
1205
1206 return unit_add_name(u, name);
1207 }
1208
1209 Unit* unit_follow_merge(Unit *u) {
1210 assert(u);
1211
1212 while (u->load_state == UNIT_MERGED)
1213 assert_se(u = u->merged_into);
1214
1215 return u;
1216 }
1217
/* Adds the implicit dependencies that follow from the unit's exec context settings: mount dependencies
 * for all configured paths, plus orderings against core services (remount-fs, tmpfiles-setup, udevd,
 * journald) where the configuration requires them. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                /* A missing-ok working directory only "wants" the backing mount, otherwise it requires it. */
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        /* Require the mounts backing each configured exec directory (under the manager's per-type prefix). */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], i->path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies (on special system services) only apply to the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {
                /* PrivateTmp= needs both tmp hierarchies mounted and populated first. */
                r = unit_add_mounts_for(u, "/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* No journal/kmsg output and no log namespace configured? Then no logging dependencies needed. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                /* With LogNamespace= the per-namespace journald socket units must be up, and the unit
                 * must not outlive them (hence After= + Requires= on both sockets). */
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        r = unit_add_default_credential_dependencies(u, c);
        if (r < 0)
                return r;

        return 0;
}
1343
1344 const char* unit_description(Unit *u) {
1345 assert(u);
1346
1347 if (u->description)
1348 return u->description;
1349
1350 return strna(u->id);
1351 }
1352
1353 const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
1354 assert(u);
1355 assert(u->id);
1356
1357 /* Return u->id, u->description, or "{u->id} - {u->description}".
1358 * Versions with u->description are only used if it is set.
1359 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1360 * pointer.
1361 *
1362 * Note that *ret_combined_buffer may be set to NULL. */
1363
1364 if (!u->description ||
1365 u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
1366 (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
1367 streq(u->description, u->id)) {
1368
1369 if (ret_combined_buffer)
1370 *ret_combined_buffer = NULL;
1371 return u->id;
1372 }
1373
1374 if (ret_combined_buffer) {
1375 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
1376 *ret_combined_buffer = strjoin(u->id, " - ", u->description);
1377 if (*ret_combined_buffer)
1378 return *ret_combined_buffer;
1379 log_oom(); /* Fall back to ->description */
1380 } else
1381 *ret_combined_buffer = NULL;
1382 }
1383
1384 return u->description;
1385 }
1386
1387 /* Common implementation for multiple backends */
1388 int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
1389 int r;
1390
1391 assert(u);
1392
1393 /* Load a .{service,socket,...} file */
1394 r = unit_load_fragment(u);
1395 if (r < 0)
1396 return r;
1397
1398 if (u->load_state == UNIT_STUB) {
1399 if (fragment_required)
1400 return -ENOENT;
1401
1402 u->load_state = UNIT_LOADED;
1403 }
1404
1405 /* Load drop-in directory data. If u is an alias, we might be reloading the
1406 * target unit needlessly. But we cannot be sure which drops-ins have already
1407 * been loaded and which not, at least without doing complicated book-keeping,
1408 * so let's always reread all drop-ins. */
1409 r = unit_load_dropin(unit_follow_merge(u));
1410 if (r < 0)
1411 return r;
1412
1413 if (u->source_path) {
1414 struct stat st;
1415
1416 if (stat(u->source_path, &st) >= 0)
1417 u->source_mtime = timespec_load(&st.st_mtim);
1418 else
1419 u->source_mtime = 0;
1420 }
1421
1422 return 0;
1423 }
1424
1425 void unit_add_to_target_deps_queue(Unit *u) {
1426 Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);
1427
1428 if (u->in_target_deps_queue)
1429 return;
1430
1431 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1432 u->in_target_deps_queue = true;
1433 }
1434
1435 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1436 assert(u);
1437 assert(target);
1438
1439 if (target->type != UNIT_TARGET)
1440 return 0;
1441
1442 /* Only add the dependency if both units are loaded, so that
1443 * that loop check below is reliable */
1444 if (u->load_state != UNIT_LOADED ||
1445 target->load_state != UNIT_LOADED)
1446 return 0;
1447
1448 /* If either side wants no automatic dependencies, then let's
1449 * skip this */
1450 if (!u->default_dependencies ||
1451 !target->default_dependencies)
1452 return 0;
1453
1454 /* Don't create loops */
1455 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1456 return 0;
1457
1458 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1459 }
1460
1461 static int unit_add_slice_dependencies(Unit *u) {
1462 Unit *slice;
1463 assert(u);
1464
1465 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1466 return 0;
1467
1468 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1469 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1470 relationship). */
1471 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1472
1473 slice = UNIT_GET_SLICE(u);
1474 if (slice)
1475 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1476
1477 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1478 return 0;
1479
1480 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1481 }
1482
/* For each path recorded in u->mounts_for[], adds After= orderings (and, for fragment-backed mount
 * units, a dependency of the corresponding mount dependency type) on the mount units covering the path
 * and all of its path prefixes. Returns > 0 if any dependency was actually added, 0 if nothing
 * changed, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        /* Walk the path and every one of its prefixes, longest first. */
                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(
                                                        u->manager,
                                                        p,
                                                        /* path= */NULL,
                                                        /* e= */NULL,
                                                        &m);
                                        continue;
                                }
                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                /* Always order ourselves after the mount unit. */
                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        /* Only pull the mount unit in if it is backed by an actual
                                         * unit file on disk. */
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}
1556
1557 static int unit_add_oomd_dependencies(Unit *u) {
1558 CGroupContext *c;
1559 CGroupMask mask;
1560 int r;
1561
1562 assert(u);
1563
1564 if (!u->default_dependencies)
1565 return 0;
1566
1567 c = unit_get_cgroup_context(u);
1568 if (!c)
1569 return 0;
1570
1571 bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
1572 if (!wants_oomd)
1573 return 0;
1574
1575 if (!cg_all_unified())
1576 return 0;
1577
1578 r = cg_mask_supported(&mask);
1579 if (r < 0)
1580 return log_debug_errno(r, "Failed to determine supported controllers: %m");
1581
1582 if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
1583 return 0;
1584
1585 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1586 }
1587
1588 static int unit_add_startup_units(Unit *u) {
1589 if (!unit_has_startup_cgroup_constraints(u))
1590 return 0;
1591
1592 return set_ensure_put(&u->manager->startup_units, NULL, u);
1593 }
1594
1595 static int unit_validate_on_failure_job_mode(
1596 Unit *u,
1597 const char *job_mode_setting,
1598 JobMode job_mode,
1599 const char *dependency_name,
1600 UnitDependencyAtom atom) {
1601
1602 Unit *other, *found = NULL;
1603
1604 if (job_mode != JOB_ISOLATE)
1605 return 0;
1606
1607 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1608 if (!found)
1609 found = other;
1610 else if (found != other)
1611 return log_unit_error_errno(
1612 u, SYNTHETIC_ERRNO(ENOEXEC),
1613 "More than one %s dependencies specified but %sisolate set. Refusing.",
1614 dependency_name, job_mode_setting);
1615 }
1616
1617 return 0;
1618 }
1619
/* Loads the unit's configuration: removes it from the load queue, finalizes a transient unit file if
 * one is pending, dispatches to the type-specific ->load() implementation, and — if that succeeded —
 * sets up the implicit dependencies and validation checks common to all loaded units. On failure the
 * unit ends up in UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR state. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/failed earlier)? Nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* "isolate" job modes only make sense with a single OnSuccess=/OnFailure= unit. */
                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1715
/* printf-style logging callback (used e.g. by condition_test_list() via unit_test_condition() below):
 * logs in the context of the unit passed as userdata — attaching the unit and invocation-id log
 * fields — or plainly if userdata is NULL. Returns the negative error value, like the log_*()
 * family. */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Suppress the message early if the unit's configured log level filters it out. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1739
1740 static bool unit_test_condition(Unit *u) {
1741 _cleanup_strv_free_ char **env = NULL;
1742 int r;
1743
1744 assert(u);
1745
1746 dual_timestamp_now(&u->condition_timestamp);
1747
1748 r = manager_get_effective_environment(u->manager, &env);
1749 if (r < 0) {
1750 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1751 u->condition_result = true;
1752 } else
1753 u->condition_result = condition_test_list(
1754 u->conditions,
1755 env,
1756 condition_type_to_string,
1757 log_unit_internal,
1758 u);
1759
1760 unit_add_to_dbus_queue(u);
1761 return u->condition_result;
1762 }
1763
1764 static bool unit_test_assert(Unit *u) {
1765 _cleanup_strv_free_ char **env = NULL;
1766 int r;
1767
1768 assert(u);
1769
1770 dual_timestamp_now(&u->assert_timestamp);
1771
1772 r = manager_get_effective_environment(u->manager, &env);
1773 if (r < 0) {
1774 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1775 u->assert_result = CONDITION_ERROR;
1776 } else
1777 u->assert_result = condition_test_list(
1778 u->asserts,
1779 env,
1780 assert_type_to_string,
1781 log_unit_internal,
1782 u);
1783
1784 unit_add_to_dbus_queue(u);
1785 return u->assert_result;
1786 }
1787
1788 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
1789 if (log_get_show_color()) {
1790 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
1791 ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
1792 else
1793 ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
1794 }
1795
1796 DISABLE_WARNING_FORMAT_NONLITERAL;
1797 manager_status_printf(u->manager, status_type, status, format, ident);
1798 REENABLE_WARNING;
1799 }
1800
1801 int unit_test_start_limit(Unit *u) {
1802 const char *reason;
1803
1804 assert(u);
1805
1806 if (ratelimit_below(&u->start_ratelimit)) {
1807 u->start_limit_hit = false;
1808 return 0;
1809 }
1810
1811 log_unit_warning(u, "Start request repeated too quickly.");
1812 u->start_limit_hit = true;
1813
1814 reason = strjoina("unit ", u->id, " failed");
1815
1816 emergency_action(u->manager, u->start_limit_action,
1817 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1818 u->reboot_arg, -1, reason);
1819
1820 return -ECANCELED;
1821 }
1822
1823 static bool unit_verify_deps(Unit *u) {
1824 Unit *other;
1825
1826 assert(u);
1827
1828 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1829 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1830 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1831 * that are not used in conjunction with After= as for them any such check would make things entirely
1832 * racy. */
1833
1834 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1835
1836 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1837 continue;
1838
1839 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1840 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1841 return false;
1842 }
1843 }
1844
1845 return true;
1846 }
1847
/* Errors that aren't really errors:
 * -EALREADY: Unit is already started.
 * -ECOMM: Condition failed
 * -EAGAIN: An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 * -EBADR: This unit type does not support starting.
 * -ECANCELED: Start limit hit, too many requests for now
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
 */
/* Requests that the unit be started: runs through the full gauntlet of state, condition, assert and
 * dependency checks (in an order that is deliberate — see comments below), possibly redirecting to a
 * following unit, and finally invokes the type-specific ->start() implementation. */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
1954
1955 bool unit_can_start(Unit *u) {
1956 assert(u);
1957
1958 if (u->load_state != UNIT_LOADED)
1959 return false;
1960
1961 if (!unit_type_supported(u->type))
1962 return false;
1963
1964 /* Scope units may be started only once */
1965 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1966 return false;
1967
1968 return !!UNIT_VTABLE(u)->start;
1969 }
1970
1971 bool unit_can_isolate(Unit *u) {
1972 assert(u);
1973
1974 return unit_can_start(u) &&
1975 u->allow_isolate;
1976 }
1977
1978 /* Errors:
1979 * -EBADR: This unit type does not support stopping.
1980 * -EALREADY: Unit is already stopped.
1981 * -EAGAIN: An operation is already in progress. Retry later.
1982 * -EDEADLK: Unit is frozen
1983 */
1984 int unit_stop(Unit *u) {
1985 UnitActiveState state;
1986 Unit *following;
1987
1988 assert(u);
1989
1990 state = unit_active_state(u);
1991 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1992 return -EALREADY;
1993
1994 following = unit_following(u);
1995 if (following) {
1996 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1997 return unit_stop(following);
1998 }
1999
2000 /* Check to make sure the unit isn't frozen */
2001 if (u->freezer_state != FREEZER_RUNNING)
2002 return -EDEADLK;
2003
2004 if (!UNIT_VTABLE(u)->stop)
2005 return -EBADR;
2006
2007 unit_add_to_dbus_queue(u);
2008
2009 return UNIT_VTABLE(u)->stop(u);
2010 }
2011
2012 bool unit_can_stop(Unit *u) {
2013 assert(u);
2014
2015 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2016 * Extrinsic units follow external state and they may stop following external state changes
2017 * (hence we return true here), but an attempt to do this through the manager will fail. */
2018
2019 if (!unit_type_supported(u->type))
2020 return false;
2021
2022 if (u->perpetual)
2023 return false;
2024
2025 return !!UNIT_VTABLE(u)->stop;
2026 }
2027
2028 /* Errors:
2029 * -EBADR: This unit type does not support reloading.
2030 * -ENOEXEC: Unit is not started.
2031 * -EAGAIN: An operation is already in progress. Retry later.
2032 * -EDEADLK: Unit is frozen.
2033 */
2034 int unit_reload(Unit *u) {
2035 UnitActiveState state;
2036 Unit *following;
2037
2038 assert(u);
2039
2040 if (u->load_state != UNIT_LOADED)
2041 return -EINVAL;
2042
2043 if (!unit_can_reload(u))
2044 return -EBADR;
2045
2046 state = unit_active_state(u);
2047 if (state == UNIT_RELOADING)
2048 return -EAGAIN;
2049
2050 if (state != UNIT_ACTIVE)
2051 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2052
2053 following = unit_following(u);
2054 if (following) {
2055 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2056 return unit_reload(following);
2057 }
2058
2059 /* Check to make sure the unit isn't frozen */
2060 if (u->freezer_state != FREEZER_RUNNING)
2061 return -EDEADLK;
2062
2063 unit_add_to_dbus_queue(u);
2064
2065 if (!UNIT_VTABLE(u)->reload) {
2066 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2067 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2068 return 0;
2069 }
2070
2071 return UNIT_VTABLE(u)->reload(u);
2072 }
2073
2074 bool unit_can_reload(Unit *u) {
2075 assert(u);
2076
2077 if (UNIT_VTABLE(u)->can_reload)
2078 return UNIT_VTABLE(u)->can_reload(u);
2079
2080 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2081 return true;
2082
2083 return UNIT_VTABLE(u)->reload;
2084 }
2085
2086 bool unit_is_unneeded(Unit *u) {
2087 Unit *other;
2088 assert(u);
2089
2090 if (!u->stop_when_unneeded)
2091 return false;
2092
2093 /* Don't clean up while the unit is transitioning or is even inactive. */
2094 if (unit_active_state(u) != UNIT_ACTIVE)
2095 return false;
2096 if (u->job)
2097 return false;
2098
2099 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2100 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2101 * restart, then don't clean this one up. */
2102
2103 if (other->job)
2104 return false;
2105
2106 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2107 return false;
2108
2109 if (unit_will_restart(other))
2110 return false;
2111 }
2112
2113 return true;
2114 }
2115
2116 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2117 Unit *other;
2118
2119 assert(u);
2120
2121 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2122 * that is active declared an Uphold= dependencies on it */
2123
2124 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2125 if (ret_culprit)
2126 *ret_culprit = NULL;
2127 return false;
2128 }
2129
2130 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2131 if (other->job)
2132 continue;
2133
2134 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2135 if (ret_culprit)
2136 *ret_culprit = other;
2137 return true;
2138 }
2139 }
2140
2141 if (ret_culprit)
2142 *ret_culprit = NULL;
2143 return false;
2144 }
2145
2146 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2147 Unit *other;
2148
2149 assert(u);
2150
2151 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2152 * because the other unit is down. */
2153
2154 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2155 /* Don't clean up while the unit is transitioning or is even inactive. */
2156 if (ret_culprit)
2157 *ret_culprit = NULL;
2158 return false;
2159 }
2160
2161 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2162 if (other->job)
2163 continue;
2164
2165 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2166 if (ret_culprit)
2167 *ret_culprit = other;
2168
2169 return true;
2170 }
2171 }
2172
2173 if (ret_culprit)
2174 *ret_culprit = NULL;
2175 return false;
2176 }
2177
2178 static void check_unneeded_dependencies(Unit *u) {
2179 Unit *other;
2180 assert(u);
2181
2182 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2183
2184 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2185 unit_submit_to_stop_when_unneeded_queue(other);
2186 }
2187
2188 static void check_uphold_dependencies(Unit *u) {
2189 Unit *other;
2190 assert(u);
2191
2192 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2193
2194 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2195 unit_submit_to_start_when_upheld_queue(other);
2196 }
2197
2198 static void check_bound_by_dependencies(Unit *u) {
2199 Unit *other;
2200 assert(u);
2201
2202 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2203
2204 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2205 unit_submit_to_stop_when_bound_queue(other);
2206 }
2207
/* This unit became active without a start job having been enqueued for it (e.g. an externally
 * triggered state change). Retroactively enqueue the start/stop jobs that a regular start job's
 * transaction would have pulled in, so that the configured dependencies still take effect.
 * Job enqueuing errors are deliberately ignored here: this is best-effort. */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Units we are ordered After= are skipped in both start loops below — presumably those were
         * supposed to be up before us already; TODO(review): confirm the intent of the AFTER filter. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        /* Stop units we conflict with, unless they are already on their way down. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2228
2229 static void retroactively_stop_dependencies(Unit *u) {
2230 Unit *other;
2231
2232 assert(u);
2233 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2234
2235 /* Pull down units which are bound to us recursively if enabled */
2236 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2237 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2238 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2239 }
2240
/* Act on OnFailure= and OnSuccess= dependencies: enqueue a start job (with the given job mode) for
 * every unit reachable via the given dependency atom. dependency_name (e.g. "OnFailure=") is used
 * only for log messages. Enqueue failures are logged and otherwise ignored. */
void unit_start_on_failure(
                Unit *u,
                const char *dependency_name,
                UnitDependencyAtom atom,
                JobMode job_mode) {

        int n_jobs = -1;
        Unit *other;
        int r;

        assert(u);
        assert(dependency_name);
        assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                /* While negative, n_jobs doubles as a "not started yet" flag: the "Triggering" message
                 * is logged only once, and only if there is at least one dependency to act on. */
                if (n_jobs < 0) {
                        log_unit_info(u, "Triggering %s dependencies.", dependency_name);
                        n_jobs = 0;
                }

                r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(
                                        u, r, "Failed to enqueue %s job, ignoring: %s",
                                        dependency_name, bus_error_message(&error, r));
                /* Counted even on failure — n_jobs tracks attempts, not successes. */
                n_jobs++;
        }

        /* Only summarize if we logged the "Triggering" message above. */
        if (n_jobs >= 0)
                log_unit_debug(u, "Triggering %s dependencies done (%i %s).",
                               dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
}
2277
2278 void unit_trigger_notify(Unit *u) {
2279 Unit *other;
2280
2281 assert(u);
2282
2283 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2284 if (UNIT_VTABLE(other)->trigger_notify)
2285 UNIT_VTABLE(other)->trigger_notify(other, u);
2286 }
2287
/* Raise (i.e. numerically lower, in syslog terms) the given log level to LOG_NOTICE or LOG_INFO if
 * the respective condition holds. Never lowers the severity: a level already more severe than the
 * target is returned unchanged. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice)
                return log_level > LOG_NOTICE ? LOG_NOTICE : log_level;
        if (condition_info)
                return log_level > LOG_INFO ? LOG_INFO : log_level;
        return log_level;
}
2295
/* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if
 * resource accounting was enabled for the unit, both as a human readable MESSAGE= and as structured
 * journal fields. Returns 0 (also when nothing was logged), or a negative errno on OOM. */
static int unit_log_resources(Unit *u) {

        /* Per-metric mapping to the journal field name and, optionally, the suffix used when the metric
         * is mentioned in the human readable message. A NULL message_suffix means the metric appears only
         * as a structured field. */
        static const struct {
                const char *journal_field;
                const char *message_suffix;
        } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
                [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" },
                [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
        }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
                [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
                [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL },
                [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL },
        }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" },
                [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
                [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL },
                [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL },
        };

        struct iovec *iovec = NULL;
        size_t n_iovec = 0;
        _cleanup_free_ char *message = NULL, *t = NULL;
        nsec_t cpu_nsec = NSEC_INFINITY;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */

        assert(u);

        CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);

        /* Worst-case entry count: one CPU field, one per entry of each metric table, plus the final
         * MESSAGE=, MESSAGE_ID=, unit and invocation fields (the "+ 4"). Keep this in sync with the
         * appends below. */
        iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
                                  _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
        if (!iovec)
                return log_oom();

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &cpu_nsec);
        if (cpu_nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* Format the CPU time for inclusion in the human language message string */
                if (strextendf_with_separator(&message, ", ",
                                              "Consumed %s CPU time",
                                              FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        cpu_nsec > MENTIONWORTHY_CPU_NSEC,
                                        cpu_nsec > NOTICEWORTHY_CPU_NSEC);
        }

        /* Memory metrics: only the "cached" subset of metrics (up to ..._CACHED_LAST) is reported here. */
        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
                uint64_t value = UINT64_MAX;

                assert(memory_fields[metric].journal_field);
                assert(memory_fields[metric].message_suffix);

                /* UINT64_MAX signals "no data available"; skip the metric then. */
                (void) unit_get_memory_accounting(u, metric, &value);
                if (value == UINT64_MAX)
                        continue;

                if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                if (strextendf_with_separator(&message, ", ", "%s %s",
                                              FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        value > MENTIONWORTHY_MEMORY_BYTES,
                                        value > NOTICEWORTHY_MEMORY_BYTES);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k].journal_field);

                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (io_fields[k].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m].journal_field);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IP accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the packets counters) */
                if (ip_fields[m].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
                }
        }

        /* This check is here because it is the earliest point following all possible log_level assignments.
         * (If log_level is assigned anywhere after this point, move this check.) */
        if (!unit_log_level_test(u, log_level))
                return 0;

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                assert(!message);
                return 0;
        }

        /* "Completed" is the fallback when accounting fields exist but none produced message text. */
        t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
        if (!t)
                return log_oom();
        iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

        if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, u->manager->unit_log_field, u->id))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, u->manager->invocation_log_field, u->invocation_id_string))
                return log_oom();

        log_unit_struct_iovec(u, log_level, iovec, n_iovec);

        return 0;
}
2471
2472 static void unit_update_on_console(Unit *u) {
2473 bool b;
2474
2475 assert(u);
2476
2477 b = unit_needs_console(u);
2478 if (u->on_console == b)
2479 return;
2480
2481 u->on_console = b;
2482 if (b)
2483 manager_ref_console(u->manager);
2484 else
2485 manager_unref_console(u->manager);
2486 }
2487
/* Emit the audit "start" record for this unit, and remember (u->in_audit) that we did, so that
 * unit_emit_audit_stop() can pair it with a stop record later. No-op for unit types that define no
 * audit start message type. */
static void unit_emit_audit_start(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        /* Write audit record if we have just finished starting up */
        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
        u->in_audit = true;
}
2498
/* Emit the audit "stop" record for this unit, pairing it with a previously emitted start record if
 * there was one (u->in_audit). state is the unit's new state; success is reported iff it ended up
 * cleanly UNIT_INACTIVE (as opposed to e.g. failed).
 *
 * NOTE(review): the guard below checks audit_start_message_type, not audit_stop_message_type —
 * matching unit_emit_audit_start(); presumably unit types define both or neither. */
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}
2517
/* Reconciles the unit's new active state ns with its pending job j: completes the job if the state
 * satisfies it, fails/invalidates it if the state contradicts it. reload_success carries the outcome
 * of a just-finished reload operation. Returns true if the state change was "unexpected", i.e. not
 * something the running job would produce — the caller then propagates the change retroactively. */
static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                /* Active/reloading satisfies a start job; any other state while the job is already
                 * running (and not still activating) is unexpected. */
                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                /* A reload job only completes once the unit is back to UNIT_ACTIVE; its result then
                 * reflects reload_success. Activating/reloading are expected transient states. */
                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                /* Inactive/failed satisfies a stop job (failed still counts as "stopped"); anything
                 * other than deactivating while the job runs is unexpected and fails the job. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2590
/* Central state-change hook, called by the unit type implementations whenever a unit's low-level
 * state changes from os to ns. Updates timestamps and bookkeeping, reconciles pending jobs,
 * propagates the change to dependent units, emits audit/plymouth/supervisor notifications, triggers
 * failure/success actions, and (re-)queues the unit for the various deferred checks.
 * reload_success is only meaningful when a reload job just finished (see unit_process_job()). */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes (skipped while deserializing during reload, so that the
         * timestamps restored from the serialization survive). */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_now(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                /* A reload just started; the "needs reload" marker is thereby satisfied. */
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        /* No job — nothing requested this change, so treat it as unexpected. */
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                        manager_send_unit_supervisor(m, u, /* active= */ true);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        manager_send_unit_supervisor(m, u, /* active= */ false);
                        unit_log_resources(u);
                }

                /* OnSuccess= fires only on a clean stop from a genuinely "was running" state. */
                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= (e.g. reboot, exit) for the whole system/manager. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2745
/* Adds a specific PID to the set of PIDs this unit watches. The PID is stored both in the unit's own
 * set and in a manager-global PID → Unit lookup table; if another unit already claims the PID in the
 * simple table, it is instead appended to a NULL-terminated Unit* array in the secondary table.
 * Returns 0 on success (also if already watched), negative errno on failure. */
int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        /* Adds a specific PID to the set of PIDs this unit watches. */

        assert(u);
        assert(pidref_is_set(pid));

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) /* early exit if already being watched */
                return 0;

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops_free, pid, u);
        if (r != -EEXIST)
                /* Either success (r >= 0) or a hard failure (r < 0) — both are final here; only the
                 * "key already taken by another unit" case needs the fallback below. */
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                ;

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Make sure the hashmap is allocated */
        r = hashmap_ensure_allocated(&u->manager->watch_pids_more, &pidref_hash_ops_free);
        if (r < 0)
                return r;

        /* Add or replace the old array (keeping the existing PidRef key if there was one) */
        r = hashmap_replace(u->manager->watch_pids_more, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */
        return 0;
}
2815
2816 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2817 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
2818 int r;
2819
2820 assert(u);
2821 assert(pid_is_valid(pid));
2822
2823 r = pidref_set_pid(&pidref, pid);
2824 if (r < 0)
2825 return r;
2826
2827 return unit_watch_pidref(u, &pidref, exclusive);
2828 }
2829
/* Removes a PID from this unit's watch set, and drops the unit from the manager's global PID lookup
 * tables (either the simple watch_pids table, or the Unit*-array in watch_pids_more). No-op if the
 * PID was never watched by this unit. */
void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry (in-place compaction:
                 * m trails n and only keeps entries that aren't us) */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
                        free(array);
                } else {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */

                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}
2878
2879 void unit_unwatch_pid(Unit *u, pid_t pid) {
2880 return unit_unwatch_pidref(u, &PIDREF_MAKE_FROM_PID(pid));
2881 }
2882
2883 void unit_unwatch_all_pids(Unit *u) {
2884 assert(u);
2885
2886 while (!set_isempty(u->pids))
2887 unit_unwatch_pidref(u, set_first(u->pids));
2888
2889 u->pids = set_free(u->pids);
2890 }
2891
2892 void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
2893 assert(u);
2894
2895 if (!pidref_is_set(pidref))
2896 return;
2897
2898 unit_unwatch_pidref(u, pidref);
2899 pidref_done(pidref);
2900 }
2901
2902 static void unit_tidy_watch_pids(Unit *u) {
2903 PidRef *except1, *except2, *e;
2904
2905 assert(u);
2906
2907 /* Cleans dead PIDs from our list */
2908
2909 except1 = unit_main_pid(u);
2910 except2 = unit_control_pid(u);
2911
2912 SET_FOREACH(e, u->pids) {
2913 if (pidref_equal(except1, e) || pidref_equal(except2, e))
2914 continue;
2915
2916 if (pidref_is_unwaited(e) <= 0)
2917 unit_unwatch_pidref(u, e);
2918 }
2919 }
2920
/* Deferred event handler enqueued by unit_enqueue_rewatch_pids(): prunes dead PIDs from the watch
 * set, then re-scans for PIDs to watch. Always returns 0. */
static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
        Unit *u = ASSERT_PTR(userdata);

        assert(s);

        unit_tidy_watch_pids(u);
        unit_watch_all_pids(u);

        /* If the PID set is empty now, then let's finish this off. */
        unit_synthesize_cgroup_empty_event(u);

        return 0;
}
2934
/* Schedules a deferred re-scan of the PIDs this unit watches (see on_rewatch_pids_event()).
 * Returns -ENOENT if the unit has no cgroup, 0 if nothing needs to be done (cgroup v2 delivers
 * proper notifications) or on success, negative errno otherwise. */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        /* Without a cgroup there are no PIDs to scan. */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (!crt || !crt->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        /* Lazily allocate the deferred event source once; afterwards we only re-arm it. */
        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, EVENT_PRIORITY_REWATCH_PIDS);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        /* ONESHOT: fire once on next loop iteration, then disable itself again. */
        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
2976
2977 void unit_dequeue_rewatch_pids(Unit *u) {
2978 int r;
2979 assert(u);
2980
2981 if (!u->rewatch_pids_event_source)
2982 return;
2983
2984 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2985 if (r < 0)
2986 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2987
2988 u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
2989 }
2990
/* Returns true if it makes sense to enqueue a job of type j for this unit at all (independently of
 * its current state). */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
        case JOB_NOP:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueuing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                /* A restart is both a stop and a start — the unit must support both. */
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached();
        }
}
3025
/* Returns the per-dependency-type inner hashmap (Unit* → UnitDependencyInfo) for dependency type d,
 * creating and registering an empty one in u->dependencies if it does not exist yet. Returns NULL on
 * allocation failure. */
static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                /* Ownership moved into u->dependencies; keep returning the (live) pointer. */
                deps = TAKE_PTR(h);
        }

        return deps;
}
3048
/* Bit flags returned by unit_add_dependency_impl() indicating which side(s) of the dependency pair
 * actually gained/changed an entry — used by callers to decide whether a notification is needed. */
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0, /* the source unit's own dependency map changed */
        NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,   /* the target unit's inverse dependency map changed */
} NotifyDependencyFlags;
3053
/* Registers the dependency u —[d]→ other with the given mask bits, and simultaneously the inverse
 * dependency on `other` (per inverse_table). Returns a non-negative NotifyDependencyFlags bitmask of
 * which sides actually changed (0 if the dependency already existed with these mask bits), or a
 * negative errno on failure — in which case any partial update is rolled back. */
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        /* For every dependency type, the type to record on the target unit. */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_IN_SLICE] = UNIT_SLICE_OF,
                [UNIT_SLICE_OF] = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. (UnitDependencyInfo overlays its mask bits on a
         * pointer-sized .data member, so hashmap values can be read/compared directly — presumably;
         * see the UnitDependencyInfo declaration.) */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency, so that the two sides stay consistent. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
        }

        return flags;
}
3151
/* Registers dependency type 'd' from 'u' on 'other' (the inverse direction is handled by
 * unit_add_dependency_impl()), tagged with 'mask' as its origin. Optionally also adds a
 * UNIT_REFERENCES dependency. Returns > 0 if anything actually changed, 0 if the dependency
 * existed already or was deliberately ignored, negative errno on failure. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        /* Both units might have been merged into other units by now; operate on the merge targets. */
        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        /* Triggers=/TriggeredBy= only make sense when the triggering side's unit type supports it. */
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        /* Slice=/SliceOf= require the slice side to actually be a slice unit… */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        /* … and the contained side to have a cgroup context at all. */
        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        /* Only bother bus clients about the side(s) whose dependency set actually changed. */
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}
3239
3240 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3241 int r = 0, s = 0;
3242
3243 assert(u);
3244 assert(d >= 0 || e >= 0);
3245
3246 if (d >= 0) {
3247 r = unit_add_dependency(u, d, other, add_reference, mask);
3248 if (r < 0)
3249 return r;
3250 }
3251
3252 if (e >= 0) {
3253 s = unit_add_dependency(u, e, other, add_reference, mask);
3254 if (s < 0)
3255 return s;
3256 }
3257
3258 return r > 0 || s > 0;
3259 }
3260
/* If 'name' is a template unit name, instantiate it with u's instance (or, for non-instantiated
 * units, with u's name prefix) and return the result via *buf/*ret. A non-template name is passed
 * through untouched with *buf set to NULL. *ret points either into 'name' or into *buf; the
 * caller owns *buf. */
static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name);
        assert(buf);
        assert(ret);

        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                /* Not a template — hand the name back unmodified, nothing allocated. */
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                /* u itself is not instantiated; fall back to its name prefix as the instance. */
                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}
3292
/* Like unit_add_dependency(), but resolves the target by unit name, loading the unit if
 * necessary. Template names are instantiated against 'u' first. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        /* In dependency-less test runs don't even bother loading the target unit. */
        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}
3314
/* Like unit_add_two_dependencies(), but resolves the target by unit name, loading the unit if
 * necessary. Template names are instantiated against 'u' first. */
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        /* In dependency-less test runs don't even bother loading the target unit. */
        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}
3336
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes: redirect the unit file search path via the
         * environment. Returns 0 on success, negative errno on failure. */
        if (setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ 1) < 0)
                return -errno;

        return 0;
}
3341
3342 char *unit_dbus_path(Unit *u) {
3343 assert(u);
3344
3345 if (!u->id)
3346 return NULL;
3347
3348 return unit_dbus_path_from_name(u->id);
3349 }
3350
3351 char *unit_dbus_path_invocation_id(Unit *u) {
3352 assert(u);
3353
3354 if (sd_id128_is_null(u->invocation_id))
3355 return NULL;
3356
3357 return unit_dbus_path_from_name(u->invocation_id_string);
3358 }
3359
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old ID from the manager's lookup table before switching. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "unset": just clear our state, successfully. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* Register under the new ID; the hashmap key points into u->invocation_id itself. */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit unset) leave the unit without any invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3396
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Once the unit is no longer inactive its slice assignment is fixed. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it may only live directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u)) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (crt && crt->cgroup_realized)
                        return -EBUSY;
        }

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        /* > 0: the slice was actually (re)assigned. */
        return 1;
}
3443
/* Picks and assigns a slice for 'u' if none is configured yet: instantiated units get a
 * per-template slice, extrinsic units stay in the root slice, everything else lands in
 * system.slice (system manager) or app.slice (user manager). Returns 0 if a slice was already
 * set or the operation is moot, > 0 on assignment, negative errno on failure. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3495
3496 const char *unit_slice_name(Unit *u) {
3497 Unit *slice;
3498 assert(u);
3499
3500 slice = UNIT_GET_SLICE(u);
3501 if (!slice)
3502 return NULL;
3503
3504 return slice->id;
3505 }
3506
/* Loads the unit that shares u's name but carries the given 'type' suffix (e.g. the ".socket"
 * belonging to a ".service"). Refuses with -EINVAL if the derived name is one of u's own names. */
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        /* Since we checked the name above, a successful load can never hand back u itself. */
        assert(r < 0 || *_found != u);
        return r;
}
3525
/* D-Bus match callback for org.freedesktop.DBus.NameOwnerChanged on a watched name: forwards the
 * new owner (NULL once the name was released) to the unit type. */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* Signal signature is (name, old_owner, new_owner); only the new owner matters here. */
        r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));

        return 0;
}
3544
/* Reply callback for the GetNameOwner() call issued in unit_install_bus_match(): translates the
 * reply (or a NameHasNoOwner error) into a bus_name_owner_change notification for the unit. */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        /* One-shot call — release the slot right away. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "Name has no owner" is the expected negative answer; anything else deserves a log line. */
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3578
/* Starts watching bus name 'name' on behalf of 'u': installs a NameOwnerChanged signal match and
 * kicks off an asynchronous GetNameOwner() query for the initial owner. Returns -EBUSY if a watch
 * is already installed for this unit. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        usec_t timeout_usec = 0;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        true,
                        match,
                        signal_name_owner_changed,
                        NULL,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        /* Query the current owner once, so we learn the initial state, not just changes. */
        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);

        if (r < 0) {
                /* Roll back the match installed above, so failure leaves no watch behind. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3647
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the match/slot installed above so we don't keep watching a name we
                 * failed to register. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3674
/* Reverse of unit_watch_bus_name(): deregister from the manager's watch table (only if this unit
 * is the registered watcher) and drop both bus slots. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}
3683
/* Adds an ordering plus requirement dependency from 'u' on the .device unit backing the device
 * node 'what'. Silently a no-op for empty/non-device paths or when device units are unsupported
 * (e.g. in containers). */
int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
        _cleanup_free_ char *e = NULL;
        Unit *device;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        /* Tighten Requires= to BindsTo= if the device is configured to bind its dependents. */
        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        /* Always order after the device; in user managers downgrade the requirement to Wants=. */
        return unit_add_two_dependencies(u, UNIT_AFTER,
                                         MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                         device, true, mask);
}
3717
/* Orders 'u' after the blockdev@<escaped node>.target instance corresponding to the given block
 * device path. No-op for empty paths, paths outside /dev/, or when device units are unsupported. */
int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
        _cleanup_free_ char *escaped = NULL, *target = NULL;
        int r;

        assert(u);

        if (isempty(what))
                return 0;

        if (!path_startswith(what, "/dev/"))
                return 0;

        /* If we don't support devices, then also don't bother with blockdev@.target */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_path_escape(what, &escaped);
        if (r < 0)
                return r;

        r = unit_name_build("blockdev", escaped, ".target", &target);
        if (r < 0)
                return r;

        return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
}
3744
/* Re-establishes the unit's runtime state after deserialization: bus name tracking, type-specific
 * coldplug, and any queued jobs. Gathers errors but keeps going; returns the first error seen. */
int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-add the bus names we tracked before the reload, best-effort per name. */
        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}
3772
/* Lets the unit type, and then the cgroup logic, catch up with any external state changes. */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3781
3782 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3783 struct stat st;
3784
3785 if (!path)
3786 return false;
3787
3788 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3789 * are never out-of-date. */
3790 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3791 return false;
3792
3793 if (stat(path, &st) < 0)
3794 /* What, cannot access this anymore? */
3795 return true;
3796
3797 if (path_masked)
3798 /* For masked files check if they are still so */
3799 return !null_or_empty(&st);
3800 else
3801 /* For non-empty files check the mtime */
3802 return timespec_load(&st.st_mtim) > mtime;
3803
3804 return false;
3805 }
3806
/* Returns true if the on-disk configuration of 'u' (fragment, source path, or drop-ins) changed
 * since it was loaded, i.e. a daemon-reload is needed for the in-memory state to match. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **dropins = NULL;

        assert(u);
        assert(u->manager);

        /* The manager may already know the whole unit file database is stale. */
        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* A changed set of drop-in files counts as a change, too. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &dropins);
        if (!strv_equal(u->dropin_paths, dropins))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3837
/* Clears the unit's "failed" marker: type-specific failure state first, then the start rate
 * limiter and its hit flag. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3847
3848 Unit *unit_following(Unit *u) {
3849 assert(u);
3850
3851 if (UNIT_VTABLE(u)->following)
3852 return UNIT_VTABLE(u)->following(u);
3853
3854 return NULL;
3855 }
3856
/* Returns true if a stop job is queued for the unit. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3868
3869 bool unit_inactive_or_pending(Unit *u) {
3870 assert(u);
3871
3872 /* Returns true if the unit is inactive or going down */
3873
3874 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3875 return true;
3876
3877 if (unit_stop_pending(u))
3878 return true;
3879
3880 return false;
3881 }
3882
3883 bool unit_active_or_pending(Unit *u) {
3884 assert(u);
3885
3886 /* Returns true if the unit is active or going up */
3887
3888 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3889 return true;
3890
3891 if (u->job &&
3892 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3893 return true;
3894
3895 return false;
3896 }
3897
/* Default "will this unit restart?" policy: a unit counts as about to restart when a start job is
 * queued for it. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
3903
3904 bool unit_will_restart(Unit *u) {
3905 assert(u);
3906
3907 if (!UNIT_VTABLE(u)->will_restart)
3908 return false;
3909
3910 return UNIT_VTABLE(u)->will_restart(u);
3911 }
3912
/* Forwards a cgroup OOM event to the unit type, if it implements the hook. 'managed_oom'
 * presumably distinguishes systemd-oomd-managed kills from kernel OOM events — confirm with
 * callers. */
void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}
3919
/* Fills *pid_set with the unit's main and control PIDs (whichever are set), so cgroup-wide kill
 * operations can skip them. The set is cleared first and allocated on demand. */
static int unit_pid_set(Unit *u, Set **pid_set) {
        int r;

        assert(u);
        assert(pid_set);

        set_clear(*pid_set); /* This updates input. */

        /* Exclude the main/control pids from being killed via the cgroup */

        PidRef *pid;
        FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
                if (pidref_is_set(pid)) {
                        r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
                        if (r < 0)
                                return r;
                }

        return 0;
}
3940
/* Per-process callback used with cg_kill_recursive(): logs each process about to be signalled.
 * Returns 1 (see cg_kill_recursive() for the callback contract). */
static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = ASSERT_PTR(userdata);

        /* Best-effort: comm stays NULL (printed as n/a via strna()) if unreadable. */
        (void) pidref_get_comm(pid, &comm);

        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid->pid, strna(comm));

        return 1;
}
3952
/* Delivers 'signo' to 'pidref': as a plain kill() for SI_USER, or as sigqueue() with the attached
 * 'value' for SI_QUEUE. Any other code is a programming error. */
static int kill_or_sigqueue(PidRef* pidref, int signo, int code, int value) {
        assert(pidref_is_set(pidref));
        assert(SIGNAL_VALID(signo));

        switch (code) {

        case SI_USER:
                log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
                return pidref_kill(pidref, signo);

        case SI_QUEUE:
                log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
                return pidref_sigqueue(pidref, signo, value);

        default:
                assert_not_reached();
        }
}
3971
/* Signals a single process of the unit ('type' is a label like "main" or "control", used only for
 * messages). Returns 1 if a signal was delivered, 0 if there was nothing to kill (unset pidref or
 * process already gone), negative errno on delivery failure (also reported via ret_error if
 * non-NULL). */
static int unit_kill_one(
                Unit *u,
                PidRef *pidref,
                const char *type,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        int r;

        assert(u);
        assert(type);

        if (!pidref_is_set(pidref))
                return 0;

        _cleanup_free_ char *comm = NULL;
        (void) pidref_get_comm(pidref, &comm);

        r = kill_or_sigqueue(pidref, signo, code, value);
        if (r == -ESRCH)
                /* The process vanished already — nothing to do, not an error. */
                return 0;
        if (r < 0) {
                /* Report this failure both to the logs and to the client */
                if (ret_error)
                        sd_bus_error_set_errnof(
                                        ret_error, r,
                                        "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), type, pidref->pid, strna(comm));

                return log_unit_warning_errno(
                                u, r,
                                "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
                                signal_to_string(signo), type, pidref->pid, strna(comm));
        }

        log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), type, pidref->pid, strna(comm));
        return 1; /* killed */
}
4013
/* Implements client-requested killing (e.g. `systemctl kill`): signals the main and/or control
 * process and, for the ALL variants with SI_USER, everything else in the unit's cgroup. Returns
 * the first gathered error, or >= 0 on success; the *_FAIL variants additionally turn "nothing
 * was killed" into BUS_ERROR_NO_SUCH_PROCESS. */
int unit_kill(
                Unit *u,
                KillWho who,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;
        int ret = 0, r;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(who >= 0);
        assert(who < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_setf(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        /* Validate up front that the requested targets exist for this unit type and are set. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                /* Only the first failure gets to own ret_error; later ones are logged only. */
                r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && code == SI_USER) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);

                if (crt && crt->cgroup_path) {
                        _cleanup_set_free_ Set *pid_set = NULL;

                        /* Exclude the main/control pids from being killed via the cgroup */
                        r = unit_pid_set(u, &pid_set);
                        if (r < 0)
                                return log_oom();

                        r = cg_kill_recursive(crt->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                        if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
                                if (ret >= 0)
                                        sd_bus_error_set_errnof(
                                                        ret_error, r,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));

                                log_unit_warning_errno(
                                                u, r,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));

                                RET_GATHER(ret, r);
                        }

                        killed = killed || r >= 0;
                }
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret >= 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}
4108
4109 int unit_following_set(Unit *u, Set **s) {
4110 assert(u);
4111 assert(s);
4112
4113 if (UNIT_VTABLE(u)->following_set)
4114 return UNIT_VTABLE(u)->following_set(u, s);
4115
4116 *s = NULL;
4117 return 0;
4118 }
4119
/* Lazily determines (and caches in u->unit_file_state) the enable state of the unit's file.
 * Units without a fragment keep their preexisting (negative) value; lookup failure is cached as
 * UNIT_FILE_BAD. */
UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_state < 0 && u->fragment_path) {
                r = unit_file_get_state(
                                u->manager->runtime_scope,
                                /* root_dir = */ NULL,
                                u->id,
                                &u->unit_file_state);
                if (r < 0)
                        u->unit_file_state = UNIT_FILE_BAD;
        }

        return u->unit_file_state;
}
4137
/* Lazily determines (and caches in u->unit_file_preset) the preset policy for the unit's fragment
 * file name. Negative cached values encode earlier errors. */
PresetAction unit_get_unit_file_preset(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path) {
                _cleanup_free_ char *bn = NULL;

                r = path_extract_filename(u->fragment_path, &bn);
                if (r < 0)
                        return (u->unit_file_preset = r);

                /* path_extract_filename() reports O_DIRECTORY for paths that refer to a
                 * directory; a directory cannot have a preset. */
                if (r == O_DIRECTORY)
                        return (u->unit_file_preset = -EISDIR);

                u->unit_file_preset = unit_file_query_preset(
                                u->manager->runtime_scope,
                                /* root_dir = */ NULL,
                                bn,
                                /* cached = */ NULL);
        }

        return u->unit_file_preset;
}
4162
/* Points 'ref' (owned by 'source') at 'target', registering it on the target's list of incoming
 * references. Any previous target is unset first. Returns 'target' for convenience. */
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}
4176
/* Releases a reference previously taken via unit_ref_set(); no-op if the ref is unset. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
4190
/* Derives a user name for DynamicUser= from the unit name: the unit's name prefix if that is a
 * valid user/group name, otherwise "_du" followed by a keyed siphash of the prefix. */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed key so the hashed fallback name is stable across invocations. */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n, 0)) {
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
4216
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Sandboxing options imply dropping the capabilities that could be used to undermine
                 * them again. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* DynamicUser= needs a user name; synthesize one from the unit name if unset. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                /* Bring the configured directory lists into a stable order. */
                for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                        exec_directory_sort(ec->directories + dt);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }
                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }

                        /* If there are encrypted credentials we might need to access the TPM. */
                        if (exec_context_has_encrypted_credentials(ec)) {
                                r = cgroup_context_add_device_allow(cc, "char-tpm", CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
4344
4345 ExecContext *unit_get_exec_context(const Unit *u) {
4346 size_t offset;
4347 assert(u);
4348
4349 if (u->type < 0)
4350 return NULL;
4351
4352 offset = UNIT_VTABLE(u)->exec_context_offset;
4353 if (offset <= 0)
4354 return NULL;
4355
4356 return (ExecContext*) ((uint8_t*) u + offset);
4357 }
4358
4359 KillContext *unit_get_kill_context(const Unit *u) {
4360 size_t offset;
4361 assert(u);
4362
4363 if (u->type < 0)
4364 return NULL;
4365
4366 offset = UNIT_VTABLE(u)->kill_context_offset;
4367 if (offset <= 0)
4368 return NULL;
4369
4370 return (KillContext*) ((uint8_t*) u + offset);
4371 }
4372
4373 CGroupContext *unit_get_cgroup_context(const Unit *u) {
4374 size_t offset;
4375
4376 if (u->type < 0)
4377 return NULL;
4378
4379 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4380 if (offset <= 0)
4381 return NULL;
4382
4383 return (CGroupContext*) ((uint8_t*) u + offset);
4384 }
4385
4386 ExecRuntime *unit_get_exec_runtime(const Unit *u) {
4387 size_t offset;
4388
4389 if (u->type < 0)
4390 return NULL;
4391
4392 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4393 if (offset <= 0)
4394 return NULL;
4395
4396 return *(ExecRuntime**) ((uint8_t*) u + offset);
4397 }
4398
4399 CGroupRuntime *unit_get_cgroup_runtime(const Unit *u) {
4400 size_t offset;
4401
4402 if (u->type < 0)
4403 return NULL;
4404
4405 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
4406 if (offset <= 0)
4407 return NULL;
4408
4409 return *(CGroupRuntime**) ((uint8_t*) u + offset);
4410 }
4411
4412 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4413 assert(u);
4414
4415 if (UNIT_WRITE_FLAGS_NOOP(flags))
4416 return NULL;
4417
4418 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4419 return u->manager->lookup_paths.transient;
4420
4421 if (flags & UNIT_PERSISTENT)
4422 return u->manager->lookup_paths.persistent_control;
4423
4424 if (flags & UNIT_RUNTIME)
4425 return u->manager->lookup_paths.runtime_control;
4426
4427 return NULL;
4428 }
4429
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL; /* NULL indicates allocation failure, here and below */

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                /* 't' always holds the most recent intermediate allocation; free_and_replace() keeps it
                 * current as each escaping stage builds on the previous result. */
                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        free_and_replace(t, t2);
                }

                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;
        }

        /* Hand ownership of the final allocation (if any) over to the caller. */
        *buf = TAKE_PTR(t);
        return s;
}
4485
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. Each entry ends up double-quoted, entries are
         * separated by a single space. Returns NULL on allocation failure. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1)) /* +1 for the trailing NUL */
                        return NULL;

                /* Append [space]"entry" at offset n. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure we return an allocated (possibly empty) string even for an empty input list. */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4525
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists a single unit setting: either appended to the transient unit file (while that is still
         * being written), or stored as a generated drop-in file. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* u->last_section_private < 0 means no section has been written to the transient file yet. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* p := drop-in directory path, q := drop-in file path */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership was transferred to u->dropin_paths */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4611
int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        /* printf()-style convenience wrapper around unit_write_setting(). */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_setting(u, flags, name, p);
}
4633
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        /* Turns this unit into a transient one: opens a fresh unit file for it in the transient lookup
         * directory and resets all load state, so that subsequent unit_write_setting() calls end up in
         * that file. */

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget any previously loaded configuration; the transient file replaces it all. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4679
static int log_kill(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        /* cg_kill_log_func_t callback logging each process about to be killed; 'userdata' is the Unit. */

        (void) pidref_get_comm(pid, &comm);

        /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
         * only, like for example systemd's own PAM stub process. */
        if (comm && comm[0] == '(')
                /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
                 * here to let the manager know that a process was killed. */
                return 1;

        log_unit_notice(userdata,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid->pid,
                        strna(comm),
                        signal_to_string(sig));

        return 1;
}
4702
static int operation_to_signal(
                const KillContext *c,
                KillOperation k,
                bool *ret_noteworthy) {

        assert(c);

        /* Maps a kill operation to the signal configured for it, and reports via *ret_noteworthy whether
         * using that signal is unusual enough to deserve log output. */

        switch (k) {

        case KILL_TERMINATE:
        case KILL_TERMINATE_AND_LOG:
                *ret_noteworthy = false;
                return c->kill_signal;

        case KILL_RESTART:
                *ret_noteworthy = false;
                return restart_kill_signal(c);

        case KILL_KILL:
                *ret_noteworthy = true;
                return c->final_kill_signal;

        case KILL_WATCHDOG:
                *ret_noteworthy = true;
                return c->watchdog_signal;

        default:
                assert_not_reached();
        }
}
4733
static int unit_kill_context_one(
                Unit *u,
                const PidRef *pidref,
                const char *type,       /* "main" or "control", used in log output only */
                bool is_alien,          /* true if the process was not forked off by us */
                int sig,
                bool send_sighup,
                cg_kill_log_func_t log_func) {

        int r;

        assert(u);
        assert(type);

        /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */

        if (!pidref_is_set(pidref))
                return 0;

        if (log_func)
                log_func(pidref, sig, u);

        r = pidref_kill_and_sigcont(pidref, sig);
        if (r == -ESRCH)
                /* Process already gone; a SIGCHLD is only expected for processes we forked ourselves. */
                return !is_alien;
        if (r < 0) {
                _cleanup_free_ char *comm = NULL;

                (void) pidref_get_comm(pidref, &comm);
                return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
        }

        if (send_sighup)
                (void) pidref_kill(pidref, SIGHUP);

        return !is_alien;
}
4771
int unit_kill_context(Unit *u, KillOperation k) {
        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        KillContext *c = unit_get_kill_context(u);
        if (!c || c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        /* Send SIGHUP in addition, but only for regular termination and if it isn't the main signal already. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* First, kill the main and the control process directly. */
        bool is_alien;
        PidRef *main_pid = unit_main_pid_full(u, &is_alien);
        r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                r = unit_pid_set(u, &pid_set);
                if (r < 0)
                        return r;

                r = cg_kill_recursive(
                                crt->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the main/control exclusion set for the SIGHUP pass. */
                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return r;

                                (void) cg_kill_recursive(
                                                crt->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* kill_log= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
4856
int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
        Hashmap **unit_map, **manager_map;
        int r;

        assert(u);
        assert(path);
        assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);

        unit_map = &u->mounts_for[type];
        manager_map = &u->manager->units_needing_mounts_for[type];

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Now register this unit in the manager's prefix table, once per prefix of the path. */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(*manager_map, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(*manager_map, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* ownership moved into the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4936
int unit_setup_exec_runtime(Unit *u) {
        _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
        _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
        _cleanup_set_free_ Set *units = NULL;
        ExecRuntime **rt;
        ExecContext *ec;
        size_t offset;
        Unit *other;
        int r;

        /* Allocates the ExecRuntime object for this unit (if not set up yet), sharing the underlying
         * ExecSharedRuntime with units whose namespace we join, where possible. */

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        ec = ASSERT_PTR(unit_get_exec_context(u));

        r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
        if (r < 0)
                return r;

        /* Try to get it from somebody else */
        SET_FOREACH(other, units) {
                r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
                if (r < 0)
                        return r;
                if (r > 0)
                        break;
        }

        if (!esr) {
                /* Nobody to share with — create a fresh shared runtime keyed by our own id. */
                r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
                if (r < 0)
                        return r;
        }

        if (ec->dynamic_user) {
                r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
                if (r < 0)
                        return r;
        }

        r = exec_runtime_make(u, ec, esr, dcreds, rt);
        if (r < 0)
                return r;

        /* exec_runtime_make() took ownership of both objects — don't unref them on the way out. */
        TAKE_PTR(esr);
        TAKE_PTR(dcreds);

        return r;
}
4991
4992 CGroupRuntime *unit_setup_cgroup_runtime(Unit *u) {
4993 size_t offset;
4994
4995 assert(u);
4996
4997 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
4998 assert(offset > 0);
4999
5000 CGroupRuntime **rt = (CGroupRuntime**) ((uint8_t*) u + offset);
5001 if (*rt)
5002 return *rt;
5003
5004 return (*rt = cgroup_runtime_new());
5005 }
5006
5007 bool unit_type_supported(UnitType t) {
5008 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5009 int r;
5010
5011 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5012
5013 if (cache[t] == 0) {
5014 char *e;
5015
5016 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5017
5018 r = getenv_bool(ascii_strupper(e));
5019 if (r < 0 && r != -ENXIO)
5020 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5021
5022 cache[t] = r == 0 ? -1 : 1;
5023 }
5024 if (cache[t] < 0)
5025 return false;
5026
5027 if (!unit_vtable[t]->supported)
5028 return true;
5029
5030 return unit_vtable[t]->supported();
5031 }
5032
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        /* Logs a notice if the directory we are about to mount over is not empty, as its current contents
         * will be shadowed by the mount. */

        if (!unit_log_level_test(u, LOG_NOTICE))
                return;

        r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
        if (r > 0 || r == -ENOTDIR) /* empty, or not a directory at all — nothing to warn about */
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_unit_struct(u, LOG_NOTICE,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                        "WHERE=%s", where);
}
5056
int unit_fail_if_noncanonical(Unit *u, const char* where) {
        _cleanup_free_ char *canonical_where = NULL;
        int r;

        assert(u);
        assert(where);

        /* Returns -ELOOP (after logging) if 'where' is not in canonical form, i.e. involves a symlink.
         * Returns 0 otherwise — including when the check itself could not be performed. */

        r = chase(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
                return 0;
        }

        /* We will happily ignore a trailing slash (or any redundant slashes) */
        if (path_equal(where, canonical_where))
                return 0;

        /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
        log_unit_struct(u, LOG_ERR,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
                        "WHERE=%s", where);

        return -ELOOP;
}
5083
5084 bool unit_is_pristine(Unit *u) {
5085 assert(u);
5086
5087 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5088 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5089 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5090 *
5091 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5092 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5093 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5094 */
5095
5096 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5097 !u->fragment_path &&
5098 !u->source_path &&
5099 !u->job &&
5100 !u->merged_into;
5101 }
5102
5103 PidRef* unit_control_pid(Unit *u) {
5104 assert(u);
5105
5106 if (UNIT_VTABLE(u)->control_pid)
5107 return UNIT_VTABLE(u)->control_pid(u);
5108
5109 return NULL;
5110 }
5111
PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
        assert(u);

        /* Returns the main PID tracked for this unit, if its type has such a concept. Via *ret_is_alien
         * (optional) the type reports whether the process was forked off by us or adopted from elsewhere. */

        if (UNIT_VTABLE(u)->main_pid)
                return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);

        if (ret_is_alien)
                *ret_is_alien = false;
        return NULL;
}
5122
static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
        int r;

        assert(u);

        /* Adds or removes 'element' (a UID or GID) to/from every NFT set configured for this unit with
         * the given source. Best-effort: failures are logged, never propagated. System manager only. */

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        CGroupContext *c;
        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        /* Lazily initialize the manager's firewall context on first use. */
        if (!u->manager->fw_ctx) {
                r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
                if (r < 0)
                        return;

                assert(u->manager->fw_ctx);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
                if (nft_set->source != source)
                        continue;

                r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
                if (r < 0)
                        log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
                                          add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
                else
                        log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
                                  add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
        }
}
5157
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* mark the reference as dropped */
}
5182
static void unit_unref_uid(Unit *u, bool destroy_now) {
        assert(u);

        /* Remove the UID from any NFT sets first, while u->ref_uid is still valid. */
        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);

        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
5190
static void unit_unref_gid(Unit *u, bool destroy_now) {
        assert(u);

        /* Remove the GID from any NFT sets first, while u->ref_gid is still valid. */
        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);

        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
5198
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        /* Drops both the UID and the GID reference of this unit in one go. */
        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
5205
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid) /* Same UID already referenced? Then there's nothing to do. */
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1; /* > 0 signals that a new reference was actually taken */
}
5243
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        /* Takes a reference on 'uid' for this unit, see unit_ref_uid_internal(). */
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
5247
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        /* Takes a reference on 'gid'; the casts rely on uid_t/gid_t being the same type (asserted in the helper). */
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
5251
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference, so that either both or neither are held. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        /* Returns > 0 if at least one new reference was actually taken. */
        return r > 0 || q > 0;
}
5277
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        /* References the UID/GID pair for this unit and mirrors both into any configured NFT sets.
         * Failure to take the references is logged but deliberately not treated as fatal. */

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);

        return r;
}
5295
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0) /* a new reference was taken — announce the changed state on the bus */
                unit_add_to_dbus_queue(u);
}
5309
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        /* Generates a fresh random invocation ID for this unit and installs it. */

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        unit_add_to_dbus_queue(u);
        return 0;
}
5327
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Fills in the ExecParameters structure used for spawning processes of this unit, combining
         * manager-wide settings with per-unit state. */

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager));
        if (r < 0)
                return r;

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        p->cgroup_path = crt ? crt->cgroup_path : NULL;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        /* Hand out the restrict-fs BPF map fd if the feature is enabled and the fd isn't set yet. */
        if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
                int fd = bpf_restrict_fs_map_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_restrict_fs_map_fd = fd;
        }

        p->user_lookup_fd = u->manager->user_lookup_fds[1];

        p->cgroup_id = crt ? crt->cgroup_id : 0;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        return 0;
}
5380
int unit_fork_helper_process(Unit *u, const char *name, PidRef *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the cgroup exists before forking, so the child can join it below. */
        (void) unit_realize_cgroup(u);

        CGroupRuntime *crt = unit_setup_cgroup_runtime(u);
        if (!crt)
                return -ENOMEM;

        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
                int q;

                /* Parent */

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child */

        /* Restore default signal handling (presumably undoing manager-side handlers) and ignore SIGPIPE. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (crt->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, crt->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path));
                        /* We are in the forked child: exit, never return into the caller's logic. */
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5429
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        /* Forks off a helper process (inside the unit's cgroup) that recursively removes the given paths,
         * registers it for watching, and returns its PidRef in *ret_pid. */

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                int ret = EXIT_SUCCESS;

                /* Child: best-effort removal of every path; remember whether any of them failed. */
                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        /* Parent: watch the child exclusively for this unit. */
        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
5461
5462 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5463 assert(deps);
5464 assert(other);
5465
5466 if (di.origin_mask == 0 && di.destination_mask == 0)
5467 /* No bit set anymore, let's drop the whole entry */
5468 assert_se(hashmap_remove(deps, other));
5469 else
5470 /* Mask was reduced, let's update the entry */
5471 assert_se(hashmap_update(deps, other, di.data) == 0);
5472 }
5473
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* The inner iteration is restarted from scratch after every modification, since the
                 * hashmap is being changed while we walk it. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* origin_mask entirely outside of 'mask' → dependency not owned by it, skip */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5529
5530 static int unit_get_invocation_path(Unit *u, char **ret) {
5531 char *p;
5532 int r;
5533
5534 assert(u);
5535 assert(ret);
5536
5537 if (MANAGER_IS_SYSTEM(u->manager))
5538 p = strjoin("/run/systemd/units/invocation:", u->id);
5539 else {
5540 _cleanup_free_ char *user_path = NULL;
5541 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5542 if (r < 0)
5543 return r;
5544 p = strjoin(user_path, u->id);
5545 }
5546
5547 if (!p)
5548 return -ENOMEM;
5549
5550 *ret = p;
5551 return 0;
5552 }
5553
5554 static int unit_export_invocation_id(Unit *u) {
5555 _cleanup_free_ char *p = NULL;
5556 int r;
5557
5558 assert(u);
5559
5560 if (u->exported_invocation_id)
5561 return 0;
5562
5563 if (sd_id128_is_null(u->invocation_id))
5564 return 0;
5565
5566 r = unit_get_invocation_path(u, &p);
5567 if (r < 0)
5568 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5569
5570 r = symlink_atomic_label(u->invocation_id_string, p);
5571 if (r < 0)
5572 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5573
5574 u->exported_invocation_id = true;
5575 return 0;
5576 }
5577
5578 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5579 const char *p;
5580 char buf[2];
5581 int r;
5582
5583 assert(u);
5584 assert(c);
5585
5586 if (u->exported_log_level_max)
5587 return 0;
5588
5589 if (c->log_level_max < 0)
5590 return 0;
5591
5592 assert(c->log_level_max <= 7);
5593
5594 buf[0] = '0' + c->log_level_max;
5595 buf[1] = 0;
5596
5597 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5598 r = symlink_atomic(buf, p);
5599 if (r < 0)
5600 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5601
5602 u->exported_log_level_max = true;
5603 return 0;
5604 }
5605
5606 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5607 _cleanup_close_ int fd = -EBADF;
5608 struct iovec *iovec;
5609 const char *p;
5610 char *pattern;
5611 le64_t *sizes;
5612 ssize_t n;
5613 int r;
5614
5615 if (u->exported_log_extra_fields)
5616 return 0;
5617
5618 if (c->n_log_extra_fields <= 0)
5619 return 0;
5620
5621 sizes = newa(le64_t, c->n_log_extra_fields);
5622 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5623
5624 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5625 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5626
5627 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5628 iovec[i*2+1] = c->log_extra_fields[i];
5629 }
5630
5631 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5632 pattern = strjoina(p, ".XXXXXX");
5633
5634 fd = mkostemp_safe(pattern);
5635 if (fd < 0)
5636 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5637
5638 n = writev(fd, iovec, c->n_log_extra_fields*2);
5639 if (n < 0) {
5640 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5641 goto fail;
5642 }
5643
5644 (void) fchmod(fd, 0644);
5645
5646 if (rename(pattern, p) < 0) {
5647 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5648 goto fail;
5649 }
5650
5651 u->exported_log_extra_fields = true;
5652 return 0;
5653
5654 fail:
5655 (void) unlink(pattern);
5656 return r;
5657 }
5658
5659 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5660 _cleanup_free_ char *buf = NULL;
5661 const char *p;
5662 int r;
5663
5664 assert(u);
5665 assert(c);
5666
5667 if (u->exported_log_ratelimit_interval)
5668 return 0;
5669
5670 if (c->log_ratelimit_interval_usec == 0)
5671 return 0;
5672
5673 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5674
5675 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5676 return log_oom();
5677
5678 r = symlink_atomic(buf, p);
5679 if (r < 0)
5680 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5681
5682 u->exported_log_ratelimit_interval = true;
5683 return 0;
5684 }
5685
5686 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5687 _cleanup_free_ char *buf = NULL;
5688 const char *p;
5689 int r;
5690
5691 assert(u);
5692 assert(c);
5693
5694 if (u->exported_log_ratelimit_burst)
5695 return 0;
5696
5697 if (c->log_ratelimit_burst == 0)
5698 return 0;
5699
5700 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5701
5702 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5703 return log_oom();
5704
5705 r = symlink_atomic(buf, p);
5706 if (r < 0)
5707 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5708
5709 u->exported_log_ratelimit_burst = true;
5710 return 0;
5711 }
5712
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* Everything below is only exported by the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
5750
5751 void unit_unlink_state_files(Unit *u) {
5752 const char *p;
5753
5754 assert(u);
5755
5756 if (!u->id)
5757 return;
5758
5759 /* Undoes the effect of unit_export_state() */
5760
5761 if (u->exported_invocation_id) {
5762 _cleanup_free_ char *invocation_path = NULL;
5763 int r = unit_get_invocation_path(u, &invocation_path);
5764 if (r >= 0) {
5765 (void) unlink(invocation_path);
5766 u->exported_invocation_id = false;
5767 }
5768 }
5769
5770 if (!MANAGER_IS_SYSTEM(u->manager))
5771 return;
5772
5773 if (u->exported_log_level_max) {
5774 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5775 (void) unlink(p);
5776
5777 u->exported_log_level_max = false;
5778 }
5779
5780 if (u->exported_log_extra_fields) {
5781 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5782 (void) unlink(p);
5783
5784 u->exported_log_extra_fields = false;
5785 }
5786
5787 if (u->exported_log_ratelimit_interval) {
5788 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5789 (void) unlink(p);
5790
5791 u->exported_log_ratelimit_interval = false;
5792 }
5793
5794 if (u->exported_log_ratelimit_burst) {
5795 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5796 (void) unlink(p);
5797
5798 u->exported_log_ratelimit_burst = false;
5799 }
5800 }
5801
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork of a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset accounting once per start cycle, if requested by the cgroup runtime state. */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->reset_accounting) {
                (void) unit_reset_accounting(u);
                crt->reset_accounting = false;
        }

        /* Publish invocation ID, log level, etc. for journald before the process runs. */
        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
5831
static bool ignore_leftover_process(const char *comm) {
        /* Processes whose comm name starts with '(' are most likely our own helper processes (PAM?), so
         * they are not worth reporting as leftovers. */
        if (!comm)
                return false;

        return comm[0] == '(';
}
5835
int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        /* cg_kill_log_func_t callback used while starting a unit: warns about processes still lingering in
         * the unit's cgroup. 'userdata' is the Unit*; 'sig' is unused here. Returns 1 after logging
         * (0 when the process was skipped as one of our own helpers). */

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid->pid, strna(comm));

        return 1;
}
5855
int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        /* Counterpart to unit_log_leftover_process_start(), used while stopping a unit. 'userdata' is the
         * Unit*; 'sig' is unused. Returns 1 after logging, 0 when the process was one of our own helpers. */

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(userdata,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid->pid, strna(comm));

        return 1;
}
5874
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        /* Walks the unit's cgroup recursively and calls log_func for each process found. sig=0 means no
         * signal is actually delivered (presumably, given kill(pid, 0) semantics — confirm against
         * cg_kill_recursive()); this is purely for reporting leftovers. Returns 0 if the unit has no
         * cgroup path. */

        (void) unit_pick_cgroup_path(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);

        if (!crt || !crt->cgroup_path)
                return 0;

        return cg_kill_recursive(
                        crt->cgroup_path,
                        /* sig= */ 0,
                        /* flags= */ 0,
                        /* set= */ NULL,
                        log_func,
                        u);
}
5893
5894 bool unit_needs_console(Unit *u) {
5895 ExecContext *ec;
5896 UnitActiveState state;
5897
5898 assert(u);
5899
5900 state = unit_active_state(u);
5901
5902 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5903 return false;
5904
5905 if (UNIT_VTABLE(u)->needs_console)
5906 return UNIT_VTABLE(u)->needs_console(u);
5907
5908 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5909 ec = unit_get_exec_context(u);
5910 if (!ec)
5911 return false;
5912
5913 return exec_context_may_touch_console(ec);
5914 }
5915
int unit_pid_attachable(Unit *u, const PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if attachable, otherwise a negative value with 'error'
         * set to a bus error suitable for returning to a D-Bus client. */

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
5943
5944 void unit_log_success(Unit *u) {
5945 assert(u);
5946
5947 /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
5948 * This message has low information value for regular users and it might be a bit overwhelming on a system with
5949 * a lot of devices. */
5950 log_unit_struct(u,
5951 MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
5952 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5953 LOG_UNIT_INVOCATION_ID(u),
5954 LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
5955 }
5956
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Emits a structured WARNING announcing that the unit failed, attaching the result string both in
         * the human-readable message and as a UNIT_RESULT= journal field. */
        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5967
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Emits a structured INFO message announcing that the unit was skipped (e.g. due to a failed
         * condition), attaching the result string as a UNIT_RESULT= journal field. */
        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5978
void unit_log_process_exit(
                Unit *u,
                const char *kind,       /* human-readable description of which process exited */
                const char *command,    /* command for the COMMAND= field; may be NULL (rendered as n/a) */
                bool success,           /* whether the exit counts as clean for this unit */
                int code,               /* SIGCHLD code: CLD_EXITED, CLD_KILLED, ... */
                int status) {           /* exit status (CLD_EXITED) or signal number (otherwise) */

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
6017
int unit_exit_status(Unit *u) {
        assert(u);

        /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
         * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
         * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
         * service process has exited abnormally (signal/coredump). */

        /* Dispatch to the unit-type specific implementation, if any. */
        if (!UNIT_VTABLE(u)->exit_status)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->exit_status(u);
}
6031
6032 int unit_failure_action_exit_status(Unit *u) {
6033 int r;
6034
6035 assert(u);
6036
6037 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6038
6039 if (u->failure_action_exit_status >= 0)
6040 return u->failure_action_exit_status;
6041
6042 r = unit_exit_status(u);
6043 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6044 return 255;
6045
6046 return r;
6047 }
6048
6049 int unit_success_action_exit_status(Unit *u) {
6050 int r;
6051
6052 assert(u);
6053
6054 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6055
6056 if (u->success_action_exit_status >= 0)
6057 return u->success_action_exit_status;
6058
6059 r = unit_exit_status(u);
6060 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6061 return 255;
6062
6063 return r;
6064 }
6065
6066 int unit_test_trigger_loaded(Unit *u) {
6067 Unit *trigger;
6068
6069 /* Tests whether the unit to trigger is loaded */
6070
6071 trigger = UNIT_TRIGGER(u);
6072 if (!trigger)
6073 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6074 "Refusing to start, no unit to trigger.");
6075 if (trigger->load_state != UNIT_LOADED)
6076 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6077 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6078
6079 return 0;
6080 }
6081
void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
        assert(u);
        assert(context);

        /* Tears down per-invocation runtime data: the runtime directory (unless it shall be preserved),
         * credentials, and the per-unit mount namespace directory. */

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(u);
        exec_context_destroy_mount_ns_dir(u);
}
6093
int unit_clean(Unit *u, ExecCleanMask mask) {
        UnitActiveState state;

        assert(u);

        /* Special return values:
         *
         * -EOPNOTSUPP → cleaning not supported for this unit type
         * -EUNATCH    → cleaning not defined for this resource type
         * -EBUSY      → unit currently can't be cleaned since it's running or not properly loaded, or has
         *               a job queued or similar
         */

        if (!UNIT_VTABLE(u)->clean)
                return -EOPNOTSUPP;

        if (mask == 0)
                return -EUNATCH;

        /* Only clean fully loaded, inactive units with no queued job. */
        if (u->load_state != UNIT_LOADED)
                return -EBUSY;

        if (u->job)
                return -EBUSY;

        state = unit_active_state(u);
        if (state != UNIT_INACTIVE)
                return -EBUSY;

        return UNIT_VTABLE(u)->clean(u, mask);
}
6125
6126 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6127 assert(u);
6128
6129 if (!UNIT_VTABLE(u)->clean ||
6130 u->load_state != UNIT_LOADED) {
6131 *ret = 0;
6132 return 0;
6133 }
6134
6135 /* When the clean() method is set, can_clean() really should be set too */
6136 assert(UNIT_VTABLE(u)->can_clean);
6137
6138 return UNIT_VTABLE(u)->can_clean(u, ret);
6139 }
6140
6141 bool unit_can_start_refuse_manual(Unit *u) {
6142 return unit_can_start(u) && !u->refuse_manual_start;
6143 }
6144
6145 bool unit_can_stop_refuse_manual(Unit *u) {
6146 return unit_can_stop(u) && !u->refuse_manual_stop;
6147 }
6148
6149 bool unit_can_isolate_refuse_manual(Unit *u) {
6150 return unit_can_isolate(u) && !u->refuse_manual_start;
6151 }
6152
void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret, FreezerState *ret_target) {
        Unit *slice;
        FreezerState curr, parent, next, tgt;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_PARENT_FREEZE,
                      FREEZER_THAW, FREEZER_PARENT_THAW));
        assert(ret);
        assert(ret_target);

        /* This function determines the correct freezer state transitions for a unit
         * given the action being requested. It returns the next state, and also the "target",
         * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
         * ultimately want to achieve. */

        curr = u->freezer_state;
        slice = UNIT_GET_SLICE(u);
        /* Units without a parent slice are treated as having a running parent. */
        if (slice)
                parent = slice->freezer_state;
        else
                parent = FREEZER_RUNNING;

        if (action == FREEZER_FREEZE) {
                /* We always "promote" a freeze initiated by parent into a normal freeze */
                if (IN_SET(curr, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = FREEZER_FROZEN;
                else
                        next = FREEZER_FREEZING;
        } else if (action == FREEZER_THAW) {
                /* Thawing is the most complicated operation here, because we can't thaw a unit
                 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
                 * initiated by parent if the parent is frozen */
                if (IN_SET(curr, FREEZER_RUNNING, FREEZER_THAWING, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                        next = curr; /* nothing to do, or parent keeps us frozen anyway */
                else if (curr == FREEZER_FREEZING) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FREEZING_BY_PARENT;
                } else {
                        assert(curr == FREEZER_FROZEN);
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FROZEN_BY_PARENT;
                }
        } else if (action == FREEZER_PARENT_FREEZE) {
                /* We need to avoid accidentally demoting units frozen manually */
                if (IN_SET(curr, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = curr;
                else
                        next = FREEZER_FREEZING_BY_PARENT;
        } else {
                assert(action == FREEZER_PARENT_THAW);

                /* We don't want to thaw units from a parent if they were frozen
                 * manually, so for such units this action is a no-op */
                if (IN_SET(curr, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
                        next = curr;
                else
                        next = FREEZER_THAWING;
        }

        /* Collapse the eventual settled state of 'next' into one of the two externally meaningful
         * targets: RUNNING or FROZEN. */
        tgt = freezer_state_finish(next);
        if (tgt == FREEZER_FROZEN_BY_PARENT)
                tgt = FREEZER_FROZEN;
        assert(IN_SET(tgt, FREEZER_RUNNING, FREEZER_FROZEN));

        *ret = next;
        *ret_target = tgt;
}
6224
6225 bool unit_can_freeze(Unit *u) {
6226 assert(u);
6227
6228 if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
6229 return false;
6230
6231 if (UNIT_VTABLE(u)->can_freeze)
6232 return UNIT_VTABLE(u)->can_freeze(u);
6233
6234 return UNIT_VTABLE(u)->freezer_action;
6235 }
6236
6237 void unit_frozen(Unit *u) {
6238 assert(u);
6239
6240 u->freezer_state = u->freezer_state == FREEZER_FREEZING_BY_PARENT
6241 ? FREEZER_FROZEN_BY_PARENT
6242 : FREEZER_FROZEN;
6243
6244 log_unit_debug(u, "Unit now %s.", freezer_state_to_string(u->freezer_state));
6245
6246 bus_unit_send_pending_freezer_message(u, false);
6247 }
6248
void unit_thawed(Unit *u) {
        assert(u);

        /* Marks the unit as running again after a thaw completed. */

        u->freezer_state = FREEZER_RUNNING;

        log_unit_debug(u, "Unit thawed.");

        /* Presumably flushes any pending D-Bus freezer reply/signal — see bus-unit. */
        bus_unit_send_pending_freezer_message(u, false);
}
6258
int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        /* Initiates a user-requested freeze or thaw. Returns 1 if a transition was started, <= 0 as
         * propagated from the unit-type implementation, or:
         *   -EOPNOTSUPP → freezer unsupported, or unit type can't be frozen
         *   -EBUSY      → a job is pending for the unit
         *   -EHOSTDOWN  → unit not loaded or not active
         *   -EALREADY   → already transitioning in the requested direction
         *   -ECHILD     → thaw requested, but the freeze was initiated by the parent slice */

        if (!cg_freezer_supported() || !unit_can_freeze(u))
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
                return -EALREADY;
        if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
                return -EALREADY;
        if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                return -ECHILD;

        r = UNIT_VTABLE(u)->freezer_action(u, action);
        if (r <= 0)
                return r;

        /* A transition was started — the unit must now be in one of the in-progress states. */
        assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
        return 1;
}
6293
Condition *unit_find_failed_condition(Unit *u) {
        Condition *failed_trigger = NULL;
        bool has_succeeded_trigger = false;

        /* Returns the condition that made the unit's condition check fail, or NULL if it passed.
         * Trigger conditions are OR-ed: a failed trigger is only reported if no trigger succeeded.
         * Non-trigger conditions are AND-ed: the first failed one is returned immediately. */

        if (u->condition_result)
                return NULL;

        LIST_FOREACH(conditions, c, u->conditions)
                if (c->trigger) {
                        if (c->result == CONDITION_SUCCEEDED)
                                has_succeeded_trigger = true;
                        else if (!failed_trigger)
                                failed_trigger = c;
                } else if (c->result != CONDITION_SUCCEEDED)
                        return c;

        return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
}
6312
/* String table for CollectMode; the macro below generates the collect_mode_to_string()/
 * collect_mode_from_string() pair from it. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6319
6320 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6321 Unit *i;
6322
6323 assert(u);
6324
6325 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6326 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6327 * is NULL the first entry found), or NULL if not found. */
6328
6329 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6330 if (!other || other == i)
6331 return i;
6332
6333 return NULL;
6334 }
6335
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated. Returns the number of entries (>= 0), or
         * -ENOMEM; the caller owns the returned array (the Unit pointers themselves are borrowed). */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}
6360
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        /* Similar to unit_get_dependency_array(), but also search the same dependency in other units.
         * Breadth-first walk: 'units' accumulates every unit reachable via 'atom', 'queue' holds units
         * still to be expanded. Note that 'u' itself ends up in the result only if some dependency chain
         * loops back to it. */

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        if (r == 0) /* already seen — don't enqueue again */
                                continue;
                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
6387
int unit_arm_timer(
                Unit *u,
                sd_event_source **source,  /* in/out: reused if already allocated */
                bool relative,             /* whether 'usec' is relative to now or an absolute CLOCK_MONOTONIC time */
                usec_t usec,               /* USEC_INFINITY disables the timer */
                sd_event_time_handler_t handler) {

        int r;

        assert(u);
        assert(source);
        assert(handler);

        /* Installs or re-arms a one-shot timer event source for this unit. An existing source is re-used:
         * USEC_INFINITY turns it off, anything else re-arms it. If no source exists yet, one is created
         * (unless the timer is disabled anyway). */

        if (*source) {
                if (usec == USEC_INFINITY)
                        return sd_event_source_set_enabled(*source, SD_EVENT_OFF);

                r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
                if (r < 0)
                        return r;

                return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
        }

        if (usec == USEC_INFINITY)
                return 0;

        r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
                        u->manager->event,
                        source,
                        CLOCK_MONOTONIC,
                        usec, 0,
                        handler,
                        u);
        if (r < 0)
                return r;

        /* Name the event source after the unit type (e.g. "service-timer") to ease debugging. */
        const char *d = strjoina(unit_type_to_string(u->type), "-timer");
        (void) sd_event_source_set_description(*source, d);

        return 0;
}
6430
6431 static int unit_get_nice(Unit *u) {
6432 ExecContext *ec;
6433
6434 ec = unit_get_exec_context(u);
6435 return ec ? ec->nice : 0;
6436 }
6437
6438 static uint64_t unit_get_cpu_weight(Unit *u) {
6439 CGroupContext *cc;
6440
6441 cc = unit_get_cgroup_context(u);
6442 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6443 }
6444
6445 int unit_compare_priority(Unit *a, Unit *b) {
6446 int ret;
6447
6448 ret = CMP(a->type, b->type);
6449 if (ret != 0)
6450 return -ret;
6451
6452 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6453 if (ret != 0)
6454 return -ret;
6455
6456 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6457 if (ret != 0)
6458 return ret;
6459
6460 return strcmp(a->id, b->id);
6461 }
6462
/* Per-unit-type vtables for ActivationDetails. Only path and timer units provide one; every other
 * entry is implicitly NULL, which callers use to detect "no activation details for this type". */
const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};
6467
6468 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6469 _cleanup_free_ ActivationDetails *details = NULL;
6470
6471 assert(trigger_unit);
6472 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6473 assert(trigger_unit->id);
6474
6475 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6476 if (!details)
6477 return NULL;
6478
6479 *details = (ActivationDetails) {
6480 .n_ref = 1,
6481 .trigger_unit_type = trigger_unit->type,
6482 };
6483
6484 details->trigger_unit_name = strdup(trigger_unit->id);
6485 if (!details->trigger_unit_name)
6486 return NULL;
6487
6488 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6489 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6490
6491 return TAKE_PTR(details);
6492 }
6493
6494 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6495 if (!details)
6496 return NULL;
6497
6498 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6499 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6500
6501 free(details->trigger_unit_name);
6502
6503 return mfree(details);
6504 }
6505
6506 void activation_details_serialize(ActivationDetails *details, FILE *f) {
6507 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6508 return;
6509
6510 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6511 if (details->trigger_unit_name)
6512 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6513 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6514 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6515 }
6516
6517 int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6518 int r;
6519
6520 assert(key);
6521 assert(value);
6522 assert(details);
6523
6524 if (!*details) {
6525 UnitType t;
6526
6527 if (!streq(key, "activation-details-unit-type"))
6528 return -EINVAL;
6529
6530 t = unit_type_from_string(value);
6531 if (t < 0)
6532 return t;
6533
6534 /* The activation details vtable has defined ops only for path and timer units */
6535 if (!activation_details_vtable[t])
6536 return -EINVAL;
6537
6538 *details = malloc0(activation_details_vtable[t]->object_size);
6539 if (!*details)
6540 return -ENOMEM;
6541
6542 **details = (ActivationDetails) {
6543 .n_ref = 1,
6544 .trigger_unit_type = t,
6545 };
6546
6547 return 0;
6548 }
6549
6550 if (streq(key, "activation-details-unit-name")) {
6551 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6552 if (r < 0)
6553 return r;
6554
6555 return 0;
6556 }
6557
6558 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6559 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6560
6561 return -EINVAL;
6562 }
6563
6564 int activation_details_append_env(ActivationDetails *details, char ***strv) {
6565 int r = 0;
6566
6567 assert(strv);
6568
6569 if (!details)
6570 return 0;
6571
6572 if (!isempty(details->trigger_unit_name)) {
6573 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6574 if (!s)
6575 return -ENOMEM;
6576
6577 r = strv_consume(strv, TAKE_PTR(s));
6578 if (r < 0)
6579 return r;
6580 }
6581
6582 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6583 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6584 if (r < 0)
6585 return r;
6586 }
6587
6588 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6589 }
6590
6591 int activation_details_append_pair(ActivationDetails *details, char ***strv) {
6592 int r = 0;
6593
6594 assert(strv);
6595
6596 if (!details)
6597 return 0;
6598
6599 if (!isempty(details->trigger_unit_name)) {
6600 r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
6601 if (r < 0)
6602 return r;
6603 }
6604
6605 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6606 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6607 if (r < 0)
6608 return r;
6609 }
6610
6611 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6612 }
6613
/* Generates activation_details_ref()/activation_details_unref(), the latter calling
 * activation_details_free() once the reference count drops to zero. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6615
/* Maps UnitMountDependencyType to the corresponding unit-file directive name; the macro below
 * generates the usual _to_string()/_from_string() helpers from this table. */
static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
        [UNIT_MOUNT_WANTS] = "WantsMountsFor",
        [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
};

DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);
6622
6623 UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
6624 switch (t) {
6625
6626 case UNIT_MOUNT_WANTS:
6627 return UNIT_WANTS;
6628
6629 case UNIT_MOUNT_REQUIRES:
6630 return UNIT_REQUIRES;
6631
6632 default:
6633 assert_not_reached();
6634 }
6635 }