/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fnmatch.h>
#include <linux/capability.h>
#include <unistd.h>

#include "sd-bus.h"
#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "ansi-color.h"
#include "bpf-firewall.h"
#include "bpf-restrict-fs.h"
#include "bus-common-errors.h"
#include "bus-internal.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "chase.h"
#include "condition.h"
#include "dbus-unit.h"
#include "dropin.h"
#include "dynamic-user.h"
#include "env-util.h"
#include "escape.h"
#include "exec-credential.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "install.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "logarithm.h"
#include "mkdir-label.h"
#include "manager.h"
#include "mount-util.h"
#include "mountpoint-util.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "siphash24.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit.h"
#include "unit-name.h"
#include "user-util.h"
#include "varlink.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES (128 * U64_MB)

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = _PRESET_ACTION_INVALID;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;

        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->last_section_private = -1;

        u->start_ratelimit = m->defaults.start_limit;

        u->auto_start_stop_ratelimit = (const RateLimit) {
                .interval = 10 * USEC_PER_SEC,
                .burst = 16
        };

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}

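/* A minimal usage sketch of the constructor above (illustrative only; "Service" and
 * "foo.service" are hypothetical stand-ins):
 *
 *         Unit *u = NULL;
 *         int r = unit_new_for_name(m, sizeof(Service), "foo.service", &u);
 *         if (r < 0)
 *                 return r;  // on failure, the unit_freep cleanup frees the half-built unit
 *
 * The size parameter covers the type-specific struct that embeds Unit as its first
 * member, which is why unit_new() asserts size >= sizeof(Unit). */
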
bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup context, _before_ the rest
                 * of the settings have been initialized */

                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* The user manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file. */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops_free, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "Instance is not set when adding name '%s'.", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "Failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Unit already exists when adding name '%s'.", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Name '%s' is invalid.", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Failed to derive unit type from name '%s'.", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Unit type is illegal: u->type(%d) and t(%d) for name '%s'.",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Templates are not allowed for name '%s'.", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "Cannot add name, manager has too many units.");

        /* Add the name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to add unit to hashmap for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}

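/* Template resolution sketch (illustrative only): if a template name such as
 * "getty@.service" is added to a unit whose instance is "tty1", the code above calls
 * unit_name_replace_instance() and effectively does:
 *
 *         char *name = NULL;
 *         r = unit_name_replace_instance("getty@.service", "tty1", &name);
 *         // on success, name == "getty@tty1.service"
 *
 * The concrete names here are hypothetical examples, not something this file depends on. */
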
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}

void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* If we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similarly, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
         * around. Units with active processes should never be collected. */
        r = unit_cgroup_is_empty(u);
        if (r <= 0 && !IN_SET(r, -ENXIO, -EOWNERDEAD))
                return false; /* ENXIO/EOWNERDEAD means: currently not realized */

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}

void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}

static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}

void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}

static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                        other->dependency_generation++;
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
        u->dependency_generation++;
}

static void unit_remove_transient(Unit *u) {
        assert(u);
        assert(u->manager);

        if (!u->transient)
                return;

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }

        if (u->fragment_path) {
                (void) unlink(u->fragment_path);
                (void) unit_file_remove_from_name_map(
                                &u->manager->lookup_paths,
                                &u->manager->unit_cache_timestamp_hash,
                                &u->manager->unit_id_map,
                                &u->manager->unit_name_map,
                                &u->manager->unit_path_cache,
                                u->fragment_path);
        }
}

static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from the slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u, /* drop_cgroup_runtime = */ true);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}

static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for it. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}

static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some dependency types */
        return IN_SET(dependency,
                      UNIT_CONFLICTS,
                      UNIT_CONFLICTED_BY,
                      UNIT_BEFORE,
                      UNIT_AFTER,
                      UNIT_ON_SUCCESS,
                      UNIT_ON_FAILURE,
                      UNIT_TRIGGERS,
                      UNIT_TRIGGERED_BY);
}

static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}

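/* Note on the assert_cc() above: UnitDependencyInfo packs its two mask bitfields into a
 * pointer-sized union, so the value itself is stored as the hashmap "value" pointer and no
 * separate allocation is needed. A rough sketch of the idea (illustrative, simplified from
 * the declaration in unit.h):
 *
 *         typedef union UnitDependencyInfo {
 *                 void *data;
 *                 struct {
 *                         UnitDependencyMask origin_mask:16;
 *                         UnitDependencyMask destination_mask:16;
 *                 } _packed_;
 *         } UnitDependencyInfo;
 *
 * hashmap_get()/hashmap_put() then move info.data around as an opaque pointer. */
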
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);

        u->dependency_generation++;
        other->dependency_generation++;
}

int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't roll back reservations if
         * we fail, as we have no way to undo them. That's fine though, a reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

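/* Merge walkthrough (illustrative only; the unit names are hypothetical): suppose
 * "dbus.service" is loaded and "messagebus.service" turns out to be an alias for the same
 * fragment. The loader would then end up doing roughly:
 *
 *         Unit *u = manager_get_unit(m, "dbus.service");
 *         Unit *other = manager_get_unit(m, "messagebus.service");
 *         r = unit_merge(u, other);
 *
 * After a successful merge, 'other' remains as a UNIT_MERGED husk whose merged_into points
 * at 'u', unit_follow_merge() resolves it to 'u', and 'other' is queued for cleanup. */
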
int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], i->path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* This must be already set in unit_patch_contexts(). */
        assert(c->private_var_tmp >= 0 && c->private_var_tmp < _PRIVATE_TMP_MAX);

        if (c->private_tmp == PRIVATE_TMP_CONNECTED) {
                assert(c->private_var_tmp == PRIVATE_TMP_CONNECTED);

                r = unit_add_mounts_for(u, "/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

        } else if (c->private_var_tmp == PRIVATE_TMP_DISCONNECTED && !exec_context_with_rootfs(c)) {
                /* Even if PrivateTmp=disconnected, we still require the /var/tmp/ mount point to be present,
                 * i.e. /var/ needs to be mounted. See comments in unit_patch_contexts(). */
                r = unit_add_mounts_for(u, "/var/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */
        if (c->log_namespace) {
                static const struct {
                        const char *template;
                        UnitType type;
                } deps[] = {
                        { "systemd-journald", UNIT_SOCKET, },
                        { "systemd-journald-varlink", UNIT_SOCKET, },
                        { "systemd-journald-sync", UNIT_SERVICE, },
                };

                FOREACH_ELEMENT(i, deps) {
                        _cleanup_free_ char *unit = NULL;

                        r = unit_name_build_from_type(i->template, c->log_namespace, i->type, &unit);
                        if (r < 0)
                                return r;

                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, unit, true, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        } else if (IN_SET(c->std_output, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                                         EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) ||
                   IN_SET(c->std_error, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                                        EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE)) {

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        return 0;
}

const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}

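/* Output forms of unit_status_string() (illustrative): for a hypothetical unit with id
 * "foo.service" and Description=Frobnicator, the three possible results are:
 *
 *         "foo.service"                 (STATUS_UNIT_FORMAT_NAME, or no description set)
 *         "Frobnicator"                 (STATUS_UNIT_FORMAT_DESCRIPTION)
 *         "foo.service - Frobnicator"   (STATUS_UNIT_FORMAT_COMBINED, with buffer provided)
 *
 * In the combined case the caller owns *ret_combined_buffer and must free it. */
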
/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        u = unit_follow_merge(u);

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(u);
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice) {
                if (!IN_SET(slice->freezer_state, FREEZER_RUNNING, FREEZER_THAWING))
                        u->freezer_state = FREEZER_FROZEN_BY_PARENT;

                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
        }

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(
                                                        u->manager,
                                                        p,
                                                        /* path= */ NULL,
                                                        /* e= */ NULL,
                                                        &m);
                                        continue;
                                }
                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}

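/* PATH_FOREACH_PREFIX_MORE walks a path from the root down to the full path itself, so for
 * a hypothetical RequiresMountsFor=/srv/data/db the loop above would consider, in order:
 *
 *         "/"  ->  "/srv"  ->  "/srv/data"  ->  "/srv/data/db"
 *
 * and add After= (plus Requires=/Wants=, per the mount dependency type) on each existing,
 * loaded mount unit, e.g. srv.mount or srv-data.mount. The example path is made up; the
 * traversal order is what matters. */
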
static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}

static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}

static const struct {
        UnitDependencyAtom atom;
        size_t job_mode_offset;
        const char *dependency_name;
        const char *job_mode_setting_name;
} on_termination_settings[] = {
        { UNIT_ATOM_ON_SUCCESS, offsetof(Unit, on_success_job_mode), "OnSuccess=", "OnSuccessJobMode=" },
        { UNIT_ATOM_ON_FAILURE, offsetof(Unit, on_failure_job_mode), "OnFailure=", "OnFailureJobMode=" },
};

static int unit_validate_on_termination_job_modes(Unit *u) {
        assert(u);

        /* Verify that if On{Success,Failure}JobMode=isolate, only one unit gets specified. */

        FOREACH_ELEMENT(setting, on_termination_settings) {
                JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);

                if (job_mode != JOB_ISOLATE)
                        continue;

                Unit *other, *found = NULL;
                UNIT_FOREACH_DEPENDENCY(other, u, setting->atom) {
                        if (!found)
                                found = other;
                        else if (found != other)
                                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC),
                                                            "More than one %s dependency specified, but %sisolate is set. Refusing.",
                                                            setting->dependency_name, setting->job_mode_setting_name);
                }
        }

        return 0;
}

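/* The offsetof() table above lets one loop handle both OnSuccess= and OnFailure= without
 * duplicating the validation logic: each entry records where the corresponding JobMode
 * field lives inside struct Unit, and the cast
 *
 *         JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);
 *
 * reads that field generically. This works because offsetof() is computed against the
 * concrete Unit layout, so the pointer arithmetic lands exactly on on_success_job_mode
 * or on_failure_job_mode respectively. */
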
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_termination_job_modes(u);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         unit_log_field(u),
                                         u->id,
                                         unit_invocation_log_field(u),
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}

void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}

int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(
                        u->manager,
                        u->start_limit_action,
                        EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S,
                        u->reboot_arg,
                        /* exit_status= */ -1,
                        reason);

        return -ECANCELED;
}

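/* Rate-limit semantics (illustrative): u->start_ratelimit is seeded in unit_new() from the
 * manager defaults and can be overridden per unit via StartLimitIntervalSec=/StartLimitBurst=.
 * A hypothetical configuration of
 *
 *         RateLimit rl = { .interval = 10 * USEC_PER_SEC, .burst = 5 };
 *
 * makes ratelimit_below(&rl) return true for the first five calls within any 10 s window and
 * false afterwards, which is when unit_test_start_limit() above trips and fires the
 * configured StartLimitAction=. */
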
static bool unit_verify_deps(Unit *u) {
        Unit *other;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
         * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
         * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
         * that are not used in conjunction with After= as for them any such check would make things entirely
         * racy. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {

                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

1861 /* Errors that aren't really errors:
1862 * -EALREADY: Unit is already started.
1863 * -ECOMM: Condition failed
1864 * -EAGAIN: An operation is already in progress. Retry later.
1865 *
1866 * Errors that are real errors:
1867 * -EBADR: This unit type does not support starting.
1868 * -ECANCELED: Start limit hit, too many requests for now
1869 * -EPROTO: Assert failed
1870 * -EINVAL: Unit not loaded
1871 * -EOPNOTSUPP: Unit type not supported
1872 * -ENOLINK: The necessary dependencies are not fulfilled.
1873 * -ESTALE: This unit has been started before and can't be started a second time
1874 * -EDEADLK: This unit is frozen
1875 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1876 * -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached
1877 */
1878 int unit_start(Unit *u, ActivationDetails *details) {
1879 UnitActiveState state;
1880 Unit *following;
1881 int r;
1882
1883 assert(u);
1884
1885 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1886 if (UNIT_VTABLE(u)->subsystem_ratelimited) {
1887 r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
1888 if (r < 0)
1889 return r;
1890 if (r > 0)
1891 return -EAGAIN;
1892 }
1893
1894 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1895 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1896 * waiting is finished. */
1897 state = unit_active_state(u);
1898 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1899 return -EALREADY;
1900 if (state == UNIT_MAINTENANCE)
1901 return -EAGAIN;
1902
1903 /* Units that aren't loaded cannot be started */
1904 if (u->load_state != UNIT_LOADED)
1905 return -EINVAL;
1906
1907 /* Refuse starting scope units more than once */
1908 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1909 return -ESTALE;
1910
1911 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1912 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1913 * recheck the condition in that case. */
1914 if (state != UNIT_ACTIVATING &&
1915 !unit_test_condition(u))
1916 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");
1917
1918 /* If the asserts failed, fail the entire job */
1919 if (state != UNIT_ACTIVATING &&
1920 !unit_test_assert(u))
1921 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1922
1923 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1924 * condition checks, so that we rather return condition check errors (which are usually not
1925 * considered a true failure) than "not supported" errors (which are considered a failure).
1926 */
1927 if (!unit_type_supported(u->type))
1928 return -EOPNOTSUPP;
1929
1930 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1931 * should have taken care of this already, but let's check this here again. After all, our
1932 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1933 if (!unit_verify_deps(u))
1934 return -ENOLINK;
1935
1936 /* Forward to the main object, if we aren't it. */
1937 following = unit_following(u);
1938 if (following) {
1939 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1940 return unit_start(following, details);
1941 }
1942
1943 /* Check to make sure the unit isn't frozen */
1944 if (u->freezer_state != FREEZER_RUNNING)
1945 return -EDEADLK;
1946
1947 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1948 if (UNIT_VTABLE(u)->can_start) {
1949 r = UNIT_VTABLE(u)->can_start(u);
1950 if (r < 0)
1951 return r;
1952 }
1953
1954 /* If it is stopped, but we cannot start it, then fail */
1955 if (!UNIT_VTABLE(u)->start)
1956 return -EBADR;
1957
1958 if (UNIT_IS_INACTIVE_OR_FAILED(state)) {
1959 Slice *slice = SLICE(UNIT_GET_SLICE(u));
1960
1961 if (slice) {
1962                         /* Check the hard concurrency limit. Note this is partially redundant: we already checked
1963                          * this when enqueuing jobs. However, between the time we enqueued this job and the
1964                          * time we are dispatching the queue the configuration might have changed, hence
1965                          * check here again. */
1966 if (slice_concurrency_hard_max_reached(slice, u))
1967 return -ETOOMANYREFS;
1968
1969                         /* Also check the soft concurrency limit, and return EAGAIN so that the job is kept in
1970                          * the queue. */
1971 if (slice_concurrency_soft_max_reached(slice, u))
1972 return -EAGAIN; /* Try again, keep in queue */
1973 }
1974 }
1975
1976 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1977 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1978 * waits for a holdoff timer to elapse before it will start again. */
1979
1980 unit_add_to_dbus_queue(u);
1981
1982 if (!u->activation_details) /* Older details object wins */
1983 u->activation_details = activation_details_ref(details);
1984
1985 return UNIT_VTABLE(u)->start(u);
1986 }
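/* Illustrative sketch (hypothetical caller, not an actual call site in this file): how the
 * error contract documented above might be classified:
 *
 *     r = unit_start(u, NULL);
 *     if (r == -EALREADY)
 *             ...          (already active or reloading: treat as success)
 *     else if (r == -EAGAIN)
 *             ...          (transient: maintenance state, subsystem ratelimit, or slice soft
 *                           concurrency limit; keep the job queued and retry later)
 *     else if (r == -ECOMM)
 *             ...          (condition check failed; usually not treated as a hard failure)
 *     else if (r < 0)
 *             ...          (hard failure: -EBADR, -EPROTO, -EINVAL, -ENOLINK, -ESTALE,
 *                           -EDEADLK, -ETOOMANYREFS, ...)
 */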
1987
1988 bool unit_can_start(Unit *u) {
1989 assert(u);
1990
1991 if (u->load_state != UNIT_LOADED)
1992 return false;
1993
1994 if (!unit_type_supported(u->type))
1995 return false;
1996
1997 /* Scope units may be started only once */
1998 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1999 return false;
2000
2001 return !!UNIT_VTABLE(u)->start;
2002 }
2003
2004 bool unit_can_isolate(Unit *u) {
2005 assert(u);
2006
2007 return unit_can_start(u) &&
2008 u->allow_isolate;
2009 }
2010
2011 /* Errors:
2012 * -EBADR: This unit type does not support stopping.
2013 * -EALREADY: Unit is already stopped.
2014 * -EAGAIN: An operation is already in progress. Retry later.
2015 * -EDEADLK: Unit is frozen
2016 */
2017 int unit_stop(Unit *u) {
2018 UnitActiveState state;
2019 Unit *following;
2020
2021 assert(u);
2022
2023 state = unit_active_state(u);
2024 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2025 return -EALREADY;
2026
2027 following = unit_following(u);
2028 if (following) {
2029 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2030 return unit_stop(following);
2031 }
2032
2033 /* Check to make sure the unit isn't frozen */
2034 if (u->freezer_state != FREEZER_RUNNING)
2035 return -EDEADLK;
2036
2037 if (!UNIT_VTABLE(u)->stop)
2038 return -EBADR;
2039
2040 unit_add_to_dbus_queue(u);
2041
2042 return UNIT_VTABLE(u)->stop(u);
2043 }
2044
2045 bool unit_can_stop(Unit *u) {
2046 assert(u);
2047
2048         /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2049          * Extrinsic units follow external state, and they may stop in response to external state changes
2050          * (hence we return true here), but an attempt to stop them through the manager will fail. */
2051
2052 if (!unit_type_supported(u->type))
2053 return false;
2054
2055 if (u->perpetual)
2056 return false;
2057
2058 return !!UNIT_VTABLE(u)->stop;
2059 }
2060
2061 /* Errors:
2062 * -EBADR: This unit type does not support reloading.
2063 * -ENOEXEC: Unit is not started.
2064 * -EAGAIN: An operation is already in progress. Retry later.
2065 * -EDEADLK: Unit is frozen.
2066 */
2067 int unit_reload(Unit *u) {
2068 UnitActiveState state;
2069 Unit *following;
2070
2071 assert(u);
2072
2073 if (u->load_state != UNIT_LOADED)
2074 return -EINVAL;
2075
2076 if (!unit_can_reload(u))
2077 return -EBADR;
2078
2079 state = unit_active_state(u);
2080 if (IN_SET(state, UNIT_RELOADING, UNIT_REFRESHING))
2081                 /* "refreshing" means some resources in the unit namespace are being updated. Unlike reload,
2082                  * the unit processes aren't made aware of a refresh. Let's put the job back into the queue
2083                  * in both cases, as refresh typically takes place before reload and it's better to wait
2084                  * for it rather than failing. */
2085 return -EAGAIN;
2086
2087 if (state != UNIT_ACTIVE)
2088 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2089
2090 following = unit_following(u);
2091 if (following) {
2092 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2093 return unit_reload(following);
2094 }
2095
2096 /* Check to make sure the unit isn't frozen */
2097 if (u->freezer_state != FREEZER_RUNNING)
2098 return -EDEADLK;
2099
2100 unit_add_to_dbus_queue(u);
2101
2102 if (!UNIT_VTABLE(u)->reload) {
2103 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2104 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2105 return 0;
2106 }
2107
2108 return UNIT_VTABLE(u)->reload(u);
2109 }
2110
2111 bool unit_can_reload(Unit *u) {
2112 assert(u);
2113
2114 if (UNIT_VTABLE(u)->can_reload)
2115 return UNIT_VTABLE(u)->can_reload(u);
2116
2117 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2118 return true;
2119
2120 return UNIT_VTABLE(u)->reload;
2121 }
2122
2123 bool unit_is_unneeded(Unit *u) {
2124 Unit *other;
2125 assert(u);
2126
2127 if (!u->stop_when_unneeded)
2128 return false;
2129
2130 /* Don't clean up while the unit is transitioning or is even inactive. */
2131 if (unit_active_state(u) != UNIT_ACTIVE)
2132 return false;
2133 if (u->job)
2134 return false;
2135
2136 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2137 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2138 * restart, then don't clean this one up. */
2139
2140 if (other->job)
2141 return false;
2142
2143 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2144 return false;
2145
2146 if (unit_will_restart(other))
2147 return false;
2148 }
2149
2150 return true;
2151 }
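/* For context, an illustrative (hypothetical) unit that opts into this behaviour via its
 * [Unit] section:
 *
 *     [Unit]
 *     Description=On-demand VPN tunnel
 *     StopWhenUnneeded=yes
 *
 * Once no unit that pins it (per UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) is active, queued, or
 * about to restart, the checks above report it as unneeded and it gets stopped. */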
2152
2153 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2154 Unit *other;
2155
2156 assert(u);
2157
2158         /* Checks if the unit needs to be started because it currently is not running, but some other unit
2159          * that is active declared an Uphold= dependency on it. */
2160
2161 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2162 if (ret_culprit)
2163 *ret_culprit = NULL;
2164 return false;
2165 }
2166
2167 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2168 if (other->job)
2169 continue;
2170
2171 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2172 if (ret_culprit)
2173 *ret_culprit = other;
2174 return true;
2175 }
2176 }
2177
2178 if (ret_culprit)
2179 *ret_culprit = NULL;
2180 return false;
2181 }
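/* For context, an illustrative (hypothetical) configuration driving this check, declared on
 * the upholding unit:
 *
 *     [Unit]
 *     Upholds=worker.service
 *
 * While the upholding unit stays active, worker.service is re-queued for start whenever it is
 * found inactive or failed, which is exactly the situation detected above. */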
2182
2183 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2184 Unit *other;
2185
2186 assert(u);
2187
2188 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2189 * because the other unit is down. */
2190
2191 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2192 /* Don't clean up while the unit is transitioning or is even inactive. */
2193 if (ret_culprit)
2194 *ret_culprit = NULL;
2195 return false;
2196 }
2197
2198 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2199 if (other->job)
2200 continue;
2201
2202 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2203 if (ret_culprit)
2204 *ret_culprit = other;
2205
2206 return true;
2207 }
2208 }
2209
2210 if (ret_culprit)
2211 *ret_culprit = NULL;
2212 return false;
2213 }
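/* For context, an illustrative (hypothetical) configuration that triggers this check:
 *
 *     [Unit]
 *     BindsTo=dev-sda1.device
 *     After=dev-sda1.device
 *
 * When the device unit goes inactive, the unit carrying BindsTo= is found "bound by an
 * inactive unit" here, and the returned culprit can be named when the stop is logged. */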
2214
2215 static void check_unneeded_dependencies(Unit *u) {
2216 Unit *other;
2217 assert(u);
2218
2219 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2220
2221 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2222 unit_submit_to_stop_when_unneeded_queue(other);
2223 }
2224
2225 static void check_uphold_dependencies(Unit *u) {
2226 Unit *other;
2227 assert(u);
2228
2229 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2230
2231 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2232 unit_submit_to_start_when_upheld_queue(other);
2233 }
2234
2235 static void check_bound_by_dependencies(Unit *u) {
2236 Unit *other;
2237 assert(u);
2238
2239 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2240
2241 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2242 unit_submit_to_stop_when_bound_queue(other);
2243 }
2244
2245 static void retroactively_start_dependencies(Unit *u) {
2246 Unit *other;
2247
2248 assert(u);
2249 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2250
2251 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
2252 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2253 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2254 (void) manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2255
2256 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
2257 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2258 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2259 (void) manager_add_job(u->manager, JOB_START, other, JOB_FAIL, /* error = */ NULL, /* ret = */ NULL);
2260
2261 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
2262 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2263 (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2264 }
2265
2266 static void retroactively_stop_dependencies(Unit *u) {
2267 Unit *other;
2268
2269 assert(u);
2270 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2271
2272 /* Pull down units which are bound to us recursively if enabled */
2273 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2274 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2275 (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2276 }
2277
2278 void unit_start_on_termination_deps(Unit *u, UnitDependencyAtom atom) {
2279 const char *dependency_name = NULL;
2280 JobMode job_mode;
2281 unsigned n_jobs = 0;
2282 int r;
2283
2284 /* Act on OnFailure= and OnSuccess= dependencies */
2285
2286 assert(u);
2287 assert(u->manager);
2288 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2289
2290 FOREACH_ELEMENT(setting, on_termination_settings)
2291 if (atom == setting->atom) {
2292 job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);
2293 dependency_name = setting->dependency_name;
2294 break;
2295 }
2296
2297 assert(dependency_name);
2298
2299 Unit *other;
2300 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2301 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2302
2303 if (n_jobs == 0)
2304 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2305
2306 r = manager_add_job(u->manager, JOB_START, other, job_mode, &error, /* ret = */ NULL);
2307 if (r < 0)
2308 log_unit_warning_errno(u, r, "Failed to enqueue %s%s job, ignoring: %s",
2309 dependency_name, other->id, bus_error_message(&error, r));
2310 n_jobs++;
2311 }
2312
2313 if (n_jobs > 0)
2314 log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
2315 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2316 }
2317
2318 void unit_trigger_notify(Unit *u) {
2319 Unit *other;
2320
2321 assert(u);
2322
2323 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2324 if (UNIT_VTABLE(other)->trigger_notify)
2325 UNIT_VTABLE(other)->trigger_notify(other, u);
2326 }
2327
2328 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2329 if (condition_notice && log_level > LOG_NOTICE)
2330 return LOG_NOTICE;
2331 if (condition_info && log_level > LOG_INFO)
2332 return LOG_INFO;
2333 return log_level;
2334 }
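/* Example: raise_level(LOG_DEBUG, true, true) returns LOG_NOTICE, and
 * raise_level(LOG_DEBUG, true, false) returns LOG_INFO; a level that is already more severe
 * than the candidate (e.g. LOG_WARNING) is left untouched. */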
2335
2336 static int unit_log_resources(Unit *u) {
2337
2338 static const struct {
2339 const char *journal_field;
2340 const char *message_suffix;
2341 } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
2342 [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" },
2343 [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
2344 }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2345 [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
2346 [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
2347 [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL },
2348 [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL },
2349 }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2350 [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" },
2351 [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
2352 [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL },
2353 [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL },
2354 };
2355
2356 struct iovec *iovec = NULL;
2357 size_t n_iovec = 0;
2358 _cleanup_free_ char *message = NULL, *t = NULL;
2359 nsec_t cpu_nsec = NSEC_INFINITY;
2360 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2361
2362 assert(u);
2363
2364 CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);
2365
2366 iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
2367 _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
2368 if (!iovec)
2369 return log_oom();
2370
2371         /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2372          * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2373          * information and the complete data in structured fields. */
2374
2375 (void) unit_get_cpu_usage(u, &cpu_nsec);
2376 if (cpu_nsec != NSEC_INFINITY) {
2377 /* Format the CPU time for inclusion in the structured log message */
2378 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
2379 return log_oom();
2380 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2381
2382 /* Format the CPU time for inclusion in the human language message string */
2383 if (strextendf_with_separator(&message, ", ",
2384 "Consumed %s CPU time",
2385 FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
2386 return log_oom();
2387
2388 log_level = raise_level(log_level,
2389 cpu_nsec > MENTIONWORTHY_CPU_NSEC,
2390 cpu_nsec > NOTICEWORTHY_CPU_NSEC);
2391 }
2392
2393 for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
2394 uint64_t value = UINT64_MAX;
2395
2396 assert(memory_fields[metric].journal_field);
2397 assert(memory_fields[metric].message_suffix);
2398
2399 (void) unit_get_memory_accounting(u, metric, &value);
2400 if (value == UINT64_MAX)
2401 continue;
2402
2403 if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
2404 return log_oom();
2405 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2406
2407 /* If value is 0, we don't log it in the MESSAGE= field. */
2408 if (value == 0)
2409 continue;
2410
2411 if (strextendf_with_separator(&message, ", ", "%s %s",
2412 FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
2413 return log_oom();
2414
2415 log_level = raise_level(log_level,
2416 value > MENTIONWORTHY_MEMORY_BYTES,
2417 value > NOTICEWORTHY_MEMORY_BYTES);
2418 }
2419
2420 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2421 uint64_t value = UINT64_MAX;
2422
2423 assert(io_fields[k].journal_field);
2424
2425 (void) unit_get_io_accounting(u, k, &value);
2426 if (value == UINT64_MAX)
2427 continue;
2428
2429 /* Format IO accounting data for inclusion in the structured log message */
2430 if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
2431 return log_oom();
2432 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2433
2434 /* If value is 0, we don't log it in the MESSAGE= field. */
2435 if (value == 0)
2436 continue;
2437
2438 /* Format the IO accounting data for inclusion in the human language message string, but only
2439 * for the bytes counters (and not for the operations counters) */
2440 if (io_fields[k].message_suffix) {
2441 if (strextendf_with_separator(&message, ", ", "%s %s",
2442 FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
2443 return log_oom();
2444
2445 log_level = raise_level(log_level,
2446 value > MENTIONWORTHY_IO_BYTES,
2447 value > NOTICEWORTHY_IO_BYTES);
2448 }
2449 }
2450
2451 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2452 uint64_t value = UINT64_MAX;
2453
2454 assert(ip_fields[m].journal_field);
2455
2456 (void) unit_get_ip_accounting(u, m, &value);
2457 if (value == UINT64_MAX)
2458 continue;
2459
2460 /* Format IP accounting data for inclusion in the structured log message */
2461 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
2462 return log_oom();
2463 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2464
2465 /* If value is 0, we don't log it in the MESSAGE= field. */
2466 if (value == 0)
2467 continue;
2468
2469 /* Format the IP accounting data for inclusion in the human language message string, but only
2470 * for the bytes counters (and not for the packets counters) */
2471 if (ip_fields[m].message_suffix) {
2472 if (strextendf_with_separator(&message, ", ", "%s %s",
2473 FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
2474 return log_oom();
2475
2476 log_level = raise_level(log_level,
2477 value > MENTIONWORTHY_IP_BYTES,
2478 value > NOTICEWORTHY_IP_BYTES);
2479 }
2480 }
2481
2482 /* This check is here because it is the earliest point following all possible log_level assignments.
2483 * (If log_level is assigned anywhere after this point, move this check.) */
2484 if (!unit_log_level_test(u, log_level))
2485 return 0;
2486
2487 /* Is there any accounting data available at all? */
2488 if (n_iovec == 0) {
2489 assert(!message);
2490 return 0;
2491 }
2492
2493 t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
2494 if (!t)
2495 return log_oom();
2496 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2497
2498 if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
2499 return log_oom();
2500
2501 if (!set_iovec_string_field(iovec, &n_iovec, unit_log_field(u), u->id))
2502 return log_oom();
2503
2504 if (!set_iovec_string_field(iovec, &n_iovec, unit_invocation_log_field(u), u->invocation_id_string))
2505 return log_oom();
2506
2507 log_unit_struct_iovec(u, log_level, iovec, n_iovec);
2508
2509 return 0;
2510 }
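/* Illustrative output (unit name and field values invented for the example): for a service
 * that consumed CPU time, peaked at some memory use, and wrote to disk, the record assembled
 * above could look roughly like this in the journal:
 *
 *     MESSAGE=foo.service: Consumed 2.5s CPU time, 64M memory peak, 10M written to disk.
 *     CPU_USAGE_NSEC=2500000000
 *     MEMORY_PEAK=67108864
 *     IO_METRIC_WRITE_BYTES=10485760
 *     MESSAGE_ID=<value of SD_MESSAGE_UNIT_RESOURCES_STR>
 *     UNIT=foo.service           (USER_UNIT= for user managers)
 *     INVOCATION_ID=<invocation ID of this run>
 */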
2511
2512 static void unit_update_on_console(Unit *u) {
2513 bool b;
2514
2515 assert(u);
2516
2517 b = unit_needs_console(u);
2518 if (u->on_console == b)
2519 return;
2520
2521 u->on_console = b;
2522 if (b)
2523 manager_ref_console(u->manager);
2524 else
2525 manager_unref_console(u->manager);
2526 }
2527
2528 static void unit_emit_audit_start(Unit *u) {
2529 assert(u);
2530
2531 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2532 return;
2533
2534 /* Write audit record if we have just finished starting up */
2535 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2536 u->in_audit = true;
2537 }
2538
2539 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2540 assert(u);
2541
2542 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2543 return;
2544
2545 if (u->in_audit) {
2546 /* Write audit record if we have just finished shutting down */
2547 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
2548 u->in_audit = false;
2549 } else {
2550                 /* Hmm, if no start record was written, write it now, so that we always have a nice pair. */
2551 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);
2552
2553 if (state == UNIT_INACTIVE)
2554 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
2555 }
2556 }
2557
2558 static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
2559 bool unexpected = false;
2560 JobResult result;
2561
2562 assert(j);
2563
2564 if (j->state == JOB_WAITING)
2565                 /* So we reached a different state for this job. Let's see if we can run it now, in case it failed
2566                  * previously due to EAGAIN. */
2567 job_add_to_run_queue(j);
2568
2569 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2570 * hence needs to invalidate jobs. */
2571
2572 switch (j->type) {
2573
2574 case JOB_START:
2575 case JOB_VERIFY_ACTIVE:
2576
2577 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2578 job_finish_and_invalidate(j, JOB_DONE, true, false);
2579 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2580 unexpected = true;
2581
2582 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2583 if (ns == UNIT_FAILED)
2584 result = JOB_FAILED;
2585 else
2586 result = JOB_DONE;
2587
2588 job_finish_and_invalidate(j, result, true, false);
2589 }
2590 }
2591
2592 break;
2593
2594 case JOB_RELOAD:
2595 case JOB_RELOAD_OR_START:
2596 case JOB_TRY_RELOAD:
2597
2598 if (j->state == JOB_RUNNING) {
2599 if (ns == UNIT_ACTIVE)
2600 job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2601 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING, UNIT_REFRESHING)) {
2602 unexpected = true;
2603
2604 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2605 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2606 }
2607 }
2608
2609 break;
2610
2611 case JOB_STOP:
2612 case JOB_RESTART:
2613 case JOB_TRY_RESTART:
2614
2615 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2616 job_finish_and_invalidate(j, JOB_DONE, true, false);
2617 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2618 unexpected = true;
2619 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2620 }
2621
2622 break;
2623
2624 default:
2625 assert_not_reached();
2626 }
2627
2628 return unexpected;
2629 }
2630
2631 static void unit_recursive_add_to_run_queue(Unit *u) {
2632 assert(u);
2633
2634 if (u->job)
2635 job_add_to_run_queue(u->job);
2636
2637 Unit *child;
2638 UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) {
2639
2640 if (!child->job)
2641 continue;
2642
2643 unit_recursive_add_to_run_queue(child);
2644 }
2645 }
2646
2647 static void unit_check_concurrency_limit(Unit *u) {
2648 assert(u);
2649
2650 Unit *slice = UNIT_GET_SLICE(u);
2651 if (!slice)
2652 return;
2653
2654 /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */
2655
2656 if (SLICE(slice)->concurrency_soft_max != UINT_MAX) {
2657 Unit *sibling;
2658 UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) {
2659 if (sibling == u)
2660 continue;
2661
2662 unit_recursive_add_to_run_queue(sibling);
2663 }
2664 }
2665
2666 /* Also go up the tree. */
2667 unit_check_concurrency_limit(slice);
2668 }
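/* For context, the limits consulted here come from slice configuration, e.g. (hypothetical):
 *
 *     # my.slice
 *     [Slice]
 *     ConcurrencySoftMax=4
 *     ConcurrencyHardMax=8
 *
 * When a unit within my.slice stops, the walk above re-adds queued jobs of its siblings (and
 * of their descendants), then repeats the same for each ancestor slice, so that a start job
 * previously deferred with -EAGAIN by the soft limit can be dispatched. */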
2669
2670 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2671 assert(u);
2672 assert(os < _UNIT_ACTIVE_STATE_MAX);
2673 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2674
2675 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2676 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2677 * remounted this function will be called too! */
2678
2679 Manager *m = ASSERT_PTR(u->manager);
2680
2681         /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be in
2682          * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2683 unit_add_to_dbus_queue(u);
2684
2685 /* Update systemd-oomd on the property/state change.
2686 *
2687 * Always send an update if the unit is going into an inactive state so systemd-oomd knows to
2688 * stop monitoring.
2689 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2690 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2691 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2692 * have the information on the property. Thus, indiscriminately send an update. */
2693 if (os != ns && (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns)))
2694 (void) manager_varlink_send_managed_oom_update(u);
2695
2696 /* Update timestamps for state changes */
2697 if (!MANAGER_IS_RELOADING(m)) {
2698 dual_timestamp_now(&u->state_change_timestamp);
2699
2700 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2701 u->inactive_exit_timestamp = u->state_change_timestamp;
2702 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2703 u->inactive_enter_timestamp = u->state_change_timestamp;
2704
2705 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2706 u->active_enter_timestamp = u->state_change_timestamp;
2707 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2708 u->active_exit_timestamp = u->state_change_timestamp;
2709 }
2710
2711 /* Keep track of failed units */
2712 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2713
2714 /* Make sure the cgroup and state files are always removed when we become inactive */
2715 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2716 SET_FLAG(u->markers,
2717 (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
2718 false);
2719 unit_prune_cgroup(u);
2720 unit_unlink_state_files(u);
2721 } else if (ns != os && ns == UNIT_RELOADING)
2722 SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);
2723
2724 unit_update_on_console(u);
2725
2726 if (!MANAGER_IS_RELOADING(m)) {
2727 bool unexpected;
2728
2729 /* Let's propagate state changes to the job */
2730 if (u->job)
2731 unexpected = unit_process_job(u->job, ns, reload_success);
2732 else
2733 unexpected = true;
2734
2735 /* If this state change happened without being requested by a job, then let's retroactively start or
2736 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2737 * additional jobs just because something is already activated. */
2738
2739 if (unexpected) {
2740 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2741 retroactively_start_dependencies(u);
2742 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2743 retroactively_stop_dependencies(u);
2744 }
2745
2746 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2747 /* This unit just finished starting up */
2748
2749 unit_emit_audit_start(u);
2750 manager_send_unit_plymouth(m, u);
2751 manager_send_unit_supervisor(m, u, /* active= */ true);
2752
2753 } else if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2754 /* This unit just stopped/failed. */
2755
2756 unit_emit_audit_stop(u, ns);
2757 manager_send_unit_supervisor(m, u, /* active= */ false);
2758 unit_log_resources(u);
2759 }
2760
2761 if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
2762 unit_start_on_termination_deps(u, UNIT_ATOM_ON_SUCCESS);
2763 else if (ns != os && ns == UNIT_FAILED)
2764 unit_start_on_termination_deps(u, UNIT_ATOM_ON_FAILURE);
2765 }
2766
2767 manager_recheck_journal(m);
2768 manager_recheck_dbus(m);
2769
2770 unit_trigger_notify(u);
2771
2772 if (!MANAGER_IS_RELOADING(m)) {
2773 const char *reason;
2774
2775 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2776 reason = strjoina("unit ", u->id, " failed");
2777 emergency_action(m, u->failure_action, EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2778 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2779 reason = strjoina("unit ", u->id, " succeeded");
2780 emergency_action(m, u->success_action, /* flags= */ 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2781 }
2782 }
2783
2784 /* And now, add the unit or depending units to various queues that will act on the new situation if
2785 * needed. These queues generally check for continuous state changes rather than events (like most of
2786 * the state propagation above), and do work deferred instead of instantly, since they typically
2787 * don't want to run during reloading, and usually involve checking combined state of multiple units
2788 * at once. */
2789
2790 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2791                 /* Stop unneeded units and bound-by units regardless of whether going down was expected or not */
2792 check_unneeded_dependencies(u);
2793 check_bound_by_dependencies(u);
2794
2795 /* Maybe someone wants us to remain up? */
2796 unit_submit_to_start_when_upheld_queue(u);
2797
2798 /* Maybe the unit should be GC'ed now? */
2799 unit_add_to_gc_queue(u);
2800
2801 /* Maybe we can release some resources now? */
2802 unit_submit_to_release_resources_queue(u);
2803
2804 /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */
2805 unit_check_concurrency_limit(u);
2806
2807 } else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2808                 /* Start uphold units regardless of whether going up was expected or not */
2809 check_uphold_dependencies(u);
2810
2811 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2812 unit_submit_to_stop_when_unneeded_queue(u);
2813
2814                 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2815                  * when something sets BindsTo= on a Type=oneshot unit, as these units go directly from starting to
2816                  * inactive, without ever entering started.) */
2817 unit_submit_to_stop_when_bound_queue(u);
2818 }
2819 }
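/* Worked example (simplified, derived from the logic above, manager not reloading): a service
 * failing during startup, i.e. os=UNIT_ACTIVATING -> ns=UNIT_FAILED, will: get its
 * inactive_enter_timestamp stamped, be recorded in the manager's failed-units set, have its
 * cgroup pruned and state files removed, see a pending JOB_START finished as JOB_FAILED,
 * trigger its OnFailure= dependencies, run FailureAction= (if configured), and finally be
 * submitted to the GC and release-resources queues. */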
2820
2821 int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
2822 _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
2823 int r;
2824
2825 /* Adds a specific PID to the set of PIDs this unit watches. */
2826
2827 assert(u);
2828 assert(pidref_is_set(pid));
2829
2830         /* The caller might be sure that this PID belongs to this unit only. Let's take this
2831          * opportunity to remove any stale references to this PID, as they can be created
2832          * easily (when watching a process which is not our direct child). */
2833 if (exclusive)
2834 manager_unwatch_pidref(u->manager, pid);
2835
2836 if (set_contains(u->pids, pid)) { /* early exit if already being watched */
2837 assert(!exclusive);
2838 return 0;
2839 }
2840
2841 r = pidref_dup(pid, &pid_dup);
2842 if (r < 0)
2843 return r;
2844
2845 /* First, insert into the set of PIDs maintained by the unit */
2846 r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
2847 if (r < 0)
2848 return r;
2849
2850 pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */
2851
2852 /* Second, insert it into the simple global table, see if that works */
2853 r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u);
2854 if (r != -EEXIST)
2855 return r;
2856
2857 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2858 * hashmap that points to an array. */
2859
2860 PidRef *old_pid = NULL;
2861 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);
2862
2863 /* Count entries in array */
2864 size_t n = 0;
2865 for (; array && array[n]; n++)
2866 ;
2867
2868 /* Allocate a new array */
2869 _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
2870 if (!new_array)
2871 return -ENOMEM;
2872
2873 /* Append us to the end */
2874 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2875 new_array[n] = u;
2876 new_array[n+1] = NULL;
2877
2878 /* Add or replace the old array */
2879 r = hashmap_ensure_replace(&u->manager->watch_pids_more, &pidref_hash_ops, old_pid ?: pid, new_array);
2880 if (r < 0)
2881 return r;
2882
2883 TAKE_PTR(new_array); /* Now part of the hash table */
2884 free(array); /* Which means we can now delete the old version */
2885 return 0;
2886 }
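/* Illustrative shape of the two tables (unit names and PID hypothetical): if foo.service is
 * the sole watcher of PID 1234, the mapping lives in the simple table:
 *
 *     manager->watch_pids:      1234 -> foo.service
 *
 * If bar.service then also watches PID 1234, the hashmap_ensure_put() above returns -EEXIST
 * and bar.service is instead appended to a NULL-terminated array in the overflow table:
 *
 *     manager->watch_pids_more: 1234 -> { bar.service, NULL }
 */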
2887
2888 void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
2889 assert(u);
2890 assert(pidref_is_set(pid));
2891
2892 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2893 _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
2894 if (!pid1)
2895 return; /* Early exit if this PID was never watched by us */
2896
2897 /* First let's drop the unit from the simple hash table, if it is included there */
2898 PidRef *pid2 = NULL;
2899 Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);
2900
2901         /* Quick validation: iff we are in the watch_pids table, then the PidRef object must be the same as in our local pids set. */
2902 assert((uu == u) == (pid1 == pid2));
2903
2904 if (uu == u)
2905 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2906 assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
2907 else {
2908 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2909 PidRef *pid3 = NULL;
2910 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);
2911
2912 /* Let's iterate through the array, dropping our own entry */
2913 size_t m = 0, n = 0;
2914 for (; array && array[n]; n++)
2915 if (array[n] != u)
2916 array[m++] = array[n];
2917 if (n == m)
2918 return; /* Not there */
2919
2920 array[m] = NULL; /* set trailing NULL marker on the new end */
2921
2922 if (m == 0) {
2923 /* The array is now empty, remove the entire entry */
2924 assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
2925 free(array);
2926 } else {
2927 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2928 * we will delete, but by the PidRef object of the Unit that is now first in the
2929 * array. */
2930
2931 PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
2932 assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
2933 }
2934 }
2935 }
2936
2937 void unit_unwatch_all_pids(Unit *u) {
2938 assert(u);
2939
2940 while (!set_isempty(u->pids))
2941 unit_unwatch_pidref(u, set_first(u->pids));
2942
2943 u->pids = set_free(u->pids);
2944 }
2945
2946 void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
2947 assert(u);
2948
2949 if (!pidref_is_set(pidref))
2950 return;
2951
2952 unit_unwatch_pidref(u, pidref);
2953 pidref_done(pidref);
2954 }
2955
2956 bool unit_job_is_applicable(Unit *u, JobType j) {
2957 assert(u);
2958 assert(j >= 0 && j < _JOB_TYPE_MAX);
2959
2960 switch (j) {
2961
2962 case JOB_VERIFY_ACTIVE:
2963 case JOB_START:
2964 case JOB_NOP:
2965                 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2966                  * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2967                  * jobs for them. */
2968 return true;
2969
2970 case JOB_STOP:
2971                 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2972                  * external events), hence it makes no sense to permit enqueuing such a request either. */
2973 return !u->perpetual;
2974
2975 case JOB_RESTART:
2976 case JOB_TRY_RESTART:
2977 return unit_can_stop(u) && unit_can_start(u);
2978
2979 case JOB_RELOAD:
2980 case JOB_TRY_RELOAD:
2981 return unit_can_reload(u);
2982
2983 case JOB_RELOAD_OR_START:
2984 return unit_can_reload(u) && unit_can_start(u);
2985
2986 default:
2987 assert_not_reached();
2988 }
2989 }
2990
2991 static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
2992 Hashmap *deps;
2993
2994 assert(u);
2995 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2996
2997 deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
2998 if (!deps) {
2999 _cleanup_hashmap_free_ Hashmap *h = NULL;
3000
3001 h = hashmap_new(NULL);
3002 if (!h)
3003 return NULL;
3004
3005 if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
3006 return NULL;
3007
3008 deps = TAKE_PTR(h);
3009 }
3010
3011 return deps;
3012 }
3013
3014 typedef enum NotifyDependencyFlags {
3015 NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0,
3016 NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,
3017 } NotifyDependencyFlags;
3018
3019 static int unit_add_dependency_impl(
3020 Unit *u,
3021 UnitDependency d,
3022 Unit *other,
3023 UnitDependencyMask mask) {
3024
3025 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
3026 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
3027 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
3028 [UNIT_WANTS] = UNIT_WANTED_BY,
3029 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
3030 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
3031 [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
3032 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
3033 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
3034 [UNIT_WANTED_BY] = UNIT_WANTS,
3035 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
3036 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
3037 [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
3038 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
3039 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
3040 [UNIT_BEFORE] = UNIT_AFTER,
3041 [UNIT_AFTER] = UNIT_BEFORE,
3042 [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
3043 [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
3044 [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
3045 [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
3046 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
3047 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
3048 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
3049 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
3050 [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
3051 [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
3052 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
3053 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
3054 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
3055 [UNIT_IN_SLICE] = UNIT_SLICE_OF,
3056 [UNIT_SLICE_OF] = UNIT_IN_SLICE,
3057 };
3058
3059 Hashmap *u_deps, *other_deps;
3060 UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
3061 NotifyDependencyFlags flags = 0;
3062 int r;
3063
3064 assert(u);
3065 assert(other);
3066 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3067 assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
3068 assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);
3069
3070 /* Ensure the following two hashmaps for each unit exist:
3071 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3072 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3073 u_deps = unit_get_dependency_hashmap_per_type(u, d);
3074 if (!u_deps)
3075 return -ENOMEM;
3076
3077 other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
3078 if (!other_deps)
3079 return -ENOMEM;
3080
3081 /* Save the original dependency info. */
3082 u_info.data = u_info_old.data = hashmap_get(u_deps, other);
3083 other_info.data = other_info_old.data = hashmap_get(other_deps, u);
3084
3085 /* Update dependency info. */
3086 u_info.origin_mask |= mask;
3087 other_info.destination_mask |= mask;
3088
3089 /* Save updated dependency info. */
3090 if (u_info.data != u_info_old.data) {
3091 r = hashmap_replace(u_deps, other, u_info.data);
3092 if (r < 0)
3093 return r;
3094
3095 flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
3096 u->dependency_generation++;
3097 }
3098
3099 if (other_info.data != other_info_old.data) {
3100 r = hashmap_replace(other_deps, u, other_info.data);
3101 if (r < 0) {
3102 if (u_info.data != u_info_old.data) {
3103 /* Restore the old dependency. */
3104 if (u_info_old.data)
3105 (void) hashmap_update(u_deps, other, u_info_old.data);
3106 else
3107 hashmap_remove(u_deps, other);
3108 }
3109 return r;
3110 }
3111
3112 flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
3113 other->dependency_generation++;
3114 }
3115
3116 return flags;
3117 }
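/* Example (unit names hypothetical): adding UNIT_WANTS from a.service to b.service stores a
 * UnitDependencyInfo for b in a's UNIT_WANTS hashmap and, via inverse_table[], one for a in
 * b's UNIT_WANTED_BY hashmap, so the edge is traversable from both ends. The returned
 * NotifyDependencyFlags indicate which of the two sides actually changed. */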
3118
3119 int unit_add_dependency(
3120 Unit *u,
3121 UnitDependency d,
3122 Unit *other,
3123 bool add_reference,
3124 UnitDependencyMask mask) {
3125
3126 UnitDependencyAtom a;
3127 int r;
3128
3129 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3130 * there, no need to notify! */
3131 NotifyDependencyFlags notify_flags;
3132
3133 assert(u);
3134 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3135 assert(other);
3136
3137 u = unit_follow_merge(u);
3138 other = unit_follow_merge(other);
3139 a = unit_dependency_to_atom(d);
3140 assert(a >= 0);
3141
3142 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3143 if (u == other) {
3144 if (unit_should_warn_about_dependency(d))
3145 log_unit_warning(u, "Dependency %s=%s is dropped.",
3146 unit_dependency_to_string(d), u->id);
3147 return 0;
3148 }
3149
3150 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3151 return 0;
3152
3153         /* Note that ordering a device unit after another unit (i.e. After= on the device) is permitted, since
3154          * it allows the device's job running timeout to start at a specific time; only Before= is ignored. */
3155         if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
3156                 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed).", other->id);
3157 return 0;
3158 }
3159
3160 if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
3161 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
3162 return 0;
3163 }
3164
3165 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
3166 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3167 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
3168 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
3169 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3170 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
3171
3172 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
3173 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3174 "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
3175 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
3176 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3177 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);
3178
3179 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
3180 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3181 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);
3182
3183 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
3184 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3185 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);
3186
3187 r = unit_add_dependency_impl(u, d, other, mask);
3188 if (r < 0)
3189 return r;
3190 notify_flags = r;
3191
3192 if (add_reference) {
3193 r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
3194 if (r < 0)
3195 return r;
3196 notify_flags |= r;
3197 }
3198
3199 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
3200 unit_add_to_dbus_queue(u);
3201 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
3202 unit_add_to_dbus_queue(other);
3203
3204 return notify_flags != 0;
3205 }
3206
3207 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3208 int r = 0, s = 0;
3209
3210 assert(u);
3211 assert(d >= 0 || e >= 0);
3212
3213 if (d >= 0) {
3214 r = unit_add_dependency(u, d, other, add_reference, mask);
3215 if (r < 0)
3216 return r;
3217 }
3218
3219 if (e >= 0) {
3220 s = unit_add_dependency(u, e, other, add_reference, mask);
3221 if (s < 0)
3222 return s;
3223 }
3224
3225 return r > 0 || s > 0;
3226 }
3227
3228 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3229 int r;
3230
3231 assert(u);
3232 assert(name);
3233 assert(buf);
3234 assert(ret);
3235
3236 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3237 *buf = NULL;
3238 *ret = name;
3239 return 0;
3240 }
3241
3242 if (u->instance)
3243 r = unit_name_replace_instance(name, u->instance, buf);
3244 else {
3245 _cleanup_free_ char *i = NULL;
3246
3247 r = unit_name_to_prefix(u->id, &i);
3248 if (r < 0)
3249 return r;
3250
3251 r = unit_name_replace_instance(name, i, buf);
3252 }
3253 if (r < 0)
3254 return r;
3255
3256 *ret = *buf;
3257 return 0;
3258 }
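/* Example: for a unit with instance "tty1" (say, getty@tty1.service) and name
 * "autovt@.service", this resolves to "autovt@tty1.service"; a non-template name such as
 * "dbus.service" is passed through unchanged, with *buf left NULL. */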
3259
3260 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3261 _cleanup_free_ char *buf = NULL;
3262 Unit *other;
3263 int r;
3264
3265 assert(u);
3266 assert(name);
3267
3268 r = resolve_template(u, name, &buf, &name);
3269 if (r < 0)
3270 return r;
3271
3272 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3273 return 0;
3274
3275 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3276 if (r < 0)
3277 return r;
3278
3279 return unit_add_dependency(u, d, other, add_reference, mask);
3280 }
3281
3282 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3283 _cleanup_free_ char *buf = NULL;
3284 Unit *other;
3285 int r;
3286
3287 assert(u);
3288 assert(name);
3289
3290 r = resolve_template(u, name, &buf, &name);
3291 if (r < 0)
3292 return r;
3293
3294 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3295 return 0;
3296
3297 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3298 if (r < 0)
3299 return r;
3300
3301 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3302 }
3303
3304 int setenv_unit_path(const char *p) {
3305 assert(p);
3306
3307 /* This is mostly for debug purposes */
3308 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ true));
3309 }
3310
3311 char* unit_dbus_path(Unit *u) {
3312 assert(u);
3313
3314 if (!u->id)
3315 return NULL;
3316
3317 return unit_dbus_path_from_name(u->id);
3318 }
3319
3320 char* unit_dbus_path_invocation_id(Unit *u) {
3321 assert(u);
3322
3323 if (sd_id128_is_null(u->invocation_id))
3324 return NULL;
3325
3326 return unit_dbus_path_from_name(u->invocation_id_string);
3327 }
3328
3329 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
3330 int r;
3331
3332 assert(u);
3333
3334 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3335
3336 if (sd_id128_equal(u->invocation_id, id))
3337 return 0;
3338
3339 if (!sd_id128_is_null(u->invocation_id))
3340 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
3341
3342 if (sd_id128_is_null(id)) {
3343 r = 0;
3344 goto reset;
3345 }
3346
3347 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
3348 if (r < 0)
3349 goto reset;
3350
3351 u->invocation_id = id;
3352 sd_id128_to_string(id, u->invocation_id_string);
3353
3354 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
3355 if (r < 0)
3356 goto reset;
3357
3358 return 0;
3359
3360 reset:
3361 u->invocation_id = SD_ID128_NULL;
3362 u->invocation_id_string[0] = 0;
3363 return r;
3364 }
3365
3366 int unit_set_slice(Unit *u, Unit *slice) {
3367 int r;
3368
3369 assert(u);
3370 assert(slice);
3371
3372         /* Sets the unit's slice if it has not been set before. We are extra careful to only allow this for
3373          * units that actually have a cgroup context. Also, we don't allow setting this for slices (since the
3374          * parent slice is derived from the name). Make sure the unit we set as slice is actually a slice. */
3375
3376 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3377 return -EOPNOTSUPP;
3378
3379 if (u->type == UNIT_SLICE)
3380 return -EINVAL;
3381
3382 if (unit_active_state(u) != UNIT_INACTIVE)
3383 return -EBUSY;
3384
3385 if (slice->type != UNIT_SLICE)
3386 return -EINVAL;
3387
3388 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3389 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3390 return -EPERM;
3391
3392 if (UNIT_GET_SLICE(u) == slice)
3393 return 0;
3394
3395 /* Disallow slice changes if @u is already bound to cgroups */
3396 if (UNIT_GET_SLICE(u)) {
3397 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3398 if (crt && crt->cgroup_path)
3399 return -EBUSY;
3400 }
3401
3402         /* Remove any previously assigned slice; we should only have one UNIT_IN_SLICE dependency */
3403 if (UNIT_GET_SLICE(u))
3404 unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);
3405
3406 r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
3407 if (r < 0)
3408 return r;
3409
3410 return 1;
3411 }
3412
3413 int unit_set_default_slice(Unit *u) {
3414 const char *slice_name;
3415 Unit *slice;
3416 int r;
3417
3418 assert(u);
3419
3420 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3421 return 0;
3422
3423 if (UNIT_GET_SLICE(u))
3424 return 0;
3425
3426 if (u->instance) {
3427 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3428
3429 /* Implicitly place all instantiated units in their
3430 * own per-template slice */
3431
3432 r = unit_name_to_prefix(u->id, &prefix);
3433 if (r < 0)
3434 return r;
3435
3436                 /* The prefix is already escaped, but it might include
3437                  * "-" which has a special meaning for slice units,
3438                  * hence escape it here once more. */
3439 escaped = unit_name_escape(prefix);
3440 if (!escaped)
3441 return -ENOMEM;
3442
3443 if (MANAGER_IS_SYSTEM(u->manager))
3444 slice_name = strjoina("system-", escaped, ".slice");
3445 else
3446 slice_name = strjoina("app-", escaped, ".slice");
3447
3448 } else if (unit_is_extrinsic(u))
3449 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3450 * the root slice. They don't really belong in one of the subslices. */
3451 slice_name = SPECIAL_ROOT_SLICE;
3452
3453 else if (MANAGER_IS_SYSTEM(u->manager))
3454 slice_name = SPECIAL_SYSTEM_SLICE;
3455 else
3456 slice_name = SPECIAL_APP_SLICE;
3457
3458 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3459 if (r < 0)
3460 return r;
3461
3462 return unit_set_slice(u, slice);
3463 }
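/* Examples: an instantiated unit like getty@tty1.service (prefix "getty") is placed in
 * system-getty.slice by the system manager; a plain foo.service ends up in system.slice
 * (or app.slice for user managers); extrinsic units stay in the root slice -.slice. */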
3464
3465 const char* unit_slice_name(Unit *u) {
3466 Unit *slice;
3467 assert(u);
3468
3469 slice = UNIT_GET_SLICE(u);
3470 if (!slice)
3471 return NULL;
3472
3473 return slice->id;
3474 }
3475
3476 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3477 _cleanup_free_ char *t = NULL;
3478 int r;
3479
3480 assert(u);
3481 assert(type);
3482 assert(_found);
3483
3484 r = unit_name_change_suffix(u->id, type, &t);
3485 if (r < 0)
3486 return r;
3487 if (unit_has_name(u, t))
3488 return -EINVAL;
3489
3490 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3491 assert(r < 0 || *_found != u);
3492 return r;
3493 }
3494
3495 static int signal_name_owner_changed_install_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3496 Unit *u = ASSERT_PTR(userdata);
3497 const sd_bus_error *e;
3498 int r;
3499
3500 e = sd_bus_message_get_error(message);
3501 if (!e) {
3502 log_unit_trace(u, "Successfully installed NameOwnerChanged signal match.");
3503 return 0;
3504 }
3505
3506 r = sd_bus_error_get_errno(e);
3507 log_unit_error_errno(u, r,
3508 "Unexpected error response on installing NameOwnerChanged signal match: %s",
3509 bus_error_message(e, r));
3510
3511 /* If we failed to install NameOwnerChanged signal, also unref the bus slot of GetNameOwner(). */
3512 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3513 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3514
3515 if (UNIT_VTABLE(u)->bus_name_owner_change)
3516 UNIT_VTABLE(u)->bus_name_owner_change(u, NULL);
3517
3518 return 0;
3519 }
3520
3521 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3522 const char *new_owner;
3523 Unit *u = ASSERT_PTR(userdata);
3524 int r;
3525
3526 assert(message);
3527
3528 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3529 if (r < 0) {
3530 bus_log_parse_error(r);
3531 return 0;
3532 }
3533
3534 if (UNIT_VTABLE(u)->bus_name_owner_change)
3535 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3536
3537 return 0;
3538 }
3539
3540 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3541 const sd_bus_error *e;
3542 const char *new_owner;
3543 Unit *u = ASSERT_PTR(userdata);
3544 int r;
3545
3546 assert(message);
3547
3548 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3549
3550 e = sd_bus_message_get_error(message);
3551 if (e) {
3552 if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
3553 r = sd_bus_error_get_errno(e);
3554 log_unit_error_errno(u, r,
3555 "Unexpected error response from GetNameOwner(): %s",
3556 bus_error_message(e, r));
3557 }
3558
3559 new_owner = NULL;
3560 } else {
3561 r = sd_bus_message_read(message, "s", &new_owner);
3562 if (r < 0)
3563 return bus_log_parse_error(r);
3564
3565 assert(!isempty(new_owner));
3566 }
3567
3568 if (UNIT_VTABLE(u)->bus_name_owner_change)
3569 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3570
3571 return 0;
3572 }
3573
3574 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3575 _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
3576 const char *match;
3577 usec_t timeout_usec = 0;
3578 int r;
3579
3580 assert(u);
3581 assert(bus);
3582 assert(name);
3583
3584 if (u->match_bus_slot || u->get_name_owner_slot)
3585 return -EBUSY;
3586
3587         /* NameOwnerChanged and GetNameOwner are used to detect when a service has finished starting up. The
3588          * D-Bus call timeout shouldn't be shorter than that, hence derive it from the unit's start timeout.
3589          * If we can't get the start timeout, use the default value set above. */
3590 if (UNIT_VTABLE(u)->get_timeout_start_usec)
3591 timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);
3592
3593 match = strjoina("type='signal',"
3594 "sender='org.freedesktop.DBus',"
3595 "path='/org/freedesktop/DBus',"
3596 "interface='org.freedesktop.DBus',"
3597 "member='NameOwnerChanged',"
3598 "arg0='", name, "'");
3599
3600 r = bus_add_match_full(
3601 bus,
3602 &u->match_bus_slot,
3603 /* asynchronous = */ true,
3604 match,
3605 signal_name_owner_changed,
3606 signal_name_owner_changed_install_handler,
3607 u,
3608 timeout_usec);
3609 if (r < 0)
3610 return r;
3611
3612 r = sd_bus_message_new_method_call(
3613 bus,
3614 &m,
3615 "org.freedesktop.DBus",
3616 "/org/freedesktop/DBus",
3617 "org.freedesktop.DBus",
3618 "GetNameOwner");
3619 if (r < 0)
3620 return r;
3621
3622 r = sd_bus_message_append(m, "s", name);
3623 if (r < 0)
3624 return r;
3625
3626 r = sd_bus_call_async(
3627 bus,
3628 &u->get_name_owner_slot,
3629 m,
3630 get_name_owner_handler,
3631 u,
3632 timeout_usec);
3633 if (r < 0) {
3634 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3635 return r;
3636 }
3637
3638 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3639 return 0;
3640 }
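/* Example: for name "org.freedesktop.hostname1" (hypothetical here), the installed match is
 *
 *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.freedesktop.hostname1'
 *
 * while the one-shot GetNameOwner() call resolves any owner that already existed before the
 * match was installed. */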
3641
3642 int unit_watch_bus_name(Unit *u, const char *name) {
3643 int r;
3644
3645 assert(u);
3646 assert(name);
3647
3648 /* Watch a specific name on the bus. We only support one unit
3649 * watching each name for now. */
3650
3651 if (u->manager->api_bus) {
3652 /* If the bus is already available, install the match directly.
3653 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3654 r = unit_install_bus_match(u, u->manager->api_bus, name);
3655 if (r < 0)
3656 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3657 }
3658
3659 r = hashmap_put(u->manager->watch_bus, name, u);
3660 if (r < 0) {
3661 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3662 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3663                 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
3664 }
3665
3666 return 0;
3667 }
3668
3669 void unit_unwatch_bus_name(Unit *u, const char *name) {
3670 assert(u);
3671 assert(name);
3672
3673 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3674 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3675 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3676 }
3677
3678 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3679 _cleanup_free_ char *e = NULL;
3680 Unit *device;
3681 int r;
3682
3683 assert(u);
3684
3685 /* Adds dependencies on the device node that this unit is based on. */
3686 if (isempty(what))
3687 return 0;
3688
3689 if (!is_device_path(what))
3690 return 0;
3691
3692 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3693 if (!unit_type_supported(UNIT_DEVICE))
3694 return 0;
3695
3696 r = unit_name_from_path(what, ".device", &e);
3697 if (r < 0)
3698 return r;
3699
3700 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3701 if (r < 0)
3702 return r;
3703
3704 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3705 dep = UNIT_BINDS_TO;
3706
3707 return unit_add_two_dependencies(u, UNIT_AFTER,
3708 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3709 device, true, mask);
3710 }
3711
3712 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3713 _cleanup_free_ char *escaped = NULL, *target = NULL;
3714 int r;
3715
3716 assert(u);
3717
3718 if (isempty(what))
3719 return 0;
3720
3721 if (!path_startswith(what, "/dev/"))
3722 return 0;
3723
3724 /* If we don't support devices, then also don't bother with blockdev@.target */
3725 if (!unit_type_supported(UNIT_DEVICE))
3726 return 0;
3727
3728 r = unit_name_path_escape(what, &escaped);
3729 if (r < 0)
3730 return r;
3731
3732 r = unit_name_build("blockdev", escaped, ".target", &target);
3733 if (r < 0)
3734 return r;
3735
3736 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3737 }
3738
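/* Coldplugging: after deserialization (e.g. across a daemon-reload or reexec) this lets the unit,
 * its tracked bus names and its jobs re-enter the previously serialized state. */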
3739 int unit_coldplug(Unit *u) {
3740 int r = 0;
3741
3742 assert(u);
3743
3744 /* Make sure we don't enter a loop, when coldplugging recursively. */
3745 if (u->coldplugged)
3746 return 0;
3747
3748 u->coldplugged = true;
3749
3750 STRV_FOREACH(i, u->deserialized_refs)
3751 RET_GATHER(r, bus_unit_track_add_name(u, *i));
3752
3753 u->deserialized_refs = strv_free(u->deserialized_refs);
3754
3755 if (UNIT_VTABLE(u)->coldplug)
3756 RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));
3757
3758 if (u->job)
3759 RET_GATHER(r, job_coldplug(u->job));
3760 if (u->nop_job)
3761 RET_GATHER(r, job_coldplug(u->nop_job));
3762
3763 unit_modify_nft_set(u, /* add = */ true);
3764 return r;
3765 }
3766
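/* Catch up with the outside world, for unit types that track external state (e.g. mounts or
 * devices) which may have changed while we were reloading. Invoked after coldplugging. */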
3767 void unit_catchup(Unit *u) {
3768 assert(u);
3769
3770 if (UNIT_VTABLE(u)->catchup)
3771 UNIT_VTABLE(u)->catchup(u);
3772
3773 unit_cgroup_catchup(u);
3774 }
3775
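/* Returns true if the file at 'path' was modified after the given mtime, i.e. the configuration we
 * have in memory is possibly stale. For masked units ('path_masked') we instead check whether the
 * file stopped being a mask, i.e. is no longer /dev/null or an empty file. */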
3776 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3777 struct stat st;
3778
3779 if (!path)
3780 return false;
3781
3782 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3783 * are never out-of-date. */
3784 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3785 return false;
3786
3787 if (stat(path, &st) < 0)
3788 /* What, cannot access this anymore? */
3789 return true;
3790
3791 if (path_masked)
3792 /* For masked files check if they are still so */
3793 return !null_or_empty(&st);
3794 else
3795 /* For non-empty files check the mtime */
3796 return timespec_load(&st.st_mtim) > mtime;
3799 }
3800
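/* Returns true if what we have loaded in memory no longer matches what is on disk, i.e. whether a
 * "systemctl daemon-reload" is warranted. */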
3801 bool unit_need_daemon_reload(Unit *u) {
3802 assert(u);
3803 assert(u->manager);
3804
3805 if (u->manager->unit_file_state_outdated)
3806 return true;
3807
3808 /* For unit files, we allow masking… */
3809 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3810 u->load_state == UNIT_MASKED))
3811 return true;
3812
3813 /* Source paths should not be masked… */
3814 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3815 return true;
3816
3817 if (u->load_state == UNIT_LOADED) {
3818 _cleanup_strv_free_ char **dropins = NULL;
3819
3820 (void) unit_find_dropin_paths(u, /* use_unit_path_cache = */ false, &dropins);
3821
3822 if (!strv_equal(u->dropin_paths, dropins))
3823 return true;
3824
3825 /* … any drop-ins that are masked are simply omitted from the list. */
3826 STRV_FOREACH(path, u->dropin_paths)
3827 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3828 return true;
3829 }
3830
3831 return false;
3832 }
3833
3834 void unit_reset_failed(Unit *u) {
3835 assert(u);
3836
3837 if (UNIT_VTABLE(u)->reset_failed)
3838 UNIT_VTABLE(u)->reset_failed(u);
3839
3840 ratelimit_reset(&u->start_ratelimit);
3841 u->start_limit_hit = false;
3842
3843 (void) unit_set_debug_invocation(u, /* enable= */ false);
3844 }
3845
3846 Unit *unit_following(Unit *u) {
3847 assert(u);
3848
3849 if (UNIT_VTABLE(u)->following)
3850 return UNIT_VTABLE(u)->following(u);
3851
3852 return NULL;
3853 }
3854
3855 bool unit_stop_pending(Unit *u) {
3856 assert(u);
3857
3858 /* This call does not check the current state of the unit. It's
3859 * hence useful to be called from state change calls of the
3860 * unit itself, where the state isn't updated yet. This is
3861 * different from unit_inactive_or_pending() which checks both
3862 * the current state and for a queued job. */
3863
3864 return unit_has_job_type(u, JOB_STOP);
3865 }
3866
3867 bool unit_inactive_or_pending(Unit *u) {
3868 assert(u);
3869
3870 /* Returns true if the unit is inactive or going down */
3871
3872 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3873 return true;
3874
3875 if (unit_stop_pending(u))
3876 return true;
3877
3878 return false;
3879 }
3880
3881 bool unit_active_or_pending(Unit *u) {
3882 assert(u);
3883
3884 /* Returns true if the unit is active or going up */
3885
3886 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3887 return true;
3888
3889 if (u->job &&
3890 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3891 return true;
3892
3893 return false;
3894 }
3895
3896 bool unit_will_restart_default(Unit *u) {
3897 assert(u);
3898
3899 return unit_has_job_type(u, JOB_START);
3900 }
3901
3902 bool unit_will_restart(Unit *u) {
3903 assert(u);
3904
3905 if (!UNIT_VTABLE(u)->will_restart)
3906 return false;
3907
3908 return UNIT_VTABLE(u)->will_restart(u);
3909 }
3910
3911 void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
3912 assert(u);
3913
3914 if (UNIT_VTABLE(u)->notify_cgroup_oom)
3915 UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
3916 }
3917
3918 static int unit_pid_set(Unit *u, Set **pid_set) {
3919 int r;
3920
3921 assert(u);
3922 assert(pid_set);
3923
3924 set_clear(*pid_set); /* This updates input. */
3925
3926 /* Exclude the main/control pids from being killed via the cgroup */
3927
3928 PidRef *pid;
3929 FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
3930 if (pidref_is_set(pid)) {
3931 r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
3932 if (r < 0)
3933 return r;
3934 }
3935
3936 return 0;
3937 }
3938
3939 static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
3940 _cleanup_free_ char *comm = NULL;
3941 Unit *u = ASSERT_PTR(userdata);
3942
3943 (void) pidref_get_comm(pid, &comm);
3944
3945 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
3946 signal_to_string(signo), pid->pid, strna(comm));
3947
3948 return 1;
3949 }
3950
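/* Dispatch on the requested siginfo code: SI_USER means a plain kill(), SI_QUEUE means sigqueue()
 * with the client-supplied value attached. */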
3951 static int kill_or_sigqueue(PidRef *pidref, int signo, int code, int value) {
3952 assert(pidref_is_set(pidref));
3953 assert(SIGNAL_VALID(signo));
3954
3955 switch (code) {
3956
3957 case SI_USER:
3958 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
3959 return pidref_kill(pidref, signo);
3960
3961 case SI_QUEUE:
3962 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
3963 return pidref_sigqueue(pidref, signo, value);
3964
3965 default:
3966 assert_not_reached();
3967 }
3968 }
3969
3970 static int unit_kill_one(
3971 Unit *u,
3972 PidRef *pidref,
3973 const char *type,
3974 int signo,
3975 int code,
3976 int value,
3977 sd_bus_error *ret_error) {
3978
3979 int r;
3980
3981 assert(u);
3982 assert(type);
3983
3984 if (!pidref_is_set(pidref))
3985 return 0;
3986
3987 _cleanup_free_ char *comm = NULL;
3988 (void) pidref_get_comm(pidref, &comm);
3989
3990 r = kill_or_sigqueue(pidref, signo, code, value);
3991 if (r == -ESRCH)
3992 return 0;
3993 if (r < 0) {
3994 /* Report this failure both to the logs and to the client */
3995 if (ret_error)
3996 sd_bus_error_set_errnof(
3997 ret_error, r,
3998 "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
3999 signal_to_string(signo), type, pidref->pid, strna(comm));
4000
4001 return log_unit_warning_errno(
4002 u, r,
4003 "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
4004 signal_to_string(signo), type, pidref->pid, strna(comm));
4005 }
4006
4007 log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
4008 signal_to_string(signo), type, pidref->pid, strna(comm));
4009 return 1; /* killed */
4010 }
4011
4012 int unit_kill(
4013 Unit *u,
4014 KillWhom whom,
4015 int signo,
4016 int code,
4017 int value,
4018 sd_bus_error *ret_error) {
4019
4020 PidRef *main_pid, *control_pid;
4021 bool killed = false;
4022 int ret = 0, r;
4023
4024 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4025 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4026 * stop a service ourselves. */
4027
4028 assert(u);
4029 assert(whom >= 0);
4030 assert(whom < _KILL_WHOM_MAX);
4031 assert(SIGNAL_VALID(signo));
4032 assert(IN_SET(code, SI_USER, SI_QUEUE));
4033
4034 main_pid = unit_main_pid(u);
4035 control_pid = unit_control_pid(u);
4036
4037 if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
4038 return sd_bus_error_setf(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");
4039
4040 if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL)) {
4041 if (!main_pid)
4042 return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4043 if (!pidref_is_set(main_pid))
4044 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4045 }
4046
4047 if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4048 if (!control_pid)
4049 return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4050 if (!pidref_is_set(control_pid))
4051 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4052 }
4053
4054 if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4055 r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
4056 RET_GATHER(ret, r);
4057 killed = killed || r > 0;
4058 }
4059
4060 if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4061 r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
4062 RET_GATHER(ret, r);
4063 killed = killed || r > 0;
4064 }
4065
4066 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4067 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4068 * resource, we shouldn't let ourselves be used for such allocation sprees). */
4069 if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL) && code == SI_USER) {
4070 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4071 if (crt && crt->cgroup_path) {
4072 _cleanup_set_free_ Set *pid_set = NULL;
4073
4074 if (signo == SIGKILL) {
4075 r = cg_kill_kernel_sigkill(crt->cgroup_path);
4076 if (r >= 0) {
4077 killed = true;
4078 log_unit_info(u, "Killed unit cgroup with SIGKILL on client request.");
4079 goto finish;
4080 }
4081 if (r != -EOPNOTSUPP) {
4082 if (ret >= 0)
4083 sd_bus_error_set_errnof(ret_error, r,
4084 "Failed to kill unit cgroup: %m");
4085 RET_GATHER(ret, log_unit_warning_errno(u, r, "Failed to kill unit cgroup: %m"));
4086 goto finish;
4087 }
4088 /* Fall back to manual enumeration */
4089 } else {
4090 /* Exclude the main/control pids from being killed via the cgroup if
4091 * not SIGKILL */
4092 r = unit_pid_set(u, &pid_set);
4093 if (r < 0)
4094 return log_oom();
4095 }
4096
4097 r = cg_kill_recursive(crt->cgroup_path, signo, 0, pid_set, kill_common_log, u);
4098 if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
4099 if (ret >= 0)
4100 sd_bus_error_set_errnof(
4101 ret_error, r,
4102 "Failed to send signal SIG%s to auxiliary processes: %m",
4103 signal_to_string(signo));
4104
4105 RET_GATHER(ret, log_unit_warning_errno(
4106 u, r,
4107 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4108 signal_to_string(signo)));
4109 }
4110 killed = killed || r >= 0;
4111 }
4112 }
4113
4114 finish:
4115 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4116 if (ret >= 0 && !killed && IN_SET(whom, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
4117 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");
4118
4119 return ret;
4120 }
4121
4122 int unit_following_set(Unit *u, Set **s) {
4123 assert(u);
4124 assert(s);
4125
4126 if (UNIT_VTABLE(u)->following_set)
4127 return UNIT_VTABLE(u)->following_set(u, s);
4128
4129 *s = NULL;
4130 return 0;
4131 }
4132
4133 UnitFileState unit_get_unit_file_state(Unit *u) {
4134 int r;
4135
4136 assert(u);
4137
4138 if (u->unit_file_state >= 0 || !u->fragment_path)
4139 return u->unit_file_state;
4140
4141 /* If we know this is a transient unit, there's no need to ask the unit file state for details. Let's
4142 * bypass the more expensive on-disk check. */
4143 if (u->transient)
4144 return (u->unit_file_state = UNIT_FILE_TRANSIENT);
4145
4146 r = unit_file_get_state(
4147 u->manager->runtime_scope,
4148 /* root_dir= */ NULL,
4149 u->id,
4150 &u->unit_file_state);
4151 if (r < 0)
4152 u->unit_file_state = UNIT_FILE_BAD;
4153
4154 return u->unit_file_state;
4155 }
4156
4157 PresetAction unit_get_unit_file_preset(Unit *u) {
4158 int r;
4159
4160 assert(u);
4161
4162 if (u->unit_file_preset >= 0)
4163 return u->unit_file_preset;
4164
4165 /* If this is a transient or perpetual unit file it doesn't make much sense to ask the preset
4166 * database about this, because enabling/disabling makes no sense for either. Hence don't. */
4167 if (!u->fragment_path || u->transient || u->perpetual)
4168 return (u->unit_file_preset = -ENOEXEC);
4169
4170 _cleanup_free_ char *bn = NULL;
4171 r = path_extract_filename(u->fragment_path, &bn);
4172 if (r < 0)
4173 return (u->unit_file_preset = r);
4174 if (r == O_DIRECTORY)
4175 return (u->unit_file_preset = -EISDIR);
4176
4177 return (u->unit_file_preset = unit_file_query_preset(
4178 u->manager->runtime_scope,
4179 /* root_dir= */ NULL,
4180 bn,
4181 /* cached= */ NULL));
4182 }
4183
4184 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4185 assert(ref);
4186 assert(source);
4187 assert(target);
4188
4189 if (ref->target)
4190 unit_ref_unset(ref);
4191
4192 ref->source = source;
4193 ref->target = target;
4194 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4195 return target;
4196 }
4197
4198 void unit_ref_unset(UnitRef *ref) {
4199 assert(ref);
4200
4201 if (!ref->target)
4202 return;
4203
4204 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4205 * be unreferenced now. */
4206 unit_add_to_gc_queue(ref->target);
4207
4208 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4209 ref->source = ref->target = NULL;
4210 }
4211
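/* Derives a user name for DynamicUser= from the unit name: the unit prefix is used as-is if it is a
 * valid user name (e.g. "foobar.service" → "foobar"); otherwise it is hashed into a synthetic name of
 * the form "_du" + 16 hex digits. */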
4212 static int user_from_unit_name(Unit *u, char **ret) {
4213
4214 static const uint8_t hash_key[] = {
4215 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4216 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4217 };
4218
4219 _cleanup_free_ char *n = NULL;
4220 int r;
4221
4222 r = unit_name_to_prefix(u->id, &n);
4223 if (r < 0)
4224 return r;
4225
4226 if (valid_user_group_name(n, 0)) {
4227 *ret = TAKE_PTR(n);
4228 return 0;
4229 }
4230
4231 /* If we can't use the unit name as a user name, then let's hash it and use that */
4232 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4233 return -ENOMEM;
4234
4235 return 0;
4236 }
4237
4238 static int unit_verify_contexts(const Unit *u) {
4239 assert(u);
4240
4241 const ExecContext *ec = unit_get_exec_context(u);
4242 if (!ec)
4243 return 0;
4244
4245 if (MANAGER_IS_USER(u->manager) && ec->dynamic_user)
4246 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "DynamicUser= enabled for user unit, which is not supported. Refusing.");
4247
4248 if (ec->dynamic_user && ec->working_directory_home)
4249 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory=~ is not allowed under DynamicUser=yes. Refusing.");
4250
4251 if (ec->working_directory && path_below_api_vfs(ec->working_directory) &&
4252 exec_needs_mount_namespace(ec, /* params = */ NULL, /* runtime = */ NULL))
4253 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory= may not be below /proc/, /sys/ or /dev/ when using mount namespacing. Refusing.");
4254
4255 if (exec_needs_pid_namespace(ec, /* params= */ NULL) && !UNIT_VTABLE(u)->notify_pidref)
4256 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "PrivatePIDs= setting is only supported for service units. Refusing.");
4257
4258 const KillContext *kc = unit_get_kill_context(u);
4259
4260 if (ec->pam_name && kc && !IN_SET(kc->kill_mode, KILL_CONTROL_GROUP, KILL_MIXED))
4261 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit has PAM enabled. Kill mode must be set to 'control-group' or 'mixed'. Refusing.");
4262
4263 return 0;
4264 }
4265
4266 static PrivateTmp unit_get_private_var_tmp(const Unit *u, const ExecContext *c) {
4267 assert(u);
4268 assert(c);
4269 assert(c->private_tmp >= 0 && c->private_tmp < _PRIVATE_TMP_MAX);
4270
4271 /* Disable disconnected private tmpfs on /var/tmp/ when DefaultDependencies=no and
4272 * RootImage=/RootDirectory= are not set, as /var/ may be a separate partition.
4273 * See issue #37258. */
4274
4275 /* PrivateTmp=yes/no also enables/disables private tmpfs on /var/tmp/. */
4276 if (c->private_tmp != PRIVATE_TMP_DISCONNECTED)
4277 return c->private_tmp;
4278
4279 /* When DefaultDependencies=yes, disconnected tmpfs is also enabled on /var/tmp/, and an explicit
4280 * dependency to the mount on /var/ will be added in unit_add_exec_dependencies(). */
4281 if (u->default_dependencies)
4282 return PRIVATE_TMP_DISCONNECTED;
4283
4284 /* When RootImage=/RootDirectory= is enabled, /var/ should be prepared by the image or directory,
4285 * hence we can mount a disconnected tmpfs on /var/tmp/. */
4286 if (exec_context_with_rootfs(c))
4287 return PRIVATE_TMP_DISCONNECTED;
4288
4289 /* Even if DefaultDependencies=no, enable disconnected tmpfs when
4290 * RequiresMountsFor=/WantsMountsFor=/var/ is explicitly set. */
4291 for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; t++)
4292 if (hashmap_contains(u->mounts_for[t], "/var/"))
4293 return PRIVATE_TMP_DISCONNECTED;
4294
4295 /* Check the same but for After= with Requires=/Requisite=/Wants= or friends. */
4296 Unit *m = manager_get_unit(u->manager, "var.mount");
4297 if (!m)
4298 return PRIVATE_TMP_NO;
4299
4300 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, m))
4301 return PRIVATE_TMP_NO;
4302
4303 if (unit_has_dependency(u, UNIT_ATOM_PULL_IN_START, m) ||
4304 unit_has_dependency(u, UNIT_ATOM_PULL_IN_VERIFY, m) ||
4305 unit_has_dependency(u, UNIT_ATOM_PULL_IN_START_IGNORED, m))
4306 return PRIVATE_TMP_DISCONNECTED;
4307
4308 return PRIVATE_TMP_NO;
4309 }
4310
4311 int unit_patch_contexts(Unit *u) {
4312 CGroupContext *cc;
4313 ExecContext *ec;
4314 int r;
4315
4316 assert(u);
4317
4318 /* Patch in the manager defaults into the exec and cgroup
4319 * contexts, _after_ the rest of the settings have been
4320 * initialized */
4321
4322 ec = unit_get_exec_context(u);
4323 if (ec) {
4324 /* This only copies in the ones that need memory */
4325 for (unsigned i = 0; i < _RLIMIT_MAX; i++)
4326 if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
4327 ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
4328 if (!ec->rlimit[i])
4329 return -ENOMEM;
4330 }
4331
4332 if (MANAGER_IS_USER(u->manager) && !ec->working_directory) {
4333 r = get_home_dir(&ec->working_directory);
4334 if (r < 0)
4335 return r;
4336
4337 if (!ec->working_directory_home)
4338 /* If home directory is implied by us, allow it to be missing. */
4339 ec->working_directory_missing_ok = true;
4340 }
4341
4342 if (ec->private_devices)
4343 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4344
4345 if (ec->protect_kernel_modules)
4346 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4347
4348 if (ec->protect_kernel_logs)
4349 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4350
4351 if (ec->protect_clock)
4352 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4353
4354 if (ec->dynamic_user) {
4355 if (!ec->user) {
4356 r = user_from_unit_name(u, &ec->user);
4357 if (r < 0)
4358 return r;
4359 }
4360
4361 if (!ec->group) {
4362 ec->group = strdup(ec->user);
4363 if (!ec->group)
4364 return -ENOMEM;
4365 }
4366
4367 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4368 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4369 * sandbox. */
4370
4371 /* With DynamicUser= we want private directories, so if the user hasn't manually
4372 * selected PrivateTmp=, enable it, but to a fully private (disconnected) tmpfs
4373 * instance. */
4374 if (ec->private_tmp == PRIVATE_TMP_NO)
4375 ec->private_tmp = PRIVATE_TMP_DISCONNECTED;
4376 ec->remove_ipc = true;
4377 ec->protect_system = PROTECT_SYSTEM_STRICT;
4378 if (ec->protect_home == PROTECT_HOME_NO)
4379 ec->protect_home = PROTECT_HOME_READ_ONLY;
4380
4381 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4382 * them. */
4383 ec->no_new_privileges = true;
4384 ec->restrict_suid_sgid = true;
4385 }
4386
4387 ec->private_var_tmp = unit_get_private_var_tmp(u, ec);
4388
4389 FOREACH_ARRAY(d, ec->directories, _EXEC_DIRECTORY_TYPE_MAX)
4390 exec_directory_sort(d);
4391 }
4392
4393 cc = unit_get_cgroup_context(u);
4394 if (cc && ec) {
4395
4396 if (ec->private_devices &&
4397 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4398 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4399
4400 /* Only add these if needed, as they imply that everything else is blocked. */
4401 if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
4402 if (ec->root_image || ec->mount_images) {
4403
4404 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4405 FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
4406 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
4407 if (r < 0)
4408 return r;
4409 }
4410 FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
4411 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
4412 if (r < 0)
4413 return r;
4414 }
4415
4416 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4417 * Same for mapper and verity. */
4418 FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4419 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
4420 if (r < 0)
4421 return r;
4422 }
4423 }
4424
4425 if (ec->protect_clock) {
4426 r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
4427 if (r < 0)
4428 return r;
4429 }
4430 }
4431 }
4432
4433 return unit_verify_contexts(u);
4434 }
4435
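/* The accessors below resolve the various per-type contexts via offsets published in the unit vtable:
 * a unit type embeds e.g. its ExecContext at a fixed offset in its own struct and records that offset,
 * roughly:
 *
 *     .exec_context_offset = offsetof(Service, exec_context),
 *
 * An offset of 0 means the unit type has no context of that kind. */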
4436 ExecContext *unit_get_exec_context(const Unit *u) {
4437 size_t offset;
4438 assert(u);
4439
4440 if (u->type < 0)
4441 return NULL;
4442
4443 offset = UNIT_VTABLE(u)->exec_context_offset;
4444 if (offset <= 0)
4445 return NULL;
4446
4447 return (ExecContext*) ((uint8_t*) u + offset);
4448 }
4449
4450 KillContext *unit_get_kill_context(const Unit *u) {
4451 size_t offset;
4452 assert(u);
4453
4454 if (u->type < 0)
4455 return NULL;
4456
4457 offset = UNIT_VTABLE(u)->kill_context_offset;
4458 if (offset <= 0)
4459 return NULL;
4460
4461 return (KillContext*) ((uint8_t*) u + offset);
4462 }
4463
4464 CGroupContext *unit_get_cgroup_context(const Unit *u) {
4465 size_t offset;
4466
4467 if (u->type < 0)
4468 return NULL;
4469
4470 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4471 if (offset <= 0)
4472 return NULL;
4473
4474 return (CGroupContext*) ((uint8_t*) u + offset);
4475 }
4476
4477 ExecRuntime *unit_get_exec_runtime(const Unit *u) {
4478 size_t offset;
4479
4480 if (u->type < 0)
4481 return NULL;
4482
4483 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4484 if (offset <= 0)
4485 return NULL;
4486
4487 return *(ExecRuntime**) ((uint8_t*) u + offset);
4488 }
4489
4490 CGroupRuntime *unit_get_cgroup_runtime(const Unit *u) {
4491 size_t offset;
4492
4493 if (u->type < 0)
4494 return NULL;
4495
4496 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
4497 if (offset <= 0)
4498 return NULL;
4499
4500 return *(CGroupRuntime**) ((uint8_t*) u + offset);
4501 }
4502
4503 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4504 assert(u);
4505
4506 if (UNIT_WRITE_FLAGS_NOOP(flags))
4507 return NULL;
4508
4509 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4510 return u->manager->lookup_paths.transient;
4511
4512 if (flags & UNIT_PERSISTENT)
4513 return u->manager->lookup_paths.persistent_control;
4514
4515 if (flags & UNIT_RUNTIME)
4516 return u->manager->lookup_paths.runtime_control;
4517
4518 return NULL;
4519 }
4520
4521 const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4522 assert(s);
4523 assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
4524 assert(buf);
4525
4526 _cleanup_free_ char *t = NULL;
4527
4528 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4529 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4530 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4531 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4532 * allocations. */
4533
4534 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4535 t = specifier_escape(s);
4536 if (!t)
4537 return NULL;
4538
4539 s = t;
4540 }
4541
4542 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4543 * ExecStart= and friends, i.e. '$' and quotes. */
4544
4545 if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
4546 char *t2;
4547
4548 if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
4549 t2 = strreplace(s, "$", "$$");
4550 if (!t2)
4551 return NULL;
4552 free_and_replace(t, t2);
4553 }
4554
4555 t2 = shell_escape(t ?: s, "\"");
4556 if (!t2)
4557 return NULL;
4558 free_and_replace(t, t2);
4559
4560 s = t;
4561
4562 } else if (flags & UNIT_ESCAPE_C) {
4563 char *t2;
4564
4565 t2 = cescape(s);
4566 if (!t2)
4567 return NULL;
4568 free_and_replace(t, t2);
4569
4570 s = t;
4571 }
4572
4573 *buf = TAKE_PTR(t);
4574 return s;
4575 }
4576
4577 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4578 _cleanup_free_ char *result = NULL;
4579 size_t n = 0;
4580
4581 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4582 * lines in a way suitable for ExecStart= stanzas. */
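/* For example, the list { "/bin/echo", "hello world" } is rendered as: "/bin/echo" "hello world",
 * i.e. each entry quoted and separated by a single space, after any escaping requested via 'flags'. */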
4583
4584 STRV_FOREACH(i, l) {
4585 _cleanup_free_ char *buf = NULL;
4586 const char *p;
4587 size_t a;
4588 char *q;
4589
4590 p = unit_escape_setting(*i, flags, &buf);
4591 if (!p)
4592 return NULL;
4593
4594 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4595 if (!GREEDY_REALLOC(result, n + a + 1))
4596 return NULL;
4597
4598 q = result + n;
4599 if (n > 0)
4600 *(q++) = ' ';
4601
4602 *(q++) = '"';
4603 q = stpcpy(q, p);
4604 *(q++) = '"';
4605
4606 n += a;
4607 }
4608
4609 if (!GREEDY_REALLOC(result, n + 1))
4610 return NULL;
4611
4612 result[n] = 0;
4613
4614 return TAKE_PTR(result);
4615 }
4616
4617 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4618 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4619 const char *dir, *wrapped;
4620 int r;
4621
4622 assert(u);
4623 assert(name);
4624 assert(data);
4625
4626 if (UNIT_WRITE_FLAGS_NOOP(flags))
4627 return 0;
4628
4629 data = unit_escape_setting(data, flags, &escaped);
4630 if (!data)
4631 return -ENOMEM;
4632
4633 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4634 * previous section header is the same */
4635
4636 if (flags & UNIT_PRIVATE) {
4637 if (!UNIT_VTABLE(u)->private_section)
4638 return -EINVAL;
4639
4640 if (!u->transient_file || u->last_section_private < 0)
4641 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4642 else if (u->last_section_private == 0)
4643 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4644 } else {
4645 if (!u->transient_file || u->last_section_private < 0)
4646 data = strjoina("[Unit]\n", data);
4647 else if (u->last_section_private > 0)
4648 data = strjoina("\n[Unit]\n", data);
4649 }
4650
4651 if (u->transient_file) {
4652 /* When this is a transient unit file in creation, then let's not create a new drop-in,
4653 * but instead write to the transient unit file. */
4654 fputs_with_newline(u->transient_file, data);
4655
4656 /* Remember which section we wrote this entry to */
4657 u->last_section_private = !!(flags & UNIT_PRIVATE);
4658 return 0;
4659 }
4660
4661 dir = unit_drop_in_dir(u, flags);
4662 if (!dir)
4663 return -EINVAL;
4664
4665 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4666 "# or an equivalent operation. Do not edit.\n",
4667 data,
4668 "\n");
4669
4670 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4671 if (r < 0)
4672 return r;
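/* 'p' and 'q' now name the drop-in directory and file. For a system unit "foo.service" written with
 * UNIT_PERSISTENT this would typically be "/etc/systemd/system.control/foo.service.d" and a
 * "50-<name>.conf" file therein. */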
4673
4674 (void) mkdir_p_label(p, 0755);
4675
4676 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4677 * recreate the cache after every drop-in we write. */
4678 if (u->manager->unit_path_cache) {
4679 r = set_put_strdup_full(&u->manager->unit_path_cache, &path_hash_ops_free, p);
4680 if (r < 0)
4681 return r;
4682 }
4683
4684 r = write_string_file(q, wrapped, WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_ATOMIC|WRITE_STRING_FILE_LABEL);
4685 if (r < 0)
4686 return r;
4687
4688 r = strv_push(&u->dropin_paths, q);
4689 if (r < 0)
4690 return r;
4691 q = NULL;
4692
4693 strv_uniq(u->dropin_paths);
4694
4695 u->dropin_mtime = now(CLOCK_REALTIME);
4696
4697 return 0;
4698 }
4699
4700 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4701 _cleanup_free_ char *p = NULL;
4702 va_list ap;
4703 int r;
4704
4705 assert(u);
4706 assert(name);
4707 assert(format);
4708
4709 if (UNIT_WRITE_FLAGS_NOOP(flags))
4710 return 0;
4711
4712 va_start(ap, format);
4713 r = vasprintf(&p, format, ap);
4714 va_end(ap);
4715
4716 if (r < 0)
4717 return -ENOMEM;
4718
4719 return unit_write_setting(u, flags, name, p);
4720 }
4721
4722 int unit_make_transient(Unit *u) {
4723 _cleanup_free_ char *path = NULL;
4724 FILE *f;
4725
4726 assert(u);
4727
4728 if (!UNIT_VTABLE(u)->can_transient)
4729 return -EOPNOTSUPP;
4730
4731 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4732
4733 path = path_join(u->manager->lookup_paths.transient, u->id);
4734 if (!path)
4735 return -ENOMEM;
4736
4737 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4738 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4739
4740 WITH_UMASK(0022) {
4741 f = fopen(path, "we");
4742 if (!f)
4743 return -errno;
4744 }
4745
4746 safe_fclose(u->transient_file);
4747 u->transient_file = f;
4748
4749 free_and_replace(u->fragment_path, path);
4750
4751 u->source_path = mfree(u->source_path);
4752 u->dropin_paths = strv_free(u->dropin_paths);
4753 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4754
4755 u->load_state = UNIT_STUB;
4756 u->load_error = 0;
4757 u->transient = true;
4758
4759 unit_add_to_dbus_queue(u);
4760 unit_add_to_gc_queue(u);
4761
4762 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4763 u->transient_file);
4764
4765 return 0;
4766 }
4767
4768 static bool ignore_leftover_process(const char *comm) {
4769 return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
4770 }
4771
4772 static int log_kill(const PidRef *pid, int sig, void *userdata) {
4773 const Unit *u = ASSERT_PTR(userdata);
4774 _cleanup_free_ char *comm = NULL;
4775
4776 assert(pidref_is_set(pid));
4777
4778 (void) pidref_get_comm(pid, &comm);
4779
4780 if (ignore_leftover_process(comm))
4781 /* Although we didn't log anything here, this callback is used in unit_kill_context(), hence we must
4782 * return 1 to let the manager know that a process was killed. */
4783 return 1;
4784
4785 log_unit_notice(u,
4786 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4787 pid->pid,
4788 strna(comm),
4789 signal_to_string(sig));
4790
4791 return 1;
4792 }
4793
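/* Maps a kill operation to the signal configured for it in the KillContext, and reports via
 * 'ret_noteworthy' whether the operation is unusual enough that each killed process should be
 * logged. */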
4794 static int operation_to_signal(
4795 const KillContext *c,
4796 KillOperation k,
4797 bool *ret_noteworthy) {
4798
4799 assert(c);
4800 assert(ret_noteworthy);
4801
4802 switch (k) {
4803
4804 case KILL_TERMINATE:
4805 case KILL_TERMINATE_AND_LOG:
4806 *ret_noteworthy = false;
4807 return c->kill_signal;
4808
4809 case KILL_RESTART:
4810 *ret_noteworthy = false;
4811 return restart_kill_signal(c);
4812
4813 case KILL_KILL:
4814 *ret_noteworthy = true;
4815 return c->final_kill_signal;
4816
4817 case KILL_WATCHDOG:
4818 *ret_noteworthy = true;
4819 return c->watchdog_signal;
4820
4821 default:
4822 assert_not_reached();
4823 }
4824 }
4825
4826 static int unit_kill_context_one(
4827 Unit *u,
4828 const PidRef *pidref,
4829 const char *type,
4830 bool is_alien,
4831 int sig,
4832 bool send_sighup,
4833 cg_kill_log_func_t log_func) {
4834
4835 int r;
4836
4837 assert(u);
4838 assert(type);
4839
4840 /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */
4841
4842 if (!pidref_is_set(pidref))
4843 return 0;
4844
4845 if (log_func)
4846 log_func(pidref, sig, u);
4847
4848 r = pidref_kill_and_sigcont(pidref, sig);
4849 if (r == -ESRCH)
4850 return !is_alien;
4851 if (r < 0) {
4852 _cleanup_free_ char *comm = NULL;
4853
4854 (void) pidref_get_comm(pidref, &comm);
4855 return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
4856 }
4857
4858 if (send_sighup)
4859 (void) pidref_kill(pidref, SIGHUP);
4860
4861 return !is_alien;
4862 }
4863
4864 int unit_kill_context(Unit *u, KillOperation k) {
4865 bool wait_for_exit = false, send_sighup;
4866 cg_kill_log_func_t log_func = NULL;
4867 int sig, r;
4868
4869 assert(u);
4870
4871 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4872 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill(),
4873 * which is used for user-requested killing of unit processes. */
4874
4875 KillContext *c = unit_get_kill_context(u);
4876 if (!c || c->kill_mode == KILL_NONE)
4877 return 0;
4878
4879 bool noteworthy;
4880 sig = operation_to_signal(c, k, &noteworthy);
4881 if (noteworthy)
4882 log_func = log_kill;
4883
4884 send_sighup =
4885 c->send_sighup &&
4886 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4887 sig != SIGHUP;
4888
4889 bool is_alien;
4890 PidRef *main_pid = unit_main_pid_full(u, &is_alien);
4891 r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
4892 wait_for_exit = wait_for_exit || r > 0;
4893
4894 r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
4895 wait_for_exit = wait_for_exit || r > 0;
4896
4897 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4898 if (crt && crt->cgroup_path &&
4899 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4900 _cleanup_set_free_ Set *pid_set = NULL;
4901
4902 /* Exclude the main/control pids from being killed via the cgroup */
4903 r = unit_pid_set(u, &pid_set);
4904 if (r < 0)
4905 return r;
4906
4907 r = cg_kill_recursive(
4908 crt->cgroup_path,
4909 sig,
4910 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4911 pid_set,
4912 log_func, u);
4913 if (r < 0) {
4914 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4915 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path));
4916
4917 } else if (r > 0) {
4918
4919 wait_for_exit = true;
4920
4921 if (send_sighup) {
4922 r = unit_pid_set(u, &pid_set);
4923 if (r < 0)
4924 return r;
4925
4926 (void) cg_kill_recursive(
4927 crt->cgroup_path,
4928 SIGHUP,
4929 CGROUP_IGNORE_SELF,
4930 pid_set,
4931 /* log_kill= */ NULL,
4932 /* userdata= */ NULL);
4933 }
4934 }
4935 }
4936
4937 return wait_for_exit;
4938 }
4939
4940 int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
4941 Hashmap **unit_map, **manager_map;
4942 int r;
4943
4944 assert(u);
4945 assert(path);
4946 assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);
4947
4948 unit_map = &u->mounts_for[type];
4949 manager_map = &u->manager->units_needing_mounts_for[type];
4950
4951 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4952 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
4953 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4954 * appearing mount units can easily determine which units to make themselves a dependency of. */
4955
4956 if (!path_is_absolute(path))
4957 return -EINVAL;
4958
4959 if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
4960 return 0;
4961
4962 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4963 * only after simplification, since path_is_normalized() rejects paths with '.'.
4964 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4965 _cleanup_free_ char *p = NULL;
4966 r = path_simplify_alloc(path, &p);
4967 if (r < 0)
4968 return r;
4969 path = p;
4970
4971 if (!path_is_normalized(path))
4972 return -EPERM;
4973
4974 UnitDependencyInfo di = {
4975 .origin_mask = mask
4976 };
4977
4978 r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
4979 if (r < 0)
4980 return r;
4981 assert(r > 0);
4982 TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */
4983
4984 char prefix[strlen(path) + 1];
4985 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4986 Set *x;
4987
4988 x = hashmap_get(*manager_map, prefix);
4989 if (!x) {
4990 _cleanup_free_ char *q = NULL;
4991
4992 r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
4993 if (r < 0)
4994 return r;
4995
4996 q = strdup(prefix);
4997 if (!q)
4998 return -ENOMEM;
4999
5000 x = set_new(NULL);
5001 if (!x)
5002 return -ENOMEM;
5003
5004 r = hashmap_put(*manager_map, q, x);
5005 if (r < 0) {
5006 set_free(x);
5007 return r;
5008 }
5009 q = NULL;
5010 }
5011
5012 r = set_put(x, u);
5013 if (r < 0)
5014 return r;
5015 }
5016
5017 return 0;
5018 }
5019
5020 int unit_setup_exec_runtime(Unit *u) {
5021 _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
5022 _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
5023 _cleanup_set_free_ Set *units = NULL;
5024 ExecRuntime **rt;
5025 ExecContext *ec;
5026 size_t offset;
5027 Unit *other;
5028 int r;
5029
5030 offset = UNIT_VTABLE(u)->exec_runtime_offset;
5031 assert(offset > 0);
5032
5033 /* Check if there already is an ExecRuntime for this unit. */
5034 rt = (ExecRuntime**) ((uint8_t*) u + offset);
5035 if (*rt)
5036 return 0;
5037
5038 ec = ASSERT_PTR(unit_get_exec_context(u));
5039
5040 r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
5041 if (r < 0)
5042 return r;
5043
5044 /* Try to get it from somebody else */
5045 SET_FOREACH(other, units) {
5046 r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
5047 if (r < 0)
5048 return r;
5049 if (r > 0)
5050 break;
5051 }
5052
5053 if (!esr) {
5054 r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
5055 if (r < 0)
5056 return r;
5057 }
5058
5059 if (ec->dynamic_user) {
5060 r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
5061 if (r < 0)
5062 return r;
5063 }
5064
5065 r = exec_runtime_make(u, ec, esr, dcreds, rt);
5066 if (r < 0)
5067 return r;
5068
5069 TAKE_PTR(esr);
5070 TAKE_PTR(dcreds);
5071
5072 return r;
5073 }
5074
5075 CGroupRuntime *unit_setup_cgroup_runtime(Unit *u) {
5076 size_t offset;
5077
5078 assert(u);
5079
5080 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
5081 assert(offset > 0);
5082
5083 CGroupRuntime **rt = (CGroupRuntime**) ((uint8_t*) u + offset);
5084 if (*rt)
5085 return *rt;
5086
5087 return (*rt = cgroup_runtime_new());
5088 }
5089
5090 bool unit_type_supported(UnitType t) {
5091 static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
5092 int r;
5093
5094 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5095
5096 if (cache[t] == 0) {
5097 char *e;
5098
5099 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5100
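/* e.g. for UNIT_DEVICE this checks $SYSTEMD_SUPPORT_DEVICE; setting such a variable to false
 * masks out the whole unit type. */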
5101 r = getenv_bool(ascii_strupper(e));
5102 if (r < 0 && r != -ENXIO)
5103 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5104
5105 cache[t] = r == 0 ? -1 : 1;
5106 }
5107 if (cache[t] < 0)
5108 return false;
5109
5110 if (!unit_vtable[t]->supported)
5111 return true;
5112
5113 return unit_vtable[t]->supported();
5114 }
5115
5116 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5117 int r;
5118
5119 assert(u);
5120 assert(where);
5121
5122 if (!unit_log_level_test(u, LOG_NOTICE))
5123 return;
5124
5125 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5126 if (r > 0 || r == -ENOTDIR)
5127 return;
5128 if (r < 0) {
5129 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5130 return;
5131 }
5132
5133 log_unit_struct(u, LOG_NOTICE,
5134 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING_STR),
5135 LOG_UNIT_INVOCATION_ID(u),
5136 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5137 LOG_ITEM("WHERE=%s", where));
5138 }
5139
5140 int unit_log_noncanonical_mount_path(Unit *u, const char *where) {
5141 assert(u);
5142 assert(where);
5143
5144 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5145 log_unit_struct(u, LOG_ERR,
5146 LOG_MESSAGE_ID(SD_MESSAGE_NON_CANONICAL_MOUNT_STR),
5147 LOG_UNIT_INVOCATION_ID(u),
5148 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5149 LOG_ITEM("WHERE=%s", where));
5150
5151 return -ELOOP;
5152 }
5153
5154 int unit_fail_if_noncanonical_mount_path(Unit *u, const char* where) {
5155 int r;
5156
5157 assert(u);
5158 assert(where);
5159
5160 _cleanup_free_ char *canonical_where = NULL;
5161 r = chase(where, /* root= */ NULL, CHASE_NONEXISTENT, &canonical_where, /* ret_fd= */ NULL);
5162 if (r < 0) {
5163 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5164 return 0;
5165 }
5166
5167 /* We will happily ignore a trailing slash (or any redundant slashes) */
5168 if (path_equal(where, canonical_where))
5169 return 0;
5170
5171 return unit_log_noncanonical_mount_path(u, where);
5172 }
5173
5174 bool unit_is_pristine(Unit *u) {
5175 assert(u);
5176
5177 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5178 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5179 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5180 *
5181 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5182 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5183 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5184 */
5185
5186 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5187 !u->fragment_path &&
5188 !u->source_path &&
5189 !u->job &&
5190 !u->merged_into;
5191 }
5192
5193 PidRef* unit_control_pid(Unit *u) {
5194 assert(u);
5195
5196 if (UNIT_VTABLE(u)->control_pid)
5197 return UNIT_VTABLE(u)->control_pid(u);
5198
5199 return NULL;
5200 }
5201
5202 PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
5203 assert(u);
5204
5205 if (UNIT_VTABLE(u)->main_pid)
5206 return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);
5207
5208 if (ret_is_alien)
5209 *ret_is_alien = false;
5210 return NULL;
5211 }
5212
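/* Adds the unit's resolved UID or GID to (or removes it from) the nftables sets configured via
 * NFTSet=, so that packet filter rules can match on the unit's, possibly dynamic, user or group. */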
5213 static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
5214 int r;
5215
5216 assert(u);
5217
5218 if (!MANAGER_IS_SYSTEM(u->manager))
5219 return;
5220
5221 CGroupContext *c;
5222 c = unit_get_cgroup_context(u);
5223 if (!c)
5224 return;
5225
5226 if (!u->manager->fw_ctx) {
5227 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
5228 if (r < 0)
5229 return;
5230
5231 assert(u->manager->fw_ctx);
5232 }
5233
5234 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
5235 if (nft_set->source != source)
5236 continue;
5237
5238 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
5239 if (r < 0)
5240 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5241 add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5242 else
5243 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5244 add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5245 }
5246 }
5247
5248 static void unit_unref_uid_internal(
5249 Unit *u,
5250 uid_t *ref_uid,
5251 bool destroy_now,
5252 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5253
5254 assert(u);
5255 assert(ref_uid);
5256 assert(_manager_unref_uid);
5257
5258 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5259 * gid_t are actually the same type, with the same validity rules.
5260 *
5261 * Drops a reference to UID/GID from a unit. */
5262
5263 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5264 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5265
5266 if (!uid_is_valid(*ref_uid))
5267 return;
5268
5269 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5270 *ref_uid = UID_INVALID;
5271 }
5272
5273 static void unit_unref_uid(Unit *u, bool destroy_now) {
5274 assert(u);
5275
5276 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);
5277
5278 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5279 }
5280
5281 static void unit_unref_gid(Unit *u, bool destroy_now) {
5282 assert(u);
5283
5284 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);
5285
5286 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5287 }
5288
5289 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5290 assert(u);
5291
5292 unit_unref_uid(u, destroy_now);
5293 unit_unref_gid(u, destroy_now);
5294 }
5295
5296 static int unit_ref_uid_internal(
5297 Unit *u,
5298 uid_t *ref_uid,
5299 uid_t uid,
5300 bool clean_ipc,
5301 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5302
5303 int r;
5304
5305 assert(u);
5306 assert(ref_uid);
5307 assert(uid_is_valid(uid));
5308 assert(_manager_ref_uid);
5309
5310 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5311 * are actually the same type, and have the same validity rules.
5312 *
5313 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5314 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5315 * drops to zero. */
5316
5317 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5318 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5319
5320 if (*ref_uid == uid)
5321 return 0;
5322
5323 if (uid_is_valid(*ref_uid)) /* Already set? */
5324 return -EBUSY;
5325
5326 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5327 if (r < 0)
5328 return r;
5329
5330 *ref_uid = uid;
5331 return 1;
5332 }
5333
5334 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5335 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5336 }
5337
5338 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5339 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5340 }
5341
5342 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5343 int r = 0, q = 0;
5344
5345 assert(u);
5346
5347 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5348
5349 if (uid_is_valid(uid)) {
5350 r = unit_ref_uid(u, uid, clean_ipc);
5351 if (r < 0)
5352 return r;
5353 }
5354
5355 if (gid_is_valid(gid)) {
5356 q = unit_ref_gid(u, gid, clean_ipc);
5357 if (q < 0) {
5358 if (r > 0)
5359 unit_unref_uid(u, false);
5360
5361 return q;
5362 }
5363 }
5364
5365 return r > 0 || q > 0;
5366 }
5367
5368 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5369 ExecContext *c;
5370 int r;
5371
5372 assert(u);
5373
5374 c = unit_get_exec_context(u);
5375
5376 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5377 if (r < 0)
5378 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5379
5380 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
5381 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);
5382
5383 return r;
5384 }
5385
5386 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5387 int r;
5388
5389 assert(u);
5390
5391 /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group names
5392 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5393 * objects when no service references the UID/GID anymore. */
5394
5395 r = unit_ref_uid_gid(u, uid, gid);
5396 if (r > 0)
5397 unit_add_to_dbus_queue(u);
5398 }
5399
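/* Generates and installs a fresh random invocation ID, i.e. the 128-bit identifier that uniquely
 * names this particular activation cycle of the unit. */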
5400 int unit_acquire_invocation_id(Unit *u) {
5401 sd_id128_t id;
5402 int r;
5403
5404 assert(u);
5405
5406 r = sd_id128_randomize(&id);
5407 if (r < 0)
5408 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5409
5410 r = unit_set_invocation_id(u, id);
5411 if (r < 0)
5412 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5413
5414 unit_add_to_dbus_queue(u);
5415 return 0;
5416 }
5417
5418 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5419 int r;
5420
5421 assert(u);
5422 assert(p);
5423
5424 /* Copy parameters from manager */
5425 r = manager_get_effective_environment(u->manager, &p->environment);
5426 if (r < 0)
5427 return r;
5428
5429 p->runtime_scope = u->manager->runtime_scope;
5430
5431 r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager));
5432 if (r < 0)
5433 return r;
5434
5435 p->cgroup_supported = u->manager->cgroup_supported;
5436 p->prefix = u->manager->prefix;
5437 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5438
5439 /* Copy parameters from unit */
5440 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5441 p->cgroup_path = crt ? crt->cgroup_path : NULL;
5442 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5443
5444 p->received_credentials_directory = u->manager->received_credentials_directory;
5445 p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;
5446
5447 p->shall_confirm_spawn = u->manager->confirm_spawn;
5448
5449 p->fallback_smack_process_label = u->manager->defaults.smack_process_label;
5450
5451 if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
5452 int fd = bpf_restrict_fs_map_fd(u);
5453 if (fd < 0)
5454 return fd;
5455
5456 p->bpf_restrict_fs_map_fd = fd;
5457 }
5458
5459 p->user_lookup_fd = u->manager->user_lookup_fds[1];
5460 p->handoff_timestamp_fd = u->manager->handoff_timestamp_fds[1];
5461 if (UNIT_VTABLE(u)->notify_pidref)
5462 p->pidref_transport_fd = u->manager->pidref_transport_fds[1];
5463
5464 p->cgroup_id = crt ? crt->cgroup_id : 0;
5465 p->invocation_id = u->invocation_id;
5466 sd_id128_to_string(p->invocation_id, p->invocation_id_string);
5467 p->unit_id = strdup(u->id);
5468 if (!p->unit_id)
5469 return -ENOMEM;
5470
5471 p->debug_invocation = u->debug_invocation;
5472
5473 return 0;
5474 }
5475
5476 int unit_fork_helper_process(Unit *u, const char *name, bool into_cgroup, PidRef *ret) {
5477 CGroupRuntime *crt = NULL;
5478 pid_t pid;
5479 int r;
5480
5481 assert(u);
5482 assert(ret);
5483
5484 /* Forks off a helper process and makes sure it is a member of the unit's cgroup, if configured to
5485 * do so. Returns == 0 in the child, and > 0 in the parent. The 'ret' parameter is always filled in
5486 * with the child's PidRef. */
5487
5488 if (into_cgroup) {
5489 (void) unit_realize_cgroup(u);
5490
5491 crt = unit_setup_cgroup_runtime(u);
5492 if (!crt)
5493 return -ENOMEM;
5494 }
5495
5496 r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
5497 if (r < 0)
5498 return r;
5499 if (r > 0) {
5500 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
5501 int q;
5502
5503 /* Parent */
5504
5505 q = pidref_set_pid(&pidref, pid);
5506 if (q < 0)
5507 return q;
5508
5509 *ret = TAKE_PIDREF(pidref);
5510 return r;
5511 }
5512
5513 /* Child */
5514
5515 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
5516 (void) ignore_signals(SIGPIPE);
5517
5518 if (crt && crt->cgroup_path) {
5519 r = cg_attach(crt->cgroup_path, 0);
5520 if (r < 0) {
5521 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path));
5522 _exit(EXIT_CGROUP);
5523 }
5524 }
5525
5526 return 0;
5527 }
5528
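/* Forks off a helper (in the unit's cgroup) that recursively removes the specified paths, and
 * registers the child with the unit so that its exit is tracked, e.g. for the "systemctl clean"
 * logic. */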
5529 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
5530 _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
5531 int r;
5532
5533 assert(u);
5534 assert(ret_pid);
5535
5536 r = unit_fork_helper_process(u, "(sd-rmrf)", /* into_cgroup= */ true, &pid);
5537 if (r < 0)
5538 return r;
5539 if (r == 0) {
5540 int ret = EXIT_SUCCESS;
5541
5542 STRV_FOREACH(i, paths) {
5543 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5544 if (r < 0) {
5545 log_error_errno(r, "Failed to remove '%s': %m", *i);
5546 ret = EXIT_FAILURE;
5547 }
5548 }
5549
5550 _exit(ret);
5551 }
5552
5553 r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
5554 if (r < 0)
5555 return r;
5556
5557 *ret_pid = TAKE_PIDREF(pid);
5558 return 0;
5559 }
5560
5561 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5562 assert(deps);
5563 assert(other);
5564
5565 if (di.origin_mask == 0 && di.destination_mask == 0)
5566 /* No bit set anymore, let's drop the whole entry */
5567 assert_se(hashmap_remove(deps, other));
5568 else
5569 /* Mask was reduced, let's update the entry */
5570 assert_se(hashmap_update(deps, other, di.data) == 0);
5571 }
5572
5573 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5574 Hashmap *deps;
5575 assert(u);
5576
5577 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5578
5579 if (mask == 0)
5580 return;
5581
5582 HASHMAP_FOREACH(deps, u->dependencies) {
5583 bool done;
5584
5585 do {
5586 UnitDependencyInfo di;
5587 Unit *other;
5588
5589 done = true;
5590
5591 HASHMAP_FOREACH_KEY(di.data, other, deps) {
5592 Hashmap *other_deps;
5593
5594 if (FLAGS_SET(~mask, di.origin_mask))
5595 continue;
5596
5597 di.origin_mask &= ~mask;
5598 unit_update_dependency_mask(deps, other, di);
5599
5600 /* We updated the dependency from our unit to the other unit now. But most
5601 * dependencies imply a reverse dependency. Hence, let's delete that one
5602 * too. For that we go through all dependency types on the other unit and
5603 * delete all those which point to us and have the right mask set. */
5604
5605 HASHMAP_FOREACH(other_deps, other->dependencies) {
5606 UnitDependencyInfo dj;
5607
5608 dj.data = hashmap_get(other_deps, u);
5609                                         if (FLAGS_SET(~mask, dj.destination_mask)) /* 'mask' owns none of the destination bits? */
5610                                                 continue;
5611
5612 dj.destination_mask &= ~mask;
5613 unit_update_dependency_mask(other_deps, u, dj);
5614 }
5615
5616 unit_add_to_gc_queue(other);
5617
5618                                 /* After dropping the dependency, 'other' may no longer be needed by 'u'. */
5619 unit_submit_to_stop_when_unneeded_queue(other);
5620
5621 u->dependency_generation++;
5622 other->dependency_generation++;
5623
5624 done = false;
5625 break;
5626 }
5627
5628 } while (!done);
5629 }
5630 }
5631
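/* Editorial example (not part of unit.c): the origin/destination masks in UnitDependencyInfo record
 * which subsystems own a dependency edge (unit file, udev, default dependencies, ...). An edge only
 * disappears once every owner's bit is cleared. This hypothetical helper shows the survival test
 * that the removal loop above effectively applies to each edge: */
static bool example_dependency_survives(UnitDependencyInfo di, UnitDependencyMask mask) {
        /* Clear the bits owned by 'mask' in both directions, as unit_remove_dependencies() does */
        di.origin_mask &= ~mask;
        di.destination_mask &= ~mask;

        /* The edge survives as long as some other owner still claims it */
        return di.origin_mask != 0 || di.destination_mask != 0;
}
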
5632 static int unit_get_invocation_path(Unit *u, char **ret) {
5633 char *p;
5634 int r;
5635
5636 assert(u);
5637 assert(ret);
5638
5639 if (MANAGER_IS_SYSTEM(u->manager))
5640 p = strjoin("/run/systemd/units/invocation:", u->id);
5641 else {
5642 _cleanup_free_ char *user_path = NULL;
5643
5644 r = xdg_user_runtime_dir("/systemd/units/invocation:", &user_path);
5645 if (r < 0)
5646 return r;
5647
5648 p = strjoin(user_path, u->id);
5649 }
5650 if (!p)
5651 return -ENOMEM;
5652
5653 *ret = p;
5654 return 0;
5655 }
5656
5657 static int unit_export_invocation_id(Unit *u) {
5658 _cleanup_free_ char *p = NULL;
5659 int r;
5660
5661 assert(u);
5662
5663 if (u->exported_invocation_id)
5664 return 0;
5665
5666 if (sd_id128_is_null(u->invocation_id))
5667 return 0;
5668
5669 r = unit_get_invocation_path(u, &p);
5670 if (r < 0)
5671 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5672
5673 r = symlinkat_atomic_full(u->invocation_id_string, AT_FDCWD, p, SYMLINK_LABEL);
5674 if (r < 0)
5675 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5676
5677 u->exported_invocation_id = true;
5678 return 0;
5679 }
5680
5681 static int unit_export_log_level_max(Unit *u, int log_level_max, bool overwrite) {
5682 const char *p;
5683 char buf[2];
5684 int r;
5685
5686 assert(u);
5687
5688         /* When the debug_invocation logic runs, overwrite will be true, as we always want to switch the
5689          * maximum log level that the journal applies, and to restore the previous level once done. */
5690
5691 if (!overwrite && u->exported_log_level_max)
5692 return 0;
5693
5694 if (log_level_max < 0)
5695 return 0;
5696
5697 assert(log_level_max <= 7);
5698
5699 buf[0] = '0' + log_level_max;
5700 buf[1] = 0;
5701
5702 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5703 r = symlink_atomic(buf, p);
5704 if (r < 0)
5705 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5706
5707 u->exported_log_level_max = true;
5708 return 0;
5709 }
5710
5711 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5712 _cleanup_close_ int fd = -EBADF;
5713 struct iovec *iovec;
5714 const char *p;
5715 char *pattern;
5716 le64_t *sizes;
5717 ssize_t n;
5718 int r;
5719
5720 if (u->exported_log_extra_fields)
5721 return 0;
5722
5723 if (c->n_log_extra_fields <= 0)
5724 return 0;
5725
5726 sizes = newa(le64_t, c->n_log_extra_fields);
5727 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5728
5729 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5730 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5731
5732 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5733 iovec[i*2+1] = c->log_extra_fields[i];
5734 }
5735
5736 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5737 pattern = strjoina(p, ".XXXXXX");
5738
5739 fd = mkostemp_safe(pattern);
5740 if (fd < 0)
5741 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5742
5743 n = writev(fd, iovec, c->n_log_extra_fields*2);
5744 if (n < 0) {
5745 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5746 goto fail;
5747 }
5748
5749 (void) fchmod(fd, 0644);
5750
5751 if (rename(pattern, p) < 0) {
5752 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5753 goto fail;
5754 }
5755
5756 u->exported_log_extra_fields = true;
5757 return 0;
5758
5759 fail:
5760 (void) unlink(pattern);
5761 return r;
5762 }
5763
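/* Editorial example (not part of unit.c): the file written above is a flat sequence of
 * (little-endian 64-bit length, payload) pairs, one pair per journal field. A hypothetical reader
 * would walk it like this: */
static void example_parse_extra_fields(const uint8_t *data, size_t size) {
        while (size >= sizeof(le64_t)) {
                uint64_t l;

                memcpy(&l, data, sizeof(le64_t));
                l = le64toh(l);

                data += sizeof(le64_t);
                size -= sizeof(le64_t);

                if (l > size)
                        break; /* truncated trailing field, ignore */

                /* 'data' now points to one "FIELD=value" entry of 'l' bytes */
                data += l;
                size -= l;
        }
}
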
5764 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5765 _cleanup_free_ char *buf = NULL;
5766 const char *p;
5767 int r;
5768
5769 assert(u);
5770 assert(c);
5771
5772 if (u->exported_log_ratelimit_interval)
5773 return 0;
5774
5775 if (c->log_ratelimit.interval == 0)
5776 return 0;
5777
5778 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5779
5780 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit.interval) < 0)
5781 return log_oom();
5782
5783 r = symlink_atomic(buf, p);
5784 if (r < 0)
5785 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5786
5787 u->exported_log_ratelimit_interval = true;
5788 return 0;
5789 }
5790
5791 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5792 _cleanup_free_ char *buf = NULL;
5793 const char *p;
5794 int r;
5795
5796 assert(u);
5797 assert(c);
5798
5799 if (u->exported_log_ratelimit_burst)
5800 return 0;
5801
5802 if (c->log_ratelimit.burst == 0)
5803 return 0;
5804
5805 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5806
5807 if (asprintf(&buf, "%u", c->log_ratelimit.burst) < 0)
5808 return log_oom();
5809
5810 r = symlink_atomic(buf, p);
5811 if (r < 0)
5812 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5813
5814 u->exported_log_ratelimit_burst = true;
5815 return 0;
5816 }
5817
5818 void unit_export_state_files(Unit *u) {
5819 const ExecContext *c;
5820
5821 assert(u);
5822
5823 if (!u->id)
5824 return;
5825
5826 if (MANAGER_IS_TEST_RUN(u->manager))
5827 return;
5828
5829 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5830 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5831 * the IPC system itself and PID 1 also log to the journal.
5832 *
5833          * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as an
5834 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5835 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5836 * namespace at least.
5837 *
5838 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5839 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5840 * them with one. */
5841
5842 (void) unit_export_invocation_id(u);
5843
5844 if (!MANAGER_IS_SYSTEM(u->manager))
5845 return;
5846
5847 c = unit_get_exec_context(u);
5848 if (c) {
5849 (void) unit_export_log_level_max(u, c->log_level_max, /* overwrite= */ false);
5850 (void) unit_export_log_extra_fields(u, c);
5851 (void) unit_export_log_ratelimit_interval(u, c);
5852 (void) unit_export_log_ratelimit_burst(u, c);
5853 }
5854 }
5855
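/* Editorial example (not part of unit.c): how a consumer such as journald can read one of the
 * symlink-encoded properties back with a single system call, as promised above. The helper name is
 * hypothetical and error handling is minimal. */
static int example_read_invocation_id(const char *unit_id, char *buf, size_t bufsz) {
        assert(unit_id);
        assert(buf);
        assert(bufsz > 1);

        const char *p = strjoina("/run/systemd/units/invocation:", unit_id);

        ssize_t n = readlink(p, buf, bufsz - 1);
        if (n < 0)
                return -errno;

        buf[n] = 0; /* readlink() does not NUL-terminate */
        return 0;
}
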
5856 void unit_unlink_state_files(Unit *u) {
5857 const char *p;
5858
5859 assert(u);
5860
5861 if (!u->id)
5862 return;
5863
5864         /* Undoes the effect of unit_export_state_files() */
5865
5866 if (u->exported_invocation_id) {
5867 _cleanup_free_ char *invocation_path = NULL;
5868 int r = unit_get_invocation_path(u, &invocation_path);
5869 if (r >= 0) {
5870 (void) unlink(invocation_path);
5871 u->exported_invocation_id = false;
5872 }
5873 }
5874
5875 if (!MANAGER_IS_SYSTEM(u->manager))
5876 return;
5877
5878 if (u->exported_log_level_max) {
5879 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5880 (void) unlink(p);
5881
5882 u->exported_log_level_max = false;
5883 }
5884
5885 if (u->exported_log_extra_fields) {
5886                 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5887 (void) unlink(p);
5888
5889 u->exported_log_extra_fields = false;
5890 }
5891
5892 if (u->exported_log_ratelimit_interval) {
5893 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5894 (void) unlink(p);
5895
5896 u->exported_log_ratelimit_interval = false;
5897 }
5898
5899 if (u->exported_log_ratelimit_burst) {
5900 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5901 (void) unlink(p);
5902
5903 u->exported_log_ratelimit_burst = false;
5904 }
5905 }
5906
5907 int unit_set_debug_invocation(Unit *u, bool enable) {
5908 int r;
5909
5910 assert(u);
5911
5912 if (u->debug_invocation == enable)
5913 return 0; /* Nothing to do */
5914
5915 u->debug_invocation = enable;
5916
5917 /* Ensure that the new log level is exported for the journal, in place of the previous one */
5918 if (u->exported_log_level_max) {
5919 const ExecContext *ec = unit_get_exec_context(u);
5920 if (ec) {
5921 r = unit_export_log_level_max(u, enable ? LOG_PRI(LOG_DEBUG) : ec->log_level_max, /* overwrite= */ true);
5922 if (r < 0)
5923 return r;
5924 }
5925 }
5926
5927 return 1;
5928 }
5929
5930 int unit_prepare_exec(Unit *u) {
5931 int r;
5932
5933 assert(u);
5934
5935         /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5936          * Fail early, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5937 r = bpf_firewall_load_custom(u);
5938 if (r < 0)
5939 return r;
5940
5941         /* Prepares everything so that we can fork off a process for this unit */
5942
5943 (void) unit_realize_cgroup(u);
5944
5945 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5946 if (crt && crt->reset_accounting) {
5947 (void) unit_reset_accounting(u);
5948 crt->reset_accounting = false;
5949 }
5950
5951 unit_export_state_files(u);
5952
5953 r = unit_setup_exec_runtime(u);
5954 if (r < 0)
5955 return r;
5956
5957 return 0;
5958 }
5959
5960 static int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
5961 const Unit *u = ASSERT_PTR(userdata);
5962 _cleanup_free_ char *comm = NULL;
5963
5964 assert(pidref_is_set(pid));
5965
5966 (void) pidref_get_comm(pid, &comm);
5967
5968 if (ignore_leftover_process(comm))
5969 return 0;
5970
5971 /* During start we print a warning */
5972
5973 log_unit_warning(u,
5974 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5975 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5976 pid->pid, strna(comm));
5977
5978 return 1;
5979 }
5980
5981 static int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
5982 const Unit *u = ASSERT_PTR(userdata);
5983 _cleanup_free_ char *comm = NULL;
5984
5985 assert(pidref_is_set(pid));
5986
5987 (void) pidref_get_comm(pid, &comm);
5988
5989 if (ignore_leftover_process(comm))
5990 return 0;
5991
5992 /* During stop we only print an informational message */
5993
5994 log_unit_info(u,
5995 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5996 pid->pid, strna(comm));
5997
5998 return 1;
5999 }
6000
6001 int unit_warn_leftover_processes(Unit *u, bool start) {
6002 _cleanup_free_ char *cgroup = NULL;
6003 int r;
6004
6005 assert(u);
6006
6007 r = unit_get_cgroup_path_with_fallback(u, &cgroup);
6008 if (r < 0)
6009 return r;
6010
        /* With sig == 0, cg_kill_recursive() doesn't actually deliver a signal; it only walks the
         * cgroup recursively and invokes the logging callback for each leftover process it finds. */
6011         return cg_kill_recursive(
6012 cgroup,
6013 /* sig= */ 0,
6014 /* flags= */ 0,
6015 /* killed_pids= */ NULL,
6016 start ? unit_log_leftover_process_start : unit_log_leftover_process_stop,
6017 u);
6018 }
6019
6020 bool unit_needs_console(Unit *u) {
6021 ExecContext *ec;
6022 UnitActiveState state;
6023
6024 assert(u);
6025
6026 state = unit_active_state(u);
6027
6028 if (UNIT_IS_INACTIVE_OR_FAILED(state))
6029 return false;
6030
6031 if (UNIT_VTABLE(u)->needs_console)
6032 return UNIT_VTABLE(u)->needs_console(u);
6033
6034 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
6035 ec = unit_get_exec_context(u);
6036 if (!ec)
6037 return false;
6038
6039 return exec_context_may_touch_console(ec);
6040 }
6041
6042 int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
6043 int r;
6044
6045 assert(u);
6046
6047 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
6048 * and not a kernel thread either */
6049
6050 /* First, a simple range check */
6051 if (!pidref_is_set(pid))
6052 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");
6053
6054 /* Some extra safety check */
6055 if (pid->pid == 1 || pidref_is_self(pid))
6056 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);
6057
6058 /* Don't even begin to bother with kernel threads */
6059 r = pidref_is_kernel_thread(pid);
6060 if (r == -ESRCH)
6061 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
6062 if (r < 0)
6063 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
6064 if (r > 0)
6065 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);
6066
6067 return 0;
6068 }
6069
6070 int unit_get_log_level_max(const Unit *u) {
6071 if (u) {
6072 if (u->debug_invocation)
6073 return LOG_DEBUG;
6074
6075 ExecContext *ec = unit_get_exec_context(u);
6076 if (ec && ec->log_level_max >= 0)
6077 return ec->log_level_max;
6078 }
6079
6080 return log_get_max_level();
6081 }
6082
6083 bool unit_log_level_test(const Unit *u, int level) {
6084 assert(u);
6085 return LOG_PRI(level) <= unit_get_log_level_max(u);
6086 }
6087
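/* Editorial example (not part of unit.c): the per-unit level test above is meant to gate expensive
 * log-message preparation, e.g.: */
static void example_log_expensive(Unit *u) {
        if (!unit_log_level_test(u, LOG_DEBUG))
                return;

        log_unit_debug(u, "Current freezer state: %s", freezer_state_to_string(u->freezer_state));
}
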
6088 void unit_log_success(Unit *u) {
6089 assert(u);
6090
6091         /* Let's log the "Deactivated successfully" message at debug level (when the manager runs as a user
6092          * instance) rather than at info level. This message has low informational value for regular users
6093          * and might be a bit overwhelming on a system with a lot of devices. */
6094 log_unit_struct(u,
6095 MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
6096 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SUCCESS_STR),
6097 LOG_UNIT_INVOCATION_ID(u),
6098 LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
6099 }
6100
6101 void unit_log_failure(Unit *u, const char *result) {
6102 assert(u);
6103 assert(result);
6104
6105 log_unit_struct(u, LOG_WARNING,
6106 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_FAILURE_RESULT_STR),
6107 LOG_UNIT_INVOCATION_ID(u),
6108 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
6109 LOG_ITEM("UNIT_RESULT=%s", result));
6110 }
6111
6112 void unit_log_skip(Unit *u, const char *result) {
6113 assert(u);
6114 assert(result);
6115
6116 log_unit_struct(u, LOG_INFO,
6117 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SKIPPED_STR),
6118 LOG_UNIT_INVOCATION_ID(u),
6119 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
6120 LOG_ITEM("UNIT_RESULT=%s", result));
6121 }
6122
6123 void unit_log_process_exit(
6124 Unit *u,
6125 const char *kind,
6126 const char *command,
6127 bool success,
6128 int code,
6129 int status) {
6130
6131 int level;
6132
6133 assert(u);
6134 assert(kind);
6135
6136 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
6137 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
6138 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
6139 * WARNING. */
6140 if (success)
6141 level = LOG_DEBUG;
6142 else if (code == CLD_EXITED)
6143 level = LOG_NOTICE;
6144 else
6145 level = LOG_WARNING;
6146
6147 log_unit_struct(u, level,
6148 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_PROCESS_EXIT_STR),
6149 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
6150 kind,
6151 sigchld_code_to_string(code), status,
6152 strna(code == CLD_EXITED
6153 ? exit_status_to_string(status, EXIT_STATUS_FULL)
6154 : signal_to_string(status)),
6155 success ? " (success)" : ""),
6156 LOG_ITEM("EXIT_CODE=%s", sigchld_code_to_string(code)),
6157 LOG_ITEM("EXIT_STATUS=%i", status),
6158 LOG_ITEM("COMMAND=%s", strna(command)),
6159 LOG_UNIT_INVOCATION_ID(u));
6160 }
6161
6162 int unit_exit_status(Unit *u) {
6163 assert(u);
6164
6165         /* Returns the exit status to propagate for the most recent cycle of this unit: a value in the
6166          * range 0…255 if there's something to propagate; -EOPNOTSUPP if the concept does not apply to this
6167          * unit type; -ENODATA if no data is currently known (for example because the unit hasn't deactivated
6168          * yet); and -EBADE if the main service process exited abnormally (signal/coredump). */
6169
6170 if (!UNIT_VTABLE(u)->exit_status)
6171 return -EOPNOTSUPP;
6172
6173 return UNIT_VTABLE(u)->exit_status(u);
6174 }
6175
6176 int unit_failure_action_exit_status(Unit *u) {
6177 int r;
6178
6179 assert(u);
6180
6181 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6182
6183 if (u->failure_action_exit_status >= 0)
6184 return u->failure_action_exit_status;
6185
6186 r = unit_exit_status(u);
6187 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6188 return 255;
6189
6190 return r;
6191 }
6192
6193 int unit_success_action_exit_status(Unit *u) {
6194 int r;
6195
6196 assert(u);
6197
6198 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6199
6200 if (u->success_action_exit_status >= 0)
6201 return u->success_action_exit_status;
6202
6203 r = unit_exit_status(u);
6204 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6205 return 255;
6206
6207 return r;
6208 }
6209
6210 int unit_test_trigger_loaded(Unit *u) {
6211 Unit *trigger;
6212
6213 /* Tests whether the unit to trigger is loaded */
6214
6215 trigger = UNIT_TRIGGER(u);
6216 if (!trigger)
6217 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6218 "Refusing to start, no unit to trigger.");
6219 if (trigger->load_state != UNIT_LOADED)
6220 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6221 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6222
6223 return 0;
6224 }
6225
6226 void unit_destroy_runtime_data(Unit *u, const ExecContext *context, bool destroy_runtime_dir) {
6227 assert(u);
6228 assert(u->manager);
6229 assert(context);
6230
6231 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6232 if (destroy_runtime_dir && context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
6233 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6234
6235 exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
6236 exec_context_destroy_mount_ns_dir(u);
6237 }
6238
6239 int unit_clean(Unit *u, ExecCleanMask mask) {
6240 UnitActiveState state;
6241
6242 assert(u);
6243
6244 /* Special return values:
6245 *
6246 * -EOPNOTSUPP → cleaning not supported for this unit type
6247 * -EUNATCH → cleaning not defined for this resource type
6248 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6249 * a job queued or similar
6250 */
6251
6252 if (!UNIT_VTABLE(u)->clean)
6253 return -EOPNOTSUPP;
6254
6255 if (mask == 0)
6256 return -EUNATCH;
6257
6258 if (u->load_state != UNIT_LOADED)
6259 return -EBUSY;
6260
6261 if (u->job)
6262 return -EBUSY;
6263
6264 state = unit_active_state(u);
6265 if (state != UNIT_INACTIVE)
6266 return -EBUSY;
6267
6268 return UNIT_VTABLE(u)->clean(u, mask);
6269 }
6270
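/* Editorial example (not part of unit.c): mapping unit_clean()'s special return values to
 * diagnostics, roughly as a method handler might. EXEC_CLEAN_CACHE is used purely for
 * illustration here. */
static int example_clean_cache(Unit *u) {
        int r = unit_clean(u, EXEC_CLEAN_CACHE);
        if (r == -EOPNOTSUPP)
                return log_unit_error_errno(u, r, "Unit type does not support cleaning.");
        if (r == -EUNATCH)
                return log_unit_error_errno(u, r, "Cache resource not defined for this unit.");
        if (r == -EBUSY)
                return log_unit_error_errno(u, r, "Unit is active, not loaded, or has a pending job.");
        return r;
}
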
6271 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6272 assert(u);
6273
6274 if (!UNIT_VTABLE(u)->clean ||
6275 u->load_state != UNIT_LOADED) {
6276 *ret = 0;
6277 return 0;
6278 }
6279
6280 /* When the clean() method is set, can_clean() really should be set too */
6281 assert(UNIT_VTABLE(u)->can_clean);
6282
6283 return UNIT_VTABLE(u)->can_clean(u, ret);
6284 }
6285
6286 bool unit_can_start_refuse_manual(Unit *u) {
6287 return unit_can_start(u) && !u->refuse_manual_start;
6288 }
6289
6290 bool unit_can_stop_refuse_manual(Unit *u) {
6291 return unit_can_stop(u) && !u->refuse_manual_stop;
6292 }
6293
6294 bool unit_can_isolate_refuse_manual(Unit *u) {
6295 return unit_can_isolate(u) && !u->refuse_manual_start;
6296 }
6297
6298 void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret_next, FreezerState *ret_objective) {
6299 FreezerState current, parent, next, objective;
6300
6301 assert(u);
6302 assert(action >= 0);
6303 assert(action < _FREEZER_ACTION_MAX);
6304 assert(ret_next);
6305 assert(ret_objective);
6306
6307 /* This function determines the correct freezer state transitions for a unit
6308 * given the action being requested. It returns the next state, and also the "objective",
6309 * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
6310 * ultimately want to achieve. */
6311
6312 current = u->freezer_state;
6313
6314 Unit *slice = UNIT_GET_SLICE(u);
6315 if (slice)
6316 parent = slice->freezer_state;
6317 else
6318 parent = FREEZER_RUNNING;
6319
6320 switch (action) {
6321
6322 case FREEZER_FREEZE:
6323 /* We always "promote" a freeze initiated by parent into a normal freeze */
6324 if (IN_SET(current, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
6325 next = FREEZER_FROZEN;
6326 else
6327 next = FREEZER_FREEZING;
6328 break;
6329
6330 case FREEZER_THAW:
6331 /* Thawing is the most complicated operation here, because we can't thaw a unit
6332 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
6333 * initiated by parent if the parent is frozen */
6334 if (IN_SET(current, FREEZER_RUNNING, FREEZER_THAWING,
6335 FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) /* Should usually be refused by unit_freezer_action */
6336 next = current;
6337 else if (current == FREEZER_FREEZING) {
6338 if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
6339 next = FREEZER_THAWING;
6340 else
6341 next = FREEZER_FREEZING_BY_PARENT;
6342 } else if (current == FREEZER_FROZEN) {
6343 if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
6344 next = FREEZER_THAWING;
6345 else
6346 next = FREEZER_FROZEN_BY_PARENT;
6347 } else
6348 assert_not_reached();
6349 break;
6350
6351 case FREEZER_PARENT_FREEZE:
6352 /* We need to avoid accidentally demoting units frozen manually */
6353 if (IN_SET(current, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
6354 next = current;
6355 else
6356 next = FREEZER_FREEZING_BY_PARENT;
6357 break;
6358
6359 case FREEZER_PARENT_THAW:
6360 /* We don't want to thaw units from a parent if they were frozen
6361 * manually, so for such units this action is a no-op */
6362 if (IN_SET(current, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
6363 next = current;
6364 else
6365 next = FREEZER_THAWING;
6366 break;
6367
6368 default:
6369 assert_not_reached();
6370 }
6371
6372 objective = freezer_state_finish(next);
6373 if (objective == FREEZER_FROZEN_BY_PARENT)
6374 objective = FREEZER_FROZEN;
6375 assert(IN_SET(objective, FREEZER_RUNNING, FREEZER_FROZEN));
6376
6377 *ret_next = next;
6378 *ret_objective = objective;
6379 }
6380
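/* Editorial example (not part of unit.c): how a caller consumes the state machine above; the
 * states named in the comment are illustrative. */
static void example_apply_freeze(Unit *u) {
        FreezerState next, objective;

        unit_next_freezer_state(u, FREEZER_FREEZE, &next, &objective);
        /* E.g. if u->freezer_state was FREEZER_FROZEN_BY_PARENT, the parent-initiated freeze is
         * promoted: next == FREEZER_FROZEN, objective == FREEZER_FROZEN. */
        unit_set_freezer_state(u, next);
}
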
6381 bool unit_can_freeze(const Unit *u) {
6382 assert(u);
6383
6384 if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
6385 return false;
6386
6387 if (UNIT_VTABLE(u)->can_freeze)
6388 return UNIT_VTABLE(u)->can_freeze(u);
6389
6390         return UNIT_VTABLE(u)->freezer_action; /* i.e. whether the unit type implements freezer_action() */
6391 }
6392
6393 void unit_set_freezer_state(Unit *u, FreezerState state) {
6394 assert(u);
6395 assert(state >= 0);
6396 assert(state < _FREEZER_STATE_MAX);
6397
6398 if (u->freezer_state == state)
6399 return;
6400
6401 log_unit_debug(u, "Freezer state changed %s -> %s",
6402 freezer_state_to_string(u->freezer_state), freezer_state_to_string(state));
6403
6404 u->freezer_state = state;
6405
6406 unit_add_to_dbus_queue(u);
6407 }
6408
6409 void unit_freezer_complete(Unit *u, FreezerState kernel_state) {
6410 bool expected;
6411
6412 assert(u);
6413 assert(IN_SET(kernel_state, FREEZER_RUNNING, FREEZER_FROZEN));
6414
6415 expected = IN_SET(u->freezer_state, FREEZER_RUNNING, FREEZER_THAWING) == (kernel_state == FREEZER_RUNNING);
6416
6417 unit_set_freezer_state(u, expected ? freezer_state_finish(u->freezer_state) : kernel_state);
6418 log_unit_info(u, "Unit now %s.", u->freezer_state == FREEZER_RUNNING ? "thawed" :
6419 freezer_state_to_string(u->freezer_state));
6420
6421         /* If the cgroup's final state contradicts what we requested, report the operation as canceled. */
6422 bus_unit_send_pending_freezer_message(u, /* canceled = */ !expected);
6423 }
6424
6425 int unit_freezer_action(Unit *u, FreezerAction action) {
6426 UnitActiveState s;
6427 int r;
6428
6429 assert(u);
6430 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
6431
6432 if (!unit_can_freeze(u))
6433 return -EOPNOTSUPP;
6434
6435 if (u->job)
6436 return -EBUSY;
6437
6438 if (u->load_state != UNIT_LOADED)
6439 return -EHOSTDOWN;
6440
6441 s = unit_active_state(u);
6442 if (s != UNIT_ACTIVE)
6443 return -EHOSTDOWN;
6444
6445 if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
6446 return -EALREADY;
6447 if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
6448 return -EALREADY;
6449 if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
6450 return -EDEADLK;
6451
6452 r = UNIT_VTABLE(u)->freezer_action(u, action);
6453 if (r <= 0)
6454 return r;
6455
6456 assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
6457 return 1;
6458 }
6459
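/* Returns the condition that caused the most recent condition check to fail, or NULL if the check
 * passed: a failed non-trigger condition is returned directly; otherwise, if no trigger condition
 * succeeded, the first failed trigger condition is returned. */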
6460 Condition *unit_find_failed_condition(Unit *u) {
6461 Condition *failed_trigger = NULL;
6462 bool has_succeeded_trigger = false;
6463
6464 if (u->condition_result)
6465 return NULL;
6466
6467 LIST_FOREACH(conditions, c, u->conditions)
6468 if (c->trigger) {
6469 if (c->result == CONDITION_SUCCEEDED)
6470 has_succeeded_trigger = true;
6471 else if (!failed_trigger)
6472 failed_trigger = c;
6473 } else if (c->result != CONDITION_SUCCEEDED)
6474 return c;
6475
6476 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6477 }
6478
6479 int unit_can_live_mount(Unit *u, sd_bus_error *error) {
6480 assert(u);
6481
6482 if (!UNIT_VTABLE(u)->live_mount)
6483 return sd_bus_error_setf(
6484 error,
6485 SD_BUS_ERROR_NOT_SUPPORTED,
6486 "Live mounting not supported by unit type '%s'",
6487 unit_type_to_string(u->type));
6488
6489 if (u->load_state != UNIT_LOADED)
6490 return sd_bus_error_setf(
6491 error,
6492 BUS_ERROR_NO_SUCH_UNIT,
6493 "Unit '%s' not loaded, cannot live mount",
6494 u->id);
6495
6496 if (!UNIT_VTABLE(u)->can_live_mount)
6497 return 0;
6498
6499 return UNIT_VTABLE(u)->can_live_mount(u, error);
6500 }
6501
6502 int unit_live_mount(
6503 Unit *u,
6504 const char *src,
6505 const char *dst,
6506 sd_bus_message *message,
6507 MountInNamespaceFlags flags,
6508 const MountOptions *options,
6509 sd_bus_error *error) {
6510
6511 assert(u);
6512 assert(UNIT_VTABLE(u)->live_mount);
6513
6514 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
6515 log_unit_debug(u, "Unit not active, cannot perform live mount.");
6516 return sd_bus_error_setf(
6517 error,
6518 BUS_ERROR_UNIT_INACTIVE,
6519 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: unit not active",
6520 src,
6521 dst,
6522 u->id);
6523 }
6524
6525 if (unit_active_state(u) == UNIT_REFRESHING) {
6526 log_unit_debug(u, "Unit already live mounting, refusing further requests.");
6527 return sd_bus_error_setf(
6528 error,
6529 BUS_ERROR_UNIT_BUSY,
6530 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another live mount in progress",
6531 src,
6532 dst,
6533 u->id);
6534 }
6535
6536 if (u->job) {
6537                 log_unit_debug(u, "Unit already has a job in progress, cannot live mount.");
6538 return sd_bus_error_setf(
6539 error,
6540 BUS_ERROR_UNIT_BUSY,
6541 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another operation in progress",
6542 src,
6543 dst,
6544 u->id);
6545 }
6546
6547 return UNIT_VTABLE(u)->live_mount(u, src, dst, message, flags, options, error);
6548 }
6549
6550 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6551 [COLLECT_INACTIVE] = "inactive",
6552 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6553 };
6554
6555 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6556
6557 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6558 Unit *i;
6559
6560 assert(u);
6561
6562         /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6563          * NULL, checks if the unit has *any* dependency of that atom. Returns 'other' if found (or, if
6564          * 'other' is NULL, the first entry found), or NULL if not found. */
6565
6566 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6567 if (!other || other == i)
6568 return i;
6569
6570 return NULL;
6571 }
6572
6573 int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
6574 _cleanup_free_ Unit **array = NULL;
6575 size_t n = 0;
6576 Unit *other;
6577
6578 assert(u);
6579 assert(ret_array);
6580
6581         /* Gets a list of units matching a specific atom as an array. This is useful when iterating through
6582          * dependencies while modifying them: the array is an "atomic snapshot" of sorts that can be read
6583          * while the dependency table is continuously updated. */
6584
6585 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6586 if (!GREEDY_REALLOC(array, n + 1))
6587 return -ENOMEM;
6588
6589 array[n++] = other;
6590 }
6591
6592 *ret_array = TAKE_PTR(array);
6593
6594 assert(n <= INT_MAX);
6595 return (int) n;
6596 }
6597
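/* Editorial example (not part of unit.c): typical use of the snapshot array, which stays valid
 * even if the dependency hashmaps are modified while visiting. UNIT_ATOM_BEFORE is just an
 * illustrative atom. */
static void example_visit_before_deps(Unit *u) {
        _cleanup_free_ Unit **array = NULL;
        int n;

        n = unit_get_dependency_array(u, UNIT_ATOM_BEFORE, &array);
        if (n < 0)
                return;

        for (int i = 0; i < n; i++)
                unit_add_to_gc_queue(array[i]);
}
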
6598 int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
6599 _cleanup_set_free_ Set *units = NULL, *queue = NULL;
6600 Unit *other;
6601 int r;
6602
6603 assert(u);
6604 assert(ret);
6605
6606         /* Similar to unit_get_dependency_array(), but also follows the same dependency atom transitively through the units found, returning the full closure as a set. */
6607
6608 do {
6609 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6610 r = set_ensure_put(&units, NULL, other);
6611 if (r < 0)
6612 return r;
6613 if (r == 0)
6614 continue;
6615 r = set_ensure_put(&queue, NULL, other);
6616 if (r < 0)
6617 return r;
6618 }
6619 } while ((u = set_steal_first(queue)));
6620
6621 *ret = TAKE_PTR(units);
6622 return 0;
6623 }
6624
6625 int unit_arm_timer(
6626 Unit *u,
6627 sd_event_source **source,
6628 bool relative,
6629 usec_t usec,
6630 sd_event_time_handler_t handler) {
6631
6632 int r;
6633
6634 assert(u);
6635 assert(source);
6636 assert(handler);
6637
6638 if (*source) {
6639 if (usec == USEC_INFINITY)
6640 return sd_event_source_set_enabled(*source, SD_EVENT_OFF);
6641
6642 r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
6643 if (r < 0)
6644 return r;
6645
6646 return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
6647 }
6648
6649 if (usec == USEC_INFINITY)
6650 return 0;
6651
6652 r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
6653 u->manager->event,
6654 source,
6655 CLOCK_MONOTONIC,
6656 usec, 0,
6657 handler,
6658 u);
6659 if (r < 0)
6660 return r;
6661
6662 const char *d = strjoina(unit_type_to_string(u->type), "-timer");
6663 (void) sd_event_source_set_description(*source, d);
6664
6665 return 0;
6666 }
6667
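/* Editorial example (not part of unit.c): arming a relative one-shot timeout with the helper
 * above. The names are hypothetical; the handler signature is the generic
 * sd_event_time_handler_t one. */
static int example_on_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
        Unit *u = ASSERT_PTR(userdata);

        log_unit_warning(u, "Example operation timed out.");
        return 0;
}

static int example_arm_timeout(Unit *u, sd_event_source **source) {
        return unit_arm_timer(u, source, /* relative= */ true, 30 * USEC_PER_SEC, example_on_timeout);
}
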
6668 bool unit_passes_filter(Unit *u, char * const *states, char * const *patterns) {
6669 assert(u);
6670
6671 if (!strv_isempty(states)) {
6672 char * const *unit_states = STRV_MAKE(
6673 unit_load_state_to_string(u->load_state),
6674 unit_active_state_to_string(unit_active_state(u)),
6675 unit_sub_state_to_string(u));
6676
6677 if (!strv_overlap(states, unit_states))
6678 return false;
6679 }
6680
6681 return strv_fnmatch_or_empty(patterns, u->id, FNM_NOESCAPE);
6682 }
6683
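/* Editorial example (not part of unit.c): filtering as a systemctl-style listing might, with
 * illustrative state and pattern lists. */
static bool example_list_filter(Unit *u) {
        return unit_passes_filter(u,
                                  STRV_MAKE("active", "running"),
                                  STRV_MAKE("ssh*", "*.socket"));
}
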
6684 static int unit_get_nice(Unit *u) {
6685 ExecContext *ec;
6686
6687 ec = unit_get_exec_context(u);
6688 return ec ? ec->nice : 0;
6689 }
6690
6691 static uint64_t unit_get_cpu_weight(Unit *u) {
6692 CGroupContext *cc;
6693
6694 cc = unit_get_cgroup_context(u);
6695 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6696 }
6697
6698 int unit_compare_priority(Unit *a, Unit *b) {
6699 int ret;
6700
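        /* Orders units by priority: descending by unit type and CPU weight, ascending by nice
         * level, with the unit name as a stable tie-breaker. */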
6701 ret = CMP(a->type, b->type);
6702 if (ret != 0)
6703 return -ret;
6704
6705 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6706 if (ret != 0)
6707 return -ret;
6708
6709 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6710 if (ret != 0)
6711 return ret;
6712
6713 return strcmp(a->id, b->id);
6714 }
6715
6716 const char* unit_log_field(const Unit *u) {
6717 return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "UNIT=" : "USER_UNIT=";
6718 }
6719
6720 const char* unit_invocation_log_field(const Unit *u) {
6721 return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "INVOCATION_ID=" : "USER_INVOCATION_ID=";
6722 }
6723
6724 const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
6725 [UNIT_PATH] = &activation_details_path_vtable,
6726 [UNIT_TIMER] = &activation_details_timer_vtable,
6727 };
6728
6729 ActivationDetails *activation_details_new(Unit *trigger_unit) {
6730 _cleanup_free_ ActivationDetails *details = NULL;
6731
6732 assert(trigger_unit);
6733 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6734 assert(trigger_unit->id);
6735
6736 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6737 if (!details)
6738 return NULL;
6739
6740 *details = (ActivationDetails) {
6741 .n_ref = 1,
6742 .trigger_unit_type = trigger_unit->type,
6743 };
6744
6745 details->trigger_unit_name = strdup(trigger_unit->id);
6746 if (!details->trigger_unit_name)
6747 return NULL;
6748
6749 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6750 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6751
6752 return TAKE_PTR(details);
6753 }
6754
6755 static ActivationDetails *activation_details_free(ActivationDetails *details) {
6756 if (!details)
6757 return NULL;
6758
6759 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6760 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6761
6762 free(details->trigger_unit_name);
6763
6764 return mfree(details);
6765 }
6766
6767 void activation_details_serialize(const ActivationDetails *details, FILE *f) {
6768 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6769 return;
6770
6771 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6772 if (details->trigger_unit_name)
6773 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6774 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6775 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6776 }
6777
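/* Editorial note: serialize_item() above emits plain "key=value" lines into the manager's
 * serialization stream, e.g.:
 *
 *     activation-details-unit-type=timer
 *     activation-details-unit-name=foo.timer
 *
 * activation_details_deserialize() below consumes these key by key. */
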
6778 int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6779 int r;
6780
6781 assert(key);
6782 assert(value);
6783 assert(details);
6784
6785 if (!*details) {
6786 UnitType t;
6787
6788 if (!streq(key, "activation-details-unit-type"))
6789 return -EINVAL;
6790
6791 t = unit_type_from_string(value);
6792 if (t < 0)
6793 return t;
6794
6795 /* The activation details vtable has defined ops only for path and timer units */
6796 if (!activation_details_vtable[t])
6797 return -EINVAL;
6798
6799 *details = malloc0(activation_details_vtable[t]->object_size);
6800 if (!*details)
6801 return -ENOMEM;
6802
6803 **details = (ActivationDetails) {
6804 .n_ref = 1,
6805 .trigger_unit_type = t,
6806 };
6807
6808 return 0;
6809 }
6810
6811 if (streq(key, "activation-details-unit-name")) {
6812 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6813 if (r < 0)
6814 return r;
6815
6816 return 0;
6817 }
6818
6819 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6820 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6821
6822 return -EINVAL;
6823 }
6824
6825 int activation_details_append_env(const ActivationDetails *details, char ***strv) {
6826 int r = 0;
6827
6828 assert(strv);
6829
6830 if (!details)
6831 return 0;
6832
6833 if (!isempty(details->trigger_unit_name)) {
6834 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6835 if (!s)
6836 return -ENOMEM;
6837
6838 r = strv_consume(strv, TAKE_PTR(s));
6839 if (r < 0)
6840 return r;
6841 }
6842
6843 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6844 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6845 if (r < 0)
6846 return r;
6847 }
6848
6849 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6850 }
6851
6852 int activation_details_append_pair(const ActivationDetails *details, char ***strv) {
6853 int r = 0;
6854
6855 assert(strv);
6856
6857 if (!details)
6858 return 0;
6859
6860 if (!isempty(details->trigger_unit_name)) {
6861 r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
6862 if (r < 0)
6863 return r;
6864 }
6865
6866 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6867 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6868 if (r < 0)
6869 return r;
6870 }
6871
6872 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6873 }
6874
6875 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6876
6877 static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
6878 [UNIT_MOUNT_WANTS] = "WantsMountsFor",
6879 [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
6880 };
6881
6882 DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);
6883
6884 static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
6885 [OOM_CONTINUE] = "continue",
6886 [OOM_STOP] = "stop",
6887 [OOM_KILL] = "kill",
6888 };
6889
6890 DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);
6891
6892 UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
6893 switch (t) {
6894
6895 case UNIT_MOUNT_WANTS:
6896 return UNIT_WANTS;
6897
6898 case UNIT_MOUNT_REQUIRES:
6899 return UNIT_REQUIRES;
6900
6901 default:
6902 assert_not_reached();
6903 }
6904 }