/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <fnmatch.h>
#include <linux/capability.h>
#include <unistd.h>

#include "sd-bus.h"
#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "ansi-color.h"
#include "bpf-firewall.h"
#include "bpf-restrict-fs.h"
#include "bus-common-errors.h"
#include "bus-internal.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "chase.h"
#include "condition.h"
#include "dbus-unit.h"
#include "dropin.h"
#include "dynamic-user.h"
#include "env-util.h"
#include "escape.h"
#include "exec-credential.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "install.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "logarithm.h"
#include "manager.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "mountpoint-util.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "siphash24.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit.h"
#include "unit-name.h"
#include "user-util.h"
#include "varlink.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC     (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES     (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES     UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC      (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES  (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES      (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES      (128 * U64_MB)

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE]   = &service_vtable,
        [UNIT_SOCKET]    = &socket_vtable,
        [UNIT_TARGET]    = &target_vtable,
        [UNIT_DEVICE]    = &device_vtable,
        [UNIT_MOUNT]     = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP]      = &swap_vtable,
        [UNIT_TIMER]     = &timer_vtable,
        [UNIT_PATH]      = &path_vtable,
        [UNIT_SLICE]     = &slice_vtable,
        [UNIT_SCOPE]     = &scope_vtable,
};
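
/* Every unit type above implements the same UnitVTable interface, and generic code in this file
 * dispatches through it via the UNIT_VTABLE() macro. Illustrative sketch of the dispatch pattern
 * (hypothetical caller, not code from this file):
 *
 *     const UnitVTable *vt = unit_vtable[u->type];
 *     if (vt->start)
 *             r = vt->start(u);
 */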

Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = _PRESET_ACTION_INVALID;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;

        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->last_section_private = -1;

        u->start_ratelimit = m->defaults.start_limit;

        u->auto_start_stop_ratelimit = (const RateLimit) {
                .interval = 10 * USEC_PER_SEC,
                .burst = 16
        };

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}
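
/* Illustrative sketch of a typical call (hypothetical; real call sites live in manager.c and the
 * per-type implementations, and the object_size field is assumed from UnitVTable):
 *
 *     Unit *u = NULL;
 *     r = unit_new_for_name(m, unit_vtable[UNIT_SERVICE]->object_size, "foo.service", &u);
 *     if (r < 0)
 *             return r;
 *
 * On success the name is registered in m->units; on failure the half-constructed unit is released
 * by the unit_freep cleanup handler. */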

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* A user manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit files. */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops_free, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "Instance is not set when adding name '%s'.", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "Failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Unit already exists when adding name '%s'.", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Name '%s' is invalid.", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Failed to derive unit type from name '%s'.", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Unit type is illegal: u->type(%d) and t(%d) for name '%s'.",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Templates are not allowed for name '%s'.", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "Cannot add name, manager has too many units.");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
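
/* Worked example for the template handling above (illustrative): if u is an instance unit with
 * u->instance == "eth0", adding the name "foo@.service" first resolves via
 * unit_name_replace_instance() to "foo@eth0.service", and only then runs through the validity,
 * type-consistency and instance-consistency checks. */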

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}

void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* If we saw a cgroup empty event for this unit, stay around until we have processed it, so
         * that we remove the empty cgroup if possible. Similarly, process any pending OOM events if
         * they are already queued before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
         * around. Units with active processes should never be collected. */
        r = unit_cgroup_is_empty(u);
        if (r <= 0 && !IN_SET(r, -ENXIO, -EOWNERDEAD))
                return false; /* ENXIO/EOWNERDEAD means: currently not realized */

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
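
/* Summarizing the checks above: a unit survives GC whenever a job or nop job is installed, it is
 * perpetual, a cgroup empty/OOM event or D-Bus signal is still queued, a bus client still tracks it,
 * its state doesn't satisfy CollectMode=, an OnSuccess=/OnFailure= handler still has jobs, its cgroup
 * is not empty, or the type-specific may_gc() vetoes collection. */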

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}

void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}

static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}

void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}

void unit_add_to_stop_notify_queue(Unit *u) {
        assert(u);

        if (u->in_stop_notify_queue)
                return;

        assert(UNIT_VTABLE(u)->stop_notify);

        LIST_PREPEND(stop_notify_queue, u->manager->stop_notify_queue, u);
        u->in_stop_notify_queue = true;
}

void unit_remove_from_stop_notify_queue(Unit *u) {
        assert(u);

        if (!u->in_stop_notify_queue)
                return;

        LIST_REMOVE(stop_notify_queue, u->manager->stop_notify_queue, u);
        u->in_stop_notify_queue = false;
}

static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                        other->dependency_generation++;
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
        u->dependency_generation++;
}
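
/* The table cleared above is a two-level structure (see also unit_per_dependency_type_hashmap_update()
 * below):
 *
 *     u->dependencies: Hashmap of UnitDependency (stored as void*) -> Hashmap of Unit* -> UnitDependencyInfo
 *
 * Clearing u's entries therefore also requires visiting each referenced unit's own table to drop the
 * reverse entries that point back at u, which is what the nested loop above does. */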

static void unit_remove_transient(Unit *u) {
        assert(u);
        assert(u->manager);

        if (!u->transient)
                return;

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }

        if (u->fragment_path) {
                (void) unlink(u->fragment_path);
                (void) unit_file_remove_from_name_map(
                                &u->manager->lookup_paths,
                                &u->manager->unit_cache_timestamp_hash,
                                &u->manager->unit_id_map,
                                &u->manager->unit_name_map,
                                &u->manager->unit_path_cache,
                                u->fragment_path);
        }
}

static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from the slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u, /* drop_cgroup_runtime = */ true);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        unit_remove_from_stop_notify_queue(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}

static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for it. In that case the other unit's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}

static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some dependency types */
        return IN_SET(dependency,
                      UNIT_CONFLICTS,
                      UNIT_CONFLICTED_BY,
                      UNIT_BEFORE,
                      UNIT_AFTER,
                      UNIT_ON_SUCCESS,
                      UNIT_ON_FAILURE,
                      UNIT_TRIGGERS,
                      UNIT_TRIGGERED_BY);
}

static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
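
/* Note the storage trick above: UnitDependencyInfo packs the origin and destination dependency masks
 * into a single pointer-sized value (guaranteed by the assert_cc() above), so the masks can be stored
 * directly as the hashmap value without a separate allocation. Illustrative sketch of the union, as
 * assumed from unit.h (see the header for the authoritative definition):
 *
 *     typedef union UnitDependencyInfo {
 *             void *data;
 *             struct {
 *                     UnitDependencyMask origin_mask:16;
 *                     UnitDependencyMask destination_mask:16;
 *             } _packed_;
 *     } UnitDependencyInfo;
 */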

static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* If the target unit already has dependencies of this type, merge them in individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If u does not have dependencies
                 * of this type yet, move them over per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);

        u->dependency_generation++;
        other->dependency_generation++;
}
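
/* Worked example (illustrative, with hypothetical unit names): suppose b.service is merged into
 * a.service while c.service has After=b.service. The loop above steals b's per-type dependency maps,
 * rewrites c's reverse entry so that it points at a.service instead (the
 * unit_per_dependency_type_hashmap_update() call on back_deps), drops any dependency of b that
 * pointed back at a, and finally merges or moves each per-type map into a. */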

int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't roll back reservations if we
         * fail, as we don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], i->path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* This must be already set in unit_patch_contexts(). */
        assert(c->private_var_tmp >= 0 && c->private_var_tmp < _PRIVATE_TMP_MAX);

        if (c->private_tmp == PRIVATE_TMP_CONNECTED) {
                assert(c->private_var_tmp == PRIVATE_TMP_CONNECTED);

                r = unit_add_mounts_for(u, "/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

        } else if (c->private_var_tmp == PRIVATE_TMP_DISCONNECTED && !exec_context_with_rootfs(c)) {
                /* Even if PrivateTmp=disconnected, we still require the /var/tmp/ mountpoint to be present,
                 * i.e. /var/ needs to be mounted. See comments in unit_patch_contexts(). */
                r = unit_add_mounts_for(u, "/var/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */
        if (c->log_namespace) {
                static const struct {
                        const char *template;
                        UnitType type;
                } deps[] = {
                        { "systemd-journald",         UNIT_SOCKET,  },
                        { "systemd-journald-varlink", UNIT_SOCKET,  },
                        { "systemd-journald-sync",    UNIT_SERVICE, },
                };

                FOREACH_ELEMENT(i, deps) {
                        _cleanup_free_ char *unit = NULL;

                        r = unit_name_build_from_type(i->template, c->log_namespace, i->type, &unit);
                        if (r < 0)
                                return r;

                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, unit, true, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        } else if (IN_SET(c->std_output, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                                         EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) ||
                   IN_SET(c->std_error,  EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                                         EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE)) {

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        return 0;
}
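
/* Example for the directory handling above (illustrative; the concrete prefix is an assumption based
 * on the system manager, where prefix[EXEC_DIRECTORY_STATE] is typically /var/lib): a unit with
 * StateDirectory=foo gets the path /var/lib/foo, hence an implicit RequiresMountsFor=/var/lib/foo,
 * plus, as a writable directory type, an After= ordering on systemd-remount-fs.service. */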

const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
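
/* Example outputs (illustrative) for a unit with id "foo.service" and Description=Frobnicator:
 * STATUS_UNIT_FORMAT_NAME yields "foo.service", STATUS_UNIT_FORMAT_DESCRIPTION yields "Frobnicator",
 * and STATUS_UNIT_FORMAT_COMBINED (with ret_combined_buffer provided) yields
 * "foo.service - Frobnicator". */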

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        u = unit_follow_merge(u);

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(u);
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice) {
                if (!IN_SET(slice->freezer_state, FREEZER_RUNNING, FREEZER_THAWING))
                        u->freezer_state = FREEZER_FROZEN_BY_PARENT;

                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
        }

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(
                                                        u->manager,
                                                        p,
                                                        /* path= */ NULL,
                                                        /* e= */ NULL,
                                                        &m);
                                        continue;
                                }
                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}

static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}

static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}

static const struct {
        UnitDependencyAtom atom;
        size_t job_mode_offset;
        const char *dependency_name;
        const char *job_mode_setting_name;
} on_termination_settings[] = {
        { UNIT_ATOM_ON_SUCCESS, offsetof(Unit, on_success_job_mode), "OnSuccess=", "OnSuccessJobMode=" },
        { UNIT_ATOM_ON_FAILURE, offsetof(Unit, on_failure_job_mode), "OnFailure=", "OnFailureJobMode=" },
};

static int unit_validate_on_termination_job_modes(Unit *u) {
        assert(u);

        /* Verify that if On{Success,Failure}JobMode=isolate, only one unit gets specified. */

        FOREACH_ELEMENT(setting, on_termination_settings) {
                JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);

                if (job_mode != JOB_ISOLATE)
                        continue;

                Unit *other, *found = NULL;
                UNIT_FOREACH_DEPENDENCY(other, u, setting->atom) {
                        if (!found)
                                found = other;
                        else if (found != other)
                                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC),
                                                            "More than one %s dependency specified but %sisolate set. Refusing.",
                                                            setting->dependency_name, setting->job_mode_setting_name);
                }
        }

        return 0;
}
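
/* Example of what the check above rejects (illustrative unit file fragment, hypothetical names):
 *
 *     [Unit]
 *     OnFailure=rescue.target notify-admin.service
 *     OnFailureJobMode=isolate
 *
 * An isolate job replaces everything else that is running, so only a single OnFailure= unit makes
 * sense in this mode; loading fails with ENOEXEC, which unit_load() maps to UNIT_BAD_SETTING. */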

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_termination_job_modes(u);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         unit_log_field(u),
                                         u->id,
                                         unit_invocation_log_field(u),
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}

void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}

int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(
                        u->manager,
                        u->start_limit_action,
                        EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S,
                        u->reboot_arg,
                        /* exit_status= */ -1,
                        reason);

        return -ECANCELED;
}
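
/* Illustrative numbers: with the documented manager defaults (DefaultStartLimitIntervalSec=10s,
 * DefaultStartLimitBurst=5, both assumptions here, see systemd-system.conf(5)), the sixth start
 * request within 10 seconds makes ratelimit_below() return false, the unit enters the
 * start-limit-hit state, and the configured StartLimitAction= (if any) is executed. */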

static bool unit_verify_deps(Unit *u) {
        Unit *other;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
         * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
         * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
         * that are not used in conjunction with After= as for them any such check would make things entirely
         * racy. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {

                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 *         -EALREADY:     Unit is already started.
 *         -ECOMM:        Condition failed
 *         -EAGAIN:       An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:        This unit type does not support starting.
 *         -ECANCELED:    Start limit hit, too many requests for now
 *         -EPROTO:       Assert failed
 *         -EINVAL:       Unit not loaded
 *         -EOPNOTSUPP:   Unit type not supported
 *         -ENOLINK:      The necessary dependencies are not fulfilled.
 *         -ESTALE:       This unit has been started before and can't be started a second time
 *         -EDEADLK:      This unit is frozen
 *         -ENOENT:       This is a triggering unit and unit to trigger is not loaded
 *         -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached
 */
1902int unit_start(Unit *u, ActivationDetails *details) {
1903 UnitActiveState state;
1904 Unit *following;
1905 int r;
1906
1907 assert(u);
1908
1909 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1910 if (UNIT_VTABLE(u)->subsystem_ratelimited) {
1911 r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
1912 if (r < 0)
1913 return r;
1914 if (r > 0)
1915 return -EAGAIN;
1916 }
1917
1918 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1919 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1920 * waiting is finished. */
1921 state = unit_active_state(u);
1922 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1923 return -EALREADY;
1924 if (state == UNIT_MAINTENANCE)
1925 return -EAGAIN;
1926
1927 /* Units that aren't loaded cannot be started */
1928 if (u->load_state != UNIT_LOADED)
1929 return -EINVAL;
1930
1931 /* Refuse starting scope units more than once */
1932 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1933 return -ESTALE;
1934
1935 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1936 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1937 * recheck the condition in that case. */
1938 if (state != UNIT_ACTIVATING &&
1939 !unit_test_condition(u))
1940 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");
1941
1942 /* If the asserts failed, fail the entire job */
1943 if (state != UNIT_ACTIVATING &&
1944 !unit_test_assert(u))
1945 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1946
1947 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1948 * condition checks, so that we rather return condition check errors (which are usually not
1949 * considered a true failure) than "not supported" errors (which are considered a failure).
1950 */
1951 if (!unit_type_supported(u->type))
1952 return -EOPNOTSUPP;
1953
1954 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1955 * should have taken care of this already, but let's check this here again. After all, our
1956 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1957 if (!unit_verify_deps(u))
1958 return -ENOLINK;
1959
1960 /* Forward to the main object, if we aren't it. */
1961 following = unit_following(u);
1962 if (following) {
1963 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1964 return unit_start(following, details);
1965 }
1966
1967 /* Check to make sure the unit isn't frozen */
1968 if (u->freezer_state != FREEZER_RUNNING)
1969 return -EDEADLK;
1970
1971 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1972 if (UNIT_VTABLE(u)->can_start) {
1973 r = UNIT_VTABLE(u)->can_start(u);
1974 if (r < 0)
1975 return r;
1976 }
1977
1978 /* If it is stopped, but we cannot start it, then fail */
1979 if (!UNIT_VTABLE(u)->start)
1980 return -EBADR;
1981
1982 if (UNIT_IS_INACTIVE_OR_FAILED(state)) {
1983 Slice *slice = SLICE(UNIT_GET_SLICE(u));
1984
1985 if (slice) {
1986                        /* Check the hard concurrency limit. Note this is partially redundant: we already
1987                         * checked this when enqueuing jobs. However, between the time we enqueued this and
1988                         * the time we are dispatching the queue, the configuration might have changed, hence
1989                         * check again here. */
1990 if (slice_concurrency_hard_max_reached(slice, u))
1991 return -ETOOMANYREFS;
1992
1993                        /* Also check the soft concurrency limit, and return EAGAIN so that the job is kept
1994                         * in the queue. */
1995 if (slice_concurrency_soft_max_reached(slice, u))
1996 return -EAGAIN; /* Try again, keep in queue */
1997 }
1998 }
1999
2000 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
2001 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
2002 * waits for a holdoff timer to elapse before it will start again. */
2003
2004 unit_add_to_dbus_queue(u);
2005
2006 if (!u->activation_details) /* Older details object wins */
2007 u->activation_details = activation_details_ref(details);
2008
2009 return UNIT_VTABLE(u)->start(u);
2010}
2011
2012bool unit_can_start(Unit *u) {
2013 assert(u);
2014
2015 if (u->load_state != UNIT_LOADED)
2016 return false;
2017
2018 if (!unit_type_supported(u->type))
2019 return false;
2020
2021 /* Scope units may be started only once */
2022 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
2023 return false;
2024
2025 return !!UNIT_VTABLE(u)->start;
2026}
2027
2028bool unit_can_isolate(Unit *u) {
2029 assert(u);
2030
2031 return unit_can_start(u) &&
2032 u->allow_isolate;
2033}
2034
2035/* Errors:
2036 * -EBADR: This unit type does not support stopping.
2037 * -EALREADY: Unit is already stopped.
2038 * -EAGAIN: An operation is already in progress. Retry later.
2039 * -EDEADLK: Unit is frozen
2040 */
2041int unit_stop(Unit *u) {
2042 UnitActiveState state;
2043 Unit *following;
2044
2045 assert(u);
2046
2047 state = unit_active_state(u);
2048 if (UNIT_IS_INACTIVE_OR_FAILED(state))
2049 return -EALREADY;
2050
2051 following = unit_following(u);
2052 if (following) {
2053 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
2054 return unit_stop(following);
2055 }
2056
2057 /* Check to make sure the unit isn't frozen */
2058 if (u->freezer_state != FREEZER_RUNNING)
2059 return -EDEADLK;
2060
2061 if (!UNIT_VTABLE(u)->stop)
2062 return -EBADR;
2063
2064 unit_add_to_dbus_queue(u);
2065
2066 return UNIT_VTABLE(u)->stop(u);
2067}
2068
2069bool unit_can_stop(Unit *u) {
2070 assert(u);
2071
2072 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2073 * Extrinsic units follow external state and they may stop following external state changes
2074 * (hence we return true here), but an attempt to do this through the manager will fail. */
2075
2076 if (!unit_type_supported(u->type))
2077 return false;
2078
2079 if (u->perpetual)
2080 return false;
2081
2082 return !!UNIT_VTABLE(u)->stop;
2083}
2084
2085/* Errors:
2086 * -EBADR: This unit type does not support reloading.
2087 * -ENOEXEC: Unit is not started.
2088 * -EAGAIN: An operation is already in progress. Retry later.
2089 * -EDEADLK: Unit is frozen.
2090 */
2091int unit_reload(Unit *u) {
2092 UnitActiveState state;
2093 Unit *following;
2094
2095 assert(u);
2096
2097 if (u->load_state != UNIT_LOADED)
2098 return -EINVAL;
2099
2100 if (!unit_can_reload(u))
2101 return -EBADR;
2102
2103 state = unit_active_state(u);
2104 if (IN_SET(state, UNIT_RELOADING, UNIT_REFRESHING))
2105                /* "refreshing" means some resources in the unit namespace are being updated. Unlike reload,
2106                 * the unit processes aren't made aware of refresh. Let's put the job back into the queue
2107                 * in both cases, as refresh typically takes place before reload and it's better to wait
2108                 * for it rather than failing. */
2109 return -EAGAIN;
2110
2111 if (state != UNIT_ACTIVE)
2112 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2113
2114 following = unit_following(u);
2115 if (following) {
2116 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2117 return unit_reload(following);
2118 }
2119
2120 /* Check to make sure the unit isn't frozen */
2121 if (u->freezer_state != FREEZER_RUNNING)
2122 return -EDEADLK;
2123
2124 unit_add_to_dbus_queue(u);
2125
2126 if (!UNIT_VTABLE(u)->reload) {
2127 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2128 unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
2129 return 0;
2130 }
2131
2132 return UNIT_VTABLE(u)->reload(u);
2133}
2134
2135bool unit_can_reload(Unit *u) {
2136 assert(u);
2137
2138 if (UNIT_VTABLE(u)->can_reload)
2139 return UNIT_VTABLE(u)->can_reload(u);
2140
2141 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2142 return true;
2143
2144 return UNIT_VTABLE(u)->reload;
2145}
2146
2147bool unit_is_unneeded(Unit *u) {
2148 Unit *other;
2149 assert(u);
2150
2151 if (!u->stop_when_unneeded)
2152 return false;
2153
2154 /* Don't clean up while the unit is transitioning or is even inactive. */
2155 if (unit_active_state(u) != UNIT_ACTIVE)
2156 return false;
2157 if (u->job)
2158 return false;
2159
2160 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2161 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2162 * restart, then don't clean this one up. */
2163
2164 if (other->job)
2165 return false;
2166
2167 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2168 return false;
2169
2170 if (unit_will_restart(other))
2171 return false;
2172 }
2173
2174 return true;
2175}
2176
2177bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2178 Unit *other;
2179
2180 assert(u);
2181
2182        /* Checks if the unit needs to be started because it currently is not running, but some other unit
2183         * that is active declared an Uphold= dependency on it. */
2184
2185 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2186 if (ret_culprit)
2187 *ret_culprit = NULL;
2188 return false;
2189 }
2190
2191 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2192 if (other->job)
2193 continue;
2194
2195 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2196 if (ret_culprit)
2197 *ret_culprit = other;
2198 return true;
2199 }
2200 }
2201
2202 if (ret_culprit)
2203 *ret_culprit = NULL;
2204 return false;
2205}
2206
2207bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2208 Unit *other;
2209
2210 assert(u);
2211
2212 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2213 * because the other unit is down. */
2214
2215 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2216 /* Don't clean up while the unit is transitioning or is even inactive. */
2217 if (ret_culprit)
2218 *ret_culprit = NULL;
2219 return false;
2220 }
2221
2222 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2223 if (other->job)
2224 continue;
2225
2226 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2227 if (ret_culprit)
2228 *ret_culprit = other;
2229
2230 return true;
2231 }
2232 }
2233
2234 if (ret_culprit)
2235 *ret_culprit = NULL;
2236 return false;
2237}
2238
2239static void check_unneeded_dependencies(Unit *u) {
2240 Unit *other;
2241 assert(u);
2242
2243 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2244
2245 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2246 unit_submit_to_stop_when_unneeded_queue(other);
2247}
2248
2249static void check_uphold_dependencies(Unit *u) {
2250 Unit *other;
2251 assert(u);
2252
2253 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2254
2255 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2256 unit_submit_to_start_when_upheld_queue(other);
2257}
2258
2259static void check_bound_by_dependencies(Unit *u) {
2260 Unit *other;
2261 assert(u);
2262
2263 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2264
2265 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2266 unit_submit_to_stop_when_bound_queue(other);
2267}
2268
2269static void retroactively_start_dependencies(Unit *u) {
2270 Unit *other;
2271
2272 assert(u);
2273 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2274
2275 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
2276 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2277 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2278 (void) manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2279
2280 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
2281 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
2282 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2283 (void) manager_add_job(u->manager, JOB_START, other, JOB_FAIL, /* error = */ NULL, /* ret = */ NULL);
2284
2285 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
2286 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2287 (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2288}
2289
2290static void retroactively_stop_dependencies(Unit *u) {
2291 Unit *other;
2292
2293 assert(u);
2294 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2295
2296        /* Recursively pull down units that are bound to us, if enabled */
2297 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2298 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2299 (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
2300}
2301
2302void unit_start_on_termination_deps(Unit *u, UnitDependencyAtom atom) {
2303 const char *dependency_name = NULL;
2304 JobMode job_mode;
2305 unsigned n_jobs = 0;
2306 int r;
2307
2308 /* Act on OnFailure= and OnSuccess= dependencies */
2309
2310 assert(u);
2311 assert(u->manager);
2312 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2313
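        /* Look up the entry matching this atom in on_termination_settings (a table defined earlier in
         * this file), which carries both the human-readable dependency name and the offset of the
         * corresponding JobMode field within Unit. */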
2314 FOREACH_ELEMENT(setting, on_termination_settings)
2315 if (atom == setting->atom) {
2316 job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);
2317 dependency_name = setting->dependency_name;
2318 break;
2319 }
2320
2321 assert(dependency_name);
2322
2323 Unit *other;
2324 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2325 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2326
2327 if (n_jobs == 0)
2328 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2329
2330 r = manager_add_job(u->manager, JOB_START, other, job_mode, &error, /* ret = */ NULL);
2331 if (r < 0)
2332 log_unit_warning_errno(u, r, "Failed to enqueue %s%s job, ignoring: %s",
2333 dependency_name, other->id, bus_error_message(&error, r));
2334 n_jobs++;
2335 }
2336
2337 if (n_jobs > 0)
2338 log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
2339 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2340}
2341
2342void unit_trigger_notify(Unit *u) {
2343 Unit *other;
2344
2345 assert(u);
2346
2347 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2348 if (UNIT_VTABLE(other)->trigger_notify)
2349 UNIT_VTABLE(other)->trigger_notify(other, u);
2350}
2351
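/* Bump the log level if the matching threshold was crossed; the NOTICE condition takes precedence
 * over the INFO one. */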
2352static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2353 if (condition_notice && log_level > LOG_NOTICE)
2354 return LOG_NOTICE;
2355 if (condition_info && log_level > LOG_INFO)
2356 return LOG_INFO;
2357 return log_level;
2358}
2359
2360static int unit_log_resources(Unit *u) {
2361
2362 static const struct {
2363 const char *journal_field;
2364 const char *message_suffix;
2365 } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
2366 [CGROUP_MEMORY_PEAK] = { "MEMORY_PEAK", "memory peak" },
2367 [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
2368 }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2369 [CGROUP_IP_INGRESS_BYTES] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
2370 [CGROUP_IP_EGRESS_BYTES] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
2371 [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL },
2372 [CGROUP_IP_EGRESS_PACKETS] = { "IP_METRIC_EGRESS_PACKETS", NULL },
2373 }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2374 [CGROUP_IO_READ_BYTES] = { "IO_METRIC_READ_BYTES", "read from disk" },
2375 [CGROUP_IO_WRITE_BYTES] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
2376 [CGROUP_IO_READ_OPERATIONS] = { "IO_METRIC_READ_OPERATIONS", NULL },
2377 [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL },
2378 };
2379
2380 struct iovec *iovec = NULL;
2381 size_t n_iovec = 0;
2382 _cleanup_free_ char *message = NULL, *t = NULL;
2383 nsec_t cpu_nsec = NSEC_INFINITY;
2384 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2385
2386 assert(u);
2387
2388 CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);
2389
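        /* Size for the worst case: the CPU field, every memory/IP/IO metric, plus four trailer fields
         * (MESSAGE=, MESSAGE_ID=, and the unit and invocation ID fields appended below). */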
2390 iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
2391 _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
2392 if (!iovec)
2393 return log_oom();
2394
2395        /* Invoked whenever a unit enters a failed or dead state. Logs information about consumed resources
2396         * if resource accounting was enabled for the unit. It does this in two ways: a friendly
2397         * human-readable string with reduced information and the complete data in structured fields. */
2398
2399 (void) unit_get_cpu_usage(u, &cpu_nsec);
2400 if (cpu_nsec != NSEC_INFINITY) {
2401 /* Format the CPU time for inclusion in the structured log message */
2402 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
2403 return log_oom();
2404 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2405
2406 /* Format the CPU time for inclusion in the human language message string */
2407 if (strextendf_with_separator(&message, ", ",
2408 "Consumed %s CPU time",
2409 FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
2410 return log_oom();
2411
2412 log_level = raise_level(log_level,
2413 cpu_nsec > MENTIONWORTHY_CPU_NSEC,
2414 cpu_nsec > NOTICEWORTHY_CPU_NSEC);
2415 }
2416
2417 for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
2418 uint64_t value = UINT64_MAX;
2419
2420 assert(memory_fields[metric].journal_field);
2421 assert(memory_fields[metric].message_suffix);
2422
2423 (void) unit_get_memory_accounting(u, metric, &value);
2424 if (value == UINT64_MAX)
2425 continue;
2426
2427 if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
2428 return log_oom();
2429 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2430
2431 /* If value is 0, we don't log it in the MESSAGE= field. */
2432 if (value == 0)
2433 continue;
2434
2435 if (strextendf_with_separator(&message, ", ", "%s %s",
2436 FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
2437 return log_oom();
2438
2439 log_level = raise_level(log_level,
2440 value > MENTIONWORTHY_MEMORY_BYTES,
2441 value > NOTICEWORTHY_MEMORY_BYTES);
2442 }
2443
2444 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2445 uint64_t value = UINT64_MAX;
2446
2447 assert(io_fields[k].journal_field);
2448
2449 (void) unit_get_io_accounting(u, k, &value);
2450 if (value == UINT64_MAX)
2451 continue;
2452
2453 /* Format IO accounting data for inclusion in the structured log message */
2454 if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
2455 return log_oom();
2456 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2457
2458 /* If value is 0, we don't log it in the MESSAGE= field. */
2459 if (value == 0)
2460 continue;
2461
2462 /* Format the IO accounting data for inclusion in the human language message string, but only
2463 * for the bytes counters (and not for the operations counters) */
2464 if (io_fields[k].message_suffix) {
2465 if (strextendf_with_separator(&message, ", ", "%s %s",
2466 FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
2467 return log_oom();
2468
2469 log_level = raise_level(log_level,
2470 value > MENTIONWORTHY_IO_BYTES,
2471 value > NOTICEWORTHY_IO_BYTES);
2472 }
2473 }
2474
2475 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2476 uint64_t value = UINT64_MAX;
2477
2478 assert(ip_fields[m].journal_field);
2479
2480 (void) unit_get_ip_accounting(u, m, &value);
2481 if (value == UINT64_MAX)
2482 continue;
2483
2484 /* Format IP accounting data for inclusion in the structured log message */
2485 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
2486 return log_oom();
2487 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2488
2489 /* If value is 0, we don't log it in the MESSAGE= field. */
2490 if (value == 0)
2491 continue;
2492
2493 /* Format the IP accounting data for inclusion in the human language message string, but only
2494 * for the bytes counters (and not for the packets counters) */
2495 if (ip_fields[m].message_suffix) {
2496 if (strextendf_with_separator(&message, ", ", "%s %s",
2497 FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
2498 return log_oom();
2499
2500 log_level = raise_level(log_level,
2501 value > MENTIONWORTHY_IP_BYTES,
2502 value > NOTICEWORTHY_IP_BYTES);
2503 }
2504 }
2505
2506 /* This check is here because it is the earliest point following all possible log_level assignments.
2507 * (If log_level is assigned anywhere after this point, move this check.) */
2508 if (!unit_log_level_test(u, log_level))
2509 return 0;
2510
2511 /* Is there any accounting data available at all? */
2512 if (n_iovec == 0) {
2513 assert(!message);
2514 return 0;
2515 }
2516
2517 t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
2518 if (!t)
2519 return log_oom();
2520 iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));
2521
2522 if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
2523 return log_oom();
2524
2525 if (!set_iovec_string_field(iovec, &n_iovec, unit_log_field(u), u->id))
2526 return log_oom();
2527
2528 if (!set_iovec_string_field(iovec, &n_iovec, unit_invocation_log_field(u), u->invocation_id_string))
2529 return log_oom();
2530
2531 log_unit_struct_iovec(u, log_level, iovec, n_iovec);
2532
2533 return 0;
2534}
2535
2536static void unit_update_on_console(Unit *u) {
2537 bool b;
2538
2539 assert(u);
2540
2541 b = unit_needs_console(u);
2542 if (u->on_console == b)
2543 return;
2544
2545 u->on_console = b;
2546 if (b)
2547 manager_ref_console(u->manager);
2548 else
2549 manager_unref_console(u->manager);
2550}
2551
2552static void unit_emit_audit_start(Unit *u) {
2553 assert(u);
2554
2555 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2556 return;
2557
2558 /* Write audit record if we have just finished starting up */
2559 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
2560 u->in_audit = true;
2561}
2562
2563static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2564 assert(u);
2565
2566 if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
2567 return;
2568
2569 if (u->in_audit) {
2570 /* Write audit record if we have just finished shutting down */
2571 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
2572 u->in_audit = false;
2573 } else {
2574                /* Hmm, if there was no start record written, write it now, so that we always have a nice pair. */
2575 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);
2576
2577 if (state == UNIT_INACTIVE)
2578 manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
2579 }
2580}
2581
2582static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
2583 bool unexpected = false;
2584 JobResult result;
2585
2586 assert(j);
2587
2588 if (j->state == JOB_WAITING)
2589 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2590 * due to EAGAIN. */
2591 job_add_to_run_queue(j);
2592
2593 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2594 * hence needs to invalidate jobs. */
2595
2596 switch (j->type) {
2597
2598 case JOB_START:
2599 case JOB_VERIFY_ACTIVE:
2600
2601 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2602 job_finish_and_invalidate(j, JOB_DONE, true, false);
2603 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2604 unexpected = true;
2605
2606 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2607 if (ns == UNIT_FAILED)
2608 result = JOB_FAILED;
2609 else
2610 result = JOB_DONE;
2611
2612 job_finish_and_invalidate(j, result, true, false);
2613 }
2614 }
2615
2616 break;
2617
2618 case JOB_RELOAD:
2619 case JOB_RELOAD_OR_START:
2620 case JOB_TRY_RELOAD:
2621
2622 if (j->state == JOB_RUNNING) {
2623 if (ns == UNIT_ACTIVE)
2624 job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2625 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING, UNIT_REFRESHING)) {
2626 unexpected = true;
2627
2628 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2629 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2630 }
2631 }
2632
2633 break;
2634
2635 case JOB_STOP:
2636 case JOB_RESTART:
2637 case JOB_TRY_RESTART:
2638
2639 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2640 job_finish_and_invalidate(j, JOB_DONE, true, false);
2641 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2642 unexpected = true;
2643 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2644 }
2645
2646 break;
2647
2648 default:
2649 assert_not_reached();
2650 }
2651
2652 return unexpected;
2653}
2654
2655static void unit_recursive_add_to_run_queue(Unit *u) {
2656 assert(u);
2657
2658 if (u->job)
2659 job_add_to_run_queue(u->job);
2660
2661 Unit *child;
2662 UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) {
2663
2664 if (!child->job)
2665 continue;
2666
2667 unit_recursive_add_to_run_queue(child);
2668 }
2669}
2670
2671static void unit_check_concurrency_limit(Unit *u) {
2672 assert(u);
2673
2674 Unit *slice = UNIT_GET_SLICE(u);
2675 if (!slice)
2676 return;
2677
2678 /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */
2679
2680 if (SLICE(slice)->concurrency_soft_max != UINT_MAX) {
2681 Unit *sibling;
2682 UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) {
2683 if (sibling == u)
2684 continue;
2685
2686 unit_recursive_add_to_run_queue(sibling);
2687 }
2688 }
2689
2690 /* Also go up the tree. */
2691 unit_check_concurrency_limit(slice);
2692}
2693
2694void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2695 assert(u);
2696 assert(os < _UNIT_ACTIVE_STATE_MAX);
2697 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2698
2699 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2700 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2701 * remounted this function will be called too! */
2702
2703 Manager *m = ASSERT_PTR(u->manager);
2704
2705        /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be
2706         * in the bus queue, so that any queued job change signal will force out the unit change signal first. */
2707 unit_add_to_dbus_queue(u);
2708
2709 /* Update systemd-oomd on the property/state change.
2710 *
2711 * Always send an update if the unit is going into an inactive state so systemd-oomd knows to
2712 * stop monitoring.
2713 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2714 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2715 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2716 * have the information on the property. Thus, indiscriminately send an update. */
2717 if (os != ns && (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns)))
2718 (void) manager_varlink_send_managed_oom_update(u);
2719
2720 /* Update timestamps for state changes */
2721 if (!MANAGER_IS_RELOADING(m)) {
2722 dual_timestamp_now(&u->state_change_timestamp);
2723
2724 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2725 u->inactive_exit_timestamp = u->state_change_timestamp;
2726 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2727 u->inactive_enter_timestamp = u->state_change_timestamp;
2728
2729 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2730 u->active_enter_timestamp = u->state_change_timestamp;
2731 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2732 u->active_exit_timestamp = u->state_change_timestamp;
2733 }
2734
2735 /* Keep track of failed units */
2736 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2737
2738 /* Make sure the cgroup and state files are always removed when we become inactive */
2739 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2740 SET_FLAG(u->markers,
2741 (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
2742 false);
2743 unit_prune_cgroup(u);
2744 unit_unlink_state_files(u);
2745 } else if (ns != os && ns == UNIT_RELOADING)
2746 SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);
2747
2748 unit_update_on_console(u);
2749
2750 if (!MANAGER_IS_RELOADING(m)) {
2751 bool unexpected;
2752
2753 /* Let's propagate state changes to the job */
2754 if (u->job)
2755 unexpected = unit_process_job(u->job, ns, reload_success);
2756 else
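                        /* No job is pending for this unit, so whatever state change we observed was by
                         * definition not requested by a job, i.e. "unexpected". */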
2757 unexpected = true;
2758
2759 /* If this state change happened without being requested by a job, then let's retroactively start or
2760 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2761 * additional jobs just because something is already activated. */
2762
2763 if (unexpected) {
2764 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2765 retroactively_start_dependencies(u);
2766 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2767 retroactively_stop_dependencies(u);
2768 }
2769
2770 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2771 /* This unit just finished starting up */
2772
2773 unit_emit_audit_start(u);
2774 manager_send_unit_plymouth(m, u);
2775 manager_send_unit_supervisor(m, u, /* active= */ true);
2776
2777 } else if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2778 /* This unit just stopped/failed. */
2779
2780 unit_emit_audit_stop(u, ns);
2781 manager_send_unit_supervisor(m, u, /* active= */ false);
2782 unit_log_resources(u);
2783 }
2784
2785 if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
2786 unit_start_on_termination_deps(u, UNIT_ATOM_ON_SUCCESS);
2787 else if (ns != os && ns == UNIT_FAILED)
2788 unit_start_on_termination_deps(u, UNIT_ATOM_ON_FAILURE);
2789 }
2790
2791 manager_recheck_journal(m);
2792 manager_recheck_dbus(m);
2793
2794 unit_trigger_notify(u);
2795
2796 if (!MANAGER_IS_RELOADING(m)) {
2797 const char *reason;
2798
2799 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2800 reason = strjoina("unit ", u->id, " failed");
2801 emergency_action(m, u->failure_action, EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2802 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2803 reason = strjoina("unit ", u->id, " succeeded");
2804 emergency_action(m, u->success_action, /* flags= */ 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2805 }
2806 }
2807
2808 /* And now, add the unit or depending units to various queues that will act on the new situation if
2809 * needed. These queues generally check for continuous state changes rather than events (like most of
2810 * the state propagation above), and do work deferred instead of instantly, since they typically
2811 * don't want to run during reloading, and usually involve checking combined state of multiple units
2812 * at once. */
2813
2814 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2815                /* Stop unneeded units and bound-by units regardless of whether going down was expected or not */
2816 check_unneeded_dependencies(u);
2817 check_bound_by_dependencies(u);
2818
2819 /* Maybe someone wants us to remain up? */
2820 unit_submit_to_start_when_upheld_queue(u);
2821
2822 /* Maybe the unit should be GC'ed now? */
2823 unit_add_to_gc_queue(u);
2824
2825 /* Maybe we can release some resources now? */
2826 unit_submit_to_release_resources_queue(u);
2827
2828 /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */
2829 unit_check_concurrency_limit(u);
2830
2831 /* Maybe someone else has been waiting for us to stop? */
2832 m->may_dispatch_stop_notify_queue = true;
2833
2834 } else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2835                /* Start uphold units regardless of whether going up was expected or not */
2836 check_uphold_dependencies(u);
2837
2838 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2839 unit_submit_to_stop_when_unneeded_queue(u);
2840
2841 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2842                 * when something sets BindsTo= on a Type=oneshot unit, as these units go directly from starting to
2843 * inactive, without ever entering started.) */
2844 unit_submit_to_stop_when_bound_queue(u);
2845 }
2846}
2847
2848int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
2849 _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
2850 int r;
2851
2852 /* Adds a specific PID to the set of PIDs this unit watches. */
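        /* Most PIDs are watched by exactly one unit, so the fast path is the plain watch_pids hashmap
         * (PidRef → Unit). Only when a second unit watches the same PID do we fall back to
         * watch_pids_more, which maps the PidRef to a NULL-terminated array of units. */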
2853
2854 assert(u);
2855 assert(pidref_is_set(pid));
2856
2857        /* The caller might be sure that this PID belongs to this unit only. Let's take this
2858         * opportunity to remove any stale references to this PID, as they can be created
2859         * easily (when watching a process which is not our direct child). */
2860 if (exclusive)
2861 manager_unwatch_pidref(u->manager, pid);
2862
2863 if (set_contains(u->pids, pid)) { /* early exit if already being watched */
2864 assert(!exclusive);
2865 return 0;
2866 }
2867
2868 r = pidref_dup(pid, &pid_dup);
2869 if (r < 0)
2870 return r;
2871
2872 /* First, insert into the set of PIDs maintained by the unit */
2873 r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
2874 if (r < 0)
2875 return r;
2876
2877 pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */
2878
2879 /* Second, insert it into the simple global table, see if that works */
2880 r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u);
2881 if (r != -EEXIST)
2882 return r;
2883
2884 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2885 * hashmap that points to an array. */
2886
2887 PidRef *old_pid = NULL;
2888 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);
2889
2890 /* Count entries in array */
2891 size_t n = 0;
2892 for (; array && array[n]; n++)
2893 ;
2894
2895 /* Allocate a new array */
2896 _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
2897 if (!new_array)
2898 return -ENOMEM;
2899
2900 /* Append us to the end */
2901 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2902 new_array[n] = u;
2903 new_array[n+1] = NULL;
2904
2905 /* Add or replace the old array */
2906 r = hashmap_ensure_replace(&u->manager->watch_pids_more, &pidref_hash_ops, old_pid ?: pid, new_array);
2907 if (r < 0)
2908 return r;
2909
2910 TAKE_PTR(new_array); /* Now part of the hash table */
2911 free(array); /* Which means we can now delete the old version */
2912 return 0;
2913}
2914
2915void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
2916 assert(u);
2917 assert(pidref_is_set(pid));
2918
2919 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2920 _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
2921 if (!pid1)
2922 return; /* Early exit if this PID was never watched by us */
2923
2924 /* First let's drop the unit from the simple hash table, if it is included there */
2925 PidRef *pid2 = NULL;
2926 Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);
2927
2928 /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
2929 assert((uu == u) == (pid1 == pid2));
2930
2931 if (uu == u)
2932 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2933 assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
2934 else {
2935 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2936 PidRef *pid3 = NULL;
2937 Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);
2938
2939 /* Let's iterate through the array, dropping our own entry */
2940 size_t m = 0, n = 0;
2941 for (; array && array[n]; n++)
2942 if (array[n] != u)
2943 array[m++] = array[n];
2944 if (n == m)
2945 return; /* Not there */
2946
2947 array[m] = NULL; /* set trailing NULL marker on the new end */
2948
2949 if (m == 0) {
2950 /* The array is now empty, remove the entire entry */
2951 assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
2952 free(array);
2953 } else {
2954 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2955 * we will delete, but by the PidRef object of the Unit that is now first in the
2956 * array. */
2957
2958 PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
2959 assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
2960 }
2961 }
2962}
2963
2964void unit_unwatch_all_pids(Unit *u) {
2965 assert(u);
2966
2967 while (!set_isempty(u->pids))
2968 unit_unwatch_pidref(u, set_first(u->pids));
2969
2970 u->pids = set_free(u->pids);
2971}
2972
2973void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
2974 assert(u);
2975
2976 if (!pidref_is_set(pidref))
2977 return;
2978
2979 unit_unwatch_pidref(u, pidref);
2980 pidref_done(pidref);
2981}
2982
2983bool unit_job_is_applicable(Unit *u, JobType j) {
2984 assert(u);
2985 assert(j >= 0 && j < _JOB_TYPE_MAX);
2986
2987 switch (j) {
2988
2989 case JOB_VERIFY_ACTIVE:
2990 case JOB_START:
2991 case JOB_NOP:
2992                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2993                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2994                 * jobs for them. */
2995 return true;
2996
2997 case JOB_STOP:
2998                /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2999 * external events), hence it makes no sense to permit enqueuing such a request either. */
3000 return !u->perpetual;
3001
3002 case JOB_RESTART:
3003 case JOB_TRY_RESTART:
3004 return unit_can_stop(u) && unit_can_start(u);
3005
3006 case JOB_RELOAD:
3007 case JOB_TRY_RELOAD:
3008 return unit_can_reload(u);
3009
3010 case JOB_RELOAD_OR_START:
3011 return unit_can_reload(u) && unit_can_start(u);
3012
3013 default:
3014 assert_not_reached();
3015 }
3016}
3017
3018static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
3019 Hashmap *deps;
3020
3021 assert(u);
3022 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3023
3024 deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
3025 if (!deps) {
3026 _cleanup_hashmap_free_ Hashmap *h = NULL;
3027
3028 h = hashmap_new(NULL);
3029 if (!h)
3030 return NULL;
3031
3032 if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
3033 return NULL;
3034
3035 deps = TAKE_PTR(h);
3036 }
3037
3038 return deps;
3039}
3040
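/* Flags returned by unit_add_dependency_impl(), indicating on which side(s) the dependency info
 * actually changed, and hence which unit(s) need a property change signal on the bus. */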
3041typedef enum NotifyDependencyFlags {
3042 NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0,
3043 NOTIFY_DEPENDENCY_UPDATE_TO = 1 << 1,
3044} NotifyDependencyFlags;
3045
3046static int unit_add_dependency_impl(
3047 Unit *u,
3048 UnitDependency d,
3049 Unit *other,
3050 UnitDependencyMask mask) {
3051
3052 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
3053 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
3054 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
3055 [UNIT_WANTS] = UNIT_WANTED_BY,
3056 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
3057 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
3058 [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
3059 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
3060 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
3061 [UNIT_WANTED_BY] = UNIT_WANTS,
3062 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
3063 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
3064 [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
3065 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
3066 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
3067 [UNIT_BEFORE] = UNIT_AFTER,
3068 [UNIT_AFTER] = UNIT_BEFORE,
3069 [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
3070 [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
3071 [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
3072 [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
3073 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
3074 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
3075 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
3076 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
3077 [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
3078 [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
3079 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
3080 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
3081 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
3082 [UNIT_IN_SLICE] = UNIT_SLICE_OF,
3083 [UNIT_SLICE_OF] = UNIT_IN_SLICE,
3084 };
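        /* Example: registering UNIT_WANTS (Wants=) on u simultaneously records the inverse
         * UNIT_WANTED_BY on the other unit, via the table above. */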
3085
3086 Hashmap *u_deps, *other_deps;
3087 UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
3088 NotifyDependencyFlags flags = 0;
3089 int r;
3090
3091 assert(u);
3092 assert(other);
3093 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3094 assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
3095 assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);
3096
3097 /* Ensure the following two hashmaps for each unit exist:
3098 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3099 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3100 u_deps = unit_get_dependency_hashmap_per_type(u, d);
3101 if (!u_deps)
3102 return -ENOMEM;
3103
3104 other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
3105 if (!other_deps)
3106 return -ENOMEM;
3107
3108 /* Save the original dependency info. */
3109 u_info.data = u_info_old.data = hashmap_get(u_deps, other);
3110 other_info.data = other_info_old.data = hashmap_get(other_deps, u);
3111
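        /* Note: UnitDependencyInfo packs the two dependency masks into a single void* (it is declared
         * as a union elsewhere in this codebase), which is why a hashmap value can be read into .data
         * above and then manipulated through .origin_mask/.destination_mask below. */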
3112 /* Update dependency info. */
3113 u_info.origin_mask |= mask;
3114 other_info.destination_mask |= mask;
3115
3116 /* Save updated dependency info. */
3117 if (u_info.data != u_info_old.data) {
3118 r = hashmap_replace(u_deps, other, u_info.data);
3119 if (r < 0)
3120 return r;
3121
3122 flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
3123 u->dependency_generation++;
3124 }
3125
3126 if (other_info.data != other_info_old.data) {
3127 r = hashmap_replace(other_deps, u, other_info.data);
3128 if (r < 0) {
3129 if (u_info.data != u_info_old.data) {
3130 /* Restore the old dependency. */
3131 if (u_info_old.data)
3132 (void) hashmap_update(u_deps, other, u_info_old.data);
3133 else
3134 hashmap_remove(u_deps, other);
3135 }
3136 return r;
3137 }
3138
3139 flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
3140 other->dependency_generation++;
3141 }
3142
3143 return flags;
3144}
3145
3146int unit_add_dependency(
3147 Unit *u,
3148 UnitDependency d,
3149 Unit *other,
3150 bool add_reference,
3151 UnitDependencyMask mask) {
3152
3153 UnitDependencyAtom a;
3154 int r;
3155
3156 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3157 * there, no need to notify! */
3158 NotifyDependencyFlags notify_flags;
3159
3160 assert(u);
3161 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
3162 assert(other);
3163
3164 u = unit_follow_merge(u);
3165 other = unit_follow_merge(other);
3166 a = unit_dependency_to_atom(d);
3167 assert(a >= 0);
3168
3169 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3170 if (u == other) {
3171 if (unit_should_warn_about_dependency(d))
3172 log_unit_warning(u, "Dependency %s=%s is dropped.",
3173 unit_dependency_to_string(d), u->id);
3174 return 0;
3175 }
3176
3177 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3178 return 0;
3179
3180        /* Note that ordering a device unit after another unit is permitted, since it allows its job
3181         * running timeout to be started at a specific time. */
3182 if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
3183                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed).", other->id);
3184 return 0;
3185 }
3186
3187 if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
3188 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
3189 return 0;
3190 }
3191
3192 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
3193 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3194 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
3195 if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
3196 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3197 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
3198
3199 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
3200 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3201 "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
3202 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
3203 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3204 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);
3205
3206 if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
3207 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3208 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);
3209
3210 if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
3211 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
3212 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);
3213
3214 r = unit_add_dependency_impl(u, d, other, mask);
3215 if (r < 0)
3216 return r;
3217 notify_flags = r;
3218
3219 if (add_reference) {
3220 r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
3221 if (r < 0)
3222 return r;
3223 notify_flags |= r;
3224 }
3225
3226 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
3227 unit_add_to_dbus_queue(u);
3228 if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
3229 unit_add_to_dbus_queue(other);
3230
3231 return notify_flags != 0;
3232}
3233
3234int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3235 int r = 0, s = 0;
3236
3237 assert(u);
3238 assert(d >= 0 || e >= 0);
3239
3240 if (d >= 0) {
3241 r = unit_add_dependency(u, d, other, add_reference, mask);
3242 if (r < 0)
3243 return r;
3244 }
3245
3246 if (e >= 0) {
3247 s = unit_add_dependency(u, e, other, add_reference, mask);
3248 if (s < 0)
3249 return s;
3250 }
3251
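        /* Returns > 0 iff at least one of the two dependencies was actually new. */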
3252 return r > 0 || s > 0;
3253}
3254
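/* If "name" is a template (e.g. "getty@.service"), instantiate it: use our own instance string if we
 * have one, otherwise fall back to our unit name prefix. The new name is stored in *buf and *ret
 * points to it; for non-template names, *buf stays NULL and *ret points to the input unchanged. */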
3255static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3256 int r;
3257
3258 assert(u);
3259 assert(name);
3260 assert(buf);
3261 assert(ret);
3262
3263 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3264 *buf = NULL;
3265 *ret = name;
3266 return 0;
3267 }
3268
3269 if (u->instance)
3270 r = unit_name_replace_instance(name, u->instance, buf);
3271 else {
3272 _cleanup_free_ char *i = NULL;
3273
3274 r = unit_name_to_prefix(u->id, &i);
3275 if (r < 0)
3276 return r;
3277
3278 r = unit_name_replace_instance(name, i, buf);
3279 }
3280 if (r < 0)
3281 return r;
3282
3283 *ret = *buf;
3284 return 0;
3285}
3286
3287int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3288 _cleanup_free_ char *buf = NULL;
3289 Unit *other;
3290 int r;
3291
3292 assert(u);
3293 assert(name);
3294
3295 r = resolve_template(u, name, &buf, &name);
3296 if (r < 0)
3297 return r;
3298
3299 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3300 return 0;
3301
3302 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3303 if (r < 0)
3304 return r;
3305
3306 return unit_add_dependency(u, d, other, add_reference, mask);
3307}
3308
3309int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3310 _cleanup_free_ char *buf = NULL;
3311 Unit *other;
3312 int r;
3313
3314 assert(u);
3315 assert(name);
3316
3317 r = resolve_template(u, name, &buf, &name);
3318 if (r < 0)
3319 return r;
3320
3321 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3322 return 0;
3323
3324 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3325 if (r < 0)
3326 return r;
3327
3328 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3329}
3330
3331int setenv_unit_path(const char *p) {
3332 assert(p);
3333
3334 /* This is mostly for debug purposes */
3335 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ true));
3336}
3337
3338char* unit_dbus_path(Unit *u) {
3339 assert(u);
3340
3341 if (!u->id)
3342 return NULL;
3343
3344 return unit_dbus_path_from_name(u->id);
3345}
3346
3347char* unit_dbus_path_invocation_id(Unit *u) {
3348 assert(u);
3349
3350 if (sd_id128_is_null(u->invocation_id))
3351 return NULL;
3352
3353 return unit_dbus_path_from_name(u->invocation_id_string);
3354}
3355
3356int unit_set_invocation_id(Unit *u, sd_id128_t id) {
3357 int r;
3358
3359 assert(u);
3360
3361        /* Set the invocation ID for this unit. If this fails, we don't roll back to the old ID, but reset the whole thing. */
3362
3363 if (sd_id128_equal(u->invocation_id, id))
3364 return 0;
3365
3366 if (!sd_id128_is_null(u->invocation_id))
3367 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
3368
3369 if (sd_id128_is_null(id)) {
3370 r = 0;
3371 goto reset;
3372 }
3373
3374 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
3375 if (r < 0)
3376 goto reset;
3377
3378 u->invocation_id = id;
3379 sd_id128_to_string(id, u->invocation_id_string);
3380
3381 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
3382 if (r < 0)
3383 goto reset;
3384
3385 return 0;
3386
3387reset:
3388 u->invocation_id = SD_ID128_NULL;
3389 u->invocation_id_string[0] = 0;
3390 return r;
3391}
3392
3393int unit_set_slice(Unit *u, Unit *slice) {
3394 int r;
3395
3396 assert(u);
3397 assert(slice);
3398
3399        /* Sets the unit slice if it has not been set before. Is extra careful to only allow this for units
3400         * that actually have a cgroup context. Also, we don't allow setting this for slices (since the parent
3401         * slice is derived from the name). Make sure the unit we set is actually a slice. */
3402
3403 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3404 return -EOPNOTSUPP;
3405
3406 if (u->type == UNIT_SLICE)
3407 return -EINVAL;
3408
3409 if (unit_active_state(u) != UNIT_INACTIVE)
3410 return -EBUSY;
3411
3412 if (slice->type != UNIT_SLICE)
3413 return -EINVAL;
3414
3415 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3416 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3417 return -EPERM;
3418
3419 if (UNIT_GET_SLICE(u) == slice)
3420 return 0;
3421
3422 /* Disallow slice changes if @u is already bound to cgroups */
3423 if (UNIT_GET_SLICE(u)) {
3424 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
3425 if (crt && crt->cgroup_path)
3426 return -EBUSY;
3427 }
3428
3429        /* Remove any previously assigned slice; we should only ever have one UNIT_IN_SLICE dependency */
3430 if (UNIT_GET_SLICE(u))
3431 unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);
3432
3433 r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
3434 if (r < 0)
3435 return r;
3436
3437 return 1;
3438}
3439
3440int unit_set_default_slice(Unit *u) {
3441 const char *slice_name;
3442 Unit *slice;
3443 int r;
3444
3445 assert(u);
3446
3447 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3448 return 0;
3449
3450 if (UNIT_GET_SLICE(u))
3451 return 0;
3452
3453 if (u->instance) {
3454 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3455
3456 /* Implicitly place all instantiated units in their
3457 * own per-template slice */
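                /* (E.g. under the system manager "getty@tty1.service" ends up in "system-getty.slice",
                 * under a user manager in "app-getty.slice".) */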
3458
3459 r = unit_name_to_prefix(u->id, &prefix);
3460 if (r < 0)
3461 return r;
3462
3463                /* The prefix is already escaped, but it might include
3464                 * "-", which has a special meaning for slice units,
3465                 * hence escape it once more here. */
3466 escaped = unit_name_escape(prefix);
3467 if (!escaped)
3468 return -ENOMEM;
3469
3470 if (MANAGER_IS_SYSTEM(u->manager))
3471 slice_name = strjoina("system-", escaped, ".slice");
3472 else
3473 slice_name = strjoina("app-", escaped, ".slice");
3474
3475 } else if (unit_is_extrinsic(u))
3476 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3477 * the root slice. They don't really belong in one of the subslices. */
3478 slice_name = SPECIAL_ROOT_SLICE;
3479
3480 else if (MANAGER_IS_SYSTEM(u->manager))
3481 slice_name = SPECIAL_SYSTEM_SLICE;
3482 else
3483 slice_name = SPECIAL_APP_SLICE;
3484
3485 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3486 if (r < 0)
3487 return r;
3488
3489 return unit_set_slice(u, slice);
3490}
3491
3492const char* unit_slice_name(Unit *u) {
3493 Unit *slice;
3494 assert(u);
3495
3496 slice = UNIT_GET_SLICE(u);
3497 if (!slice)
3498 return NULL;
3499
3500 return slice->id;
3501}
3502
3503int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3504 _cleanup_free_ char *t = NULL;
3505 int r;
3506
3507 assert(u);
3508 assert(type);
3509 assert(_found);
3510
3511 r = unit_name_change_suffix(u->id, type, &t);
3512 if (r < 0)
3513 return r;
3514 if (unit_has_name(u, t))
3515 return -EINVAL;
3516
3517 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3518 assert(r < 0 || *_found != u);
3519 return r;
3520}
3521
3522static int signal_name_owner_changed_install_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3523 Unit *u = ASSERT_PTR(userdata);
3524 const sd_bus_error *e;
3525 int r;
3526
3527 e = sd_bus_message_get_error(message);
3528 if (!e) {
3529 log_unit_trace(u, "Successfully installed NameOwnerChanged signal match.");
3530 return 0;
3531 }
3532
3533 r = sd_bus_error_get_errno(e);
3534 log_unit_error_errno(u, r,
3535 "Unexpected error response on installing NameOwnerChanged signal match: %s",
3536 bus_error_message(e, r));
3537
3538        /* If we failed to install the NameOwnerChanged signal match, also unref the bus slot of the GetNameOwner() call. */
3539 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3540 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3541
3542 if (UNIT_VTABLE(u)->bus_name_owner_change)
3543 UNIT_VTABLE(u)->bus_name_owner_change(u, NULL);
3544
3545 return 0;
3546}
3547
3548static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3549 const char *new_owner;
3550 Unit *u = ASSERT_PTR(userdata);
3551 int r;
3552
3553 assert(message);
3554
3555 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3556 if (r < 0) {
3557 bus_log_parse_error(r);
3558 return 0;
3559 }
3560
3561 if (UNIT_VTABLE(u)->bus_name_owner_change)
3562 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3563
3564 return 0;
3565}
3566
3567static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3568 const sd_bus_error *e;
3569 const char *new_owner;
3570 Unit *u = ASSERT_PTR(userdata);
3571 int r;
3572
3573 assert(message);
3574
3575 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3576
3577 e = sd_bus_message_get_error(message);
3578 if (e) {
3579 if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
3580 r = sd_bus_error_get_errno(e);
3581 log_unit_error_errno(u, r,
3582 "Unexpected error response from GetNameOwner(): %s",
3583 bus_error_message(e, r));
3584 }
3585
3586 new_owner = NULL;
3587 } else {
3588 r = sd_bus_message_read(message, "s", &new_owner);
3589 if (r < 0)
3590 return bus_log_parse_error(r);
3591
3592 assert(!isempty(new_owner));
3593 }
3594
3595 if (UNIT_VTABLE(u)->bus_name_owner_change)
3596 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3597
3598 return 0;
3599}
3600
3601int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3602 _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
3603 const char *match;
3604 usec_t timeout_usec = 0;
3605 int r;
3606
3607 assert(u);
3608 assert(bus);
3609 assert(name);
3610
3611 if (u->match_bus_slot || u->get_name_owner_slot)
3612 return -EBUSY;
3613
3614        /* NameOwnerChanged and GetNameOwner are used to detect when a service has finished starting up. The
3615         * D-Bus call timeout shouldn't expire earlier than that. If we couldn't determine the start timeout,
3616         * leave timeout_usec at 0, in which case sd-bus falls back to its default method call timeout. */
3617 if (UNIT_VTABLE(u)->get_timeout_start_usec)
3618 timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);
3619
3620 match = strjoina("type='signal',"
3621 "sender='org.freedesktop.DBus',"
3622 "path='/org/freedesktop/DBus',"
3623 "interface='org.freedesktop.DBus',"
3624 "member='NameOwnerChanged',"
3625 "arg0='", name, "'");
3626
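        /* Install the signal match first and only then issue GetNameOwner(), so that an ownership
         * change cannot slip through between the two calls. */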
3627 r = bus_add_match_full(
3628 bus,
3629 &u->match_bus_slot,
3630 /* asynchronous = */ true,
3631 match,
3632 signal_name_owner_changed,
3633 signal_name_owner_changed_install_handler,
3634 u,
3635 timeout_usec);
3636 if (r < 0)
3637 return r;
3638
3639 r = sd_bus_message_new_method_call(
3640 bus,
3641 &m,
3642 "org.freedesktop.DBus",
3643 "/org/freedesktop/DBus",
3644 "org.freedesktop.DBus",
3645 "GetNameOwner");
3646 if (r < 0)
3647 return r;
3648
3649 r = sd_bus_message_append(m, "s", name);
3650 if (r < 0)
3651 return r;
3652
3653 r = sd_bus_call_async(
3654 bus,
3655 &u->get_name_owner_slot,
3656 m,
3657 get_name_owner_handler,
3658 u,
3659 timeout_usec);
3660 if (r < 0) {
3661 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3662 return r;
3663 }
3664
3665 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3666 return 0;
3667}
3668
3669int unit_watch_bus_name(Unit *u, const char *name) {
3670 int r;
3671
3672 assert(u);
3673 assert(name);
3674
3675 /* Watch a specific name on the bus. We only support one unit
3676 * watching each name for now. */
3677
3678 if (u->manager->api_bus) {
3679                /* If the bus is already available, install the match directly.
3680                 * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
3681 r = unit_install_bus_match(u, u->manager->api_bus, name);
3682 if (r < 0)
3683 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3684 }
3685
3686 r = hashmap_put(u->manager->watch_bus, name, u);
3687 if (r < 0) {
3688 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3689 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3690                return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
3691 }
3692
3693 return 0;
3694}
3695
3696void unit_unwatch_bus_name(Unit *u, const char *name) {
3697 assert(u);
3698 assert(name);
3699
3700 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3701 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3702 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3703}
3704
3705int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3706 _cleanup_free_ char *e = NULL;
3707 Unit *device;
3708 int r;
3709
3710 assert(u);
3711
3712 /* Adds in links to the device node that this unit is based on */
3713 if (isempty(what))
3714 return 0;
3715
3716 if (!is_device_path(what))
3717 return 0;
3718
3719 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3720 if (!unit_type_supported(UNIT_DEVICE))
3721 return 0;
3722
3723 r = unit_name_from_path(what, ".device", &e);
3724 if (r < 0)
3725 return r;
3726
3727 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3728 if (r < 0)
3729 return r;
3730
3731 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3732 dep = UNIT_BINDS_TO;
3733
3734 return unit_add_two_dependencies(u, UNIT_AFTER,
3735 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3736 device, true, mask);
3737}
3738
3739int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3740 _cleanup_free_ char *escaped = NULL, *target = NULL;
3741 int r;
3742
3743 assert(u);
3744
3745 if (isempty(what))
3746 return 0;
3747
3748 if (!path_startswith(what, "/dev/"))
3749 return 0;
3750
3751 /* If we don't support devices, then also don't bother with blockdev@.target */
3752 if (!unit_type_supported(UNIT_DEVICE))
3753 return 0;
3754
3755 r = unit_name_path_escape(what, &escaped);
3756 if (r < 0)
3757 return r;
3758
3759 r = unit_name_build("blockdev", escaped, ".target", &target);
3760 if (r < 0)
3761 return r;
3762
3763 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3764}
3765
3766int unit_coldplug(Unit *u) {
3767 int r = 0;
3768
3769 assert(u);
3770
3771 /* Make sure we don't enter a loop, when coldplugging recursively. */
3772 if (u->coldplugged)
3773 return 0;
3774
3775 u->coldplugged = true;
3776
3777 STRV_FOREACH(i, u->deserialized_refs)
3778 RET_GATHER(r, bus_unit_track_add_name(u, *i));
3779
3780 u->deserialized_refs = strv_free(u->deserialized_refs);
3781
3782 if (UNIT_VTABLE(u)->coldplug)
3783 RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));
3784
3785 if (u->job)
3786 RET_GATHER(r, job_coldplug(u->job));
3787 if (u->nop_job)
3788 RET_GATHER(r, job_coldplug(u->nop_job));
3789
3790 unit_modify_nft_set(u, /* add = */ true);
3791 return r;
3792}
3793
3794void unit_catchup(Unit *u) {
3795 assert(u);
3796
3797 if (UNIT_VTABLE(u)->catchup)
3798 UNIT_VTABLE(u)->catchup(u);
3799
3800 unit_cgroup_catchup(u);
3801}
3802
3803static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3804 struct stat st;
3805
3806 if (!path)
3807 return false;
3808
3809 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3810 * are never out-of-date. */
3811 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3812 return false;
3813
3814 if (stat(path, &st) < 0)
3815 /* What, cannot access this anymore? */
3816 return true;
3817
3818 if (path_masked)
3819 /* For masked files check if they are still so */
3820 return !null_or_empty(&st);
3821 else
3822                /* For non-masked files check the mtime */
3823                return timespec_load(&st.st_mtim) > mtime;
3826}
3827
3828bool unit_need_daemon_reload(Unit *u) {
3829 assert(u);
3830 assert(u->manager);
3831
3832 if (u->manager->unit_file_state_outdated)
3833 return true;
3834
3835 /* For unit files, we allow masking… */
3836 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3837 u->load_state == UNIT_MASKED))
3838 return true;
3839
3840 /* Source paths should not be masked… */
3841 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3842 return true;
3843
3844 if (u->load_state == UNIT_LOADED) {
3845 _cleanup_strv_free_ char **dropins = NULL;
3846
3847 (void) unit_find_dropin_paths(u, /* use_unit_path_cache = */ false, &dropins);
3848
3849 if (!strv_equal(u->dropin_paths, dropins))
3850 return true;
3851
3852 /* … any drop-ins that are masked are simply omitted from the list. */
3853 STRV_FOREACH(path, u->dropin_paths)
3854 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3855 return true;
3856 }
3857
3858 return false;
3859}
3860
3861void unit_reset_failed(Unit *u) {
3862 assert(u);
3863
3864 if (UNIT_VTABLE(u)->reset_failed)
3865 UNIT_VTABLE(u)->reset_failed(u);
3866
3867 ratelimit_reset(&u->start_ratelimit);
3868 u->start_limit_hit = false;
3869
3870 (void) unit_set_debug_invocation(u, /* enable= */ false);
3871}
3872
3873Unit *unit_following(Unit *u) {
3874 assert(u);
3875
3876 if (UNIT_VTABLE(u)->following)
3877 return UNIT_VTABLE(u)->following(u);
3878
3879 return NULL;
3880}
3881
3882bool unit_stop_pending(Unit *u) {
3883 assert(u);
3884
3885        /* This call does not check the current state of the unit. It's
3886         * hence useful to be called from state change calls of the
3887         * unit itself, where the state isn't updated yet. This is
3888         * different from unit_inactive_or_pending() which checks both
3889         * the current state and for a queued job. */
3890
3891 return unit_has_job_type(u, JOB_STOP);
3892}
3893
3894bool unit_inactive_or_pending(Unit *u) {
3895 assert(u);
3896
3897 /* Returns true if the unit is inactive or going down */
3898
3899 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3900 return true;
3901
3902 if (unit_stop_pending(u))
3903 return true;
3904
3905 return false;
3906}
3907
3908bool unit_active_or_pending(Unit *u) {
3909 assert(u);
3910
3911 /* Returns true if the unit is active or going up */
3912
3913 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3914 return true;
3915
3916 if (u->job &&
3917 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3918 return true;
3919
3920 return false;
3921}
3922
3923bool unit_will_restart_default(Unit *u) {
3924 assert(u);
3925
3926 return unit_has_job_type(u, JOB_START);
3927}
3928
3929bool unit_will_restart(Unit *u) {
3930 assert(u);
3931
3932 if (!UNIT_VTABLE(u)->will_restart)
3933 return false;
3934
3935 return UNIT_VTABLE(u)->will_restart(u);
3936}
3937
3938void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
3939 assert(u);
3940
3941 if (UNIT_VTABLE(u)->notify_cgroup_oom)
3942 UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
3943}
3944
3945static int unit_pid_set(Unit *u, Set **pid_set) {
3946 int r;
3947
3948 assert(u);
3949 assert(pid_set);
3950
3951        set_clear(*pid_set); /* This updates the input set in place. */
3952
3953 /* Exclude the main/control pids from being killed via the cgroup */
3954
3955 PidRef *pid;
3956 FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
3957 if (pidref_is_set(pid)) {
3958 r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
3959 if (r < 0)
3960 return r;
3961 }
3962
3963 return 0;
3964}
3965
3966static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
3967 _cleanup_free_ char *comm = NULL;
3968 Unit *u = ASSERT_PTR(userdata);
3969
3970 (void) pidref_get_comm(pid, &comm);
3971
3972 log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
3973 signal_to_string(signo), pid->pid, strna(comm));
3974
3975 return 1;
3976}
3977
3978static int kill_or_sigqueue(PidRef *pidref, int signo, int code, int value) {
3979 assert(pidref_is_set(pidref));
3980 assert(SIGNAL_VALID(signo));
3981
3982 switch (code) {
3983
3984 case SI_USER:
3985 log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
3986 return pidref_kill(pidref, signo);
3987
3988 case SI_QUEUE:
3989 log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
3990 return pidref_sigqueue(pidref, signo, value);
3991
3992 default:
3993 assert_not_reached();
3994 }
3995}
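
/* Background note: SI_USER corresponds to plain kill(2), i.e. the receiver sees si_code == SI_USER,
 * while SI_QUEUE corresponds to sigqueue(3), which additionally delivers the "value" argument in the
 * si_value field of the receiver's siginfo_t. */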
3996
3997static int unit_kill_one(
3998 Unit *u,
3999 PidRef *pidref,
4000 const char *type,
4001 int signo,
4002 int code,
4003 int value,
4004 sd_bus_error *ret_error) {
4005
4006 int r;
4007
4008 assert(u);
4009 assert(type);
4010
4011 if (!pidref_is_set(pidref))
4012 return 0;
4013
4014 _cleanup_free_ char *comm = NULL;
4015 (void) pidref_get_comm(pidref, &comm);
4016
4017 r = kill_or_sigqueue(pidref, signo, code, value);
4018 if (r == -ESRCH)
4019 return 0;
4020 if (r < 0) {
4021 /* Report this failure both to the logs and to the client */
4022 if (ret_error)
4023 sd_bus_error_set_errnof(
4024 ret_error, r,
4025 "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
4026 signal_to_string(signo), type, pidref->pid, strna(comm));
4027
4028 return log_unit_warning_errno(
4029 u, r,
4030 "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
4031 signal_to_string(signo), type, pidref->pid, strna(comm));
4032 }
4033
4034 log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
4035 signal_to_string(signo), type, pidref->pid, strna(comm));
4036 return 1; /* killed */
4037}
4038
4039int unit_kill(
4040 Unit *u,
4041 KillWhom whom,
4042 int signo,
4043 int code,
4044 int value,
4045 sd_bus_error *ret_error) {
4046
4047 PidRef *main_pid, *control_pid;
4048 bool killed = false;
4049 int ret = 0, r;
4050
4051 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4052 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4053 * stop a service ourselves. */
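
        /* Concretely (illustrative): a "systemctl kill -s SIGUSR1 foo.service" request arrives here
         * roughly as unit_kill(u, KILL_ALL, SIGUSR1, SI_USER, 0, &error), while the QueueSignal()
         * D-Bus call additionally passes SI_QUEUE and a value to be delivered via sigqueue(). */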
4054
4055 assert(u);
4056 assert(whom >= 0);
4057 assert(whom < _KILL_WHOM_MAX);
4058 assert(SIGNAL_VALID(signo));
4059 assert(IN_SET(code, SI_USER, SI_QUEUE));
4060
4061 main_pid = unit_main_pid(u);
4062 control_pid = unit_control_pid(u);
4063
4064 if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
4065 return sd_bus_error_setf(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");
4066
4067 if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL)) {
4068 if (!main_pid)
4069 return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4070 if (!pidref_is_set(main_pid))
4071 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4072 }
4073
4074 if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4075 if (!control_pid)
4076 return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4077 if (!pidref_is_set(control_pid))
4078 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4079 }
4080
4081 if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4082 r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
4083 RET_GATHER(ret, r);
4084 killed = killed || r > 0;
4085 }
4086
4087 if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
4088 r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
4089 RET_GATHER(ret, r);
4090 killed = killed || r > 0;
4091 }
4092
4093                /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4094                 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4095                 * resource, we shouldn't let ourselves be abused for such allocation sprees). */
4096 if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL) && code == SI_USER) {
4097 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4098 if (crt && crt->cgroup_path) {
4099 _cleanup_set_free_ Set *pid_set = NULL;
4100
4101 if (signo == SIGKILL) {
4102 r = cg_kill_kernel_sigkill(crt->cgroup_path);
4103 if (r >= 0) {
4104 killed = true;
4105 log_unit_info(u, "Killed unit cgroup with SIGKILL on client request.");
4106 goto finish;
4107 }
4108 if (r != -EOPNOTSUPP) {
4109 if (ret >= 0)
4110 sd_bus_error_set_errnof(ret_error, r,
4111 "Failed to kill unit cgroup: %m");
4112 RET_GATHER(ret, log_unit_warning_errno(u, r, "Failed to kill unit cgroup: %m"));
4113 goto finish;
4114 }
4115 /* Fall back to manual enumeration */
4116 } else {
4117 /* Exclude the main/control pids from being killed via the cgroup if
4118 * not SIGKILL */
4119 r = unit_pid_set(u, &pid_set);
4120 if (r < 0)
4121 return log_oom();
4122 }
4123
4124 r = cg_kill_recursive(crt->cgroup_path, signo, 0, pid_set, kill_common_log, u);
4125 if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
4126 if (ret >= 0)
4127 sd_bus_error_set_errnof(
4128 ret_error, r,
4129 "Failed to send signal SIG%s to auxiliary processes: %m",
4130 signal_to_string(signo));
4131
4132 RET_GATHER(ret, log_unit_warning_errno(
4133 u, r,
4134 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4135 signal_to_string(signo)));
4136 }
4137 killed = killed || r >= 0;
4138 }
4139 }
4140
4141finish:
4142 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4143 if (ret >= 0 && !killed && IN_SET(whom, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
4144 return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");
4145
4146 return ret;
4147}
4148
4149int unit_following_set(Unit *u, Set **s) {
4150 assert(u);
4151 assert(s);
4152
4153 if (UNIT_VTABLE(u)->following_set)
4154 return UNIT_VTABLE(u)->following_set(u, s);
4155
4156 *s = NULL;
4157 return 0;
4158}
4159
4160UnitFileState unit_get_unit_file_state(Unit *u) {
4161 int r;
4162
4163 assert(u);
4164
4165 if (u->unit_file_state >= 0 || !u->fragment_path)
4166 return u->unit_file_state;
4167
4168        /* If we know this is a transient unit, there's no need to ask the unit file state for details. Let's
4169         * bypass the more expensive on-disk check. */
4170 if (u->transient)
4171 return (u->unit_file_state = UNIT_FILE_TRANSIENT);
4172
4173 r = unit_file_get_state(
4174 u->manager->runtime_scope,
4175 /* root_dir= */ NULL,
4176 u->id,
4177 &u->unit_file_state);
4178 if (r < 0)
4179 u->unit_file_state = UNIT_FILE_BAD;
4180
4181 return u->unit_file_state;
4182}
4183
4184PresetAction unit_get_unit_file_preset(Unit *u) {
4185 int r;
4186
4187 assert(u);
4188
4189 if (u->unit_file_preset >= 0)
4190 return u->unit_file_preset;
4191
4192 /* If this is a transient or perpetual unit file it doesn't make much sense to ask the preset
4193 * database about this, because enabling/disabling makes no sense for either. Hence don't. */
4194 if (!u->fragment_path || u->transient || u->perpetual)
4195 return (u->unit_file_preset = -ENOEXEC);
4196
4197 _cleanup_free_ char *bn = NULL;
4198 r = path_extract_filename(u->fragment_path, &bn);
4199 if (r < 0)
4200 return (u->unit_file_preset = r);
4201 if (r == O_DIRECTORY)
4202 return (u->unit_file_preset = -EISDIR);
4203
4204 return (u->unit_file_preset = unit_file_query_preset(
4205 u->manager->runtime_scope,
4206 /* root_dir= */ NULL,
4207 bn,
4208 /* cached= */ NULL));
4209}
4210
4211Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4212 assert(ref);
4213 assert(source);
4214 assert(target);
4215
4216 if (ref->target)
4217 unit_ref_unset(ref);
4218
4219 ref->source = source;
4220 ref->target = target;
4221 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4222 return target;
4223}
4224
4225void unit_ref_unset(UnitRef *ref) {
4226 assert(ref);
4227
4228 if (!ref->target)
4229 return;
4230
4231 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4232 * be unreferenced now. */
4233 unit_add_to_gc_queue(ref->target);
4234
4235 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4236 ref->source = ref->target = NULL;
4237}
4238
4239static int user_from_unit_name(Unit *u, char **ret) {
4240
4241 static const uint8_t hash_key[] = {
4242 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4243 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4244 };
4245
4246 _cleanup_free_ char *n = NULL;
4247 int r;
4248
4249 r = unit_name_to_prefix(u->id, &n);
4250 if (r < 0)
4251 return r;
4252
4253 if (valid_user_group_name(n, 0)) {
4254 *ret = TAKE_PTR(n);
4255 return 0;
4256 }
4257
4258 /* If we can't use the unit name as a user name, then let's hash it and use that */
4259 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4260 return -ENOMEM;
4261
4262 return 0;
4263}
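
/* Example (made-up hash value): for a unit whose name prefix is not a valid user name, say
 * "foo.bar.service" (the prefix "foo.bar" contains a '.'), the synthesized DynamicUser= user name
 * would be "_du" followed by 16 hex digits of the keyed siphash, e.g. "_du1a2b3c4d5e6f7081". */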
4264
4265static int unit_verify_contexts(const Unit *u) {
4266 assert(u);
4267
4268 const ExecContext *ec = unit_get_exec_context(u);
4269 if (!ec)
4270 return 0;
4271
4272 if (MANAGER_IS_USER(u->manager) && ec->dynamic_user)
4273 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "DynamicUser= enabled for user unit, which is not supported. Refusing.");
4274
4275 if (ec->dynamic_user && ec->working_directory_home)
4276 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory=~ is not allowed under DynamicUser=yes. Refusing.");
4277
4278 if (ec->working_directory && path_below_api_vfs(ec->working_directory) &&
4279 exec_needs_mount_namespace(ec, /* params = */ NULL, /* runtime = */ NULL))
4280 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory= may not be below /proc/, /sys/ or /dev/ when using mount namespacing. Refusing.");
4281
4282 if (exec_needs_pid_namespace(ec, /* params= */ NULL) && !UNIT_VTABLE(u)->notify_pidref)
4283 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "PrivatePIDs= setting is only supported for service units. Refusing.");
4284
4285 const KillContext *kc = unit_get_kill_context(u);
4286
4287 if (ec->pam_name && kc && !IN_SET(kc->kill_mode, KILL_CONTROL_GROUP, KILL_MIXED))
4288 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit has PAM enabled. Kill mode must be set to 'control-group' or 'mixed'. Refusing.");
4289
4290 return 0;
4291}
4292
4293static PrivateTmp unit_get_private_var_tmp(const Unit *u, const ExecContext *c) {
4294 assert(u);
4295 assert(c);
4296 assert(c->private_tmp >= 0 && c->private_tmp < _PRIVATE_TMP_MAX);
4297
4298 /* Disable disconnected private tmpfs on /var/tmp/ when DefaultDependencies=no and
4299         * RootImage=/RootDirectory= are not set, as /var/ may be a separate partition.
4300 * See issue #37258. */
4301
4302 /* PrivateTmp=yes/no also enables/disables private tmpfs on /var/tmp/. */
4303 if (c->private_tmp != PRIVATE_TMP_DISCONNECTED)
4304 return c->private_tmp;
4305
4306 /* When DefaultDependencies=yes, disconnected tmpfs is also enabled on /var/tmp/, and an explicit
4307 * dependency to the mount on /var/ will be added in unit_add_exec_dependencies(). */
4308 if (u->default_dependencies)
4309 return PRIVATE_TMP_DISCONNECTED;
4310
4311 /* When RootImage=/RootDirectory= is enabled, /var/ should be prepared by the image or directory,
4312 * hence we can mount a disconnected tmpfs on /var/tmp/. */
4313 if (exec_context_with_rootfs(c))
4314 return PRIVATE_TMP_DISCONNECTED;
4315
4316 /* Even if DefaultDependencies=no, enable disconnected tmpfs when
4317 * RequiresMountsFor=/WantsMountsFor=/var/ is explicitly set. */
4318 for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; t++)
4319 if (hashmap_contains(u->mounts_for[t], "/var/"))
4320 return PRIVATE_TMP_DISCONNECTED;
4321
4322 /* Check the same but for After= with Requires=/Requisite=/Wants= or friends. */
4323 Unit *m = manager_get_unit(u->manager, "var.mount");
4324 if (!m)
4325 return PRIVATE_TMP_NO;
4326
4327 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, m))
4328 return PRIVATE_TMP_NO;
4329
4330 if (unit_has_dependency(u, UNIT_ATOM_PULL_IN_START, m) ||
4331 unit_has_dependency(u, UNIT_ATOM_PULL_IN_VERIFY, m) ||
4332 unit_has_dependency(u, UNIT_ATOM_PULL_IN_START_IGNORED, m))
4333 return PRIVATE_TMP_DISCONNECTED;
4334
4335 return PRIVATE_TMP_NO;
4336}
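
/* Summarized (illustrative): a unit with PrivateTmp=disconnected, DefaultDependencies=no and no
 * RootImage=/RootDirectory= only keeps the disconnected tmpfs on /var/tmp/ if it mentions /var/ in
 * RequiresMountsFor=/WantsMountsFor=, or both orders itself after var.mount and pulls it in;
 * otherwise /var/tmp/ is left alone. */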
4337
4338int unit_patch_contexts(Unit *u) {
4339 CGroupContext *cc;
4340 ExecContext *ec;
4341 int r;
4342
4343 assert(u);
4344
4345 /* Patch in the manager defaults into the exec and cgroup
4346 * contexts, _after_ the rest of the settings have been
4347 * initialized */
4348
4349 ec = unit_get_exec_context(u);
4350 if (ec) {
4351 /* This only copies in the ones that need memory */
4352 for (unsigned i = 0; i < _RLIMIT_MAX; i++)
4353 if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
4354 ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
4355 if (!ec->rlimit[i])
4356 return -ENOMEM;
4357 }
4358
4359 if (MANAGER_IS_USER(u->manager) && !ec->working_directory) {
4360 r = get_home_dir(&ec->working_directory);
4361 if (r < 0)
4362 return r;
4363
4364 if (!ec->working_directory_home)
4365 /* If home directory is implied by us, allow it to be missing. */
4366 ec->working_directory_missing_ok = true;
4367 }
4368
4369 if (ec->private_devices)
4370 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4371
4372 if (ec->protect_kernel_modules)
4373 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4374
4375 if (ec->protect_kernel_logs)
4376 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4377
4378 if (ec->protect_clock)
4379 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4380
4381 if (ec->dynamic_user) {
4382 if (!ec->user) {
4383 r = user_from_unit_name(u, &ec->user);
4384 if (r < 0)
4385 return r;
4386 }
4387
4388 if (!ec->group) {
4389 ec->group = strdup(ec->user);
4390 if (!ec->group)
4391 return -ENOMEM;
4392 }
4393
4394 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4395 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4396 * sandbox. */
4397
4398 /* With DynamicUser= we want private directories, so if the user hasn't manually
4399 * selected PrivateTmp=, enable it, but to a fully private (disconnected) tmpfs
4400 * instance. */
4401 if (ec->private_tmp == PRIVATE_TMP_NO)
4402 ec->private_tmp = PRIVATE_TMP_DISCONNECTED;
4403 ec->remove_ipc = true;
4404 ec->protect_system = PROTECT_SYSTEM_STRICT;
4405 if (ec->protect_home == PROTECT_HOME_NO)
4406 ec->protect_home = PROTECT_HOME_READ_ONLY;
4407
4408 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4409 * them. */
4410 ec->no_new_privileges = true;
4411 ec->restrict_suid_sgid = true;
4412 }
4413
4414 ec->private_var_tmp = unit_get_private_var_tmp(u, ec);
4415
4416 FOREACH_ARRAY(d, ec->directories, _EXEC_DIRECTORY_TYPE_MAX)
4417 exec_directory_sort(d);
4418 }
4419
4420 cc = unit_get_cgroup_context(u);
4421 if (cc && ec) {
4422
4423 if (ec->private_devices &&
4424 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4425 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4426
4427 /* Only add these if needed, as they imply that everything else is blocked. */
4428 if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
4429 if (ec->root_image || ec->mount_images) {
4430
4431 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4432 FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
4433 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
4434 if (r < 0)
4435 return r;
4436 }
4437 FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
4438 r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
4439 if (r < 0)
4440 return r;
4441 }
4442
4443 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4444 * Same for mapper and verity. */
4445 FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4446 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
4447 if (r < 0)
4448 return r;
4449 }
4450 }
4451
4452 if (ec->protect_clock) {
4453 r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
4454 if (r < 0)
4455 return r;
4456 }
4457 }
4458 }
4459
4460 return unit_verify_contexts(u);
4461}
4462
4463ExecContext *unit_get_exec_context(const Unit *u) {
4464 size_t offset;
4465 assert(u);
4466
4467 if (u->type < 0)
4468 return NULL;
4469
4470 offset = UNIT_VTABLE(u)->exec_context_offset;
4471 if (offset <= 0)
4472 return NULL;
4473
4474 return (ExecContext*) ((uint8_t*) u + offset);
4475}
4476
4477KillContext *unit_get_kill_context(const Unit *u) {
4478 size_t offset;
4479 assert(u);
4480
4481 if (u->type < 0)
4482 return NULL;
4483
4484 offset = UNIT_VTABLE(u)->kill_context_offset;
4485 if (offset <= 0)
4486 return NULL;
4487
4488 return (KillContext*) ((uint8_t*) u + offset);
4489}
4490
4491CGroupContext *unit_get_cgroup_context(const Unit *u) {
4492 size_t offset;
4493
4494 if (u->type < 0)
4495 return NULL;
4496
4497 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4498 if (offset <= 0)
4499 return NULL;
4500
4501 return (CGroupContext*) ((uint8_t*) u + offset);
4502}
4503
4504ExecRuntime *unit_get_exec_runtime(const Unit *u) {
4505 size_t offset;
4506
4507 if (u->type < 0)
4508 return NULL;
4509
4510 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4511 if (offset <= 0)
4512 return NULL;
4513
4514 return *(ExecRuntime**) ((uint8_t*) u + offset);
4515}
4516
4517CGroupRuntime *unit_get_cgroup_runtime(const Unit *u) {
4518 size_t offset;
4519
4520 if (u->type < 0)
4521 return NULL;
4522
4523 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
4524 if (offset <= 0)
4525 return NULL;
4526
4527 return *(CGroupRuntime**) ((uint8_t*) u + offset);
4528}
4529
4530static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4531 assert(u);
4532
4533 if (UNIT_WRITE_FLAGS_NOOP(flags))
4534 return NULL;
4535
4536 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4537 return u->manager->lookup_paths.transient;
4538
4539 if (flags & UNIT_PERSISTENT)
4540 return u->manager->lookup_paths.persistent_control;
4541
4542 if (flags & UNIT_RUNTIME)
4543 return u->manager->lookup_paths.runtime_control;
4544
4545 return NULL;
4546}
4547
4548const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4549 assert(s);
4550 assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
4551 assert(buf);
4552
4553 _cleanup_free_ char *t = NULL;
4554
4555 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4556 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4557 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4558 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4559 * allocations. */
4560
4561 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4562 t = specifier_escape(s);
4563 if (!t)
4564 return NULL;
4565
4566 s = t;
4567 }
4568
4569 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4570 * ExecStart= and friends, i.e. '$' and quotes. */
4571
4572 if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
4573 char *t2;
4574
4575 if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
4576 t2 = strreplace(s, "$", "$$");
4577 if (!t2)
4578 return NULL;
4579 free_and_replace(t, t2);
4580 }
4581
4582 t2 = shell_escape(t ?: s, "\"");
4583 if (!t2)
4584 return NULL;
4585 free_and_replace(t, t2);
4586
4587 s = t;
4588
4589 } else if (flags & UNIT_ESCAPE_C) {
4590 char *t2;
4591
4592 t2 = cescape(s);
4593 if (!t2)
4594 return NULL;
4595 free_and_replace(t, t2);
4596
4597 s = t;
4598 }
4599
4600 *buf = TAKE_PTR(t);
4601 return s;
4602}
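
/* Typical caller pattern, as used e.g. by unit_write_setting() below (sketch; "setting" is a
 * stand-in variable):
 *
 *     _cleanup_free_ char *buf = NULL;
 *     const char *escaped = unit_escape_setting(setting, UNIT_ESCAPE_SPECIFIERS, &buf);
 *     if (!escaped)
 *             return -ENOMEM;
 *
 * "escaped" points either into the original string or into "buf"; the latter is freed automatically
 * when it goes out of scope. */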
4603
4604char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4605 _cleanup_free_ char *result = NULL;
4606 size_t n = 0;
4607
4608 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4609 * lines in a way suitable for ExecStart= stanzas. */
4610
4611 STRV_FOREACH(i, l) {
4612 _cleanup_free_ char *buf = NULL;
4613 const char *p;
4614 size_t a;
4615 char *q;
4616
4617 p = unit_escape_setting(*i, flags, &buf);
4618 if (!p)
4619 return NULL;
4620
4621 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4622 if (!GREEDY_REALLOC(result, n + a + 1))
4623 return NULL;
4624
4625 q = result + n;
4626 if (n > 0)
4627 *(q++) = ' ';
4628
4629 *(q++) = '"';
4630 q = stpcpy(q, p);
4631 *(q++) = '"';
4632
4633 n += a;
4634 }
4635
4636 if (!GREEDY_REALLOC(result, n + 1))
4637 return NULL;
4638
4639 result[n] = 0;
4640
4641 return TAKE_PTR(result);
4642}
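
/* For example (illustrative input): l = { "/bin/echo", "hello world" } yields the single string
 *
 *     "/bin/echo" "hello world"
 *
 * i.e. every entry quoted and entries separated by single spaces, which is suitable as the
 * right-hand side of an ExecStart= assignment. */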
4643
4644int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4645 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4646 const char *dir, *wrapped;
4647 int r;
4648
4649 assert(u);
4650 assert(name);
4651 assert(data);
4652
4653 if (UNIT_WRITE_FLAGS_NOOP(flags))
4654 return 0;
4655
4656 data = unit_escape_setting(data, flags, &escaped);
4657 if (!data)
4658 return -ENOMEM;
4659
4660        /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this
4661         * if the previous section header is the same. */
4662
4663 if (flags & UNIT_PRIVATE) {
4664 if (!UNIT_VTABLE(u)->private_section)
4665 return -EINVAL;
4666
4667 if (!u->transient_file || u->last_section_private < 0)
4668 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4669 else if (u->last_section_private == 0)
4670 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4671 } else {
4672 if (!u->transient_file || u->last_section_private < 0)
4673 data = strjoina("[Unit]\n", data);
4674 else if (u->last_section_private > 0)
4675 data = strjoina("\n[Unit]\n", data);
4676 }
4677
4678 if (u->transient_file) {
4679 /* When this is a transient unit file in creation, then let's not create a new drop-in,
4680 * but instead write to the transient unit file. */
4681 fputs_with_newline(u->transient_file, data);
4682
4683 /* Remember which section we wrote this entry to */
4684 u->last_section_private = !!(flags & UNIT_PRIVATE);
4685 return 0;
4686 }
4687
4688 dir = unit_drop_in_dir(u, flags);
4689 if (!dir)
4690 return -EINVAL;
4691
4692 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4693 "# or an equivalent operation. Do not edit.\n",
4694 data,
4695 "\n");
4696
4697 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4698 if (r < 0)
4699 return r;
4700
4701 (void) mkdir_p_label(p, 0755);
4702
4703 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4704 * recreate the cache after every drop-in we write. */
4705 if (u->manager->unit_path_cache) {
4706 r = set_put_strdup_full(&u->manager->unit_path_cache, &path_hash_ops_free, p);
4707 if (r < 0)
4708 return r;
4709 }
4710
4711 r = write_string_file(q, wrapped, WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_ATOMIC|WRITE_STRING_FILE_LABEL);
4712 if (r < 0)
4713 return r;
4714
4715 r = strv_push(&u->dropin_paths, q);
4716 if (r < 0)
4717 return r;
4718 q = NULL;
4719
4720 strv_uniq(u->dropin_paths);
4721
4722 u->dropin_mtime = now(CLOCK_REALTIME);
4723
4724 return 0;
4725}
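
/* Sketch of the effect (example values): for the system unit "foo.service", UNIT_PERSISTENT and
 * name="CPUQuota", this writes
 *
 *     /etc/systemd/system.control/foo.service.d/50-CPUQuota.conf
 *
 * consisting of the "do not edit" header, a [Service] (or [Unit]) section header and the setting
 * itself, and records the new path in u->dropin_paths so that unit_need_daemon_reload() stays
 * accurate. */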
4726
4727int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4728 _cleanup_free_ char *p = NULL;
4729 va_list ap;
4730 int r;
4731
4732 assert(u);
4733 assert(name);
4734 assert(format);
4735
4736 if (UNIT_WRITE_FLAGS_NOOP(flags))
4737 return 0;
4738
4739 va_start(ap, format);
4740 r = vasprintf(&p, format, ap);
4741 va_end(ap);
4742
4743 if (r < 0)
4744 return -ENOMEM;
4745
4746 return unit_write_setting(u, flags, name, p);
4747}
4748
4749int unit_make_transient(Unit *u) {
4750 _cleanup_free_ char *path = NULL;
4751 FILE *f;
4752
4753 assert(u);
4754
4755 if (!UNIT_VTABLE(u)->can_transient)
4756 return -EOPNOTSUPP;
4757
4758 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4759
4760 path = path_join(u->manager->lookup_paths.transient, u->id);
4761 if (!path)
4762 return -ENOMEM;
4763
4764        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we
4765         * are creating the transient unit, and is closed in unit_load() as soon as we start loading the file. */
4766
4767 WITH_UMASK(0022) {
4768 f = fopen(path, "we");
4769 if (!f)
4770 return -errno;
4771 }
4772
4773 safe_fclose(u->transient_file);
4774 u->transient_file = f;
4775
4776 free_and_replace(u->fragment_path, path);
4777
4778 u->source_path = mfree(u->source_path);
4779 u->dropin_paths = strv_free(u->dropin_paths);
4780 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4781
4782 u->load_state = UNIT_STUB;
4783 u->load_error = 0;
4784 u->transient = true;
4785
4786 unit_add_to_dbus_queue(u);
4787 unit_add_to_gc_queue(u);
4788
4789 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4790 u->transient_file);
4791
4792 return 0;
4793}
4794
4795static bool ignore_leftover_process(const char *comm) {
4796 return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
4797}
4798
4799static int log_kill(const PidRef *pid, int sig, void *userdata) {
4800 const Unit *u = ASSERT_PTR(userdata);
4801 _cleanup_free_ char *comm = NULL;
4802
4803 assert(pidref_is_set(pid));
4804
4805 (void) pidref_get_comm(pid, &comm);
4806
4807 if (ignore_leftover_process(comm))
4808                /* Although we didn't log anything, as this callback is used in unit_kill_context() we must
4809                 * return 1 here to let the manager know that a process was killed. */
4810 return 1;
4811
4812 log_unit_notice(u,
4813 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4814 pid->pid,
4815 strna(comm),
4816 signal_to_string(sig));
4817
4818 return 1;
4819}
4820
4821static int operation_to_signal(
4822 const KillContext *c,
4823 KillOperation k,
4824 bool *ret_noteworthy) {
4825
4826 assert(c);
4827 assert(ret_noteworthy);
4828
4829 switch (k) {
4830
4831 case KILL_TERMINATE:
4832 case KILL_TERMINATE_AND_LOG:
4833 *ret_noteworthy = false;
4834 return c->kill_signal;
4835
4836 case KILL_RESTART:
4837 *ret_noteworthy = false;
4838 return restart_kill_signal(c);
4839
4840 case KILL_KILL:
4841 *ret_noteworthy = true;
4842 return c->final_kill_signal;
4843
4844 case KILL_WATCHDOG:
4845 *ret_noteworthy = true;
4846 return c->watchdog_signal;
4847
4848 default:
4849 assert_not_reached();
4850 }
4851}
4852
4853static int unit_kill_context_one(
4854 Unit *u,
4855 const PidRef *pidref,
4856 const char *type,
4857 bool is_alien,
4858 int sig,
4859 bool send_sighup,
4860 cg_kill_log_func_t log_func) {
4861
4862 int r;
4863
4864 assert(u);
4865 assert(type);
4866
4867 /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */
4868
4869 if (!pidref_is_set(pidref))
4870 return 0;
4871
4872 if (log_func)
4873 log_func(pidref, sig, u);
4874
4875 r = pidref_kill_and_sigcont(pidref, sig);
4876 if (r == -ESRCH)
4877 return !is_alien;
4878 if (r < 0) {
4879 _cleanup_free_ char *comm = NULL;
4880
4881 (void) pidref_get_comm(pidref, &comm);
4882 return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
4883 }
4884
4885 if (send_sighup)
4886 (void) pidref_kill(pidref, SIGHUP);
4887
4888 return !is_alien;
4889}
4890
4891int unit_kill_context(Unit *u, KillOperation k) {
4892 bool wait_for_exit = false, send_sighup;
4893 cg_kill_log_func_t log_func = NULL;
4894 int sig, r;
4895
4896 assert(u);
4897
4898 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4899         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill(), which
4900         * is used for user-requested killing of unit processes. */
4901
4902 KillContext *c = unit_get_kill_context(u);
4903 if (!c || c->kill_mode == KILL_NONE)
4904 return 0;
4905
4906 bool noteworthy;
4907 sig = operation_to_signal(c, k, &noteworthy);
4908 if (noteworthy)
4909 log_func = log_kill;
4910
4911 send_sighup =
4912 c->send_sighup &&
4913 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4914 sig != SIGHUP;
4915
4916 bool is_alien;
4917 PidRef *main_pid = unit_main_pid_full(u, &is_alien);
4918 r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
4919 wait_for_exit = wait_for_exit || r > 0;
4920
4921 r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
4922 wait_for_exit = wait_for_exit || r > 0;
4923
4924 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
4925 if (crt && crt->cgroup_path &&
4926 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4927 _cleanup_set_free_ Set *pid_set = NULL;
4928
4929 /* Exclude the main/control pids from being killed via the cgroup */
4930 r = unit_pid_set(u, &pid_set);
4931 if (r < 0)
4932 return r;
4933
4934 r = cg_kill_recursive(
4935 crt->cgroup_path,
4936 sig,
4937 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4938 pid_set,
4939 log_func, u);
4940 if (r < 0) {
4941 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4942 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path));
4943
4944 } else if (r > 0) {
4945
4946 wait_for_exit = true;
4947
4948 if (send_sighup) {
4949 r = unit_pid_set(u, &pid_set);
4950 if (r < 0)
4951 return r;
4952
4953 (void) cg_kill_recursive(
4954 crt->cgroup_path,
4955 SIGHUP,
4956 CGROUP_IGNORE_SELF,
4957 pid_set,
4958 /* log_kill= */ NULL,
4959 /* userdata= */ NULL);
4960 }
4961 }
4962 }
4963
4964 return wait_for_exit;
4965}
4966
4967int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
4968 Hashmap **unit_map, **manager_map;
4969 int r;
4970
4971 assert(u);
4972 assert(path);
4973 assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);
4974
4975 unit_map = &u->mounts_for[type];
4976 manager_map = &u->manager->units_needing_mounts_for[type];
4977
4978 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4979         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
4980         * dependency came to be). In addition, we build a prefix table for all possible prefixes so that
4981         * newly appearing mount units can easily determine which units to make themselves a dependency of. */
4982
4983 if (!path_is_absolute(path))
4984 return -EINVAL;
4985
4986 if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
4987 return 0;
4988
4989 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4990 * only after simplification, since path_is_normalized() rejects paths with '.'.
4991 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4992 _cleanup_free_ char *p = NULL;
4993 r = path_simplify_alloc(path, &p);
4994 if (r < 0)
4995 return r;
4996 path = p;
4997
4998 if (!path_is_normalized(path))
4999 return -EPERM;
5000
5001 UnitDependencyInfo di = {
5002 .origin_mask = mask
5003 };
5004
5005 r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
5006 if (r < 0)
5007 return r;
5008 assert(r > 0);
5009 TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */
5010
5011 char prefix[strlen(path) + 1];
5012 PATH_FOREACH_PREFIX_MORE(prefix, path) {
5013 Set *x;
5014
5015 x = hashmap_get(*manager_map, prefix);
5016 if (!x) {
5017 _cleanup_free_ char *q = NULL;
5018
5019 r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
5020 if (r < 0)
5021 return r;
5022
5023 q = strdup(prefix);
5024 if (!q)
5025 return -ENOMEM;
5026
5027 x = set_new(NULL);
5028 if (!x)
5029 return -ENOMEM;
5030
5031 r = hashmap_put(*manager_map, q, x);
5032 if (r < 0) {
5033 set_free(x);
5034 return r;
5035 }
5036 q = NULL;
5037 }
5038
5039 r = set_put(x, u);
5040 if (r < 0)
5041 return r;
5042 }
5043
5044 return 0;
5045}
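
/* Example: registering "/var/lib/foo" for a unit stores that path in u->mounts_for[type], and adds
 * the unit to the manager's prefix sets for "/", "/var", "/var/lib" and "/var/lib/foo", so that a
 * newly appearing var-lib.mount can cheaply find every unit that needs it. */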
5046
5047int unit_setup_exec_runtime(Unit *u) {
5048 _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
5049 _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
5050 _cleanup_set_free_ Set *units = NULL;
5051 ExecRuntime **rt;
5052 ExecContext *ec;
5053 size_t offset;
5054 Unit *other;
5055 int r;
5056
5057 offset = UNIT_VTABLE(u)->exec_runtime_offset;
5058 assert(offset > 0);
5059
5060        /* Check if there already is an ExecRuntime for this unit. */
5061 rt = (ExecRuntime**) ((uint8_t*) u + offset);
5062 if (*rt)
5063 return 0;
5064
5065 ec = ASSERT_PTR(unit_get_exec_context(u));
5066
5067 r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
5068 if (r < 0)
5069 return r;
5070
5071 /* Try to get it from somebody else */
5072 SET_FOREACH(other, units) {
5073 r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
5074 if (r < 0)
5075 return r;
5076 if (r > 0)
5077 break;
5078 }
5079
5080 if (!esr) {
5081 r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
5082 if (r < 0)
5083 return r;
5084 }
5085
5086 if (ec->dynamic_user) {
5087 r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
5088 if (r < 0)
5089 return r;
5090 }
5091
5092 r = exec_runtime_make(u, ec, esr, dcreds, rt);
5093 if (r < 0)
5094 return r;
5095
5096 TAKE_PTR(esr);
5097 TAKE_PTR(dcreds);
5098
5099 return r;
5100}
5101
5102CGroupRuntime *unit_setup_cgroup_runtime(Unit *u) {
5103 size_t offset;
5104
5105 assert(u);
5106
5107 offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
5108 assert(offset > 0);
5109
5110 CGroupRuntime **rt = (CGroupRuntime**) ((uint8_t*) u + offset);
5111 if (*rt)
5112 return *rt;
5113
5114 return (*rt = cgroup_runtime_new());
5115}
5116
5117bool unit_type_supported(UnitType t) {
5118        static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
5119 int r;
5120
5121 assert(t >= 0 && t < _UNIT_TYPE_MAX);
5122
5123 if (cache[t] == 0) {
5124 char *e;
5125
5126 e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));
5127
5128 r = getenv_bool(ascii_strupper(e));
5129 if (r < 0 && r != -ENXIO)
5130 log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);
5131
5132 cache[t] = r == 0 ? -1 : 1;
5133 }
5134 if (cache[t] < 0)
5135 return false;
5136
5137 if (!unit_vtable[t]->supported)
5138 return true;
5139
5140 return unit_vtable[t]->supported();
5141}
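
/* Example: starting the manager with SYSTEMD_SUPPORT_DEVICE=0 in its environment force-disables all
 * device units (handy in containers); any other value, or an unset variable, defers to the unit
 * type's own supported() callback. */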
5142
5143void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5144 int r;
5145
5146 assert(u);
5147 assert(where);
5148
5149 if (!unit_log_level_test(u, LOG_NOTICE))
5150 return;
5151
5152 r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
5153 if (r > 0 || r == -ENOTDIR)
5154 return;
5155 if (r < 0) {
5156 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5157 return;
5158 }
5159
5160 log_unit_struct(u, LOG_NOTICE,
5161 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING_STR),
5162 LOG_UNIT_INVOCATION_ID(u),
5163 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5164 LOG_ITEM("WHERE=%s", where));
5165}
5166
5167int unit_log_noncanonical_mount_path(Unit *u, const char *where) {
5168 assert(u);
5169 assert(where);
5170
5171 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5172 log_unit_struct(u, LOG_ERR,
5173 LOG_MESSAGE_ID(SD_MESSAGE_NON_CANONICAL_MOUNT_STR),
5174 LOG_UNIT_INVOCATION_ID(u),
5175 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5176 LOG_ITEM("WHERE=%s", where));
5177
5178 return -ELOOP;
5179}
5180
5181int unit_fail_if_noncanonical_mount_path(Unit *u, const char* where) {
5182 int r;
5183
5184 assert(u);
5185 assert(where);
5186
5187 _cleanup_free_ char *canonical_where = NULL;
5188 r = chase(where, /* root= */ NULL, CHASE_NONEXISTENT, &canonical_where, /* ret_fd= */ NULL);
5189 if (r < 0) {
5190 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5191 return 0;
5192 }
5193
5194 /* We will happily ignore a trailing slash (or any redundant slashes) */
5195 if (path_equal(where, canonical_where))
5196 return 0;
5197
5198 return unit_log_noncanonical_mount_path(u, where);
5199}
5200
5201bool unit_is_pristine(Unit *u) {
5202 assert(u);
5203
5204 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5205 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5206 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5207 *
5208 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5209 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5210 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5211 */
5212
5213 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5214 !u->fragment_path &&
5215 !u->source_path &&
5216 !u->job &&
5217 !u->merged_into;
5218}
5219
5220PidRef* unit_control_pid(Unit *u) {
5221 assert(u);
5222
5223 if (UNIT_VTABLE(u)->control_pid)
5224 return UNIT_VTABLE(u)->control_pid(u);
5225
5226 return NULL;
5227}
5228
5229PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
5230 assert(u);
5231
5232 if (UNIT_VTABLE(u)->main_pid)
5233 return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);
5234
5235 if (ret_is_alien)
5236 *ret_is_alien = false;
5237 return NULL;
5238}
5239
5240static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
5241 int r;
5242
5243 assert(u);
5244
5245 if (!MANAGER_IS_SYSTEM(u->manager))
5246 return;
5247
5248 CGroupContext *c;
5249 c = unit_get_cgroup_context(u);
5250 if (!c)
5251 return;
5252
5253 if (!u->manager->fw_ctx) {
5254 r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
5255 if (r < 0)
5256 return;
5257
5258 assert(u->manager->fw_ctx);
5259 }
5260
5261 FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
5262 if (nft_set->source != source)
5263 continue;
5264
5265 r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
5266 if (r < 0)
5267 log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5268 add? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5269 else
5270 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5271 add? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
5272 }
5273}
5274
5275static void unit_unref_uid_internal(
5276 Unit *u,
5277 uid_t *ref_uid,
5278 bool destroy_now,
5279 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5280
5281 assert(u);
5282 assert(ref_uid);
5283 assert(_manager_unref_uid);
5284
5285 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5286         * gid_t are actually the same type, with the same validity rules.
5287 *
5288 * Drops a reference to UID/GID from a unit. */
5289
5290 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5291 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5292
5293 if (!uid_is_valid(*ref_uid))
5294 return;
5295
5296 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5297 *ref_uid = UID_INVALID;
5298}
5299
5300static void unit_unref_uid(Unit *u, bool destroy_now) {
5301 assert(u);
5302
5303 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);
5304
5305 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5306}
5307
5308static void unit_unref_gid(Unit *u, bool destroy_now) {
5309 assert(u);
5310
5311 unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);
5312
5313 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5314}
5315
5316void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5317 assert(u);
5318
5319 unit_unref_uid(u, destroy_now);
5320 unit_unref_gid(u, destroy_now);
5321}
5322
5323static int unit_ref_uid_internal(
5324 Unit *u,
5325 uid_t *ref_uid,
5326 uid_t uid,
5327 bool clean_ipc,
5328 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5329
5330 int r;
5331
5332 assert(u);
5333 assert(ref_uid);
5334 assert(uid_is_valid(uid));
5335 assert(_manager_ref_uid);
5336
5337        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5338 * are actually the same type, and have the same validity rules.
5339 *
5340 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5341 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5342 * drops to zero. */
5343
5344 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5345 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5346
5347 if (*ref_uid == uid)
5348 return 0;
5349
5350 if (uid_is_valid(*ref_uid)) /* Already set? */
5351 return -EBUSY;
5352
5353 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5354 if (r < 0)
5355 return r;
5356
5357 *ref_uid = uid;
5358 return 1;
5359}
5360
5361static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5362 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5363}
5364
5365static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5366 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5367}
5368
5369static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5370 int r = 0, q = 0;
5371
5372 assert(u);
5373
5374 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5375
5376 if (uid_is_valid(uid)) {
5377 r = unit_ref_uid(u, uid, clean_ipc);
5378 if (r < 0)
5379 return r;
5380 }
5381
5382 if (gid_is_valid(gid)) {
5383 q = unit_ref_gid(u, gid, clean_ipc);
5384 if (q < 0) {
5385 if (r > 0)
5386 unit_unref_uid(u, false);
5387
5388 return q;
5389 }
5390 }
5391
5392 return r > 0 || q > 0;
5393}
5394
5395int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5396 ExecContext *c;
5397 int r;
5398
5399 assert(u);
5400
5401 c = unit_get_exec_context(u);
5402
5403 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5404 if (r < 0)
5405 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5406
5407 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
5408 unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);
5409
5410 return r;
5411}
5412
5413void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5414 int r;
5415
5416 assert(u);
5417
5418        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group name
5419         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5420 * objects when no service references the UID/GID anymore. */
5421
5422 r = unit_ref_uid_gid(u, uid, gid);
5423 if (r > 0)
5424 unit_add_to_dbus_queue(u);
5425}
5426
5427int unit_acquire_invocation_id(Unit *u) {
5428 sd_id128_t id;
5429 int r;
5430
5431 assert(u);
5432
5433 r = sd_id128_randomize(&id);
5434 if (r < 0)
5435 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5436
5437 r = unit_set_invocation_id(u, id);
5438 if (r < 0)
5439 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5440
5441 unit_add_to_dbus_queue(u);
5442 return 0;
5443}
5444
5445int unit_set_exec_params(Unit *u, ExecParameters *p) {
5446 int r;
5447
5448 assert(u);
5449 assert(p);
5450
5451 /* Copy parameters from manager */
5452 r = manager_get_effective_environment(u->manager, &p->environment);
5453 if (r < 0)
5454 return r;
5455
5456 p->runtime_scope = u->manager->runtime_scope;
5457
5458 r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager));
5459 if (r < 0)
5460 return r;
5461
5462 p->cgroup_supported = u->manager->cgroup_supported;
5463 p->prefix = u->manager->prefix;
5464 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5465
5466 /* Copy parameters from unit */
5467 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5468 p->cgroup_path = crt ? crt->cgroup_path : NULL;
5469 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5470
5471 p->received_credentials_directory = u->manager->received_credentials_directory;
5472 p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;
5473
5474 p->shall_confirm_spawn = u->manager->confirm_spawn;
5475
5476 p->fallback_smack_process_label = u->manager->defaults.smack_process_label;
5477
5478 if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
5479 int fd = bpf_restrict_fs_map_fd(u);
5480 if (fd < 0)
5481 return fd;
5482
5483 p->bpf_restrict_fs_map_fd = fd;
5484 }
5485
5486 p->user_lookup_fd = u->manager->user_lookup_fds[1];
5487 p->handoff_timestamp_fd = u->manager->handoff_timestamp_fds[1];
5488 if (UNIT_VTABLE(u)->notify_pidref)
5489 p->pidref_transport_fd = u->manager->pidref_transport_fds[1];
5490
5491 p->cgroup_id = crt ? crt->cgroup_id : 0;
5492 p->invocation_id = u->invocation_id;
5493 sd_id128_to_string(p->invocation_id, p->invocation_id_string);
5494 p->unit_id = strdup(u->id);
5495 if (!p->unit_id)
5496 return -ENOMEM;
5497
5498 p->debug_invocation = u->debug_invocation;
5499
5500 return 0;
5501}
5502
5503int unit_fork_helper_process(Unit *u, const char *name, bool into_cgroup, PidRef *ret) {
5504 CGroupRuntime *crt = NULL;
5505 pid_t pid;
5506 int r;
5507
5508 assert(u);
5509 assert(ret);
5510
5511 /* Forks off a helper process and makes sure it is a member of the unit's cgroup, if configured to
5512         * do so. Returns == 0 in the child, and > 0 in the parent. The ret parameter is always filled in
5513         * with a PidRef for the child. */
5514
5515 if (into_cgroup) {
5516 (void) unit_realize_cgroup(u);
5517
5518 crt = unit_setup_cgroup_runtime(u);
5519 if (!crt)
5520 return -ENOMEM;
5521 }
5522
5523 r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
5524 if (r < 0)
5525 return r;
5526 if (r > 0) {
5527 _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
5528 int q;
5529
5530 /* Parent */
5531
5532 q = pidref_set_pid(&pidref, pid);
5533 if (q < 0)
5534 return q;
5535
5536 *ret = TAKE_PIDREF(pidref);
5537 return r;
5538 }
5539
5540 /* Child */
5541
5542 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
5543 (void) ignore_signals(SIGPIPE);
5544
5545 if (crt && crt->cgroup_path) {
5546 r = cg_attach(crt->cgroup_path, 0);
5547 if (r < 0) {
5548 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path));
5549 _exit(EXIT_CGROUP);
5550 }
5551 }
5552
5553 return 0;
5554}
5555
5556int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
5557 _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
5558 int r;
5559
5560 assert(u);
5561 assert(ret_pid);
5562
5563 r = unit_fork_helper_process(u, "(sd-rmrf)", /* into_cgroup= */ true, &pid);
5564 if (r < 0)
5565 return r;
5566 if (r == 0) {
5567 int ret = EXIT_SUCCESS;
5568
5569 STRV_FOREACH(i, paths) {
5570 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5571 if (r < 0) {
5572 log_error_errno(r, "Failed to remove '%s': %m", *i);
5573 ret = EXIT_FAILURE;
5574 }
5575 }
5576
5577 _exit(ret);
5578 }
5579
5580 r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
5581 if (r < 0)
5582 return r;
5583
5584 *ret_pid = TAKE_PIDREF(pid);
5585 return 0;
5586}
5587
5588static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5589 assert(deps);
5590 assert(other);
5591
5592 if (di.origin_mask == 0 && di.destination_mask == 0)
5593 /* No bit set anymore, let's drop the whole entry */
5594 assert_se(hashmap_remove(deps, other));
5595 else
5596 /* Mask was reduced, let's update the entry */
5597 assert_se(hashmap_update(deps, other, di.data) == 0);
5598}
5599
5600void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5601 Hashmap *deps;
5602 assert(u);
5603
5604 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5605
5606 if (mask == 0)
5607 return;
5608
5609 HASHMAP_FOREACH(deps, u->dependencies) {
5610 bool done;
5611
5612 do {
5613 UnitDependencyInfo di;
5614 Unit *other;
5615
5616 done = true;
5617
5618 HASHMAP_FOREACH_KEY(di.data, other, deps) {
5619 Hashmap *other_deps;
5620
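/* Editor's note: FLAGS_SET(~mask, di.origin_mask) holds exactly when di.origin_mask and 'mask'
 * share no bits, i.e. when this dependency is not owned by any of the masks being removed. */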
5621 if (FLAGS_SET(~mask, di.origin_mask))
5622 continue;
5623
5624 di.origin_mask &= ~mask;
5625 unit_update_dependency_mask(deps, other, di);
5626
5627 /* We updated the dependency from our unit to the other unit now. But most
5628 * dependencies imply a reverse dependency. Hence, let's delete that one
5629 * too. For that we go through all dependency types on the other unit and
5630 * delete all those which point to us and have the right mask set. */
5631
5632 HASHMAP_FOREACH(other_deps, other->dependencies) {
5633 UnitDependencyInfo dj;
5634
5635 dj.data = hashmap_get(other_deps, u);
5636 if (FLAGS_SET(~mask, dj.destination_mask))
5637 continue;
5638
5639 dj.destination_mask &= ~mask;
5640 unit_update_dependency_mask(other_deps, u, dj);
5641 }
5642
5643 unit_add_to_gc_queue(other);
5644
5645 /* The unit 'other' may not be wanted by the unit 'u'. */
5646 unit_submit_to_stop_when_unneeded_queue(other);
5647
5648 u->dependency_generation++;
5649 other->dependency_generation++;
5650
5651 done = false;
5652 break;
5653 }
5654
5655 } while (!done);
5656 }
5657}
5658
5659static int unit_get_invocation_path(Unit *u, char **ret) {
5660 char *p;
5661 int r;
5662
5663 assert(u);
5664 assert(ret);
5665
5666 if (MANAGER_IS_SYSTEM(u->manager))
5667 p = strjoin("/run/systemd/units/invocation:", u->id);
5668 else {
5669 _cleanup_free_ char *user_path = NULL;
5670
5671 r = xdg_user_runtime_dir("/systemd/units/invocation:", &user_path);
5672 if (r < 0)
5673 return r;
5674
5675 p = strjoin(user_path, u->id);
5676 }
5677 if (!p)
5678 return -ENOMEM;
5679
5680 *ret = p;
5681 return 0;
5682}
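
/* Editor's note: for the system manager this yields e.g. "/run/systemd/units/invocation:foo.service";
 * for a user manager the link lives below $XDG_RUNTIME_DIR, e.g.
 * "/run/user/1000/systemd/units/invocation:foo.service" (UID 1000 being just an example). */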
5683
5684static int unit_export_invocation_id(Unit *u) {
5685 _cleanup_free_ char *p = NULL;
5686 int r;
5687
5688 assert(u);
5689
5690 if (u->exported_invocation_id)
5691 return 0;
5692
5693 if (sd_id128_is_null(u->invocation_id))
5694 return 0;
5695
5696 r = unit_get_invocation_path(u, &p);
5697 if (r < 0)
5698 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5699
5700 r = symlinkat_atomic_full(u->invocation_id_string, AT_FDCWD, p, SYMLINK_LABEL);
5701 if (r < 0)
5702 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5703
5704 u->exported_invocation_id = true;
5705 return 0;
5706}
5707
5708static int unit_export_log_level_max(Unit *u, int log_level_max, bool overwrite) {
5709 const char *p;
5710 char buf[2];
5711 int r;
5712
5713 assert(u);
5714
5715 /* When the debug_invocation logic runs, overwrite will be true as we always want to switch the max
5716 * log level that the journal applies, and we want to always restore the previous level once done */
5717
5718 if (!overwrite && u->exported_log_level_max)
5719 return 0;
5720
5721 if (log_level_max < 0)
5722 return 0;
5723
5724 assert(log_level_max <= 7);
5725
5726 buf[0] = '0' + log_level_max;
5727 buf[1] = 0;
5728
5729 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5730 r = symlink_atomic(buf, p);
5731 if (r < 0)
5732 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5733
5734 u->exported_log_level_max = true;
5735 return 0;
5736}
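
/* Editor's sketch: because the maximum log level is stored as the symlink *target*, a consumer such as
 * journald can recover it with a single readlink(), no open()/read()/close() needed. The helper below is
 * hypothetical and only illustrates the encoding. */
#if 0
static int read_exported_log_level_max(const char *unit_id, int *ret_level) {
_cleanup_free_ char *p = strjoin("/run/systemd/units/log-level-max:", unit_id);
char buf[2];
ssize_t n;

if (!p)
return -ENOMEM;

n = readlink(p, buf, sizeof(buf));
if (n < 0)
return -errno;
if (n != 1 || buf[0] < '0' || buf[0] > '7')
return -EINVAL;

*ret_level = buf[0] - '0';
return 0;
}
#endif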
5737
5738static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5739 _cleanup_close_ int fd = -EBADF;
5740 struct iovec *iovec;
5741 const char *p;
5742 char *pattern;
5743 le64_t *sizes;
5744 ssize_t n;
5745 int r;
5746
5747 if (u->exported_log_extra_fields)
5748 return 0;
5749
5750 if (c->n_log_extra_fields <= 0)
5751 return 0;
5752
5753 sizes = newa(le64_t, c->n_log_extra_fields);
5754 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5755
5756 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
5757 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5758
5759 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5760 iovec[i*2+1] = c->log_extra_fields[i];
5761 }
5762
5763 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5764 pattern = strjoina(p, ".XXXXXX");
5765
5766 fd = mkostemp_safe(pattern);
5767 if (fd < 0)
5768 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5769
5770 n = writev(fd, iovec, c->n_log_extra_fields*2);
5771 if (n < 0) {
5772 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5773 goto fail;
5774 }
5775
5776 (void) fchmod(fd, 0644);
5777
5778 if (rename(pattern, p) < 0) {
5779 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5780 goto fail;
5781 }
5782
5783 u->exported_log_extra_fields = true;
5784 return 0;
5785
5786fail:
5787 (void) unlink(pattern);
5788 return r;
5789}
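
/* Editor's note: the file written above is a plain sequence of records, each a little-endian 64-bit
 * length immediately followed by that many bytes of "FIELD=value" payload:
 *
 *     le64(len0) payload0 le64(len1) payload1 ...
 *
 * so a reader iterates by reading 8 bytes, then the announced payload, until EOF. */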
5790
5791static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5792 _cleanup_free_ char *buf = NULL;
5793 const char *p;
5794 int r;
5795
5796 assert(u);
5797 assert(c);
5798
5799 if (u->exported_log_ratelimit_interval)
5800 return 0;
5801
5802 if (c->log_ratelimit.interval == 0)
5803 return 0;
5804
5805 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5806
5807 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit.interval) < 0)
5808 return log_oom();
5809
5810 r = symlink_atomic(buf, p);
5811 if (r < 0)
5812 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5813
5814 u->exported_log_ratelimit_interval = true;
5815 return 0;
5816}
5817
5818static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5819 _cleanup_free_ char *buf = NULL;
5820 const char *p;
5821 int r;
5822
5823 assert(u);
5824 assert(c);
5825
5826 if (u->exported_log_ratelimit_burst)
5827 return 0;
5828
5829 if (c->log_ratelimit.burst == 0)
5830 return 0;
5831
5832 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5833
5834 if (asprintf(&buf, "%u", c->log_ratelimit.burst) < 0)
5835 return log_oom();
5836
5837 r = symlink_atomic(buf, p);
5838 if (r < 0)
5839 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5840
5841 u->exported_log_ratelimit_burst = true;
5842 return 0;
5843}
5844
5845void unit_export_state_files(Unit *u) {
5846 const ExecContext *c;
5847
5848 assert(u);
5849
5850 if (!u->id)
5851 return;
5852
5853 if (MANAGER_IS_TEST_RUN(u->manager))
5854 return;
5855
5856 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5857 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5858 * the IPC system itself and PID 1 also log to the journal.
5859 *
5860 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
5861 * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5862 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5863 * namespace at least.
5864 *
5865 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5866 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5867 * them with one. */
5868
5869 (void) unit_export_invocation_id(u);
5870
5871 if (!MANAGER_IS_SYSTEM(u->manager))
5872 return;
5873
5874 c = unit_get_exec_context(u);
5875 if (c) {
5876 (void) unit_export_log_level_max(u, c->log_level_max, /* overwrite= */ false);
5877 (void) unit_export_log_extra_fields(u, c);
5878 (void) unit_export_log_ratelimit_interval(u, c);
5879 (void) unit_export_log_ratelimit_burst(u, c);
5880 }
5881}
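
/* Editor's summary of everything exported above, all keyed by unit name under /run/systemd/units/:
 *
 *     invocation:<unit>               symlink -> invocation ID string
 *     log-level-max:<unit>            symlink -> single digit 0..7 (system manager only)
 *     log-extra-fields:<unit>         regular file of length-prefixed journal fields (system manager only)
 *     log-rate-limit-interval:<unit>  symlink -> interval in usec (system manager only)
 *     log-rate-limit-burst:<unit>     symlink -> burst count (system manager only) */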
5882
5883void unit_unlink_state_files(Unit *u) {
5884 const char *p;
5885
5886 assert(u);
5887
5888 if (!u->id)
5889 return;
5890
5891 /* Undoes the effect of unit_export_state_files() */
5892
5893 if (u->exported_invocation_id) {
5894 _cleanup_free_ char *invocation_path = NULL;
5895 int r = unit_get_invocation_path(u, &invocation_path);
5896 if (r >= 0) {
5897 (void) unlink(invocation_path);
5898 u->exported_invocation_id = false;
5899 }
5900 }
5901
5902 if (!MANAGER_IS_SYSTEM(u->manager))
5903 return;
5904
5905 if (u->exported_log_level_max) {
5906 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5907 (void) unlink(p);
5908
5909 u->exported_log_level_max = false;
5910 }
5911
5912 if (u->exported_log_extra_fields) {
5913 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5914 (void) unlink(p);
5915
5916 u->exported_log_extra_fields = false;
5917 }
5918
5919 if (u->exported_log_ratelimit_interval) {
5920 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5921 (void) unlink(p);
5922
5923 u->exported_log_ratelimit_interval = false;
5924 }
5925
5926 if (u->exported_log_ratelimit_burst) {
5927 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5928 (void) unlink(p);
5929
5930 u->exported_log_ratelimit_burst = false;
5931 }
5932}
5933
5934int unit_set_debug_invocation(Unit *u, bool enable) {
5935 int r;
5936
5937 assert(u);
5938
5939 if (u->debug_invocation == enable)
5940 return 0; /* Nothing to do */
5941
5942 u->debug_invocation = enable;
5943
5944 /* Ensure that the new log level is exported for the journal, in place of the previous one */
5945 if (u->exported_log_level_max) {
5946 const ExecContext *ec = unit_get_exec_context(u);
5947 if (ec) {
5948 r = unit_export_log_level_max(u, enable ? LOG_PRI(LOG_DEBUG) : ec->log_level_max, /* overwrite= */ true);
5949 if (r < 0)
5950 return r;
5951 }
5952 }
5953
5954 return 1;
5955}
5956
5957int unit_prepare_exec(Unit *u) {
5958 int r;
5959
5960 assert(u);
5961
5962 /* Load any custom firewall BPF programs here once, to verify that they exist and are actually loadable.
5963 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5964 r = bpf_firewall_load_custom(u);
5965 if (r < 0)
5966 return r;
5967
5968 /* Prepares everything so that we can fork off a process for this unit */
5969
5970 (void) unit_realize_cgroup(u);
5971
5972 CGroupRuntime *crt = unit_get_cgroup_runtime(u);
5973 if (crt && crt->reset_accounting) {
5974 (void) unit_reset_accounting(u);
5975 crt->reset_accounting = false;
5976 }
5977
5978 unit_export_state_files(u);
5979
5980 r = unit_setup_exec_runtime(u);
5981 if (r < 0)
5982 return r;
5983
5984 return 0;
5985}
5986
5987static int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
5988 const Unit *u = ASSERT_PTR(userdata);
5989 _cleanup_free_ char *comm = NULL;
5990
5991 assert(pidref_is_set(pid));
5992
5993 (void) pidref_get_comm(pid, &comm);
5994
5995 if (ignore_leftover_process(comm))
5996 return 0;
5997
5998 /* During start we print a warning */
5999
6000 log_unit_warning(u,
6001 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
6002 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
6003 pid->pid, strna(comm));
6004
6005 return 1;
6006}
6007
6008static int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
6009 const Unit *u = ASSERT_PTR(userdata);
6010 _cleanup_free_ char *comm = NULL;
6011
6012 assert(pidref_is_set(pid));
6013
6014 (void) pidref_get_comm(pid, &comm);
6015
6016 if (ignore_leftover_process(comm))
6017 return 0;
6018
6019 /* During stop we only print an informational message */
6020
6021 log_unit_info(u,
6022 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
6023 pid->pid, strna(comm));
6024
6025 return 1;
6026}
6027
6028int unit_warn_leftover_processes(Unit *u, bool start) {
6029 _cleanup_free_ char *cgroup = NULL;
6030 int r;
6031
6032 assert(u);
6033
6034 r = unit_get_cgroup_path_with_fallback(u, &cgroup);
6035 if (r < 0)
6036 return r;
6037
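/* Editor's note: with sig=0 nothing is actually killed (a 0 signal only performs an existence and
 * permission check), so this call effectively just enumerates the cgroup's processes and invokes the
 * logging callback for each one found. */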
6038 return cg_kill_recursive(
6039 cgroup,
6040 /* sig= */ 0,
6041 /* flags= */ 0,
6042 /* killed_pids= */ NULL,
6043 start ? unit_log_leftover_process_start : unit_log_leftover_process_stop,
6044 u);
6045}
6046
6047bool unit_needs_console(Unit *u) {
6048 ExecContext *ec;
6049 UnitActiveState state;
6050
6051 assert(u);
6052
6053 state = unit_active_state(u);
6054
6055 if (UNIT_IS_INACTIVE_OR_FAILED(state))
6056 return false;
6057
6058 if (UNIT_VTABLE(u)->needs_console)
6059 return UNIT_VTABLE(u)->needs_console(u);
6060
6061 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
6062 ec = unit_get_exec_context(u);
6063 if (!ec)
6064 return false;
6065
6066 return exec_context_may_touch_console(ec);
6067}
6068
6069int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
6070 int r;
6071
6072 assert(u);
6073
6074 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
6075 * and not a kernel thread either */
6076
6077 /* First, a simple range check */
6078 if (!pidref_is_set(pid))
6079 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");
6080
6081 /* Some extra safety check */
6082 if (pid->pid == 1 || pidref_is_self(pid))
6083 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);
6084
6085 /* Don't even begin to bother with kernel threads */
6086 r = pidref_is_kernel_thread(pid);
6087 if (r == -ESRCH)
6088 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
6089 if (r < 0)
6090 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
6091 if (r > 0)
6092 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);
6093
6094 return 0;
6095}
6096
6097int unit_get_log_level_max(const Unit *u) {
6098 if (u) {
6099 if (u->debug_invocation)
6100 return LOG_DEBUG;
6101
6102 ExecContext *ec = unit_get_exec_context(u);
6103 if (ec && ec->log_level_max >= 0)
6104 return ec->log_level_max;
6105 }
6106
6107 return log_get_max_level();
6108}
6109
6110bool unit_log_level_test(const Unit *u, int level) {
6111 assert(u);
6112 return LOG_PRI(level) <= unit_get_log_level_max(u);
6113}
6114
6115void unit_log_success(Unit *u) {
6116 assert(u);
6117
6118 /* Let's show the "Deactivated successfully" message at debug level (when the manager is a user manager) rather
6119 * than at info level. This message has low information value for regular users and it might be a bit overwhelming
6120 * on a system with a lot of devices. */
6121 log_unit_struct(u,
6122 MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
6123 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SUCCESS_STR),
6124 LOG_UNIT_INVOCATION_ID(u),
6125 LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
6126}
6127
6128void unit_log_failure(Unit *u, const char *result) {
6129 assert(u);
6130 assert(result);
6131
6132 log_unit_struct(u, LOG_WARNING,
6133 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_FAILURE_RESULT_STR),
6134 LOG_UNIT_INVOCATION_ID(u),
6135 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
6136 LOG_ITEM("UNIT_RESULT=%s", result));
6137}
6138
6139void unit_log_skip(Unit *u, const char *result) {
6140 assert(u);
6141 assert(result);
6142
6143 log_unit_struct(u, LOG_INFO,
6144 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SKIPPED_STR),
6145 LOG_UNIT_INVOCATION_ID(u),
6146 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
6147 LOG_ITEM("UNIT_RESULT=%s", result));
6148}
6149
6150void unit_log_process_exit(
6151 Unit *u,
6152 const char *kind,
6153 const char *command,
6154 bool success,
6155 int code,
6156 int status) {
6157
6158 int level;
6159
6160 assert(u);
6161 assert(kind);
6162
6163 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
6164 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
6165 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
6166 * WARNING. */
6167 if (success)
6168 level = LOG_DEBUG;
6169 else if (code == CLD_EXITED)
6170 level = LOG_NOTICE;
6171 else
6172 level = LOG_WARNING;
6173
6174 log_unit_struct(u, level,
6175 LOG_MESSAGE_ID(SD_MESSAGE_UNIT_PROCESS_EXIT_STR),
6176 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
6177 kind,
6178 sigchld_code_to_string(code), status,
6179 strna(code == CLD_EXITED
6180 ? exit_status_to_string(status, EXIT_STATUS_FULL)
6181 : signal_to_string(status)),
6182 success ? " (success)" : ""),
6183 LOG_ITEM("EXIT_CODE=%s", sigchld_code_to_string(code)),
6184 LOG_ITEM("EXIT_STATUS=%i", status),
6185 LOG_ITEM("COMMAND=%s", strna(command)),
6186 LOG_UNIT_INVOCATION_ID(u));
6187}
6188
6189int unit_exit_status(Unit *u) {
6190 assert(u);
6191
6192 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
6193 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
6194 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
6195 * service process has exited abnormally (signal/coredump). */
6196
6197 if (!UNIT_VTABLE(u)->exit_status)
6198 return -EOPNOTSUPP;
6199
6200 return UNIT_VTABLE(u)->exit_status(u);
6201}
6202
6203int unit_failure_action_exit_status(Unit *u) {
6204 int r;
6205
6206 assert(u);
6207
6208 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6209
6210 if (u->failure_action_exit_status >= 0)
6211 return u->failure_action_exit_status;
6212
6213 r = unit_exit_status(u);
6214 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6215 return 255;
6216
6217 return r;
6218}
6219
6220int unit_success_action_exit_status(Unit *u) {
6221 int r;
6222
6223 assert(u);
6224
6225 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6226
6227 if (u->success_action_exit_status >= 0)
6228 return u->success_action_exit_status;
6229
6230 r = unit_exit_status(u);
6231 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
6232 return 255;
6233
6234 return r;
6235}
6236
6237int unit_test_trigger_loaded(Unit *u) {
6238 Unit *trigger;
6239
6240 /* Tests whether the unit to trigger is loaded */
6241
6242 trigger = UNIT_TRIGGER(u);
6243 if (!trigger)
6244 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6245 "Refusing to start, no unit to trigger.");
6246 if (trigger->load_state != UNIT_LOADED)
6247 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
6248 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
6249
6250 return 0;
6251}
6252
6253void unit_destroy_runtime_data(Unit *u, const ExecContext *context, bool destroy_runtime_dir) {
6254 assert(u);
6255 assert(u->manager);
6256 assert(context);
6257
6258 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6259 if (destroy_runtime_dir && context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
6260 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6261
6262 exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
6263 exec_context_destroy_mount_ns_dir(u);
6264}
6265
6266int unit_clean(Unit *u, ExecCleanMask mask) {
6267 UnitActiveState state;
6268
6269 assert(u);
6270
6271 /* Special return values:
6272 *
6273 * -EOPNOTSUPP → cleaning not supported for this unit type
6274 * -EUNATCH → cleaning not defined for this resource type
6275 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6276 * a job queued or similar
6277 */
6278
6279 if (!UNIT_VTABLE(u)->clean)
6280 return -EOPNOTSUPP;
6281
6282 if (mask == 0)
6283 return -EUNATCH;
6284
6285 if (u->load_state != UNIT_LOADED)
6286 return -EBUSY;
6287
6288 if (u->job)
6289 return -EBUSY;
6290
6291 state = unit_active_state(u);
6292 if (state != UNIT_INACTIVE)
6293 return -EBUSY;
6294
6295 return UNIT_VTABLE(u)->clean(u, mask);
6296}
6297
6298int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6299 assert(u);
6300
6301 if (!UNIT_VTABLE(u)->clean ||
6302 u->load_state != UNIT_LOADED) {
6303 *ret = 0;
6304 return 0;
6305 }
6306
6307 /* When the clean() method is set, can_clean() really should be set too */
6308 assert(UNIT_VTABLE(u)->can_clean);
6309
6310 return UNIT_VTABLE(u)->can_clean(u, ret);
6311}
6312
6313bool unit_can_start_refuse_manual(Unit *u) {
6314 return unit_can_start(u) && !u->refuse_manual_start;
6315}
6316
6317bool unit_can_stop_refuse_manual(Unit *u) {
6318 return unit_can_stop(u) && !u->refuse_manual_stop;
6319}
6320
6321bool unit_can_isolate_refuse_manual(Unit *u) {
6322 return unit_can_isolate(u) && !u->refuse_manual_start;
6323}
6324
6325void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret_next, FreezerState *ret_objective) {
6326 FreezerState current, parent, next, objective;
6327
6328 assert(u);
6329 assert(action >= 0);
6330 assert(action < _FREEZER_ACTION_MAX);
6331 assert(ret_next);
6332 assert(ret_objective);
6333
6334 /* This function determines the correct freezer state transitions for a unit
6335 * given the action being requested. It returns the next state, and also the "objective",
6336 * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
6337 * ultimately want to achieve. */
6338
6339 current = u->freezer_state;
6340
6341 Unit *slice = UNIT_GET_SLICE(u);
6342 if (slice)
6343 parent = slice->freezer_state;
6344 else
6345 parent = FREEZER_RUNNING;
6346
6347 switch (action) {
6348
6349 case FREEZER_FREEZE:
6350 /* We always "promote" a freeze initiated by parent into a normal freeze */
6351 if (IN_SET(current, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
6352 next = FREEZER_FROZEN;
6353 else
6354 next = FREEZER_FREEZING;
6355 break;
6356
6357 case FREEZER_THAW:
6358 /* Thawing is the most complicated operation here, because we can't thaw a unit
6359 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
6360 * initiated by parent if the parent is frozen */
6361 if (IN_SET(current, FREEZER_RUNNING, FREEZER_THAWING,
6362 FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) /* Should usually be refused by unit_freezer_action */
6363 next = current;
6364 else if (current == FREEZER_FREEZING) {
6365 if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
6366 next = FREEZER_THAWING;
6367 else
6368 next = FREEZER_FREEZING_BY_PARENT;
6369 } else if (current == FREEZER_FROZEN) {
6370 if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
6371 next = FREEZER_THAWING;
6372 else
6373 next = FREEZER_FROZEN_BY_PARENT;
6374 } else
6375 assert_not_reached();
6376 break;
6377
6378 case FREEZER_PARENT_FREEZE:
6379 /* We need to avoid accidentally demoting units frozen manually */
6380 if (IN_SET(current, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
6381 next = current;
6382 else
6383 next = FREEZER_FREEZING_BY_PARENT;
6384 break;
6385
6386 case FREEZER_PARENT_THAW:
6387 /* We don't want to thaw units from a parent if they were frozen
6388 * manually, so for such units this action is a no-op */
6389 if (IN_SET(current, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
6390 next = current;
6391 else
6392 next = FREEZER_THAWING;
6393 break;
6394
6395 default:
6396 assert_not_reached();
6397 }
6398
6399 objective = freezer_state_finish(next);
6400 if (objective == FREEZER_FROZEN_BY_PARENT)
6401 objective = FREEZER_FROZEN;
6402 assert(IN_SET(objective, FREEZER_RUNNING, FREEZER_FROZEN));
6403
6404 *ret_next = next;
6405 *ret_objective = objective;
6406}
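
/* Editor's summary of the transitions implemented above (current state -> next state):
 *
 *     FREEZE:        FROZEN, FROZEN_BY_PARENT -> FROZEN; anything else -> FREEZING
 *     THAW:          FREEZING -> THAWING if the parent is running/thawing, else FREEZING_BY_PARENT;
 *                    FROZEN -> THAWING if the parent is running/thawing, else FROZEN_BY_PARENT;
 *                    all other states stay as they are
 *     PARENT_FREEZE: FREEZING, FROZEN, FROZEN_BY_PARENT -> unchanged; else -> FREEZING_BY_PARENT
 *     PARENT_THAW:   RUNNING, FREEZING, FROZEN -> unchanged; else -> THAWING */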
6407
6408bool unit_can_freeze(const Unit *u) {
6409 assert(u);
6410
6411 if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
6412 return false;
6413
6414 if (UNIT_VTABLE(u)->can_freeze)
6415 return UNIT_VTABLE(u)->can_freeze(u);
6416
6417 return UNIT_VTABLE(u)->freezer_action;
6418}
6419
6420void unit_set_freezer_state(Unit *u, FreezerState state) {
6421 assert(u);
6422 assert(state >= 0);
6423 assert(state < _FREEZER_STATE_MAX);
6424
6425 if (u->freezer_state == state)
6426 return;
6427
6428 log_unit_debug(u, "Freezer state changed %s -> %s",
6429 freezer_state_to_string(u->freezer_state), freezer_state_to_string(state));
6430
6431 u->freezer_state = state;
6432
6433 unit_add_to_dbus_queue(u);
6434}
6435
6436void unit_freezer_complete(Unit *u, FreezerState kernel_state) {
6437 bool expected;
6438
6439 assert(u);
6440 assert(IN_SET(kernel_state, FREEZER_RUNNING, FREEZER_FROZEN));
6441
6442 expected = IN_SET(u->freezer_state, FREEZER_RUNNING, FREEZER_THAWING) == (kernel_state == FREEZER_RUNNING);
6443
6444 unit_set_freezer_state(u, expected ? freezer_state_finish(u->freezer_state) : kernel_state);
6445 log_unit_info(u, "Unit now %s.", u->freezer_state == FREEZER_RUNNING ? "thawed" :
6446 freezer_state_to_string(u->freezer_state));
6447
6448 /* If the cgroup's final state contradicts what we requested, report the operation as canceled. */
6449 bus_unit_send_pending_freezer_message(u, /* canceled = */ !expected);
6450}
6451
6452int unit_freezer_action(Unit *u, FreezerAction action) {
6453 UnitActiveState s;
6454 int r;
6455
6456 assert(u);
6457 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
6458
6459 if (!unit_can_freeze(u))
6460 return -EOPNOTSUPP;
6461
6462 if (u->job)
6463 return -EBUSY;
6464
6465 if (u->load_state != UNIT_LOADED)
6466 return -EHOSTDOWN;
6467
6468 s = unit_active_state(u);
6469 if (s != UNIT_ACTIVE)
6470 return -EHOSTDOWN;
6471
6472 if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
6473 return -EALREADY;
6474 if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
6475 return -EALREADY;
6476 if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
6477 return -EDEADLK;
6478
6479 r = UNIT_VTABLE(u)->freezer_action(u, action);
6480 if (r <= 0)
6481 return r;
6482
6483 assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
6484 return 1;
6485}
6486
6487Condition *unit_find_failed_condition(Unit *u) {
6488 Condition *failed_trigger = NULL;
6489 bool has_succeeded_trigger = false;
6490
6491 if (u->condition_result)
6492 return NULL;
6493
6494 LIST_FOREACH(conditions, c, u->conditions)
6495 if (c->trigger) {
6496 if (c->result == CONDITION_SUCCEEDED)
6497 has_succeeded_trigger = true;
6498 else if (!failed_trigger)
6499 failed_trigger = c;
6500 } else if (c->result != CONDITION_SUCCEEDED)
6501 return c;
6502
6503 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
6504}
6505
6506int unit_can_live_mount(Unit *u, sd_bus_error *error) {
6507 assert(u);
6508
6509 if (!UNIT_VTABLE(u)->live_mount)
6510 return sd_bus_error_setf(
6511 error,
6512 SD_BUS_ERROR_NOT_SUPPORTED,
6513 "Live mounting not supported by unit type '%s'",
6514 unit_type_to_string(u->type));
6515
6516 if (u->load_state != UNIT_LOADED)
6517 return sd_bus_error_setf(
6518 error,
6519 BUS_ERROR_NO_SUCH_UNIT,
6520 "Unit '%s' not loaded, cannot live mount",
6521 u->id);
6522
6523 if (!UNIT_VTABLE(u)->can_live_mount)
6524 return 0;
6525
6526 return UNIT_VTABLE(u)->can_live_mount(u, error);
6527}
6528
6529int unit_live_mount(
6530 Unit *u,
6531 const char *src,
6532 const char *dst,
6533 sd_bus_message *message,
6534 MountInNamespaceFlags flags,
6535 const MountOptions *options,
6536 sd_bus_error *error) {
6537
6538 assert(u);
6539 assert(UNIT_VTABLE(u)->live_mount);
6540
6541 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
6542 log_unit_debug(u, "Unit not active, cannot perform live mount.");
6543 return sd_bus_error_setf(
6544 error,
6545 BUS_ERROR_UNIT_INACTIVE,
6546 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: unit not active",
6547 src,
6548 dst,
6549 u->id);
6550 }
6551
6552 if (unit_active_state(u) == UNIT_REFRESHING) {
6553 log_unit_debug(u, "Unit already live mounting, refusing further requests.");
6554 return sd_bus_error_setf(
6555 error,
6556 BUS_ERROR_UNIT_BUSY,
6557 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another live mount in progress",
6558 src,
6559 dst,
6560 u->id);
6561 }
6562
6563 if (u->job) {
6564 log_unit_debug(u, "Unit already has a job in progress, cannot live mount");
6565 return sd_bus_error_setf(
6566 error,
6567 BUS_ERROR_UNIT_BUSY,
6568 "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another operation in progress",
6569 src,
6570 dst,
6571 u->id);
6572 }
6573
6574 return UNIT_VTABLE(u)->live_mount(u, src, dst, message, flags, options, error);
6575}
6576
6577static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6578 [COLLECT_INACTIVE] = "inactive",
6579 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6580};
6581
6582DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
6583
6584Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
6585 Unit *i;
6586
6587 assert(u);
6588
6589 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6590 * NULL, checks whether the unit has *any* dependency of that atom. Returns 'other' if found (or, if
6591 * 'other' is NULL, the first entry found), or NULL if not found. */
6592
6593 UNIT_FOREACH_DEPENDENCY(i, u, atom)
6594 if (!other || other == i)
6595 return i;
6596
6597 return NULL;
6598}
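
/* Editor's sketch: typical uses of the lookup above. UNIT_ATOM_BEFORE is one of the dependency atoms
 * defined elsewhere in the tree and is named here merely as an example; 'u' and 'other' are assumed. */
#if 0
if (unit_has_dependency(u, UNIT_ATOM_BEFORE, other))
log_unit_debug(u, "Ordered before %s.", other->id);
if (unit_has_dependency(u, UNIT_ATOM_BEFORE, /* other= */ NULL))
log_unit_debug(u, "Has at least one ordering-before dependency.");
#endif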
6599
6600int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
6601 _cleanup_free_ Unit **array = NULL;
6602 size_t n = 0;
6603 Unit *other;
6604
6605 assert(u);
6606 assert(ret_array);
6607
6608 /* Gets a list of units matching a specific atom as an array. This is useful when iterating through
6609 * dependencies while modifying them: the array is an "atomic snapshot" of sorts that can be read
6610 * while the dependency table is continuously updated. */
6611
6612 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6613 if (!GREEDY_REALLOC(array, n + 1))
6614 return -ENOMEM;
6615
6616 array[n++] = other;
6617 }
6618
6619 *ret_array = TAKE_PTR(array);
6620
6621 assert(n <= INT_MAX);
6622 return (int) n;
6623}
6624
6625int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
6626 _cleanup_set_free_ Set *units = NULL, *queue = NULL;
6627 Unit *other;
6628 int r;
6629
6630 assert(u);
6631 assert(ret);
6632
6633 /* Similar to unit_get_dependency_array(), but also follows the same dependency atom transitively through the units it discovers. */
6634
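/* Editor's note: this is a plain breadth-first walk: 'units' accumulates every unit ever seen, while
 * 'queue' holds the still-unexpanded frontier. set_ensure_put() returning 0 means "already present",
 * which both skips duplicates and guarantees termination on dependency cycles. */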
6635 do {
6636 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
6637 r = set_ensure_put(&units, NULL, other);
6638 if (r < 0)
6639 return r;
6640 if (r == 0)
6641 continue;
6642 r = set_ensure_put(&queue, NULL, other);
6643 if (r < 0)
6644 return r;
6645 }
6646 } while ((u = set_steal_first(queue)));
6647
6648 *ret = TAKE_PTR(units);
6649 return 0;
6650}
6651
6652int unit_arm_timer(
6653 Unit *u,
6654 sd_event_source **source,
6655 bool relative,
6656 usec_t usec,
6657 sd_event_time_handler_t handler) {
6658
6659 int r;
6660
6661 assert(u);
6662 assert(source);
6663 assert(handler);
6664
6665 if (*source) {
6666 if (usec == USEC_INFINITY)
6667 return sd_event_source_set_enabled(*source, SD_EVENT_OFF);
6668
6669 r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
6670 if (r < 0)
6671 return r;
6672
6673 return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
6674 }
6675
6676 if (usec == USEC_INFINITY)
6677 return 0;
6678
6679 r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
6680 u->manager->event,
6681 source,
6682 CLOCK_MONOTONIC,
6683 usec, 0,
6684 handler,
6685 u);
6686 if (r < 0)
6687 return r;
6688
6689 const char *d = strjoina(unit_type_to_string(u->type), "-timer");
6690 (void) sd_event_source_set_description(*source, d);
6691
6692 return 0;
6693}
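
/* Editor's sketch: arming a one-shot timeout relative to now, roughly as a unit type implementation
 * would. The event source field and the handler are hypothetical names, not taken from this file. */
#if 0
r = unit_arm_timer(u,
&s->timer_event_source, /* owned by the type-specific unit struct */
/* relative= */ true,
30 * USEC_PER_SEC, /* fire in 30s */
example_dispatch_timer); /* an sd_event_time_handler_t */
if (r < 0)
return r;
#endif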
6694
6695bool unit_passes_filter(Unit *u, char * const *states, char * const *patterns) {
6696 assert(u);
6697
6698 if (!strv_isempty(states)) {
6699 char * const *unit_states = STRV_MAKE(
6700 unit_load_state_to_string(u->load_state),
6701 unit_active_state_to_string(unit_active_state(u)),
6702 unit_sub_state_to_string(u));
6703
6704 if (!strv_overlap(states, unit_states))
6705 return false;
6706 }
6707
6708 return strv_fnmatch_or_empty(patterns, u->id, FNM_NOESCAPE);
6709}
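
/* Editor's sketch: check whether a unit is active and matches a shell-style name pattern. */
#if 0
if (unit_passes_filter(u, STRV_MAKE("active"), STRV_MAKE("ssh*", "getty@*")))
log_unit_debug(u, "Unit matches the requested filter.");
#endif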
6710
6711static int unit_get_nice(Unit *u) {
6712 ExecContext *ec;
6713
6714 ec = unit_get_exec_context(u);
6715 return ec ? ec->nice : 0;
6716}
6717
6718static uint64_t unit_get_cpu_weight(Unit *u) {
6719 CGroupContext *cc;
6720
6721 cc = unit_get_cgroup_context(u);
6722 return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
6723}
6724
6725int unit_compare_priority(Unit *a, Unit *b) {
6726 int ret;
6727
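/* Editor's note: the negated returns below make "bigger" sort first: units with a larger type enum
 * value and a higher CPU weight order earlier, while a lower nice value (i.e. a higher scheduling
 * priority) also orders earlier; the unit id is the final tie-breaker. */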
6728 ret = CMP(a->type, b->type);
6729 if (ret != 0)
6730 return -ret;
6731
6732 ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
6733 if (ret != 0)
6734 return -ret;
6735
6736 ret = CMP(unit_get_nice(a), unit_get_nice(b));
6737 if (ret != 0)
6738 return ret;
6739
6740 return strcmp(a->id, b->id);
6741}
6742
6743const char* unit_log_field(const Unit *u) {
6744 return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "UNIT=" : "USER_UNIT=";
6745}
6746
6747const char* unit_invocation_log_field(const Unit *u) {
6748 return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "INVOCATION_ID=" : "USER_INVOCATION_ID=";
6749}
6750
6751const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
6752 [UNIT_PATH] = &activation_details_path_vtable,
6753 [UNIT_TIMER] = &activation_details_timer_vtable,
6754};
6755
6756ActivationDetails *activation_details_new(Unit *trigger_unit) {
6757 _cleanup_free_ ActivationDetails *details = NULL;
6758
6759 assert(trigger_unit);
6760 assert(trigger_unit->type != _UNIT_TYPE_INVALID);
6761 assert(trigger_unit->id);
6762
6763 details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
6764 if (!details)
6765 return NULL;
6766
6767 *details = (ActivationDetails) {
6768 .n_ref = 1,
6769 .trigger_unit_type = trigger_unit->type,
6770 };
6771
6772 details->trigger_unit_name = strdup(trigger_unit->id);
6773 if (!details->trigger_unit_name)
6774 return NULL;
6775
6776 if (ACTIVATION_DETAILS_VTABLE(details)->init)
6777 ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);
6778
6779 return TAKE_PTR(details);
6780}
6781
6782static ActivationDetails *activation_details_free(ActivationDetails *details) {
6783 if (!details)
6784 return NULL;
6785
6786 if (ACTIVATION_DETAILS_VTABLE(details)->done)
6787 ACTIVATION_DETAILS_VTABLE(details)->done(details);
6788
6789 free(details->trigger_unit_name);
6790
6791 return mfree(details);
6792}
6793
6794void activation_details_serialize(const ActivationDetails *details, FILE *f) {
6795 if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
6796 return;
6797
6798 (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
6799 if (details->trigger_unit_name)
6800 (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
6801 if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
6802 ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
6803}
6804
6805int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
6806 int r;
6807
6808 assert(key);
6809 assert(value);
6810 assert(details);
6811
6812 if (!*details) {
6813 UnitType t;
6814
6815 if (!streq(key, "activation-details-unit-type"))
6816 return -EINVAL;
6817
6818 t = unit_type_from_string(value);
6819 if (t < 0)
6820 return t;
6821
6822 /* The activation details vtable has defined ops only for path and timer units */
6823 if (!activation_details_vtable[t])
6824 return -EINVAL;
6825
6826 *details = malloc0(activation_details_vtable[t]->object_size);
6827 if (!*details)
6828 return -ENOMEM;
6829
6830 **details = (ActivationDetails) {
6831 .n_ref = 1,
6832 .trigger_unit_type = t,
6833 };
6834
6835 return 0;
6836 }
6837
6838 if (streq(key, "activation-details-unit-name")) {
6839 r = free_and_strdup(&(*details)->trigger_unit_name, value);
6840 if (r < 0)
6841 return r;
6842
6843 return 0;
6844 }
6845
6846 if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
6847 return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);
6848
6849 return -EINVAL;
6850}
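
/* Editor's note: given the serialize_item() calls above, the serialized form is plain "key=value"
 * lines, e.g. for a timer-triggered activation something like:
 *
 *     activation-details-unit-type=timer
 *     activation-details-unit-name=foo.timer
 *
 * plus any extra pairs contributed by the type-specific serialize() hook. */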
6851
6852int activation_details_append_env(const ActivationDetails *details, char ***strv) {
6853 int r = 0;
6854
6855 assert(strv);
6856
6857 if (!details)
6858 return 0;
6859
6860 if (!isempty(details->trigger_unit_name)) {
6861 char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
6862 if (!s)
6863 return -ENOMEM;
6864
6865 r = strv_consume(strv, TAKE_PTR(s));
6866 if (r < 0)
6867 return r;
6868 }
6869
6870 if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
6871 r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
6872 if (r < 0)
6873 return r;
6874 }
6875
6876 return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
6877}
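
/* Editor's note: for a timer-triggered service the environment assembled above would contain e.g.
 * TRIGGER_UNIT=foo.timer, plus whatever the timer vtable's append_env() hook adds (the TRIGGER_TIMER_*
 * variables documented in systemd.exec(5)). */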
6878
6879int activation_details_append_pair(const ActivationDetails *details, char ***strv) {
6880 int r = 0;
6881
6882 assert(strv);
6883
6884 if (!details)
6885 return 0;
6886
6887 if (!isempty(details->trigger_unit_name)) {
6888 r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
6889 if (r < 0)
6890 return r;
6891 }
6892
6893 if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
6894 r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
6895 if (r < 0)
6896 return r;
6897 }
6898
6899 return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
6900}
6901
6902DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6903
6904static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
6905 [UNIT_MOUNT_WANTS] = "WantsMountsFor",
6906 [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
6907};
6908
6909DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);
6910
6911static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
6912 [OOM_CONTINUE] = "continue",
6913 [OOM_STOP] = "stop",
6914 [OOM_KILL] = "kill",
6915};
6916
6917DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);
6918
6919UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
6920 switch (t) {
6921
6922 case UNIT_MOUNT_WANTS:
6923 return UNIT_WANTS;
6924
6925 case UNIT_MOUNT_REQUIRES:
6926 return UNIT_REQUIRES;
6927
6928 default:
6929 assert_not_reached();
6930 }
6931}