/* Source: systemd, src/core/unit.c — mirrored at git.ipfire.org
 * (tree state: merge of PR #22791 "bootctl-invert-order"). */
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-util.h"
18 #include "cgroup-setup.h"
19 #include "cgroup-util.h"
20 #include "chase-symlinks.h"
21 #include "core-varlink.h"
22 #include "dbus-unit.h"
23 #include "dbus.h"
24 #include "dropin.h"
25 #include "escape.h"
26 #include "execute.h"
27 #include "fd-util.h"
28 #include "fileio-label.h"
29 #include "fileio.h"
30 #include "format-util.h"
31 #include "id128-util.h"
32 #include "install.h"
33 #include "io-util.h"
34 #include "label.h"
35 #include "load-dropin.h"
36 #include "load-fragment.h"
37 #include "log.h"
38 #include "macro.h"
39 #include "missing_audit.h"
40 #include "mkdir-label.h"
41 #include "path-util.h"
42 #include "process-util.h"
43 #include "rm-rf.h"
44 #include "set.h"
45 #include "signal-util.h"
46 #include "sparse-endian.h"
47 #include "special.h"
48 #include "specifier.h"
49 #include "stat-util.h"
50 #include "stdio-util.h"
51 #include "string-table.h"
52 #include "string-util.h"
53 #include "strv.h"
54 #include "terminal-util.h"
55 #include "tmpfile-util.h"
56 #include "umask-util.h"
57 #include "unit-name.h"
58 #include "unit.h"
59 #include "user-util.h"
60 #include "virt.h"
61 #if BPF_FRAMEWORK
62 #include "bpf-link.h"
63 #endif
64
65 /* Thresholds for logging at INFO level about resource consumption */
66 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
67 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
68 #define MENTIONWORTHY_IP_BYTES (0ULL)
69
70 /* Thresholds for logging at INFO level about resource consumption */
71 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
72 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
73 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
74
75 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
76 [UNIT_SERVICE] = &service_vtable,
77 [UNIT_SOCKET] = &socket_vtable,
78 [UNIT_TARGET] = &target_vtable,
79 [UNIT_DEVICE] = &device_vtable,
80 [UNIT_MOUNT] = &mount_vtable,
81 [UNIT_AUTOMOUNT] = &automount_vtable,
82 [UNIT_SWAP] = &swap_vtable,
83 [UNIT_TIMER] = &timer_vtable,
84 [UNIT_PATH] = &path_vtable,
85 [UNIT_SLICE] = &slice_vtable,
86 [UNIT_SCOPE] = &scope_vtable,
87 };
88
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        /* Allocates and zero-initializes a new unit object of the given size
         * (callers pass the size of their type-specific struct, which must be
         * at least sizeof(Unit)), and fills in defaults. Returns NULL on OOM. */

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1;   /* -1 == no inotify watch installed */
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* -1 == file descriptor not open */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        /* UINT64_MAX marks "no sample taken yet" for the I/O accounting counters */
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start rate limiting is seeded from the manager's configured defaults;
         * the auto start/stop limit is a fixed built-in (16 per 10s). */
        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_start_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };

        return u;
}
133
134 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 _cleanup_(unit_freep) Unit *u = NULL;
136 int r;
137
138 u = unit_new(m, size);
139 if (!u)
140 return -ENOMEM;
141
142 r = unit_add_name(u, name);
143 if (r < 0)
144 return r;
145
146 *ret = TAKE_PTR(u);
147
148 return r;
149 }
150
151 bool unit_has_name(const Unit *u, const char *name) {
152 assert(u);
153 assert(name);
154
155 return streq_ptr(name, u->id) ||
156 set_contains(u->aliases, name);
157 }
158
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        /* Initializes the type-specific cgroup/exec/kill contexts of a freshly
         * typed unit with the manager's defaults, then runs the unit type's
         * own init hook. Called once the unit's type is known. */

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                /* The default TasksMax= is not applied to slice units */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->default_oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->default_oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(getpid_cached(), &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
215
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Adds 'donated_name' to the alias set. Takes ownership of the string
         * on success; on failure the caller keeps ownership. */

        /* Make sure that u->aliases is allocated. We may leave it
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        /* r == 0 would mean the name was already present — callers are
         * expected to have filtered duplicates, hence treat that as a bug. */
        assert(r > 0);

        return 0;
}
228
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        /* Registers an additional name for the unit. If the unit has no id
         * yet, the name becomes the primary id (and determines the unit's
         * type and instance); otherwise it is added as an alias. Template
         * names are instantiated with the unit's instance string first. */

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already one of our names? Then there is nothing to do. */
        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* All names of one unit must share the same unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                /* The unit already has a primary id, hence the new name becomes an alias. */
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name); /* ownership moved into the alias set */

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
323
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        /* Promotes 'name' (possibly a template, resolved against our
         * instance), which must already be one of this unit's names, to be
         * the unit's primary id. The previous id becomes an alias. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the chosen alias leaves the set, the old id enters it. */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
363
364 int unit_set_description(Unit *u, const char *description) {
365 int r;
366
367 assert(u);
368
369 r = free_and_strdup(&u->description, empty_to_null(description));
370 if (r < 0)
371 return r;
372 if (r > 0)
373 unit_add_to_dbus_queue(u);
374
375 return 0;
376 }
377
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        /* Returns true if any unit referenced via an on-success or on-failure
         * dependency still has a job (or nop job) queued — i.e. handler
         * activity may still be pending for this unit. */

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
391
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected. */
        if (u->perpetual)
                return false;

        /* Somebody on the bus is still tracking this unit? Keep it around. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or on Success= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0) /* on error, err on the side of keeping the unit */
                        return false;
        }

        /* Finally, give the unit type a chance to veto collection. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
461
462 void unit_add_to_load_queue(Unit *u) {
463 assert(u);
464 assert(u->type != _UNIT_TYPE_INVALID);
465
466 if (u->load_state != UNIT_STUB || u->in_load_queue)
467 return;
468
469 LIST_PREPEND(load_queue, u->manager->load_queue, u);
470 u->in_load_queue = true;
471 }
472
473 void unit_add_to_cleanup_queue(Unit *u) {
474 assert(u);
475
476 if (u->in_cleanup_queue)
477 return;
478
479 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
480 u->in_cleanup_queue = true;
481 }
482
483 void unit_add_to_gc_queue(Unit *u) {
484 assert(u);
485
486 if (u->in_gc_queue || u->in_cleanup_queue)
487 return;
488
489 if (!unit_may_gc(u))
490 return;
491
492 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
493 u->in_gc_queue = true;
494 }
495
496 void unit_add_to_dbus_queue(Unit *u) {
497 assert(u);
498 assert(u->type != _UNIT_TYPE_INVALID);
499
500 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
501 return;
502
503 /* Shortcut things if nobody cares */
504 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
505 sd_bus_track_count(u->bus_track) <= 0 &&
506 set_isempty(u->manager->private_buses)) {
507 u->sent_dbus_new_signal = true;
508 return;
509 }
510
511 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
512 u->in_dbus_queue = true;
513 }
514
515 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
516 assert(u);
517
518 if (u->in_stop_when_unneeded_queue)
519 return;
520
521 if (!u->stop_when_unneeded)
522 return;
523
524 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
525 return;
526
527 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
528 u->in_stop_when_unneeded_queue = true;
529 }
530
531 void unit_submit_to_start_when_upheld_queue(Unit *u) {
532 assert(u);
533
534 if (u->in_start_when_upheld_queue)
535 return;
536
537 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
538 return;
539
540 if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
541 return;
542
543 LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
544 u->in_start_when_upheld_queue = true;
545 }
546
547 void unit_submit_to_stop_when_bound_queue(Unit *u) {
548 assert(u);
549
550 if (u->in_stop_when_bound_queue)
551 return;
552
553 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
554 return;
555
556 if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
557 return;
558
559 LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
560 u->in_stop_when_bound_queue = true;
561 }
562
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                /* For every unit we point at, drop its back-references to us. */
                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        /* 'other' may have become unreferenced now, let GC decide. */
                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
584
static void unit_remove_transient(Unit *u) {
        assert(u);

        /* Best-effort removal of the on-disk unit file and drop-ins of a
         * transient unit. No-op for non-transient units. */

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* best effort: fails while the directory still has other entries */
        }
}
613
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        /* Drops this unit from the manager's units_requiring_mounts_for index:
         * for every registered path, the unit is removed from the set stored
         * under each path prefix; sets that become empty are deleted along
         * with their (heap-allocated) key. */

        for (;;) {
                _cleanup_free_ char *path = NULL;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix? Drop the whole entry. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
647
648 static void unit_done(Unit *u) {
649 ExecContext *ec;
650 CGroupContext *cc;
651
652 assert(u);
653
654 if (u->type < 0)
655 return;
656
657 if (UNIT_VTABLE(u)->done)
658 UNIT_VTABLE(u)->done(u);
659
660 ec = unit_get_exec_context(u);
661 if (ec)
662 exec_context_done(ec);
663
664 cc = unit_get_cgroup_context(u);
665 if (cc)
666 cgroup_context_done(cc);
667 }
668
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        /* Frees the unit object, detaching it from all manager-side data
         * structures (name tables, work queues, jobs, cgroup, bus tracking).
         * Safe to call with NULL; returns NULL so callers can write
         * u = unit_free(u). The teardown order below matters. */

        if (!u)
                return NULL;

        u->transient_file = safe_fclose(u->transient_file);

        /* During a daemon reload transient unit files must survive. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_message = sd_bus_message_unref(u->pending_freezer_message);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names (primary id and aliases) from the manager's
         * global name table. */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);


        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        /* State files also survive a reload, like transient unit files above. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Break all remaining UnitRef references pointing at us. */
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Remove ourselves from every pending work queue we might sit in. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        /* Finally, release the name strings themselves. */
        set_free_free(u->aliases);
        free(u->id);

        return mfree(u);
}
812
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        /* Returns the unit's recorded freezer state field. */
        return u->freezer_state;
}
818
int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
        char *values[1] = {};
        int r;

        /* Reads the kernel's view of the unit's freezer state from the
         * "frozen" key of the unit cgroup's cgroup.events file. Stores
         * FREEZER_RUNNING, FREEZER_FROZEN, or _FREEZER_STATE_INVALID (key
         * absent or unrecognized) in *ret. Returns < 0 on read failure. */

        assert(u);

        r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                   STRV_MAKE("frozen"), values);
        if (r < 0)
                return r;

        r = _FREEZER_STATE_INVALID;

        if (values[0]) {
                if (streq(values[0], "0"))
                        r = FREEZER_RUNNING;
                else if (streq(values[0], "1"))
                        r = FREEZER_FROZEN;
        }

        free(values[0]);
        *ret = r;

        return 0;
}
844
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        /* For merged units, report the state of the unit they were merged into. */
        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
857
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        /* Returns the type-specific (low-level) state string for the unit. */
        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
863
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        /* Moves all names of 'other' (primary id + aliases) over to 'u', and
         * repoints the manager's global name table accordingly. */

        assert(u);
        assert(other);

        /* other's primary id becomes an alias of u; the set takes ownership of the string. */
        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Roll back the alias we just added above. */
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free_free(other->aliases);

        /* Make every (transferred) name resolve to u in the global table. */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
889
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the top-level per-dependency-type hashmap. Reserving one slot per type
         * 'other' has is an estimate that may be too high, since both units likely share some types — but
         * it is never too low, and that's all we need. The reservation is additionally capped at the
         * number of dependency types u does not have yet. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
936
937 static void unit_maybe_warn_about_dependency(
938 Unit *u,
939 const char *other_id,
940 UnitDependency dependency) {
941
942 assert(u);
943
944 /* Only warn about some unit types */
945 if (!IN_SET(dependency,
946 UNIT_CONFLICTS,
947 UNIT_CONFLICTED_BY,
948 UNIT_BEFORE,
949 UNIT_AFTER,
950 UNIT_ON_SUCCESS,
951 UNIT_ON_FAILURE,
952 UNIT_TRIGGERS,
953 UNIT_TRIGGERED_BY))
954 return;
955
956 if (streq_ptr(u->id, other_id))
957 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
958 else
959 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other_id), u->id);
960 }
961
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        /* UnitDependencyInfo is stored directly inside the hashmap's value
         * pointer, hence it must be no larger than a void*. */
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        /* > 0 signals to the caller that an entry was added or changed. */
        return 1;
}
1003
static int unit_add_dependency_hashmap(
                Hashmap **dependencies,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        Hashmap *per_type;
        int r;

        /* Registers 'other' under dependency type 'd' in the two-level
         * dependency table, merging the masks if an entry already exists. */

        assert(dependencies);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        /* Ensure the top-level dependency hashmap exists that maps UnitDependency → Hashmap(Unit* →
         * UnitDependencyInfo) */
        r = hashmap_ensure_allocated(dependencies, NULL);
        if (r < 0)
                return r;

        /* Acquire the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency
         * type, and if it's missing allocate it and insert it. */
        per_type = hashmap_get(*dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!per_type) {
                per_type = hashmap_new(NULL);
                if (!per_type)
                        return -ENOMEM;

                r = hashmap_put(*dependencies, UNIT_DEPENDENCY_TO_PTR(d), per_type);
                if (r < 0) {
                        hashmap_free(per_type);
                        return r;
                }
        }

        return unit_per_dependency_type_hashmap_update(per_type, other, origin_mask, destination_mask);
}
1043
1044 static void unit_merge_dependencies(
1045 Unit *u,
1046 Unit *other) {
1047
1048 int r;
1049
1050 assert(u);
1051 assert(other);
1052
1053 if (u == other)
1054 return;
1055
1056 for (;;) {
1057 _cleanup_(hashmap_freep) Hashmap *other_deps = NULL;
1058 UnitDependencyInfo di_back;
1059 Unit *back;
1060 void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
1061 * since the hashmaps all want it as void pointer. */
1062
1063 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1064 other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
1065 if (!other_deps)
1066 break; /* done! */
1067
1068 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1069 * referenced units as 'back'. */
1070 HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
1071 Hashmap *back_deps;
1072 void *back_dt;
1073
1074 if (back == u) {
1075 /* This is a dependency pointing back to the unit we want to merge with?
1076 * Suppress it (but warn) */
1077 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1078 continue;
1079 }
1080
1081 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1082 * point to 'u' instead. */
1083 HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
1084 UnitDependencyInfo di_move;
1085
1086 di_move.data = hashmap_remove(back_deps, other);
1087 if (!di_move.data)
1088 continue;
1089
1090 assert_se(unit_per_dependency_type_hashmap_update(
1091 back_deps,
1092 u,
1093 di_move.origin_mask,
1094 di_move.destination_mask) >= 0);
1095 }
1096 }
1097
1098 /* Now all references towards 'other' of the current type 'dt' are corrected to point to
1099 * 'u'. Lets's now move the deps of type 'dt' from 'other' to 'u'. First, let's try to move
1100 * them per type wholesale. */
1101 r = hashmap_put(u->dependencies, dt, other_deps);
1102 if (r == -EEXIST) {
1103 Hashmap *deps;
1104
1105 /* The target unit already has dependencies of this type, let's then merge this individually. */
1106
1107 assert_se(deps = hashmap_get(u->dependencies, dt));
1108
1109 for (;;) {
1110 UnitDependencyInfo di_move;
1111
1112 /* Get first dep */
1113 di_move.data = hashmap_steal_first_key_and_value(other_deps, (void**) &back);
1114 if (!di_move.data)
1115 break; /* done */
1116 if (back == u) {
1117 /* Would point back to us, ignore */
1118 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1119 continue;
1120 }
1121
1122 assert_se(unit_per_dependency_type_hashmap_update(deps, back, di_move.origin_mask, di_move.destination_mask) >= 0);
1123 }
1124 } else {
1125 assert_se(r >= 0);
1126 TAKE_PTR(other_deps);
1127
1128 if (hashmap_remove(other_deps, u))
1129 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1130 }
1131 }
1132
1133 other->dependencies = hashmap_free(other->dependencies);
1134 }
1135
int unit_merge(Unit *u, Unit *other) {
        int r;

        /* Merges 'other' into 'u': names, references and dependencies all move
         * over to 'u', and 'other' is left as a UNIT_MERGED husk queued for
         * cleanup. Only possible while 'other' is unloaded (stub/not-found)
         * and idle. */

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        /* 'other' must have no jobs and must not be running. */
        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Merge names */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was just set to UNIT_MERGED above, so this
         * condition is always true — the check looks vestigial; confirm. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1202
1203 int unit_merge_by_name(Unit *u, const char *name) {
1204 _cleanup_free_ char *s = NULL;
1205 Unit *other;
1206 int r;
1207
1208 /* Either add name to u, or if a unit with name already exists, merge it with u.
1209 * If name is a template, do the same for name@instance, where instance is u's instance. */
1210
1211 assert(u);
1212 assert(name);
1213
1214 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1215 if (!u->instance)
1216 return -EINVAL;
1217
1218 r = unit_name_replace_instance(name, u->instance, &s);
1219 if (r < 0)
1220 return r;
1221
1222 name = s;
1223 }
1224
1225 other = manager_get_unit(u->manager, name);
1226 if (other)
1227 return unit_merge(u, other);
1228
1229 return unit_add_name(u, name);
1230 }
1231
1232 Unit* unit_follow_merge(Unit *u) {
1233 assert(u);
1234
1235 while (u->load_state == UNIT_MERGED)
1236 assert_se(u = u->merged_into);
1237
1238 return u;
1239 }
1240
/* Adds the implicit dependencies implied by an ExecContext: mount dependencies for the various
 * directories the unit needs, and (system manager only) ordering against remount-fs, tmp.mount,
 * tmpfiles-setup, udev and journald, depending on which exec settings are in use.
 * Returns 0 on success, negative errno on failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* WorkingDirectory= needs its backing mounts — unless marked optional ("-" prefix). */
        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Same for RootDirectory= ... */
        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* ... and RootImage=. */
        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* StateDirectory=/CacheDirectory=/LogsDirectory=/...: require mounts for each configured
         * entry, resolved against the manager's per-type prefix directory. */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                for (size_t i = 0; i < c->directories[dt].n_items; i++) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below concerns system services only. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {

                /* FIXME: for now we make a special case for /tmp and add a weak dependency on
                 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
                 * /tmp specifically and masking other mount units should be handled more
                 * gracefully too, see PR#16894. */
                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "tmp.mount", true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_require_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Journald deps are only needed when output goes to the journal/kmsg or a log namespace is set. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                /* LogNamespace= units depend on the per-namespace journald instance (its main and
                 * varlink sockets), rather than on the default journald socket below. */
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1360
1361 const char* unit_description(Unit *u) {
1362 assert(u);
1363
1364 if (u->description)
1365 return u->description;
1366
1367 return strna(u->id);
1368 }
1369
const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * pointer.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        /* Fall back to the plain id whenever the description is unset, equal to the id, or the
         * configured format can't be honored here. */
        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        /* The caller owns *ret_combined_buffer and must free it. */
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
1403
/* Common implementation for multiple backends: load the unit's main fragment file plus all of its
 * drop-in directories. If 'fragment_required' is true, a missing fragment is an error (-ENOENT);
 * otherwise the unit is marked UNIT_LOADED even without one. Also records the source file's mtime
 * for later change detection. Returns 0 on success, negative errno on failure. */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        /* Still UNIT_STUB means no fragment was found. */
        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drops-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                /* Best-effort: on stat() failure the mtime is simply cleared. */
                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
1441
1442 void unit_add_to_target_deps_queue(Unit *u) {
1443 Manager *m = u->manager;
1444
1445 assert(u);
1446
1447 if (u->in_target_deps_queue)
1448 return;
1449
1450 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1451 u->in_target_deps_queue = true;
1452 }
1453
1454 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1455 assert(u);
1456 assert(target);
1457
1458 if (target->type != UNIT_TARGET)
1459 return 0;
1460
1461 /* Only add the dependency if both units are loaded, so that
1462 * that loop check below is reliable */
1463 if (u->load_state != UNIT_LOADED ||
1464 target->load_state != UNIT_LOADED)
1465 return 0;
1466
1467 /* If either side wants no automatic dependencies, then let's
1468 * skip this */
1469 if (!u->default_dependencies ||
1470 !target->default_dependencies)
1471 return 0;
1472
1473 /* Don't create loops */
1474 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1475 return 0;
1476
1477 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1478 }
1479
1480 static int unit_add_slice_dependencies(Unit *u) {
1481 Unit *slice;
1482 assert(u);
1483
1484 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1485 return 0;
1486
1487 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1488 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1489 relationship). */
1490 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1491
1492 slice = UNIT_GET_SLICE(u);
1493 if (slice)
1494 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1495
1496 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1497 return 0;
1498
1499 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1500 }
1501
/* For every path recorded via RequiresMountsFor=, walk all of its path prefixes and add After=
 * (plus Requires=, when a fragment exists) on the corresponding .mount units, so the unit only
 * starts once its required mounts are in place. Returns 0 on success, negative errno on failure. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
                /* VLA scratch buffer, large enough for any prefix of 'path'. */
                char prefix[strlen(path) + 1];

                /* Iterate over "/", "/foo", "/foo/bar", ... up to the full path. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (IN_SET(r, -EINVAL, -ENAMETOOLONG))
                                continue; /* If the path cannot be converted to a mount unit name, then it's
                                           * not manageable as a unit by systemd, and hence we don't need a
                                           * dependency on it. Let's thus silently ignore the issue. */
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if it exists. If so the dependencies on
                                 * this unit will be added later during the loading of the mount unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        /* Only depend on mount units that are actually loaded. */
                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* A fragment means the mount is configured (not merely discovered), so a
                         * hard Requires= is appropriate on top of the ordering. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1551
1552 static int unit_add_oomd_dependencies(Unit *u) {
1553 CGroupContext *c;
1554 bool wants_oomd;
1555 int r;
1556
1557 assert(u);
1558
1559 if (!u->default_dependencies)
1560 return 0;
1561
1562 c = unit_get_cgroup_context(u);
1563 if (!c)
1564 return 0;
1565
1566 wants_oomd = (c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL);
1567 if (!wants_oomd)
1568 return 0;
1569
1570 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1571 if (r < 0)
1572 return r;
1573
1574 return 0;
1575 }
1576
1577 static int unit_add_startup_units(Unit *u) {
1578 if (!unit_has_startup_cgroup_constraints(u))
1579 return 0;
1580
1581 return set_ensure_put(&u->manager->startup_units, NULL, u);
1582 }
1583
1584 static int unit_validate_on_failure_job_mode(
1585 Unit *u,
1586 const char *job_mode_setting,
1587 JobMode job_mode,
1588 const char *dependency_name,
1589 UnitDependencyAtom atom) {
1590
1591 Unit *other, *found = NULL;
1592
1593 if (job_mode != JOB_ISOLATE)
1594 return 0;
1595
1596 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1597 if (!found)
1598 found = other;
1599 else if (found != other)
1600 return log_unit_error_errno(
1601 u, SYNTHETIC_ERRNO(ENOEXEC),
1602 "More than one %s dependencies specified but %sisolate set. Refusing.",
1603 dependency_name, job_mode_setting);
1604 }
1605
1606 return 0;
1607 }
1608
/* Drives a unit from UNIT_STUB to a final load state: finalizes any transient file, calls the
 * per-type load() implementation, then wires up slice/mount/oomd dependencies and validates the
 * configuration. On failure the unit is parked in UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR.
 * Returns 0 on success, negative errno on failure. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* Dequeue ourselves first, so a re-enqueue during loading is not lost. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything other than UNIT_STUB means loading already happened. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Per-type load implementation (service, socket, mount, ...). */
        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1704
/* Logging callback used by condition/assert evaluation: logs with the unit's identifying journal
 * fields when 'userdata' is a Unit, or as a plain message otherwise. Returns the negative errno
 * value corresponding to 'error' (log_*'s usual convention). */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Suppress messages below the unit's effective log level. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                /* Attach UNIT=/USER_UNIT= and INVOCATION_ID= journal fields. */
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1728
/* Evaluates the unit's Condition*= list, records the timestamp and result on the unit, and
 * returns the result. Note: if the effective environment cannot be determined the conditions are
 * treated as passing (result forced to true) — conditions are a soft gate. */
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        /* Result is exposed on the bus, so mark the unit for a property refresh. */
        unit_add_to_dbus_queue(u);
        return u->condition_result;
}
1752
/* Evaluates the unit's Assert*= list, records the timestamp and result on the unit, and returns
 * the result. Unlike unit_test_condition(), an environment failure here yields CONDITION_ERROR
 * (falsy) — asserts are a hard gate, so errors fail the check rather than pass it. */
static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        /* Result is exposed on the bus, so mark the unit for a property refresh. */
        unit_add_to_dbus_queue(u);
        return u->assert_result;
}
1776
1777 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
1778 if (log_get_show_color()) {
1779 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
1780 ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
1781 else
1782 ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
1783 }
1784
1785 DISABLE_WARNING_FORMAT_NONLITERAL;
1786 manager_status_printf(u->manager, status_type, status, format, ident);
1787 REENABLE_WARNING;
1788 }
1789
1790 int unit_test_start_limit(Unit *u) {
1791 const char *reason;
1792
1793 assert(u);
1794
1795 if (ratelimit_below(&u->start_ratelimit)) {
1796 u->start_limit_hit = false;
1797 return 0;
1798 }
1799
1800 log_unit_warning(u, "Start request repeated too quickly.");
1801 u->start_limit_hit = true;
1802
1803 reason = strjoina("unit ", u->id, " failed");
1804
1805 emergency_action(u->manager, u->start_limit_action,
1806 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1807 u->reboot_arg, -1, reason);
1808
1809 return -ECANCELED;
1810 }
1811
1812 bool unit_shall_confirm_spawn(Unit *u) {
1813 assert(u);
1814
1815 if (manager_is_confirm_spawn_disabled(u->manager))
1816 return false;
1817
1818 /* For some reasons units remaining in the same process group
1819 * as PID 1 fail to acquire the console even if it's not used
1820 * by any process. So skip the confirmation question for them. */
1821 return !unit_get_exec_context(u)->same_pgrp;
1822 }
1823
/* Returns true if all BindsTo=+After= dependencies of this unit are currently active, false (with
 * a notice logged) otherwise. Used as a last-minute gate before starting. */
static bool unit_verify_deps(Unit *u) {
        Unit *other;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
         * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
         * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
         * that are not used in conjunction with After= as for them any such check would make things entirely
         * racy. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {

                /* Without an After= ordering the peer may legitimately still be starting — skip. */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1848
/* Errors that aren't really errors:
 *         -EALREADY: Unit is already started.
 *         -ECOMM:    Condition failed
 *         -EAGAIN:   An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:     This unit type does not support starting.
 *         -ECANCELED: Start limit hit, too many requests for now
 *         -EPROTO:    Assert failed
 *         -EINVAL:    Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:   The necessary dependencies are not fulfilled.
 *         -ESTALE:    This unit has been started before and can't be started a second time
 *         -ENOENT:    This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);
        /* A frozen unit can't process the start request, so thaw it first. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->start(u);
}
1940
1941 bool unit_can_start(Unit *u) {
1942 assert(u);
1943
1944 if (u->load_state != UNIT_LOADED)
1945 return false;
1946
1947 if (!unit_type_supported(u->type))
1948 return false;
1949
1950 /* Scope units may be started only once */
1951 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1952 return false;
1953
1954 return !!UNIT_VTABLE(u)->start;
1955 }
1956
1957 bool unit_can_isolate(Unit *u) {
1958 assert(u);
1959
1960 return unit_can_start(u) &&
1961 u->allow_isolate;
1962 }
1963
1964 /* Errors:
1965 * -EBADR: This unit type does not support stopping.
1966 * -EALREADY: Unit is already stopped.
1967 * -EAGAIN: An operation is already in progress. Retry later.
1968 */
1969 int unit_stop(Unit *u) {
1970 UnitActiveState state;
1971 Unit *following;
1972
1973 assert(u);
1974
1975 state = unit_active_state(u);
1976 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1977 return -EALREADY;
1978
1979 following = unit_following(u);
1980 if (following) {
1981 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1982 return unit_stop(following);
1983 }
1984
1985 if (!UNIT_VTABLE(u)->stop)
1986 return -EBADR;
1987
1988 unit_add_to_dbus_queue(u);
1989 unit_cgroup_freezer_action(u, FREEZER_THAW);
1990
1991 return UNIT_VTABLE(u)->stop(u);
1992 }
1993
1994 bool unit_can_stop(Unit *u) {
1995 assert(u);
1996
1997 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
1998 * Extrinsic units follow external state and they may stop following external state changes
1999 * (hence we return true here), but an attempt to do this through the manager will fail. */
2000
2001 if (!unit_type_supported(u->type))
2002 return false;
2003
2004 if (u->perpetual)
2005 return false;
2006
2007 return !!UNIT_VTABLE(u)->stop;
2008 }
2009
/* Errors:
 *         -EBADR:   This unit type does not support reloading.
 *         -ENOEXEC: Unit is not started.
 *         -EAGAIN:  An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        /* Reloading only makes sense for a running unit. */
        if (state != UNIT_ACTIVE)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        /* A frozen unit can't process the reload request, so thaw it first. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->reload(u);
}
2052
2053 bool unit_can_reload(Unit *u) {
2054 assert(u);
2055
2056 if (UNIT_VTABLE(u)->can_reload)
2057 return UNIT_VTABLE(u)->can_reload(u);
2058
2059 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2060 return true;
2061
2062 return UNIT_VTABLE(u)->reload;
2063 }
2064
/* Returns true if this StopWhenUnneeded= unit is currently active but no longer pinned by any
 * dependent unit (active, queued or about to restart), i.e. it may be stopped now. */
bool unit_is_unneeded(Unit *u) {
        Unit *other;
        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (unit_active_state(u) != UNIT_ACTIVE)
                return false;
        if (u->job)
                return false;

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                if (other->job)
                        return false;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        return false;

                if (unit_will_restart(other))
                        return false;
        }

        return true;
}
2094
/* Returns true (and the upholding unit in *ret_culprit, if requested) when this unit is down but
 * some active unit declared Upholds= on it, meaning it should be started again. */
bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks if the unit needs to be started because it currently is not running, but some other unit
         * that is active declared an Uphold= dependencies on it */

        /* Only a unit that is fully down and has no job pending can be upheld into starting. */
        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
                /* A unit with a pending job is in flux; don't act on it. */
                if (other->job)
                        continue;

                if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;
                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}
2124
/* Returns true (and the offending unit in *ret_culprit, if requested) when this active unit is
 * bound (BindsTo=) to a unit that is down, meaning this unit should be stopped. */
bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
         * because the other unit is down. */

        if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
                /* Don't clean up while the unit is transitioning or is even inactive. */
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
                /* A unit with a pending job is in flux; don't act on it. */
                if (other->job)
                        continue;

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;

                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}
2156
2157 static void check_unneeded_dependencies(Unit *u) {
2158 Unit *other;
2159 assert(u);
2160
2161 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2162
2163 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2164 unit_submit_to_stop_when_unneeded_queue(other);
2165 }
2166
2167 static void check_uphold_dependencies(Unit *u) {
2168 Unit *other;
2169 assert(u);
2170
2171 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2172
2173 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2174 unit_submit_to_start_when_upheld_queue(other);
2175 }
2176
2177 static void check_bound_by_dependencies(Unit *u) {
2178 Unit *other;
2179 assert(u);
2180
2181 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2182
2183 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2184 unit_submit_to_stop_when_bound_queue(other);
2185 }
2186
/* Called when a unit became active outside of a regular transaction: enqueue start jobs for its
 * requirement dependencies and stop jobs for its conflicts, as the job engine normally would.
 * Peers already covered by an After= ordering are skipped — the transaction handled those. */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Hard requirements fail the peer's start job if it cannot be enqueued (JOB_REPLACE)... */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        /* ...while soft requirements use JOB_FAIL, so an undoable start attempt is simply dropped. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2207
2208 static void retroactively_stop_dependencies(Unit *u) {
2209 Unit *other;
2210
2211 assert(u);
2212 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2213
2214 /* Pull down units which are bound to us recursively if enabled */
2215 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2216 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2217 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2218 }
2219
2220 void unit_start_on_failure(
2221 Unit *u,
2222 const char *dependency_name,
2223 UnitDependencyAtom atom,
2224 JobMode job_mode) {
2225
2226 int n_jobs = -1;
2227 Unit *other;
2228 int r;
2229
2230 assert(u);
2231 assert(dependency_name);
2232 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2233
2234 /* Act on OnFailure= and OnSuccess= dependencies */
2235
2236 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2237 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2238
2239 if (n_jobs < 0) {
2240 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2241 n_jobs = 0;
2242 }
2243
2244 r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
2245 if (r < 0)
2246 log_unit_warning_errno(
2247 u, r, "Failed to enqueue %s job, ignoring: %s",
2248 dependency_name, bus_error_message(&error, r));
2249 n_jobs ++;
2250 }
2251
2252 if (n_jobs >= 0)
2253 log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
2254 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2255 }
2256
2257 void unit_trigger_notify(Unit *u) {
2258 Unit *other;
2259
2260 assert(u);
2261
2262 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
2263 if (UNIT_VTABLE(other)->trigger_notify)
2264 UNIT_VTABLE(other)->trigger_notify(other, u);
2265 }
2266
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        /* Possibly raise (i.e. numerically lower) the log level to NOTICE or INFO when
         * the matching condition holds. Never lowers the severity. */
        int level = log_level;

        if (condition_notice && level > LOG_NOTICE)
                level = LOG_NOTICE;
        else if (condition_info && level > LOG_INFO)
                level = LOG_INFO;

        return level;
}
2274
/* Log a summary of the resources the unit consumed (CPU time, IO, IP traffic), both as a
 * human-readable MESSAGE= and as structured journal fields. Invoked whenever a unit enters
 * failed or dead state. Returns 0 on success (including when nothing is logged), negative
 * errno on allocation failure. */
static int unit_log_resources(Unit *u) {
        /* Worst case: one CPU field + all IP fields + all IO fields + the 4 fixed trailing fields */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
        size_t n_message_parts = 0, n_iovec = 0;
        /* 1 CPU part + 2 IO parts + 2 IP parts + NULL terminator for strv_join() */
        char* message_parts[1 + 2 + 2 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };
        const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
                [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
                [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
                [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                t = strjoin("consumed ", FORMAT_TIMESPAN(nsec / NSEC_PER_USEC, USEC_PER_MSEC), " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;

                log_level = raise_level(log_level,
                                        nsec > MENTIONWORTHY_CPU_NSEC,
                                        nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k]);

                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                have_io_accounting = true;
                if (value > 0)
                        any_io = true;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (k == CGROUP_IO_READ_BYTES) {
                        assert(!rr);
                        rr = strjoin("read ", strna(FORMAT_BYTES(value)), " from disk");
                        if (!rr) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (k == CGROUP_IO_WRITE_BYTES) {
                        assert(!wr);
                        wr = strjoin("written ", strna(FORMAT_BYTES(value)), " to disk");
                        if (!wr) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
        }

        if (have_io_accounting) {
                if (any_io) {
                        /* Hand ownership of the IO strings over to message_parts[], which is
                         * freed in the 'finish' cleanup below. */
                        if (rr)
                                message_parts[n_message_parts++] = TAKE_PTR(rr);
                        if (wr)
                                message_parts[n_message_parts++] = TAKE_PTR(wr);

                } else {
                        char *k;

                        k = strdup("no IO");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
        }

        /* This check is here because it is the earliest point following all possible log_level assignments. If
         * log_level is assigned anywhere after this point, move this check. */
        if (!unit_log_level_test(u, log_level)) {
                r = 0;
                goto finish;
        }

        if (have_ip_accounting) {
                if (any_traffic) {
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                _cleanup_free_ char *joined = NULL;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_unit_struct_iovec(u, log_level, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries: the first n_message_parts strings and the
         * first n_iovec iovec payloads (the 4 trailing iovecs are stack/static, see above). */
        for (size_t i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (size_t i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2508
2509 static void unit_update_on_console(Unit *u) {
2510 bool b;
2511
2512 assert(u);
2513
2514 b = unit_needs_console(u);
2515 if (u->on_console == b)
2516 return;
2517
2518 u->on_console = b;
2519 if (b)
2520 manager_ref_console(u->manager);
2521 else
2522 manager_unref_console(u->manager);
2523 }
2524
2525 static void unit_emit_audit_start(Unit *u) {
2526 assert(u);
2527
2528 if (u->type != UNIT_SERVICE)
2529 return;
2530
2531 /* Write audit record if we have just finished starting up */
2532 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2533 u->in_audit = true;
2534 }
2535
2536 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2537 assert(u);
2538
2539 if (u->type != UNIT_SERVICE)
2540 return;
2541
2542 if (u->in_audit) {
2543 /* Write audit record if we have just finished shutting down */
2544 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2545 u->in_audit = false;
2546 } else {
2547 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2548 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2549
2550 if (state == UNIT_INACTIVE)
2551 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2552 }
2553 }
2554
/* Propagate a unit's new active state 'ns' to its pending job 'j': finish the job if the
 * state completes it, or fail/invalidate it if the state contradicts it. Returns true if
 * the state change was NOT something the running job asked for ("unexpected"), which the
 * caller uses to decide about retroactive dependency handling. */
static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)

                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                /* Active/reloading completes a start; anything else while the start job is
                 * running (except the intermediate "activating" state) is unexpected. */
                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                /* A running reload finishes when the unit is back to "active"; the
                 * UNIT_NOTIFY_RELOAD_FAILURE flag decides whether it counts as failed. */
                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                /* Inactive/failed completes a stop; anything else while the stop job is
                 * running (except the intermediate "deactivating" state) fails it. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2628
/* Central state-change hook, called by the unit type implementations whenever a unit's
 * low-level state changes from 'os' to 'ns'. Propagates the change to the pending job,
 * dependencies, auditing, resource logging, emergency actions, and the deferred-work
 * queues. Side-effect ordering below is deliberate — keep it. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Clear both markers: a dead unit neither needs a reload nor a restart */
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, flags);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* Skip OnFailure= when the unit is about to be auto-restarted anyway */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }

                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE) &&
                    !(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= handling */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2781
/* Register 'pid' so that SIGCHLD/exit events for it are routed to this unit. The manager's
 * watch_pids hashmap maps the positive pid key to a single Unit; when several units watch
 * the same PID the negative pid key maps to a NULL-terminated Unit* array holding the
 * additional watchers. If 'exclusive', stale registrations of this PID by other units are
 * dropped first. Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pid(u->manager, pid);

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                /* After the scan above, n is the current length of the array (if any) */

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, with room for us plus the NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Finally, record the PID in our own per-unit set, too */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2852
2853 void unit_unwatch_pid(Unit *u, pid_t pid) {
2854 Unit **array;
2855
2856 assert(u);
2857 assert(pid_is_valid(pid));
2858
2859 /* First let's drop the unit in case it's keyed as "pid". */
2860 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2861
2862 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2863 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2864 if (array) {
2865 /* Let's iterate through the array, dropping our own entry */
2866
2867 size_t m = 0;
2868 for (size_t n = 0; array[n]; n++)
2869 if (array[n] != u)
2870 array[m++] = array[n];
2871 array[m] = NULL;
2872
2873 if (m == 0) {
2874 /* The array is now empty, remove the entire entry */
2875 assert_se(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2876 free(array);
2877 }
2878 }
2879
2880 (void) set_remove(u->pids, PID_TO_PTR(pid));
2881 }
2882
2883 void unit_unwatch_all_pids(Unit *u) {
2884 assert(u);
2885
2886 while (!set_isempty(u->pids))
2887 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2888
2889 u->pids = set_free(u->pids);
2890 }
2891
2892 static void unit_tidy_watch_pids(Unit *u) {
2893 pid_t except1, except2;
2894 void *e;
2895
2896 assert(u);
2897
2898 /* Cleans dead PIDs from our list */
2899
2900 except1 = unit_main_pid(u);
2901 except2 = unit_control_pid(u);
2902
2903 SET_FOREACH(e, u->pids) {
2904 pid_t pid = PTR_TO_PID(e);
2905
2906 if (pid == except1 || pid == except2)
2907 continue;
2908
2909 if (!pid_is_unwaited(pid))
2910 unit_unwatch_pid(u, pid);
2911 }
2912 }
2913
2914 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2915 Unit *u = userdata;
2916
2917 assert(s);
2918 assert(u);
2919
2920 unit_tidy_watch_pids(u);
2921 unit_watch_all_pids(u);
2922
2923 /* If the PID set is empty now, then let's finish this off. */
2924 unit_synthesize_cgroup_empty_event(u);
2925
2926 return 0;
2927 }
2928
/* Schedule a deferred, idle-priority pass over the unit's watched PIDs (see
 * on_rewatch_pids_event() above). Only needed on non-unified cgroup setups; on unified we
 * rely on cgroup empty notifications instead. Returns 0 on success (or when nothing needs
 * doing), -ENOENT without a cgroup, or another negative errno on failure. */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                /* Lazily create the deferred event source on first use; it is kept around
                 * and merely re-enabled on subsequent calls. */
                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        /* ONESHOT: fire once on the next event loop iteration, then disable itself */
        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
2969
2970 void unit_dequeue_rewatch_pids(Unit *u) {
2971 int r;
2972 assert(u);
2973
2974 if (!u->rewatch_pids_event_source)
2975 return;
2976
2977 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2978 if (r < 0)
2979 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2980
2981 u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
2982 }
2983
2984 bool unit_job_is_applicable(Unit *u, JobType j) {
2985 assert(u);
2986 assert(j >= 0 && j < _JOB_TYPE_MAX);
2987
2988 switch (j) {
2989
2990 case JOB_VERIFY_ACTIVE:
2991 case JOB_START:
2992 case JOB_NOP:
2993 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2994 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2995 * jobs for it. */
2996 return true;
2997
2998 case JOB_STOP:
2999 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3000 * external events), hence it makes no sense to permit enqueuing such a request either. */
3001 return !u->perpetual;
3002
3003 case JOB_RESTART:
3004 case JOB_TRY_RESTART:
3005 return unit_can_stop(u) && unit_can_start(u);
3006
3007 case JOB_RELOAD:
3008 case JOB_TRY_RELOAD:
3009 return unit_can_reload(u);
3010
3011 case JOB_RELOAD_OR_START:
3012 return unit_can_reload(u) && unit_can_start(u);
3013
3014 default:
3015 assert_not_reached();
3016 }
3017 }
3018
/* Register dependency 'd' from 'u' on 'other' (with ownership mask 'mask'), plus the
 * matching inverse dependency on 'other', and optionally a References=/ReferencedBy= pair.
 * Invalid combinations (e.g. Triggers= on a type that can't trigger) are either silently
 * ignored with a warning or rejected with -EINVAL, as per the checks below. Returns 0 on
 * success or if the dependency was ignored, negative errno on failure. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        /* Maps each dependency type to the inverse type recorded on the other unit */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_IN_SLICE] = UNIT_SLICE_OF,
                [UNIT_SLICE_OF] = UNIT_IN_SLICE,
        };
        Unit *original_u = u, *original_other = other;
        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        bool noop;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                /* Warn with the pre-merge units, as those carry the names the user wrote */
                unit_maybe_warn_about_dependency(original_u, original_other->id, d);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_hashmap(&u->dependencies, d, other, mask, 0);
        if (r < 0)
                return r;
        noop = !r; /* r == 0 means the dependency already existed */

        if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
                r = unit_add_dependency_hashmap(&other->dependencies, inverse_table[d], u, 0, mask);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;
        }

        if (add_reference) {
                r = unit_add_dependency_hashmap(&u->dependencies, UNIT_REFERENCES, other, mask, 0);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;

                r = unit_add_dependency_hashmap(&other->dependencies, UNIT_REFERENCED_BY, u, 0, mask);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;
        }

        /* Only announce the change on the bus if something actually changed */
        if (!noop)
                unit_add_to_dbus_queue(u);

        return 0;
}
3151
3152 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3153 int r;
3154
3155 assert(u);
3156
3157 r = unit_add_dependency(u, d, other, add_reference, mask);
3158 if (r < 0)
3159 return r;
3160
3161 return unit_add_dependency(u, e, other, add_reference, mask);
3162 }
3163
3164 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3165 int r;
3166
3167 assert(u);
3168 assert(name);
3169 assert(buf);
3170 assert(ret);
3171
3172 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3173 *buf = NULL;
3174 *ret = name;
3175 return 0;
3176 }
3177
3178 if (u->instance)
3179 r = unit_name_replace_instance(name, u->instance, buf);
3180 else {
3181 _cleanup_free_ char *i = NULL;
3182
3183 r = unit_name_to_prefix(u->id, &i);
3184 if (r < 0)
3185 return r;
3186
3187 r = unit_name_replace_instance(name, i, buf);
3188 }
3189 if (r < 0)
3190 return r;
3191
3192 *ret = *buf;
3193 return 0;
3194 }
3195
3196 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3197 _cleanup_free_ char *buf = NULL;
3198 Unit *other;
3199 int r;
3200
3201 assert(u);
3202 assert(name);
3203
3204 r = resolve_template(u, name, &buf, &name);
3205 if (r < 0)
3206 return r;
3207
3208 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3209 return 0;
3210
3211 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3212 if (r < 0)
3213 return r;
3214
3215 return unit_add_dependency(u, d, other, add_reference, mask);
3216 }
3217
3218 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3219 _cleanup_free_ char *buf = NULL;
3220 Unit *other;
3221 int r;
3222
3223 assert(u);
3224 assert(name);
3225
3226 r = resolve_template(u, name, &buf, &name);
3227 if (r < 0)
3228 return r;
3229
3230 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3231 return 0;
3232
3233 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3234 if (r < 0)
3235 return r;
3236
3237 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3238 }
3239
/* Overrides the unit search path via $SYSTEMD_UNIT_PATH. This is mostly for debug
 * purposes. Returns 0 on success, a negative errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3244
3245 char *unit_dbus_path(Unit *u) {
3246 assert(u);
3247
3248 if (!u->id)
3249 return NULL;
3250
3251 return unit_dbus_path_from_name(u->id);
3252 }
3253
3254 char *unit_dbus_path_invocation_id(Unit *u) {
3255 assert(u);
3256
3257 if (sd_id128_is_null(u->invocation_id))
3258 return NULL;
3259
3260 return unit_dbus_path_from_name(u->invocation_id_string);
3261 }
3262
/* Registers 'id' as the unit's invocation ID and indexes the unit by it in the
 * manager's units_by_invocation_id hashmap. On any failure the invocation ID is
 * reset to null rather than rolled back to the previous value. Returns 0 on
 * success (or no-op), negative errno on failure. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0; /* Already set to this very ID, nothing to do. */

        /* Drop the old hashmap entry before installing a new one. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear"; reuse the reset path with a success return code. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Store the ID first: the hashmap key below points into the unit itself. */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Either asked to clear, or something failed: wipe both representations. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3299
/* Assigns 'slice' as u's parent slice by installing a UNIT_IN_SLICE dependency.
 * Returns 1 if the slice was (re)assigned, 0 if it was already set to 'slice',
 * and a negative errno if the assignment is not permitted. The order of the
 * guards below determines which error code wins, so do not reorder them. */
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Refuse re-parenting units that are already running. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it must always live directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u) && u->cgroup_realized)
                return -EBUSY;

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}
3343
/* Picks and assigns a default slice for 'u' if none has been configured yet:
 * instantiated units get a per-template slice ("system-<prefix>.slice" or
 * "app-<prefix>.slice"), extrinsic units stay in the root slice, and everything
 * else lands in system.slice/app.slice depending on the manager type. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* An explicitly configured slice always wins. */
        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                /* strjoina() allocates on this function's stack, hence the result
                 * stays valid until we return even though it's built in a branch. */
                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3395
3396 const char *unit_slice_name(Unit *u) {
3397 Unit *slice;
3398 assert(u);
3399
3400 slice = UNIT_GET_SLICE(u);
3401 if (!slice)
3402 return NULL;
3403
3404 return slice->id;
3405 }
3406
3407 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3408 _cleanup_free_ char *t = NULL;
3409 int r;
3410
3411 assert(u);
3412 assert(type);
3413 assert(_found);
3414
3415 r = unit_name_change_suffix(u->id, type, &t);
3416 if (r < 0)
3417 return r;
3418 if (unit_has_name(u, t))
3419 return -EINVAL;
3420
3421 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3422 assert(r < 0 || *_found != u);
3423 return r;
3424 }
3425
/* D-Bus match callback for NameOwnerChanged signals on a watched bus name.
 * Forwards the new owner (NULL when the name became unowned) to the unit type's
 * bus_name_owner_change() hook. Always returns 0 so bus dispatching continues. */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        /* NameOwnerChanged carries (name, old_owner, new_owner); only the last matters here. */
        r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
        if (r < 0) {
                /* A malformed signal is logged but must not fail dispatching. */
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));

        return 0;
}
3445
/* Reply callback for the async GetNameOwner() call issued in
 * unit_install_bus_match(). Translates the reply (or a NameHasNoOwner error)
 * into a bus_name_owner_change() notification, then releases the call slot. */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        /* This was a one-shot call; drop the slot so a new watch can be installed later. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* NameHasNoOwner is the expected "nobody owns it" answer; anything else is logged. */
                if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner")) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                /* The bus driver never reports an empty owner on success. */
                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3480
/* Installs a NameOwnerChanged match for 'name' on 'bus' and kicks off an async
 * GetNameOwner() query to learn the current owner. Returns -EBUSY when a watch
 * is already in place. On failure of the async call the match slot is released
 * again so the unit is left without a half-installed watch. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        /* Only one watch per unit is supported. */
        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        /* Install the match first so no ownership change can slip through
         * between the GetNameOwner() reply and the subscription. */
        r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
        if (r < 0)
                return r;

        r = sd_bus_call_method_async(
                        bus,
                        &u->get_name_owner_slot,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner",
                        get_name_owner_handler,
                        u,
                        "s", name);
        if (r < 0) {
                /* Roll back the match so the caller can retry cleanly. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3521
/* Registers 'u' as the watcher of bus name 'name': installs the bus match right
 * away if the API bus is up, and records the name in the manager's watch_bus
 * hashmap either way (bus_setup_api() installs the match later otherwise).
 * On hashmap failure any slots installed above are released again. */
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Undo the match installed above so we don't keep a watch
                 * that the manager no longer knows about. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3548
/* Reverses unit_watch_bus_name(): drops the watch_bus registration (quietly, in
 * case it was never added) and releases both bus slots. */
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}
3557
3558 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3559 _cleanup_free_ char *e = NULL;
3560 Unit *device;
3561 int r;
3562
3563 assert(u);
3564
3565 /* Adds in links to the device node that this unit is based on */
3566 if (isempty(what))
3567 return 0;
3568
3569 if (!is_device_path(what))
3570 return 0;
3571
3572 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3573 if (!unit_type_supported(UNIT_DEVICE))
3574 return 0;
3575
3576 r = unit_name_from_path(what, ".device", &e);
3577 if (r < 0)
3578 return r;
3579
3580 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3581 if (r < 0)
3582 return r;
3583
3584 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3585 dep = UNIT_BINDS_TO;
3586
3587 return unit_add_two_dependencies(u, UNIT_AFTER,
3588 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3589 device, true, mask);
3590 }
3591
3592 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3593 _cleanup_free_ char *escaped = NULL, *target = NULL;
3594 int r;
3595
3596 assert(u);
3597
3598 if (isempty(what))
3599 return 0;
3600
3601 if (!path_startswith(what, "/dev/"))
3602 return 0;
3603
3604 /* If we don't support devices, then also don't bother with blockdev@.target */
3605 if (!unit_type_supported(UNIT_DEVICE))
3606 return 0;
3607
3608 r = unit_name_path_escape(what, &escaped);
3609 if (r < 0)
3610 return r;
3611
3612 r = unit_name_build("blockdev", escaped, ".target", &target);
3613 if (r < 0)
3614 return r;
3615
3616 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3617 }
3618
/* Re-attaches runtime state after deserialization: restores bus-name tracking
 * refs, runs the unit type's coldplug() hook, and coldplugs any pending jobs.
 * All steps are attempted; the first error encountered is remembered in 'r' and
 * returned at the end. */
int unit_coldplug(Unit *u) {
        int r = 0, q;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-establish bus-name tracking saved before the reload/reexec. */
        STRV_FOREACH(i, u->deserialized_refs) {
                q = bus_unit_track_add_name(u, *i);
                if (q < 0 && r >= 0)
                        r = q;
        }
        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug) {
                q = UNIT_VTABLE(u)->coldplug(u);
                if (q < 0 && r >= 0)
                        r = q;
        }

        /* Jobs carry their own deserialized state that needs re-attaching too. */
        if (u->job) {
                q = job_coldplug(u->job);
                if (q < 0 && r >= 0)
                        r = q;
        }
        if (u->nop_job) {
                q = job_coldplug(u->nop_job);
                if (q < 0 && r >= 0)
                        r = q;
        }

        return r;
}
3656
/* Lets the unit catch up with events that happened while the manager was not
 * watching (e.g. across reexec): runs the type-specific catchup() hook and
 * refreshes cgroup state. */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3665
3666 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3667 struct stat st;
3668
3669 if (!path)
3670 return false;
3671
3672 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3673 * are never out-of-date. */
3674 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3675 return false;
3676
3677 if (stat(path, &st) < 0)
3678 /* What, cannot access this anymore? */
3679 return true;
3680
3681 if (path_masked)
3682 /* For masked files check if they are still so */
3683 return !null_or_empty(&st);
3684 else
3685 /* For non-empty files check the mtime */
3686 return timespec_load(&st.st_mtim) > mtime;
3687
3688 return false;
3689 }
3690
/* Returns true when any of the unit's on-disk configuration (fragment, source
 * path, or drop-ins) changed since it was loaded, i.e. a daemon-reload would
 * pick up different settings for it. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* A changed *set* of drop-ins (added or removed files) also requires a reload. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3717
/* Clears the unit's failure state: runs the type-specific reset hook and resets
 * the start rate limit counter plus the start-limit-hit flag. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3727
3728 Unit *unit_following(Unit *u) {
3729 assert(u);
3730
3731 if (UNIT_VTABLE(u)->following)
3732 return UNIT_VTABLE(u)->following(u);
3733
3734 return NULL;
3735 }
3736
/* Returns true when a stop job is queued for the unit. */
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3748
3749 bool unit_inactive_or_pending(Unit *u) {
3750 assert(u);
3751
3752 /* Returns true if the unit is inactive or going down */
3753
3754 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3755 return true;
3756
3757 if (unit_stop_pending(u))
3758 return true;
3759
3760 return false;
3761 }
3762
3763 bool unit_active_or_pending(Unit *u) {
3764 assert(u);
3765
3766 /* Returns true if the unit is active or going up */
3767
3768 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3769 return true;
3770
3771 if (u->job &&
3772 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3773 return true;
3774
3775 return false;
3776 }
3777
/* Default will_restart() implementation for unit types: a restart is considered
 * scheduled when a start job is queued. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
3783
3784 bool unit_will_restart(Unit *u) {
3785 assert(u);
3786
3787 if (!UNIT_VTABLE(u)->will_restart)
3788 return false;
3789
3790 return UNIT_VTABLE(u)->will_restart(u);
3791 }
3792
/* Sends 'signo' to the unit's processes as selected by 'w', by dispatching to
 * the type-specific kill() hook. Returns -EOPNOTSUPP for types that do not
 * implement killing; bus-level failures are reported via 'error'. */
int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
        assert(u);
        assert(w >= 0 && w < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));

        if (!UNIT_VTABLE(u)->kill)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->kill(u, w, signo, error);
}
3803
/* Forwards a cgroup OOM event to the unit type's handler, if it has one.
 * 'managed_oom' distinguishes systemd-oomd kills from kernel OOM-killer events. */
void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}
3810
3811 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3812 _cleanup_set_free_ Set *pid_set = NULL;
3813 int r;
3814
3815 pid_set = set_new(NULL);
3816 if (!pid_set)
3817 return NULL;
3818
3819 /* Exclude the main/control pids from being killed via the cgroup */
3820 if (main_pid > 0) {
3821 r = set_put(pid_set, PID_TO_PTR(main_pid));
3822 if (r < 0)
3823 return NULL;
3824 }
3825
3826 if (control_pid > 0) {
3827 r = set_put(pid_set, PID_TO_PTR(control_pid));
3828 if (r < 0)
3829 return NULL;
3830 }
3831
3832 return TAKE_PTR(pid_set);
3833 }
3834
/* Per-process logging callback for cg_kill_recursive() in unit_kill_common():
 * logs each PID (with its comm name, best effort) that is about to be signalled.
 * Returns 1 so the kill proceeds. */
static int kill_common_log(pid_t pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = userdata;

        assert(u);

        (void) get_process_comm(pid, &comm);
        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid, strna(comm));

        return 1;
}
3847
/* Common implementation of explicit, client-requested killing, shared by unit
 * types: signals the main process, the control process and/or the remaining
 * cgroup members depending on 'who'. The first failure is recorded in both the
 * bus 'error' and the return value, but the remaining targets are still
 * attempted. The *_FAIL variants additionally fail with NO_SUCH_PROCESS when
 * nothing was actually signalled. */
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {

        int r = 0;
        bool killed = false;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        /* A negative PID means the unit type has no such process concept at all;
         * zero means it has the concept but no process right now. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        if (kill(control_pid, signo) < 0) {
                                /* Report this failure both to the logs and to the client */
                                /* NOTE(review): errno is read twice here; if sd_bus_error_set_errnof()
                                 * clobbers errno, the logged code may differ from the reported one — confirm. */
                                sd_bus_error_set_errnof(
                                                error, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), control_pid, strna(comm));
                                killed = true;
                        }
                }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        if (kill(main_pid, signo) < 0) {
                                /* Only the first failure is reported to the client ('error' is kept). */
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, errno,
                                                        "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
                                                        signal_to_string(signo), main_pid, strna(comm));

                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), main_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), main_pid, strna(comm));
                                killed = true;
                        }
                }

        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return log_oom();

                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (q < 0) {
                        /* An empty or already-gone cgroup is not an error. */
                        if (!IN_SET(q, -ESRCH, -ENOENT)) {
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, q,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));

                                r = log_unit_warning_errno(
                                                u, q,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));
                        }
                } else
                        killed = true;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return r;
}
3955
/* Collects into *s the set of units that follow this one (per the type's
 * following_set() hook). Types without the hook yield a NULL set and success. */
int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (UNIT_VTABLE(u)->following_set)
                return UNIT_VTABLE(u)->following_set(u, s);

        *s = NULL;
        return 0;
}
3966
/* Returns the unit file state (enabled/disabled/static/…) for the unit,
 * computing and caching it on first call. A lookup failure is cached as
 * UNIT_FILE_BAD rather than retried. */
UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        /* Only query once, and only for units that have a fragment on disk. */
        if (u->unit_file_state < 0 && u->fragment_path) {
                r = unit_file_get_state(
                                u->manager->unit_file_scope,
                                NULL,
                                u->id,
                                &u->unit_file_state);
                if (r < 0)
                        u->unit_file_state = UNIT_FILE_BAD;
        }

        return u->unit_file_state;
}
3984
/* Returns the cached preset verdict for the unit's fragment (enable/disable per
 * preset files), querying and caching it on first call. Negative values either
 * mean "not yet queried" or a propagated query error. */
int unit_get_unit_file_preset(Unit *u) {
        assert(u);

        if (u->unit_file_preset < 0 && u->fragment_path)
                u->unit_file_preset = unit_file_query_preset(
                                u->manager->unit_file_scope,
                                NULL,
                                basename(u->fragment_path),
                                NULL);

        return u->unit_file_preset;
}
3997
/* Points 'ref' (owned by 'source') at 'target', unregistering any previous
 * target first, and links the ref into target's refs_by_target list so the
 * target knows who references it. Returns 'target' for convenient chaining. */
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        /* Re-targeting an already-set ref: drop the old registration first. */
        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}
4011
/* Clears a UnitRef set up with unit_ref_set(): unlinks it from the target's
 * reference list and queues the target for GC, since dropping this reference
 * may make it collectable. Safe to call on an unset ref. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
4025
/* Derives a dynamic user name from the unit's name prefix. If the prefix is not
 * a valid user/group name, falls back to a stable "_du" + 64-bit siphash of the
 * prefix, so the mapping stays deterministic across invocations. */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed siphash key: the derived name must be stable across daemon restarts. */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n, 0)) {
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
4051
/* Patches manager-level defaults and derived policy into the unit's exec and
 * cgroup contexts after all settings have been parsed: copies default rlimits,
 * sets a working directory for user managers, strips capabilities implied by
 * the Protect*= options, enforces the DynamicUser= sandbox, and wires up the
 * device-access rules (and modprobe@ deps) implied by RootImage=/MountImages=
 * and ProtectClock=. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to $HOME. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Each Protect*= option drops the capabilities that would let the
                 * service circumvent it. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* Default the user (and group) name from the unit name. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                if ((ec->root_image || !LIST_IS_EMPTY(ec->mount_images)) &&
                    (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
                        const char *p;

                        /* When RootImage= or MountImages= is specified, the following devices are touched. */
                        FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                r = cgroup_add_device_allow(cc, p, "rw");
                                if (r < 0)
                                        return r;
                        }
                        FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                r = cgroup_add_device_allow(cc, p, "rwm");
                                if (r < 0)
                                        return r;
                        }

                        /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                         * Same for mapper and verity. */
                        FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                if (r < 0)
                                        return r;
                        }
                }

                if (ec->protect_clock) {
                        /* Read-only RTC access remains allowed even with ProtectClock=. */
                        r = cgroup_add_device_allow(cc, "char-rtc", "r");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4168
4169 ExecContext *unit_get_exec_context(const Unit *u) {
4170 size_t offset;
4171 assert(u);
4172
4173 if (u->type < 0)
4174 return NULL;
4175
4176 offset = UNIT_VTABLE(u)->exec_context_offset;
4177 if (offset <= 0)
4178 return NULL;
4179
4180 return (ExecContext*) ((uint8_t*) u + offset);
4181 }
4182
4183 KillContext *unit_get_kill_context(Unit *u) {
4184 size_t offset;
4185 assert(u);
4186
4187 if (u->type < 0)
4188 return NULL;
4189
4190 offset = UNIT_VTABLE(u)->kill_context_offset;
4191 if (offset <= 0)
4192 return NULL;
4193
4194 return (KillContext*) ((uint8_t*) u + offset);
4195 }
4196
4197 CGroupContext *unit_get_cgroup_context(Unit *u) {
4198 size_t offset;
4199
4200 if (u->type < 0)
4201 return NULL;
4202
4203 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4204 if (offset <= 0)
4205 return NULL;
4206
4207 return (CGroupContext*) ((uint8_t*) u + offset);
4208 }
4209
4210 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4211 size_t offset;
4212
4213 if (u->type < 0)
4214 return NULL;
4215
4216 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4217 if (offset <= 0)
4218 return NULL;
4219
4220 return *(ExecRuntime**) ((uint8_t*) u + offset);
4221 }
4222
4223 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4224 assert(u);
4225
4226 if (UNIT_WRITE_FLAGS_NOOP(flags))
4227 return NULL;
4228
4229 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4230 return u->manager->lookup_paths.transient;
4231
4232 if (flags & UNIT_PERSISTENT)
4233 return u->manager->lookup_paths.persistent_control;
4234
4235 if (flags & UNIT_RUNTIME)
4236 return u->manager->lookup_paths.runtime_control;
4237
4238 return NULL;
4239 }
4240
/* Escapes 's' per 'flags' (specifier escaping and/or C escaping). Ownership
 * contract: with 'buf', *buf receives the allocation (or NULL when no escaping
 * happened, in which case the input pointer is returned as-is); without 'buf',
 * the return value is always a fresh allocation the caller must free. Returns
 * NULL on allocation failure. */
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* cescape() reads 's' (possibly the specifier-escaped buffer), so the
                 * intermediate is freed only afterwards. free(NULL) is fine when no
                 * specifier escaping took place. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        return ret ?: strdup(s);
}
4280
/* Escapes each entry of 'l' (per 'flags'), wraps it in double quotes and joins
 * the results with single spaces, e.g. for rendering ExecStart= lines. Returns
 * the newly allocated string, or NULL on allocation failure. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                /* Space needed for this entry: optional separator, two quotes, the text. */
                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                /* Append in place; 'n' always tracks the length written so far. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* NUL-terminate (also yields "" for an empty input list). */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4320
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        /* Persists one unit setting: either appended to the transient unit file currently being written,
         * or stored in a new drop-in file below the unit's drop-in directory. 'name' is only used to derive
         * the drop-in file name. Returns 0 on success, negative errno on failure. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* Note: strjoina() allocates on the stack, hence 'data' stays valid until we return. */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* Compute the drop-in directory (p) and file (q) paths, at drop-in priority 50. */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in so it is honoured on reload and can be cleaned up later. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership transferred to u->dropin_paths */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4406
4407 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4408 _cleanup_free_ char *p = NULL;
4409 va_list ap;
4410 int r;
4411
4412 assert(u);
4413 assert(name);
4414 assert(format);
4415
4416 if (UNIT_WRITE_FLAGS_NOOP(flags))
4417 return 0;
4418
4419 va_start(ap, format);
4420 r = vasprintf(&p, format, ap);
4421 va_end(ap);
4422
4423 if (r < 0)
4424 return -ENOMEM;
4425
4426 return unit_write_setting(u, flags, name, p);
4427 }
4428
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        /* Turns this unit into a transient one: opens a fresh unit file for it in the transient lookup path
         * and drops all previously loaded configuration, so that subsequent unit_write_setting() calls land
         * in that file. Returns 0 on success, negative errno on failure. */

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Drop all other configuration sources — the transient file is now authoritative. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Reset load state so that the unit is reloaded from the transient file later on. */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4474
4475 static int log_kill(pid_t pid, int sig, void *userdata) {
4476 _cleanup_free_ char *comm = NULL;
4477
4478 (void) get_process_comm(pid, &comm);
4479
4480 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4481 only, like for example systemd's own PAM stub process. */
4482 if (comm && comm[0] == '(')
4483 return 0;
4484
4485 log_unit_notice(userdata,
4486 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4487 pid,
4488 strna(comm),
4489 signal_to_string(sig));
4490
4491 return 1;
4492 }
4493
4494 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4495 assert(c);
4496
4497 switch (k) {
4498
4499 case KILL_TERMINATE:
4500 case KILL_TERMINATE_AND_LOG:
4501 *noteworthy = false;
4502 return c->kill_signal;
4503
4504 case KILL_RESTART:
4505 *noteworthy = false;
4506 return restart_kill_signal(c);
4507
4508 case KILL_KILL:
4509 *noteworthy = true;
4510 return c->final_kill_signal;
4511
4512 case KILL_WATCHDOG:
4513 *noteworthy = true;
4514 return c->watchdog_signal;
4515
4516 default:
4517 assert_not_reached();
4518 }
4519 }
4520
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill; /* Unusual signal → log every process we kill */

        /* SendSIGHUP= applies to termination only, and is pointless if the kill signal already is SIGHUP. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* An alien main PID (not forked off by us) may not be waitable, hence don't wait. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* In control-group mode (or mixed mode when doing the final kill) also sweep the whole cgroup. */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        /* EAGAIN/ESRCH/ENOENT mean "nothing (left) to kill" — not worth warning about. */
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set — the previous call consumed/modified it. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                         SIGHUP,
                                                         CGROUP_IGNORE_SELF,
                                                         pid_set,
                                                         NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4639
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(u->requires_mounts_for, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Work on a private copy, since path_simplify() modifies its argument in place. */
        _cleanup_free_ char *p = strdup(path);
        if (!p)
                return -ENOMEM;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        path = path_simplify(p);

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        r = hashmap_ensure_put(&u->requires_mounts_for, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Register this unit in the manager-wide prefix table, for every prefix of the path. */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* ownership transferred to the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4714
4715 int unit_setup_exec_runtime(Unit *u) {
4716 ExecRuntime **rt;
4717 size_t offset;
4718 Unit *other;
4719 int r;
4720
4721 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4722 assert(offset > 0);
4723
4724 /* Check if there already is an ExecRuntime for this unit? */
4725 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4726 if (*rt)
4727 return 0;
4728
4729 /* Try to get it from somebody else */
4730 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_JOINS_NAMESPACE_OF) {
4731 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4732 if (r == 1)
4733 return 1;
4734 }
4735
4736 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4737 }
4738
4739 int unit_setup_dynamic_creds(Unit *u) {
4740 ExecContext *ec;
4741 DynamicCreds *dcreds;
4742 size_t offset;
4743
4744 assert(u);
4745
4746 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4747 assert(offset > 0);
4748 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4749
4750 ec = unit_get_exec_context(u);
4751 assert(ec);
4752
4753 if (!ec->dynamic_user)
4754 return 0;
4755
4756 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4757 }
4758
4759 bool unit_type_supported(UnitType t) {
4760 if (_unlikely_(t < 0))
4761 return false;
4762 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4763 return false;
4764
4765 if (!unit_vtable[t]->supported)
4766 return true;
4767
4768 return unit_vtable[t]->supported();
4769 }
4770
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        /* Logs a notice if 'where' is a non-empty directory that is about to be mounted over. */

        if (!unit_log_level_test(u, LOG_NOTICE))
                return;

        r = dir_is_empty(where);
        if (r > 0 || r == -ENOTDIR) /* empty, or not a directory at all — nothing to warn about */
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_unit_struct(u, LOG_NOTICE,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                        "WHERE=%s", where);
}
4794
int unit_fail_if_noncanonical(Unit *u, const char* where) {
        _cleanup_free_ char *canonical_where = NULL;
        int r;

        assert(u);
        assert(where);

        /* Returns -ELOOP (after logging) if 'where' contains a symlink, 0 otherwise. Best-effort: if the
         * path cannot be resolved at all, the check is skipped rather than failing the mount. */

        r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
                return 0;
        }

        /* We will happily ignore a trailing slash (or any redundant slashes) */
        if (path_equal(where, canonical_where))
                return 0;

        /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
        log_unit_struct(u, LOG_ERR,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
                        "WHERE=%s", where);

        return -ELOOP;
}
4821
4822 bool unit_is_pristine(Unit *u) {
4823 assert(u);
4824
4825 /* Check if the unit already exists or is already around,
4826 * in a number of different ways. Note that to cater for unit
4827 * types such as slice, we are generally fine with units that
4828 * are marked UNIT_LOADED even though nothing was actually
4829 * loaded, as those unit types don't require a file on disk. */
4830
4831 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4832 u->fragment_path ||
4833 u->source_path ||
4834 !strv_isempty(u->dropin_paths) ||
4835 u->job ||
4836 u->merged_into);
4837 }
4838
4839 pid_t unit_control_pid(Unit *u) {
4840 assert(u);
4841
4842 if (UNIT_VTABLE(u)->control_pid)
4843 return UNIT_VTABLE(u)->control_pid(u);
4844
4845 return 0;
4846 }
4847
4848 pid_t unit_main_pid(Unit *u) {
4849 assert(u);
4850
4851 if (UNIT_VTABLE(u)->main_pid)
4852 return UNIT_VTABLE(u)->main_pid(u);
4853
4854 return 0;
4855 }
4856
4857 static void unit_unref_uid_internal(
4858 Unit *u,
4859 uid_t *ref_uid,
4860 bool destroy_now,
4861 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4862
4863 assert(u);
4864 assert(ref_uid);
4865 assert(_manager_unref_uid);
4866
4867 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4868 * gid_t are actually the same time, with the same validity rules.
4869 *
4870 * Drops a reference to UID/GID from a unit. */
4871
4872 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4873 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4874
4875 if (!uid_is_valid(*ref_uid))
4876 return;
4877
4878 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4879 *ref_uid = UID_INVALID;
4880 }
4881
/* Drops the unit's UID reference, if one is held. */
static void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4885
/* Drops the unit's GID reference, if one is held. Reuses the UID helper, since uid_t and gid_t are the same type. */
static void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4889
/* Drops both the UID and the GID references of this unit, if taken. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4896
4897 static int unit_ref_uid_internal(
4898 Unit *u,
4899 uid_t *ref_uid,
4900 uid_t uid,
4901 bool clean_ipc,
4902 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4903
4904 int r;
4905
4906 assert(u);
4907 assert(ref_uid);
4908 assert(uid_is_valid(uid));
4909 assert(_manager_ref_uid);
4910
4911 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4912 * are actually the same type, and have the same validity rules.
4913 *
4914 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4915 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4916 * drops to zero. */
4917
4918 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4919 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4920
4921 if (*ref_uid == uid)
4922 return 0;
4923
4924 if (uid_is_valid(*ref_uid)) /* Already set? */
4925 return -EBUSY;
4926
4927 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4928 if (r < 0)
4929 return r;
4930
4931 *ref_uid = uid;
4932 return 1;
4933 }
4934
/* Takes a reference on the given UID on behalf of this unit. */
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4938
/* Takes a reference on the given GID on behalf of this unit. Reuses the UID helper (uid_t == gid_t). */
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4942
4943 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4944 int r = 0, q = 0;
4945
4946 assert(u);
4947
4948 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4949
4950 if (uid_is_valid(uid)) {
4951 r = unit_ref_uid(u, uid, clean_ipc);
4952 if (r < 0)
4953 return r;
4954 }
4955
4956 if (gid_is_valid(gid)) {
4957 q = unit_ref_gid(u, gid, clean_ipc);
4958 if (q < 0) {
4959 if (r > 0)
4960 unit_unref_uid(u, false);
4961
4962 return q;
4963 }
4964 }
4965
4966 return r > 0 || q > 0;
4967 }
4968
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        /* Takes UID/GID references for this unit. Whether the IPC objects should be removed when the
         * reference is dropped depends on the unit's exec context (RemoveIPC= setting), if any. */

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        return r;
}
4983
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0) /* New reference taken → unit properties changed, announce on the bus */
                unit_add_to_dbus_queue(u);
}
4997
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        /* Generates a fresh random invocation ID for this unit and installs it. */

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        /* The invocation ID is exposed as a unit property, hence announce the change. */
        unit_add_to_dbus_queue(u);
        return 0;
}
5015
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Fills in the ExecParameters structure with settings derived from the manager and this unit,
         * in preparation for spawning a process for the unit. */

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials = u->manager->received_credentials;

        return 0;
}
5040
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* Child only from here on: reset signal dispositions inherited from the manager. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        /* Make sure the helper dies when the manager process does. */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5071
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret_pid);

        /* Forks off a helper inside the unit's cgroup that recursively removes the given paths, and
         * registers the child with the unit so its exit is noticed. Returns 0 in the parent (with
         * *ret_pid set); the child never returns. */

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* Child: remove each path, remembering whether anything failed. */
                int ret = EXIT_SUCCESS;

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        /* Parent: watch the child so we get notified when it exits. */
        r = unit_watch_pid(u, pid, true);
        if (r < 0)
                return r;

        *ret_pid = pid;
        return 0;
}
5103
5104 static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
5105 assert(deps);
5106 assert(other);
5107
5108 if (di.origin_mask == 0 && di.destination_mask == 0)
5109 /* No bit set anymore, let's drop the whole entry */
5110 assert_se(hashmap_remove(deps, other));
5111 else
5112 /* Mask was reduced, let's update the entry */
5113 assert_se(hashmap_update(deps, other, di.data) == 0);
5114 }
5115
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from 'deps', which invalidates the
                 * iterator; hence restart the inner iteration after every modification until a full pass
                 * finds nothing left to do. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* Skip entries that have no bits from 'mask' set */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                /* The other unit may have become garbage-collectable now */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5168
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        /* Returns the path of the per-unit invocation ID symlink: below /run/systemd/units/ for the
         * system manager, below the user's runtime directory for user managers. */

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;
                r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
                if (r < 0)
                        return r;
                p = strjoin(user_path, u->id);
        }

        if (!p)
                return -ENOMEM;

        *ret = p; /* caller takes ownership */
        return 0;
}
5192
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        /* Exports the unit's invocation ID as a symlink whose target is the ID string, so that it can be
         * read back with a single readlink(). Idempotent. */

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id)) /* no invocation ID assigned yet */
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlink_atomic_label(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5216
5217 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5218 const char *p;
5219 char buf[2];
5220 int r;
5221
5222 assert(u);
5223 assert(c);
5224
5225 if (u->exported_log_level_max)
5226 return 0;
5227
5228 if (c->log_level_max < 0)
5229 return 0;
5230
5231 assert(c->log_level_max <= 7);
5232
5233 buf[0] = '0' + c->log_level_max;
5234 buf[1] = 0;
5235
5236 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5237 r = symlink_atomic(buf, p);
5238 if (r < 0)
5239 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5240
5241 u->exported_log_level_max = true;
5242 return 0;
5243 }
5244
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        int r;

        /* Exports LogExtraFields= to a regular file below /run/systemd/units/. The on-disk format is a
         * sequence of (little-endian 64-bit length, payload) pairs, one per field, written atomically via
         * a temporary file that is renamed into place. Idempotent. */

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        /* Interleave length prefixes and payloads so that a single writev() emits everything. */
        for (size_t i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        /* Atomically replace any previous file */
        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        /* Clean up the temporary file on any error */
        (void) unlink(pattern);
        return r;
}
5297
5298 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5299 _cleanup_free_ char *buf = NULL;
5300 const char *p;
5301 int r;
5302
5303 assert(u);
5304 assert(c);
5305
5306 if (u->exported_log_ratelimit_interval)
5307 return 0;
5308
5309 if (c->log_ratelimit_interval_usec == 0)
5310 return 0;
5311
5312 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5313
5314 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5315 return log_oom();
5316
5317 r = symlink_atomic(buf, p);
5318 if (r < 0)
5319 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5320
5321 u->exported_log_ratelimit_interval = true;
5322 return 0;
5323 }
5324
5325 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5326 _cleanup_free_ char *buf = NULL;
5327 const char *p;
5328 int r;
5329
5330 assert(u);
5331 assert(c);
5332
5333 if (u->exported_log_ratelimit_burst)
5334 return 0;
5335
5336 if (c->log_ratelimit_burst == 0)
5337 return 0;
5338
5339 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5340
5341 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5342 return log_oom();
5343
5344 r = symlink_atomic(buf, p);
5345 if (r < 0)
5346 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5347
5348 u->exported_log_ratelimit_burst = true;
5349 return 0;
5350 }
5351
5352 void unit_export_state_files(Unit *u) {
5353 const ExecContext *c;
5354
5355 assert(u);
5356
5357 if (!u->id)
5358 return;
5359
5360 if (MANAGER_IS_TEST_RUN(u->manager))
5361 return;
5362
5363 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5364 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5365 * the IPC system itself and PID 1 also log to the journal.
5366 *
5367 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5368 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5369 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5370 * namespace at least.
5371 *
5372 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5373 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5374 * them with one. */
5375
5376 (void) unit_export_invocation_id(u);
5377
5378 if (!MANAGER_IS_SYSTEM(u->manager))
5379 return;
5380
5381 c = unit_get_exec_context(u);
5382 if (c) {
5383 (void) unit_export_log_level_max(u, c);
5384 (void) unit_export_log_extra_fields(u, c);
5385 (void) unit_export_log_ratelimit_interval(u, c);
5386 (void) unit_export_log_ratelimit_burst(u, c);
5387 }
5388 }
5389
5390 void unit_unlink_state_files(Unit *u) {
5391 const char *p;
5392
5393 assert(u);
5394
5395 if (!u->id)
5396 return;
5397
5398 /* Undoes the effect of unit_export_state() */
5399
5400 if (u->exported_invocation_id) {
5401 _cleanup_free_ char *invocation_path = NULL;
5402 int r = unit_get_invocation_path(u, &invocation_path);
5403 if (r >= 0) {
5404 (void) unlink(invocation_path);
5405 u->exported_invocation_id = false;
5406 }
5407 }
5408
5409 if (!MANAGER_IS_SYSTEM(u->manager))
5410 return;
5411
5412 if (u->exported_log_level_max) {
5413 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5414 (void) unlink(p);
5415
5416 u->exported_log_level_max = false;
5417 }
5418
5419 if (u->exported_log_extra_fields) {
5420 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5421 (void) unlink(p);
5422
5423 u->exported_log_extra_fields = false;
5424 }
5425
5426 if (u->exported_log_ratelimit_interval) {
5427 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5428 (void) unlink(p);
5429
5430 u->exported_log_ratelimit_interval = false;
5431 }
5432
5433 if (u->exported_log_ratelimit_burst) {
5434 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5435 (void) unlink(p);
5436
5437 u->exported_log_ratelimit_burst = false;
5438 }
5439 }
5440
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset resource accounting once, if requested (e.g. before a restart). */
        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        /* Make sure journald can match the unit's log settings and invocation ID. */
        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5473
/* Decides whether a left-over process found in a unit's cgroup is worth reporting. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        /* A comm starting with '(' is most likely one of our own helper processes (PAM?), ignore it. */
        return comm[0] == '(';
}
5477
5478 int unit_log_leftover_process_start(pid_t pid, int sig, void *userdata) {
5479 _cleanup_free_ char *comm = NULL;
5480
5481 (void) get_process_comm(pid, &comm);
5482
5483 if (ignore_leftover_process(comm))
5484 return 0;
5485
5486 /* During start we print a warning */
5487
5488 log_unit_warning(userdata,
5489 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5490 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5491 pid, strna(comm));
5492
5493 return 1;
5494 }
5495
5496 int unit_log_leftover_process_stop(pid_t pid, int sig, void *userdata) {
5497 _cleanup_free_ char *comm = NULL;
5498
5499 (void) get_process_comm(pid, &comm);
5500
5501 if (ignore_leftover_process(comm))
5502 return 0;
5503
5504 /* During stop we only print an informational message */
5505
5506 log_unit_info(userdata,
5507 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5508 pid, strna(comm));
5509
5510 return 1;
5511 }
5512
/* Walks the unit's cgroup recursively and invokes log_func for every process found in it, without
 * killing anything (signal 0). Used to warn about left-over processes at start/stop time.
 * Returns 0 if the unit has no cgroup, otherwise whatever cg_kill_recursive() returns. */
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        /* Nothing to scan if we don't know our cgroup */
        if (!u->cgroup_path)
                return 0;

        return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_func, u);
}
5523
5524 bool unit_needs_console(Unit *u) {
5525 ExecContext *ec;
5526 UnitActiveState state;
5527
5528 assert(u);
5529
5530 state = unit_active_state(u);
5531
5532 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5533 return false;
5534
5535 if (UNIT_VTABLE(u)->needs_console)
5536 return UNIT_VTABLE(u)->needs_console(u);
5537
5538 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5539 ec = unit_get_exec_context(u);
5540 if (!ec)
5541 return false;
5542
5543 return exec_context_may_touch_console(ec);
5544 }
5545
/* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux
 * label off when validating access checks, or NULL if no relevant unit file exists. The returned
 * pointer is borrowed from the unit object; callers must not free it. */
const char *unit_label_path(const Unit *u) {
        const char *p;

        assert(u);

        /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
         * when validating access checks. */

        if (IN_SET(u->load_state, UNIT_MASKED, UNIT_NOT_FOUND, UNIT_MERGED))
                return NULL; /* Shortcut things if we know there is no real, relevant unit file around */

        /* Prefer the source path (e.g. the native file a generator converted) over the fragment */
        p = u->source_path ?: u->fragment_path;
        if (!p)
                return NULL;

        if (IN_SET(u->load_state, UNIT_LOADED, UNIT_BAD_SETTING, UNIT_ERROR))
                return p; /* Shortcut things, if we successfully loaded at least some stuff from the unit file */

        /* Not loaded yet, we need to go to disk */
        assert(u->load_state == UNIT_STUB);

        /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
        if (null_or_empty_path(p) > 0)
                return NULL;

        return p;
}
5573
/* Checks whether the specified PID may be attached to this unit's cgroup: it must be a valid PID,
 * not the manager itself (or PID 1), and not a kernel thread. Returns 0 if attachable, otherwise a
 * negative errno with 'error' initialized to a descriptive D-Bus error. The order of the checks
 * determines which error callers see first, so keep it stable. */
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5601
/* Emits the structured "Deactivated successfully." journal message for this unit. */
void unit_log_success(Unit *u) {
        assert(u);

        /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */
        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}
5614
/* Emits the structured journal message for a unit that entered the failed state, including the
 * unit's result string (e.g. "timeout", "exit-code") as the UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5625
/* Emits the structured journal message for a unit whose start was skipped (e.g. due to a failed
 * condition check), including the reason string as the UNIT_RESULT= field. */
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5636
/* Logs the termination of one of the unit's processes.
 *
 * kind    → human-readable description of the process ("Main process", "Control process", …)
 * command → the command line that was executed, may be NULL
 * success → whether the exit counts as clean by the unit's definition
 * code    → SIGCHLD code (CLD_EXITED, CLD_KILLED, CLD_DUMPED, …)
 * status  → exit status (if CLD_EXITED) or signal number (otherwise) */
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
5675
5676 int unit_exit_status(Unit *u) {
5677 assert(u);
5678
5679 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5680 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5681 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5682 * service process has exited abnormally (signal/coredump). */
5683
5684 if (!UNIT_VTABLE(u)->exit_status)
5685 return -EOPNOTSUPP;
5686
5687 return UNIT_VTABLE(u)->exit_status(u);
5688 }
5689
5690 int unit_failure_action_exit_status(Unit *u) {
5691 int r;
5692
5693 assert(u);
5694
5695 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5696
5697 if (u->failure_action_exit_status >= 0)
5698 return u->failure_action_exit_status;
5699
5700 r = unit_exit_status(u);
5701 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5702 return 255;
5703
5704 return r;
5705 }
5706
5707 int unit_success_action_exit_status(Unit *u) {
5708 int r;
5709
5710 assert(u);
5711
5712 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5713
5714 if (u->success_action_exit_status >= 0)
5715 return u->success_action_exit_status;
5716
5717 r = unit_exit_status(u);
5718 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5719 return 255;
5720
5721 return r;
5722 }
5723
5724 int unit_test_trigger_loaded(Unit *u) {
5725 Unit *trigger;
5726
5727 /* Tests whether the unit to trigger is loaded */
5728
5729 trigger = UNIT_TRIGGER(u);
5730 if (!trigger)
5731 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5732 "Refusing to start, no unit to trigger.");
5733 if (trigger->load_state != UNIT_LOADED)
5734 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5735 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5736
5737 return 0;
5738 }
5739
5740 void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
5741 assert(u);
5742 assert(context);
5743
5744 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
5745 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
5746 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
5747
5748 exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
5749 }
5750
5751 int unit_clean(Unit *u, ExecCleanMask mask) {
5752 UnitActiveState state;
5753
5754 assert(u);
5755
5756 /* Special return values:
5757 *
5758 * -EOPNOTSUPP → cleaning not supported for this unit type
5759 * -EUNATCH → cleaning not defined for this resource type
5760 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5761 * a job queued or similar
5762 */
5763
5764 if (!UNIT_VTABLE(u)->clean)
5765 return -EOPNOTSUPP;
5766
5767 if (mask == 0)
5768 return -EUNATCH;
5769
5770 if (u->load_state != UNIT_LOADED)
5771 return -EBUSY;
5772
5773 if (u->job)
5774 return -EBUSY;
5775
5776 state = unit_active_state(u);
5777 if (!IN_SET(state, UNIT_INACTIVE))
5778 return -EBUSY;
5779
5780 return UNIT_VTABLE(u)->clean(u, mask);
5781 }
5782
5783 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5784 assert(u);
5785
5786 if (!UNIT_VTABLE(u)->clean ||
5787 u->load_state != UNIT_LOADED) {
5788 *ret = 0;
5789 return 0;
5790 }
5791
5792 /* When the clean() method is set, can_clean() really should be set too */
5793 assert(UNIT_VTABLE(u)->can_clean);
5794
5795 return UNIT_VTABLE(u)->can_clean(u, ret);
5796 }
5797
5798 bool unit_can_freeze(Unit *u) {
5799 assert(u);
5800
5801 if (UNIT_VTABLE(u)->can_freeze)
5802 return UNIT_VTABLE(u)->can_freeze(u);
5803
5804 return UNIT_VTABLE(u)->freeze;
5805 }
5806
/* Called when the freeze operation for this unit completed: records the new freezer state and
 * flushes any pending D-Bus reply to the client that requested the freeze. */
void unit_frozen(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_FROZEN;

        bus_unit_send_pending_freezer_message(u);
}
5814
/* Called when the thaw operation for this unit completed: records the new freezer state and
 * flushes any pending D-Bus reply to the client that requested the thaw. */
void unit_thawed(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_RUNNING;

        bus_unit_send_pending_freezer_message(u);
}
5822
5823 static int unit_freezer_action(Unit *u, FreezerAction action) {
5824 UnitActiveState s;
5825 int (*method)(Unit*);
5826 int r;
5827
5828 assert(u);
5829 assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));
5830
5831 method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
5832 if (!method || !cg_freezer_supported())
5833 return -EOPNOTSUPP;
5834
5835 if (u->job)
5836 return -EBUSY;
5837
5838 if (u->load_state != UNIT_LOADED)
5839 return -EHOSTDOWN;
5840
5841 s = unit_active_state(u);
5842 if (s != UNIT_ACTIVE)
5843 return -EHOSTDOWN;
5844
5845 if (IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING))
5846 return -EALREADY;
5847
5848 r = method(u);
5849 if (r <= 0)
5850 return r;
5851
5852 return 1;
5853 }
5854
/* Public entry point: freeze the unit (see unit_freezer_action() for return values). */
int unit_freeze(Unit *u) {
        return unit_freezer_action(u, FREEZER_FREEZE);
}
5858
/* Public entry point: thaw (unfreeze) the unit (see unit_freezer_action() for return values). */
int unit_thaw(Unit *u) {
        return unit_freezer_action(u, FREEZER_THAW);
}
5862
/* Wrappers around low-level cgroup freezer operations common for service and scope units */
int unit_freeze_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
}
5867
/* Counterpart of unit_freeze_vtable_common(): thaw via the low-level cgroup freezer. */
int unit_thaw_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_THAW);
}
5871
5872 Condition *unit_find_failed_condition(Unit *u) {
5873 Condition *failed_trigger = NULL;
5874 bool has_succeeded_trigger = false;
5875
5876 if (u->condition_result)
5877 return NULL;
5878
5879 LIST_FOREACH(conditions, c, u->conditions)
5880 if (c->trigger) {
5881 if (c->result == CONDITION_SUCCEEDED)
5882 has_succeeded_trigger = true;
5883 else if (!failed_trigger)
5884 failed_trigger = c;
5885 } else if (c->result != CONDITION_SUCCEEDED)
5886 return c;
5887
5888 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
5889 }
5890
/* String names for CollectMode, used when parsing/formatting the CollectMode= unit setting. The
 * macro below generates collect_mode_to_string() and collect_mode_from_string() from this table. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
5897
5898 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
5899 Unit *i;
5900
5901 assert(u);
5902
5903 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
5904 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
5905 * is NULL the first entry found), or NULL if not found. */
5906
5907 UNIT_FOREACH_DEPENDENCY(i, u, atom)
5908 if (!other || other == i)
5909 return i;
5910
5911 return NULL;
5912 }
5913
5914 int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
5915 _cleanup_free_ Unit **array = NULL;
5916 size_t n = 0;
5917 Unit *other;
5918
5919 assert(u);
5920 assert(ret_array);
5921
5922 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
5923 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
5924 * while the dependency table is continuously updated. */
5925
5926 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
5927 if (!GREEDY_REALLOC(array, n + 1))
5928 return -ENOMEM;
5929
5930 array[n++] = other;
5931 }
5932
5933 *ret_array = TAKE_PTR(array);
5934
5935 assert(n <= INT_MAX);
5936 return (int) n;
5937 }