/* src/core/unit.c (systemd) — snapshot from the git.ipfire.org mirror,
 * at commit "strv: make iterator in STRV_FOREACH() declared in the loop". */
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-util.h"
18 #include "cgroup-setup.h"
19 #include "cgroup-util.h"
20 #include "chase-symlinks.h"
21 #include "core-varlink.h"
22 #include "dbus-unit.h"
23 #include "dbus.h"
24 #include "dropin.h"
25 #include "escape.h"
26 #include "execute.h"
27 #include "fd-util.h"
28 #include "fileio-label.h"
29 #include "fileio.h"
30 #include "format-util.h"
31 #include "id128-util.h"
32 #include "install.h"
33 #include "io-util.h"
34 #include "label.h"
35 #include "load-dropin.h"
36 #include "load-fragment.h"
37 #include "log.h"
38 #include "macro.h"
39 #include "missing_audit.h"
40 #include "mkdir-label.h"
41 #include "path-util.h"
42 #include "process-util.h"
43 #include "rm-rf.h"
44 #include "set.h"
45 #include "signal-util.h"
46 #include "sparse-endian.h"
47 #include "special.h"
48 #include "specifier.h"
49 #include "stat-util.h"
50 #include "stdio-util.h"
51 #include "string-table.h"
52 #include "string-util.h"
53 #include "strv.h"
54 #include "terminal-util.h"
55 #include "tmpfile-util.h"
56 #include "umask-util.h"
57 #include "unit-name.h"
58 #include "unit.h"
59 #include "user-util.h"
60 #include "virt.h"
61 #if BPF_FRAMEWORK
62 #include "bpf-link.h"
63 #endif
64
65 /* Thresholds for logging at INFO level about resource consumption */
66 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
67 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
68 #define MENTIONWORTHY_IP_BYTES (0ULL)
69
/* Thresholds for logging at NOTICE level about resource consumption */
71 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
72 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
73 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
74
/* Dispatch table mapping each UnitType to its implementation vtable. Indexed by
 * the UNIT_* enum; every concrete unit type must have an entry here. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
88
/* Allocates a zero-initialized Unit of 'size' bytes (size >= sizeof(Unit); the
 * per-type subclasses allocate their larger structs through this). Only generic
 * defaults are set here — the unit type is assigned later by unit_add_name(),
 * and type-specific setup happens in unit_init(). Returns NULL on OOM. */
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID; /* set for real in unit_add_name() */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* File descriptors and inotify watches use -1 as "not set" */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX; /* UINT64_MAX == no sample taken yet */

        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start rate limit comes from manager defaults; the automatic
         * start/stop rate limit is fixed at 16 ops per 10s. */
        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_start_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };

        return u;
}
133
134 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 _cleanup_(unit_freep) Unit *u = NULL;
136 int r;
137
138 u = unit_new(m, size);
139 if (!u)
140 return -ENOMEM;
141
142 r = unit_add_name(u, name);
143 if (r < 0)
144 return r;
145
146 *ret = TAKE_PTR(u);
147
148 return r;
149 }
150
151 bool unit_has_name(const Unit *u, const char *name) {
152 assert(u);
153 assert(name);
154
155 return streq_ptr(name, u->id) ||
156 set_contains(u->aliases, name);
157 }
158
/* Type-specific initialization, invoked once the unit's type is known (from
 * unit_add_name()). Seeds the cgroup/exec/kill contexts — if the type has them —
 * with the manager-wide defaults, then calls the vtable's init hook. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                /* Slices get no TasksMax= default: they only aggregate children */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->default_oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->default_oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(getpid_cached(), &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
215
/* Adds 'donated_name' to the unit's alias set, taking ownership of the string on
 * success. Returns 0 on success, negative errno on allocation failure; the name
 * must not already be present (asserted). */
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->names is allocated. We may leave u->names
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0); /* 0 would mean the name was already in the set — caller must prevent that */

        return 0;
}
228
229 int unit_add_name(Unit *u, const char *text) {
230 _cleanup_free_ char *name = NULL, *instance = NULL;
231 UnitType t;
232 int r;
233
234 assert(u);
235 assert(text);
236
237 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
238 if (!u->instance)
239 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
240 "instance is not set when adding name '%s': %m", text);
241
242 r = unit_name_replace_instance(text, u->instance, &name);
243 if (r < 0)
244 return log_unit_debug_errno(u, r,
245 "failed to build instance name from '%s': %m", text);
246 } else {
247 name = strdup(text);
248 if (!name)
249 return -ENOMEM;
250 }
251
252 if (unit_has_name(u, name))
253 return 0;
254
255 if (hashmap_contains(u->manager->units, name))
256 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
257 "unit already exist when adding name '%s': %m", name);
258
259 if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
260 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
261 "name '%s' is invalid: %m", name);
262
263 t = unit_name_to_type(name);
264 if (t < 0)
265 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
266 "failed to derive unit type from name '%s': %m", name);
267
268 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
269 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
270 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
271 u->type, t, name);
272
273 r = unit_name_to_instance(name, &instance);
274 if (r < 0)
275 return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);
276
277 if (instance && !unit_type_may_template(t))
278 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);
279
280 /* Ensure that this unit either has no instance, or that the instance matches. */
281 if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
282 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
283 "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
284 name, instance, u->instance);
285
286 if (u->id && !unit_type_may_alias(t))
287 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
288 "cannot add name %s, aliases are not allowed for %s units.",
289 name, unit_type_to_string(t));
290
291 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
292 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");
293
294 /* Add name to the global hashmap first, because that's easier to undo */
295 r = hashmap_put(u->manager->units, name, u);
296 if (r < 0)
297 return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);
298
299 if (u->id) {
300 r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
301 if (r < 0) {
302 hashmap_remove(u->manager->units, name);
303 return r;
304 }
305 TAKE_PTR(name);
306
307 } else {
308 /* A new name, we don't need the set yet. */
309 assert(u->type == _UNIT_TYPE_INVALID);
310 assert(!u->instance);
311
312 u->type = t;
313 u->id = TAKE_PTR(name);
314 u->instance = TAKE_PTR(instance);
315
316 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
317 unit_init(u);
318 }
319
320 unit_add_to_dbus_queue(u);
321 return 0;
322 }
323
/* Promotes one of the unit's existing aliases ('name', possibly a template that
 * gets instantiated first) to be the primary id. The old id — if any — is demoted
 * into the alias set. Returns 0 on success, -ENOENT if 'name' is not an alias of
 * this unit, other negative errno on failure. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the chosen alias leaves the set, the old id enters it */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
363
364 int unit_set_description(Unit *u, const char *description) {
365 int r;
366
367 assert(u);
368
369 r = free_and_strdup(&u->description, empty_to_null(description));
370 if (r < 0)
371 return r;
372 if (r > 0)
373 unit_add_to_dbus_queue(u);
374
375 return 0;
376 }
377
/* Returns true if any unit referenced via OnSuccess= or OnFailure= of 'unit'
 * currently has a job (or nop job) queued — used to delay GC of the unit until
 * its handlers have had a chance to run. */
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
391
/* Decides whether unit 'u' may be unloaded by the garbage collector. As a side
 * effect, releases the unit's runtime resources when it is inactive/failed with
 * no queued job. */
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        /* Someone on the bus still holds a reference to this unit */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u->cgroup_path));
                if (r <= 0)
                        return false; /* non-empty, or couldn't tell — keep the unit */
        }

        /* Finally let the unit type veto collection */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
461
/* Queues the unit for loading. Only stub units (never loaded) are eligible;
 * re-queuing is a no-op. */
void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}
472
473 void unit_add_to_cleanup_queue(Unit *u) {
474 assert(u);
475
476 if (u->in_cleanup_queue)
477 return;
478
479 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
480 u->in_cleanup_queue = true;
481 }
482
/* Queues the unit for garbage collection, unless it is already queued for GC or
 * cleanup, or unit_may_gc() says it must stay loaded. */
void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}
495
/* Queues the unit for a D-Bus properties-changed/new-unit notification. Skipped
 * for stub units and when nobody could possibly receive the signal. */
void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true; /* pretend it was sent so we don't re-queue later */
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}
514
/* Queues an active StopWhenUnneeded= unit for the "is it still needed?" check;
 * idempotent, no-op for units without StopWhenUnneeded= or that aren't active. */
void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}
530
/* Queues an inactive/failed unit that something Upholds= for a (re)start check;
 * idempotent, no-op when the unit is running or nothing upholds it. */
void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}
546
/* Queues an active unit with a BindsTo=-style dependency for the "is what we are
 * bound to still alive?" check; idempotent, no-op otherwise. */
void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}
562
/* Drops every dependency of 'u' and the matching reverse entries held by the
 * units it referenced, queuing those units for GC since they may only have been
 * kept alive by the reference from 'u'. */
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        /* Strip 'u' out of every per-type map of the referenced unit */
                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
}
584
/* Unlinks the on-disk remnants of a transient unit: its generated fragment and
 * any drop-ins that live under the transient lookup path (drop-ins elsewhere are
 * preserved). All unlink/rmdir failures are deliberately ignored (best effort). */
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p); /* only succeeds once the directory is empty — that's fine */
        }
}
613
/* Releases the unit's RequiresMountsFor= bookkeeping: for every recorded path,
 * removes 'u' from the manager's reverse map for that path and each of its
 * prefixes, freeing map entries that become empty. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path = NULL;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1]; /* VLA scratch buffer for the prefix walk */

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                /* y is the manager-owned key string; we need it to free it below */
                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
647
648 static void unit_done(Unit *u) {
649 ExecContext *ec;
650 CGroupContext *cc;
651
652 assert(u);
653
654 if (u->type < 0)
655 return;
656
657 if (UNIT_VTABLE(u)->done)
658 UNIT_VTABLE(u)->done(u);
659
660 ec = unit_get_exec_context(u);
661 if (ec)
662 exec_context_done(ec);
663
664 cc = unit_get_cgroup_context(u);
665 if (cc)
666 cgroup_context_done(cc);
667 }
668
/* Destroys a unit: detaches it from every manager-side data structure (name
 * hashmaps, per-type lists, all work queues), cancels its jobs, releases cgroup
 * and BPF resources, and finally frees all owned memory. NULL-safe; always
 * returns NULL so callers can write `u = unit_free(u);`. The teardown order is
 * significant — e.g. dependencies are cleared only after the slice pointer is
 * saved, and names are removed before the id string is freed. */
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_message = sd_bus_message_unref(u->pending_freezer_message);

        unit_free_requires_mounts_for(u);

        /* Drop all of the unit's names from the manager's global name map */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);


        fdset_free(u->initial_socket_bind_link_fds);
#if BPF_FRAMEWORK
        bpf_link_free(u->ipv4_socket_bind_link);
        bpf_link_free(u->ipv6_socket_bind_link);
#endif

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Break every UnitRef that still points at us */
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Unlink from every work queue the unit might sit in */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        bpf_firewall_close(u);

        hashmap_free(u->bpf_foreign_by_key);

        bpf_program_free(u->bpf_device_control_installed);

#if BPF_FRAMEWORK
        bpf_link_free(u->restrict_ifaces_ingress_bpf_link);
        bpf_link_free(u->restrict_ifaces_egress_bpf_link);
#endif
        fdset_free(u->initial_restric_ifaces_link_fds);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally release all owned strings and lists */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        set_free_free(u->aliases);
        free(u->id);

        return mfree(u);
}
812
/* Returns the unit's cached freezer state (manager's view, not the kernel's). */
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}
818
/* Reads the kernel's notion of the unit's freezer state from the "frozen" key of
 * the unit cgroup's cgroup.events file. On success stores FREEZER_RUNNING,
 * FREEZER_FROZEN, or _FREEZER_STATE_INVALID (key missing/unparsable) in *ret and
 * returns 0; returns negative errno if the attribute cannot be read at all. */
int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
        char *values[1] = {};
        int r;

        assert(u);

        r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                   STRV_MAKE("frozen"), values);
        if (r < 0)
                return r;

        r = _FREEZER_STATE_INVALID;

        if (values[0]) {
                if (streq(values[0], "0"))
                        r = FREEZER_RUNNING;
                else if (streq(values[0], "1"))
                        r = FREEZER_FROZEN;
        }

        free(values[0]); /* cg_get_keyed_attribute() allocated the value string */
        *ret = r;

        return 0;
}
844
/* Returns the unit's current active state, following merge redirections to the
 * surviving unit first. */
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
857
/* Returns the type-specific sub-state string (e.g. "running", "mounting"). */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
863
/* Transfers all of 'other's names (id + aliases) to 'u' as aliases, and repoints
 * the manager's global name map at 'u' for each of them. On failure the partial
 * transfer of other->id is rolled back. */
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id); /* takes ownership of other->id on success */
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id); /* undo the alias add above */
                return r;
        }

        TAKE_PTR(other->id); /* ownership moved into u->aliases above */
        other->aliases = set_free_free(other->aliases);

        /* Every transferred name must already be registered — only its value changes */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
889
/* Pre-grows u's dependency hashmaps so that a later merge of 'other' into 'u'
 * cannot fail with OOM mid-way. Over-reservation is fine; reservations are never
 * rolled back. Returns 0 on success, negative errno on allocation failure. */
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap* deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both unit's
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
936
/* Logs a warning when a dependency of 'u' is dropped during a merge because it
 * would have pointed back at the merged unit itself. Dependency types that are
 * harmless to drop silently (e.g. Wants=) are not reported. */
static void unit_maybe_warn_about_dependency(
                Unit *u,
                const char *other_id,
                UnitDependency dependency) {

        assert(u);

        /* Only warn about some unit types */
        if (!IN_SET(dependency,
                    UNIT_CONFLICTS,
                    UNIT_CONFLICTED_BY,
                    UNIT_BEFORE,
                    UNIT_AFTER,
                    UNIT_ON_SUCCESS,
                    UNIT_ON_FAILURE,
                    UNIT_TRIGGERS,
                    UNIT_TRIGGERED_BY))
                return;

        if (streq_ptr(u->id, other_id))
                log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
        else
                log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other_id), u->id);
}
961
962 static int unit_per_dependency_type_hashmap_update(
963 Hashmap *per_type,
964 Unit *other,
965 UnitDependencyMask origin_mask,
966 UnitDependencyMask destination_mask) {
967
968 UnitDependencyInfo info;
969 int r;
970
971 assert(other);
972 assert_cc(sizeof(void*) == sizeof(info));
973
974 /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
975 * exists, or insert it anew if not. */
976
977 info.data = hashmap_get(per_type, other);
978 if (info.data) {
979 /* Entry already exists. Add in our mask. */
980
981 if (FLAGS_SET(origin_mask, info.origin_mask) &&
982 FLAGS_SET(destination_mask, info.destination_mask))
983 return 0; /* NOP */
984
985 info.origin_mask |= origin_mask;
986 info.destination_mask |= destination_mask;
987
988 r = hashmap_update(per_type, other, info.data);
989 } else {
990 info = (UnitDependencyInfo) {
991 .origin_mask = origin_mask,
992 .destination_mask = destination_mask,
993 };
994
995 r = hashmap_put(per_type, other, info.data);
996 }
997 if (r < 0)
998 return r;
999
1000
1001 return 1;
1002 }
1003
/* Records a dependency of type 'd' on 'other' in the two-level dependency
 * structure (UnitDependency → Hashmap(Unit* → UnitDependencyInfo)), allocating
 * either level on demand. Returns 1 if something changed, 0 on NOP, negative
 * errno on failure. */
static int unit_add_dependency_hashmap(
                Hashmap **dependencies,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        Hashmap *per_type;
        int r;

        assert(dependencies);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        /* Ensure the top-level dependency hashmap exists that maps UnitDependency → Hashmap(Unit* →
         * UnitDependencyInfo) */
        r = hashmap_ensure_allocated(dependencies, NULL);
        if (r < 0)
                return r;

        /* Acquire the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency
         * type, and if it's missing allocate it and insert it. */
        per_type = hashmap_get(*dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!per_type) {
                per_type = hashmap_new(NULL);
                if (!per_type)
                        return -ENOMEM;

                r = hashmap_put(*dependencies, UNIT_DEPENDENCY_TO_PTR(d), per_type);
                if (r < 0) {
                        hashmap_free(per_type);
                        return r;
                }
        }

        return unit_per_dependency_type_hashmap_update(per_type, other, origin_mask, destination_mask);
}
1043
1044 static void unit_merge_dependencies(
1045 Unit *u,
1046 Unit *other) {
1047
1048 int r;
1049
1050 assert(u);
1051 assert(other);
1052
1053 if (u == other)
1054 return;
1055
1056 for (;;) {
1057 _cleanup_(hashmap_freep) Hashmap *other_deps = NULL;
1058 UnitDependencyInfo di_back;
1059 Unit *back;
1060 void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
1061 * since the hashmaps all want it as void pointer. */
1062
1063 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1064 other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
1065 if (!other_deps)
1066 break; /* done! */
1067
1068 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1069 * referenced units as 'back'. */
1070 HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
1071 Hashmap *back_deps;
1072 void *back_dt;
1073
1074 if (back == u) {
1075 /* This is a dependency pointing back to the unit we want to merge with?
1076 * Suppress it (but warn) */
1077 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1078 continue;
1079 }
1080
1081 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1082 * point to 'u' instead. */
1083 HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
1084 UnitDependencyInfo di_move;
1085
1086 di_move.data = hashmap_remove(back_deps, other);
1087 if (!di_move.data)
1088 continue;
1089
1090 assert_se(unit_per_dependency_type_hashmap_update(
1091 back_deps,
1092 u,
1093 di_move.origin_mask,
1094 di_move.destination_mask) >= 0);
1095 }
1096 }
1097
1098 /* Now all references towards 'other' of the current type 'dt' are corrected to point to
1099 * 'u'. Lets's now move the deps of type 'dt' from 'other' to 'u'. First, let's try to move
1100 * them per type wholesale. */
1101 r = hashmap_put(u->dependencies, dt, other_deps);
1102 if (r == -EEXIST) {
1103 Hashmap *deps;
1104
1105 /* The target unit already has dependencies of this type, let's then merge this individually. */
1106
1107 assert_se(deps = hashmap_get(u->dependencies, dt));
1108
1109 for (;;) {
1110 UnitDependencyInfo di_move;
1111
1112 /* Get first dep */
1113 di_move.data = hashmap_steal_first_key_and_value(other_deps, (void**) &back);
1114 if (!di_move.data)
1115 break; /* done */
1116 if (back == u) {
1117 /* Would point back to us, ignore */
1118 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1119 continue;
1120 }
1121
1122 assert_se(unit_per_dependency_type_hashmap_update(deps, back, di_move.origin_mask, di_move.destination_mask) >= 0);
1123 }
1124 } else {
1125 assert_se(r >= 0);
1126 TAKE_PTR(other_deps);
1127
1128 if (hashmap_remove(other_deps, u))
1129 unit_maybe_warn_about_dependency(u, other->id, UNIT_DEPENDENCY_FROM_PTR(dt));
1130 }
1131 }
1132
1133 other->dependencies = hashmap_free(other->dependencies);
1134 }
1135
/* Merges unit 'other' into 'u': names, references and dependencies all move to
 * 'u'; 'other' becomes a UNIT_MERGED redirect and is queued for cleanup. Only
 * allowed for same-type, alias-capable, not-yet-loaded, instance-matching,
 * job-free, inactive units — otherwise -EEXIST/-EINVAL is returned. */
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Merge names */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
1202
1203 int unit_merge_by_name(Unit *u, const char *name) {
1204 _cleanup_free_ char *s = NULL;
1205 Unit *other;
1206 int r;
1207
1208 /* Either add name to u, or if a unit with name already exists, merge it with u.
1209 * If name is a template, do the same for name@instance, where instance is u's instance. */
1210
1211 assert(u);
1212 assert(name);
1213
1214 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
1215 if (!u->instance)
1216 return -EINVAL;
1217
1218 r = unit_name_replace_instance(name, u->instance, &s);
1219 if (r < 0)
1220 return r;
1221
1222 name = s;
1223 }
1224
1225 other = manager_get_unit(u->manager, name);
1226 if (other)
1227 return unit_merge(u, other);
1228
1229 return unit_add_name(u, name);
1230 }
1231
1232 Unit* unit_follow_merge(Unit *u) {
1233 assert(u);
1234
1235 while (u->load_state == UNIT_MERGED)
1236 assert_se(u = u->merged_into);
1237
1238 return u;
1239 }
1240
1241 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
1242 int r;
1243
1244 assert(u);
1245 assert(c);
1246
1247 if (c->working_directory && !c->working_directory_missing_ok) {
1248 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1249 if (r < 0)
1250 return r;
1251 }
1252
1253 if (c->root_directory) {
1254 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1255 if (r < 0)
1256 return r;
1257 }
1258
1259 if (c->root_image) {
1260 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1261 if (r < 0)
1262 return r;
1263 }
1264
1265 for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1266 if (!u->manager->prefix[dt])
1267 continue;
1268
1269 for (size_t i = 0; i < c->directories[dt].n_items; i++) {
1270 _cleanup_free_ char *p = NULL;
1271
1272 p = path_join(u->manager->prefix[dt], c->directories[dt].items[i].path);
1273 if (!p)
1274 return -ENOMEM;
1275
1276 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1277 if (r < 0)
1278 return r;
1279 }
1280 }
1281
1282 if (!MANAGER_IS_SYSTEM(u->manager))
1283 return 0;
1284
1285 /* For the following three directory types we need write access, and /var/ is possibly on the root
1286 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1287 if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
1288 c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
1289 c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
1290 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
1291 if (r < 0)
1292 return r;
1293 }
1294
1295 if (c->private_tmp) {
1296
1297 /* FIXME: for now we make a special case for /tmp and add a weak dependency on
1298 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
1299 * /tmp specifically and masking other mount units should be handled more
1300 * gracefully too, see PR#16894. */
1301 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "tmp.mount", true, UNIT_DEPENDENCY_FILE);
1302 if (r < 0)
1303 return r;
1304
1305 r = unit_require_mounts_for(u, "/var/tmp", UNIT_DEPENDENCY_FILE);
1306 if (r < 0)
1307 return r;
1308
1309 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1310 if (r < 0)
1311 return r;
1312 }
1313
1314 if (c->root_image) {
1315 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1316 * implicit dependency on udev */
1317
1318 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
1319 if (r < 0)
1320 return r;
1321 }
1322
1323 if (!IN_SET(c->std_output,
1324 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1325 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
1326 !IN_SET(c->std_error,
1327 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1328 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
1329 !c->log_namespace)
1330 return 0;
1331
1332 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1333 * is run first. */
1334
1335 if (c->log_namespace) {
1336 _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;
1337
1338 r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
1339 if (r < 0)
1340 return r;
1341
1342 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
1343 if (r < 0)
1344 return r;
1345
1346 r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
1347 if (r < 0)
1348 return r;
1349
1350 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
1351 if (r < 0)
1352 return r;
1353 } else
1354 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1355 if (r < 0)
1356 return r;
1357
1358 return 0;
1359 }
1360
1361 const char* unit_description(Unit *u) {
1362 assert(u);
1363
1364 if (u->description)
1365 return u->description;
1366
1367 return strna(u->id);
1368 }
1369
1370 const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
1371 assert(u);
1372 assert(u->id);
1373
1374 /* Return u->id, u->description, or "{u->id} - {u->description}".
1375 * Versions with u->description are only used if it is set.
1376 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1377 * pointer.
1378 *
1379 * Note that *ret_combined_buffer may be set to NULL. */
1380
1381 if (!u->description ||
1382 u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
1383 (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
1384 streq(u->description, u->id)) {
1385
1386 if (ret_combined_buffer)
1387 *ret_combined_buffer = NULL;
1388 return u->id;
1389 }
1390
1391 if (ret_combined_buffer) {
1392 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
1393 *ret_combined_buffer = strjoin(u->id, " - ", u->description);
1394 if (*ret_combined_buffer)
1395 return *ret_combined_buffer;
1396 log_oom(); /* Fall back to ->description */
1397 } else
1398 *ret_combined_buffer = NULL;
1399 }
1400
1401 return u->description;
1402 }
1403
1404 /* Common implementation for multiple backends */
1405 int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
1406 int r;
1407
1408 assert(u);
1409
1410 /* Load a .{service,socket,...} file */
1411 r = unit_load_fragment(u);
1412 if (r < 0)
1413 return r;
1414
1415 if (u->load_state == UNIT_STUB) {
1416 if (fragment_required)
1417 return -ENOENT;
1418
1419 u->load_state = UNIT_LOADED;
1420 }
1421
1422 /* Load drop-in directory data. If u is an alias, we might be reloading the
1423 * target unit needlessly. But we cannot be sure which drops-ins have already
1424 * been loaded and which not, at least without doing complicated book-keeping,
1425 * so let's always reread all drop-ins. */
1426 r = unit_load_dropin(unit_follow_merge(u));
1427 if (r < 0)
1428 return r;
1429
1430 if (u->source_path) {
1431 struct stat st;
1432
1433 if (stat(u->source_path, &st) >= 0)
1434 u->source_mtime = timespec_load(&st.st_mtim);
1435 else
1436 u->source_mtime = 0;
1437 }
1438
1439 return 0;
1440 }
1441
1442 void unit_add_to_target_deps_queue(Unit *u) {
1443 Manager *m = u->manager;
1444
1445 assert(u);
1446
1447 if (u->in_target_deps_queue)
1448 return;
1449
1450 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1451 u->in_target_deps_queue = true;
1452 }
1453
1454 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1455 assert(u);
1456 assert(target);
1457
1458 if (target->type != UNIT_TARGET)
1459 return 0;
1460
1461 /* Only add the dependency if both units are loaded, so that
1462 * that loop check below is reliable */
1463 if (u->load_state != UNIT_LOADED ||
1464 target->load_state != UNIT_LOADED)
1465 return 0;
1466
1467 /* If either side wants no automatic dependencies, then let's
1468 * skip this */
1469 if (!u->default_dependencies ||
1470 !target->default_dependencies)
1471 return 0;
1472
1473 /* Don't create loops */
1474 if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
1475 return 0;
1476
1477 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1478 }
1479
1480 static int unit_add_slice_dependencies(Unit *u) {
1481 Unit *slice;
1482 assert(u);
1483
1484 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1485 return 0;
1486
1487 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1488 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1489 relationship). */
1490 UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1491
1492 slice = UNIT_GET_SLICE(u);
1493 if (slice)
1494 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
1495
1496 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1497 return 0;
1498
1499 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1500 }
1501
/* For each path registered via RequiresMountsFor=, add After= (and, for mounts backed by a fragment
 * file, Requires=) dependencies on the .mount units covering the path and each of its prefixes. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
                /* VLA sized to hold any prefix of 'path' plus the terminating NUL */
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (IN_SET(r, -EINVAL, -ENAMETOOLONG))
                                continue; /* If the path cannot be converted to a mount unit name, then it's
                                           * not manageable as a unit by systemd, and hence we don't need a
                                           * dependency on it. Let's thus silently ignore the issue. */
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if it exists. If so the dependencies on
                                 * this unit will be added later during the loading of the mount unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* Never depend on ourselves. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Mount units backed by a fragment file additionally get a Requires= dependency. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1551
1552 static int unit_add_oomd_dependencies(Unit *u) {
1553 CGroupContext *c;
1554 bool wants_oomd;
1555 int r;
1556
1557 assert(u);
1558
1559 if (!u->default_dependencies)
1560 return 0;
1561
1562 c = unit_get_cgroup_context(u);
1563 if (!c)
1564 return 0;
1565
1566 wants_oomd = (c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL);
1567 if (!wants_oomd)
1568 return 0;
1569
1570 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1571 if (r < 0)
1572 return r;
1573
1574 return 0;
1575 }
1576
1577 static int unit_add_startup_units(Unit *u) {
1578 if (!unit_has_startup_cgroup_constraints(u))
1579 return 0;
1580
1581 return set_ensure_put(&u->manager->startup_units, NULL, u);
1582 }
1583
1584 static int unit_validate_on_failure_job_mode(
1585 Unit *u,
1586 const char *job_mode_setting,
1587 JobMode job_mode,
1588 const char *dependency_name,
1589 UnitDependencyAtom atom) {
1590
1591 Unit *other, *found = NULL;
1592
1593 if (job_mode != JOB_ISOLATE)
1594 return 0;
1595
1596 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
1597 if (!found)
1598 found = other;
1599 else if (found != other)
1600 return log_unit_error_errno(
1601 u, SYNTHETIC_ERRNO(ENOEXEC),
1602 "More than one %s dependencies specified but %sisolate set. Refusing.",
1603 dependency_name, job_mode_setting);
1604 }
1605
1606 return 0;
1607 }
1608
/* Drive a stub unit through loading: finalize a transient file if any, run the per-type ->load()
 * vtable hook, then register implicit dependencies (slice, mounts, oomd, startup set) and validate
 * the OnFailure=/OnSuccess= job-mode settings. On failure the unit is parked in UNIT_NOT_FOUND,
 * UNIT_BAD_SETTING or UNIT_ERROR state and a negative errno is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are processing it now, so remove it from the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or failed/merged) — nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        /* The per-type load hook must have moved the unit out of the stub state. */
        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnSuccessJobMode=", u->on_success_job_mode, "OnSuccess=", UNIT_ATOM_ON_SUCCESS);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_failure_job_mode(u, "OnFailureJobMode=", u->on_failure_job_mode, "OnFailure=", UNIT_ATOM_ON_FAILURE);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1704
/* Log callback (used e.g. by condition_test_list()): when 'userdata' is a Unit, emits the message
 * with the unit's journal fields attached so it is attributed to that unit; otherwise logs plainly.
 * Returns the errno-style negative value of 'error', like the rest of the log_* family. */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        /* Suppress early if the unit's effective log level filters this message out. */
        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1728
1729 static bool unit_test_condition(Unit *u) {
1730 _cleanup_strv_free_ char **env = NULL;
1731 int r;
1732
1733 assert(u);
1734
1735 dual_timestamp_get(&u->condition_timestamp);
1736
1737 r = manager_get_effective_environment(u->manager, &env);
1738 if (r < 0) {
1739 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1740 u->condition_result = true;
1741 } else
1742 u->condition_result = condition_test_list(
1743 u->conditions,
1744 env,
1745 condition_type_to_string,
1746 log_unit_internal,
1747 u);
1748
1749 unit_add_to_dbus_queue(u);
1750 return u->condition_result;
1751 }
1752
1753 static bool unit_test_assert(Unit *u) {
1754 _cleanup_strv_free_ char **env = NULL;
1755 int r;
1756
1757 assert(u);
1758
1759 dual_timestamp_get(&u->assert_timestamp);
1760
1761 r = manager_get_effective_environment(u->manager, &env);
1762 if (r < 0) {
1763 log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
1764 u->assert_result = CONDITION_ERROR;
1765 } else
1766 u->assert_result = condition_test_list(
1767 u->asserts,
1768 env,
1769 assert_type_to_string,
1770 log_unit_internal,
1771 u);
1772
1773 unit_add_to_dbus_queue(u);
1774 return u->assert_result;
1775 }
1776
1777 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
1778 if (log_get_show_color()) {
1779 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
1780 ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
1781 else
1782 ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
1783 }
1784
1785 DISABLE_WARNING_FORMAT_NONLITERAL;
1786 manager_status_printf(u->manager, status_type, status, format, ident);
1787 REENABLE_WARNING;
1788 }
1789
1790 int unit_test_start_limit(Unit *u) {
1791 const char *reason;
1792
1793 assert(u);
1794
1795 if (ratelimit_below(&u->start_ratelimit)) {
1796 u->start_limit_hit = false;
1797 return 0;
1798 }
1799
1800 log_unit_warning(u, "Start request repeated too quickly.");
1801 u->start_limit_hit = true;
1802
1803 reason = strjoina("unit ", u->id, " failed");
1804
1805 emergency_action(u->manager, u->start_limit_action,
1806 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1807 u->reboot_arg, -1, reason);
1808
1809 return -ECANCELED;
1810 }
1811
/* Whether the interactive confirmation question should be asked before spawning processes for this
 * unit. */
bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reasons units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        /* NOTE(review): unit_get_exec_context() presumably never returns NULL for the unit types
         * this is called on — otherwise this dereference would crash; confirm against callers. */
        return !unit_get_exec_context(u)->same_pgrp;
}
1823
1824 static bool unit_verify_deps(Unit *u) {
1825 Unit *other;
1826
1827 assert(u);
1828
1829 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1830 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1831 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1832 * that are not used in conjunction with After= as for them any such check would make things entirely
1833 * racy. */
1834
1835 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
1836
1837 if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
1838 continue;
1839
1840 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1841 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1842 return false;
1843 }
1844 }
1845
1846 return true;
1847 }
1848
/* Errors that aren't really errors:
 *         -EALREADY: Unit is already started.
 *         -ECOMM:    Condition failed
 *         -EAGAIN:   An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:     This unit type does not support starting.
 *         -ECANCELED: Start limit hit, too many requests for now
 *         -EPROTO:    Assert failed
 *         -EINVAL:    Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:   The necessary dependencies are not fulfilled.
 *         -ESTALE:    This unit has been started before and can't be started a second time
 *         -ENOENT:    This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);
        /* Thaw the unit's cgroup first, so a frozen unit doesn't get stuck starting. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->start(u);
}
1940
1941 bool unit_can_start(Unit *u) {
1942 assert(u);
1943
1944 if (u->load_state != UNIT_LOADED)
1945 return false;
1946
1947 if (!unit_type_supported(u->type))
1948 return false;
1949
1950 /* Scope units may be started only once */
1951 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1952 return false;
1953
1954 return !!UNIT_VTABLE(u)->start;
1955 }
1956
1957 bool unit_can_isolate(Unit *u) {
1958 assert(u);
1959
1960 return unit_can_start(u) &&
1961 u->allow_isolate;
1962 }
1963
1964 /* Errors:
1965 * -EBADR: This unit type does not support stopping.
1966 * -EALREADY: Unit is already stopped.
1967 * -EAGAIN: An operation is already in progress. Retry later.
1968 */
1969 int unit_stop(Unit *u) {
1970 UnitActiveState state;
1971 Unit *following;
1972
1973 assert(u);
1974
1975 state = unit_active_state(u);
1976 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1977 return -EALREADY;
1978
1979 following = unit_following(u);
1980 if (following) {
1981 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1982 return unit_stop(following);
1983 }
1984
1985 if (!UNIT_VTABLE(u)->stop)
1986 return -EBADR;
1987
1988 unit_add_to_dbus_queue(u);
1989 unit_cgroup_freezer_action(u, FREEZER_THAW);
1990
1991 return UNIT_VTABLE(u)->stop(u);
1992 }
1993
1994 bool unit_can_stop(Unit *u) {
1995 assert(u);
1996
1997 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
1998 * Extrinsic units follow external state and they may stop following external state changes
1999 * (hence we return true here), but an attempt to do this through the manager will fail. */
2000
2001 if (!unit_type_supported(u->type))
2002 return false;
2003
2004 if (u->perpetual)
2005 return false;
2006
2007 return !!UNIT_VTABLE(u)->stop;
2008 }
2009
2010 /* Errors:
2011 * -EBADR: This unit type does not support reloading.
2012 * -ENOEXEC: Unit is not started.
2013 * -EAGAIN: An operation is already in progress. Retry later.
2014 */
2015 int unit_reload(Unit *u) {
2016 UnitActiveState state;
2017 Unit *following;
2018
2019 assert(u);
2020
2021 if (u->load_state != UNIT_LOADED)
2022 return -EINVAL;
2023
2024 if (!unit_can_reload(u))
2025 return -EBADR;
2026
2027 state = unit_active_state(u);
2028 if (state == UNIT_RELOADING)
2029 return -EAGAIN;
2030
2031 if (state != UNIT_ACTIVE)
2032 return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");
2033
2034 following = unit_following(u);
2035 if (following) {
2036 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
2037 return unit_reload(following);
2038 }
2039
2040 unit_add_to_dbus_queue(u);
2041
2042 if (!UNIT_VTABLE(u)->reload) {
2043 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2044 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
2045 return 0;
2046 }
2047
2048 unit_cgroup_freezer_action(u, FREEZER_THAW);
2049
2050 return UNIT_VTABLE(u)->reload(u);
2051 }
2052
2053 bool unit_can_reload(Unit *u) {
2054 assert(u);
2055
2056 if (UNIT_VTABLE(u)->can_reload)
2057 return UNIT_VTABLE(u)->can_reload(u);
2058
2059 if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
2060 return true;
2061
2062 return UNIT_VTABLE(u)->reload;
2063 }
2064
2065 bool unit_is_unneeded(Unit *u) {
2066 Unit *other;
2067 assert(u);
2068
2069 if (!u->stop_when_unneeded)
2070 return false;
2071
2072 /* Don't clean up while the unit is transitioning or is even inactive. */
2073 if (unit_active_state(u) != UNIT_ACTIVE)
2074 return false;
2075 if (u->job)
2076 return false;
2077
2078 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
2079 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2080 * restart, then don't clean this one up. */
2081
2082 if (other->job)
2083 return false;
2084
2085 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2086 return false;
2087
2088 if (unit_will_restart(other))
2089 return false;
2090 }
2091
2092 return true;
2093 }
2094
2095 bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
2096 Unit *other;
2097
2098 assert(u);
2099
2100 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2101 * that is active declared an Uphold= dependencies on it */
2102
2103 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
2104 if (ret_culprit)
2105 *ret_culprit = NULL;
2106 return false;
2107 }
2108
2109 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
2110 if (other->job)
2111 continue;
2112
2113 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
2114 if (ret_culprit)
2115 *ret_culprit = other;
2116 return true;
2117 }
2118 }
2119
2120 if (ret_culprit)
2121 *ret_culprit = NULL;
2122 return false;
2123 }
2124
2125 bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
2126 Unit *other;
2127
2128 assert(u);
2129
2130 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2131 * because the other unit is down. */
2132
2133 if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
2134 /* Don't clean up while the unit is transitioning or is even inactive. */
2135 if (ret_culprit)
2136 *ret_culprit = NULL;
2137 return false;
2138 }
2139
2140 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
2141 if (other->job)
2142 continue;
2143
2144 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
2145 if (ret_culprit)
2146 *ret_culprit = other;
2147
2148 return true;
2149 }
2150 }
2151
2152 if (ret_culprit)
2153 *ret_culprit = NULL;
2154 return false;
2155 }
2156
2157 static void check_unneeded_dependencies(Unit *u) {
2158 Unit *other;
2159 assert(u);
2160
2161 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2162
2163 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
2164 unit_submit_to_stop_when_unneeded_queue(other);
2165 }
2166
2167 static void check_uphold_dependencies(Unit *u) {
2168 Unit *other;
2169 assert(u);
2170
2171 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2172
2173 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
2174 unit_submit_to_start_when_upheld_queue(other);
2175 }
2176
2177 static void check_bound_by_dependencies(Unit *u) {
2178 Unit *other;
2179 assert(u);
2180
2181 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2182
2183 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
2184 unit_submit_to_stop_when_bound_queue(other);
2185 }
2186
/* Called when a unit became active without a job having requested it; enqueue the jobs its
 * dependencies would have received if it had been started through a regular start job. */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Start hard dependencies, failing the whole transaction if they conflict (JOB_REPLACE).
         * Units we are ordered After= are skipped in each case — presumably because they are
         * expected to have been handled already; TODO confirm.
         * NOTE(review): manager_add_job() return values are ignored throughout — best effort. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        /* Start soft dependencies; their job may be dropped on conflict (JOB_FAIL). */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        /* Stop units that conflict with us and are still up. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
2207
2208 static void retroactively_stop_dependencies(Unit *u) {
2209 Unit *other;
2210
2211 assert(u);
2212 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2213
2214 /* Pull down units which are bound to us recursively if enabled */
2215 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
2216 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2217 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2218 }
2219
2220 void unit_start_on_failure(
2221 Unit *u,
2222 const char *dependency_name,
2223 UnitDependencyAtom atom,
2224 JobMode job_mode) {
2225
2226 int n_jobs = -1;
2227 Unit *other;
2228 int r;
2229
2230 assert(u);
2231 assert(dependency_name);
2232 assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));
2233
2234 /* Act on OnFailure= and OnSuccess= dependencies */
2235
2236 UNIT_FOREACH_DEPENDENCY(other, u, atom) {
2237 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2238
2239 if (n_jobs < 0) {
2240 log_unit_info(u, "Triggering %s dependencies.", dependency_name);
2241 n_jobs = 0;
2242 }
2243
2244 r = manager_add_job(u->manager, JOB_START, other, job_mode, NULL, &error, NULL);
2245 if (r < 0)
2246 log_unit_warning_errno(
2247 u, r, "Failed to enqueue %s job, ignoring: %s",
2248 dependency_name, bus_error_message(&error, r));
2249 n_jobs ++;
2250 }
2251
2252 if (n_jobs >= 0)
2253 log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
2254 dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
2255 }
2256
/* Notify every unit that is triggered by 'u' (i.e. has a TriggeredBy= atom pointing at us)
 * about our state change, if that unit's type implements the trigger_notify() vtable hook. */
void unit_trigger_notify(Unit *u) {
        Unit *other;

        assert(u);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}
2266
/* Raise (make more severe) a syslog log level to LOG_INFO or LOG_NOTICE when the matching
 * condition holds. Numerically lower syslog levels are more severe, so "raising" means
 * clamping the value downwards; an already more severe level is left untouched. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        int raised = log_level;

        if (condition_info && raised > LOG_INFO)
                raised = LOG_INFO;
        if (condition_notice && raised > LOG_NOTICE)
                raised = LOG_NOTICE;

        return raised;
}
2274
static int unit_log_resources(Unit *u) {
        /* Room for: 1 CPU field + all IP fields + all IO fields + 4 trailing static fields */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
        size_t n_message_parts = 0, n_iovec = 0;
        /* Room for: 1 CPU part + 2 IO parts + 2 IP parts + NULL terminator */
        char* message_parts[1 + 2 + 2 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };
        const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
                [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
                [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
                [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                t = strjoin("consumed ", FORMAT_TIMESPAN(nsec / NSEC_PER_USEC, USEC_PER_MSEC), " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;

                log_level = raise_level(log_level,
                                        nsec > MENTIONWORTHY_CPU_NSEC,
                                        nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k]);

                (void) unit_get_io_accounting(u, k, k > 0, &value);
                if (value == UINT64_MAX)
                        continue;

                have_io_accounting = true;
                if (value > 0)
                        any_io = true;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (k == CGROUP_IO_READ_BYTES) {
                        assert(!rr);
                        rr = strjoin("read ", strna(FORMAT_BYTES(value)), " from disk");
                        if (!rr) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (k == CGROUP_IO_WRITE_BYTES) {
                        assert(!wr);
                        wr = strjoin("written ", strna(FORMAT_BYTES(value)), " to disk");
                        if (!wr) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
        }

        if (have_io_accounting) {
                if (any_io) {
                        /* TAKE_PTR() hands ownership to message_parts[], which is freed in finish: */
                        if (rr)
                                message_parts[n_message_parts++] = TAKE_PTR(rr);
                        if (wr)
                                message_parts[n_message_parts++] = TAKE_PTR(wr);

                } else {
                        char *k;

                        k = strdup("no IO");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", strna(FORMAT_BYTES(value)), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }

                if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
        }

        /* This check is here because it is the earliest point following all possible log_level assignments. If
         * log_level is assigned anywhere after this point, move this check. */
        if (!unit_log_level_test(u, log_level)) {
                r = 0;
                goto finish;
        }

        if (have_ip_accounting) {
                if (any_traffic) {
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                _cleanup_free_ char *joined = NULL;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_unit_struct_iovec(u, log_level, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries: the first n_message_parts strings and the first
         * n_iovec iovec payloads (the trailing 4 iovec entries are stack/static, see above). */
        for (size_t i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (size_t i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2508
2509 static void unit_update_on_console(Unit *u) {
2510 bool b;
2511
2512 assert(u);
2513
2514 b = unit_needs_console(u);
2515 if (u->on_console == b)
2516 return;
2517
2518 u->on_console = b;
2519 if (b)
2520 manager_ref_console(u->manager);
2521 else
2522 manager_unref_console(u->manager);
2523 }
2524
static void unit_emit_audit_start(Unit *u) {
        assert(u);

        /* Audit records are only emitted for service units */
        if (u->type != UNIT_SERVICE)
                return;

        /* Write audit record if we have just finished starting up */
        manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
        u->in_audit = true; /* remember that a start record was written, so stop can pair with it */
}
2535
/* Emit the audit stop record for a service unit entering 'state'; success is reported when the
 * unit ended up cleanly inactive (state == UNIT_INACTIVE). */
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        /* Audit records are only emitted for service units */
        if (u->type != UNIT_SERVICE)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
        }
}
2554
/* Reconciles the unit's new active state 'ns' with its pending job 'j': finishes the job when the
 * state completes it, invalidates it when the state contradicts it. Returns true if the state
 * change was "unexpected", i.e. not something the running job was working towards. */
static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)

                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                /* Active/reloading means the start job is done; any other state while the job is
                 * running (except the transient "activating") contradicts it. */
                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                /* The reload outcome is taken from UNIT_NOTIFY_RELOAD_FAILURE once the unit is
                 * active again; "activating"/"reloading" are transient and not conclusive. */
                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                /* Inactive/failed means the stop succeeded; anything else while running (except the
                 * transient "deactivating") means the stop was thwarted. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
2628
/* Central state-change hook: called by the unit type implementations whenever a unit's low-level
 * state changes, with os = old state, ns = new state. Propagates the change to jobs, dependencies,
 * timestamps, audit/plymouth/oomd, and the various deferred-work queues. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, flags);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= is suppressed when the unit is about to auto-restart anyway */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u, "OnFailure=", UNIT_ATOM_ON_FAILURE, u->on_failure_job_mode);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }

                /* OnSuccess= fires on a clean stop from a previously non-inactive, non-failed state */
                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE) &&
                    !(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                        unit_start_on_failure(u, "OnSuccess=", UNIT_ATOM_ON_SUCCESS, u->on_success_job_mode);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* FailureAction=/SuccessAction= handling */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
2781
/* Start watching 'pid' on behalf of this unit. The manager keeps two kinds of entries in
 * watch_pids: key "pid" maps to a single Unit, and key "-pid" maps to a NULL-terminated array of
 * Units when more than one unit watches the same PID. With 'exclusive' set, any stale references
 * other units hold on this PID are dropped first. Returns 0 on success, negative errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pid(u->manager, pid);

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++) /* deliberately scans the whole array, so n ends up as its length */
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, with room for us and the NULL terminator */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* memcpy_safe() tolerates array == NULL when n == 0 */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also track the PID in the unit's own set */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2852
/* Stop watching 'pid' for this unit: remove it from both manager-level keys ("pid" single-unit
 * entry and "-pid" multi-unit array) and from the unit's own PID set. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                /* Let's iterate through the array, dropping our own entry */

                /* In-place compaction: copy every entry that isn't us down over the gap */
                size_t m = 0;
                for (size_t n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2882
2883 void unit_unwatch_all_pids(Unit *u) {
2884 assert(u);
2885
2886 while (!set_isempty(u->pids))
2887 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2888
2889 u->pids = set_free(u->pids);
2890 }
2891
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        /* The unit's main and control PIDs are never tidied away, even if dead */
        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* NOTE(review): pid_is_unwaited() presumably returns true while the process still
                 * exists (not yet waited for) — so this drops PIDs that are already gone. Also,
                 * unit_unwatch_pid() removes the current entry from u->pids while SET_FOREACH is
                 * iterating it; assumed safe with this set implementation — verify. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2913
/* Deferred event callback enqueued by unit_enqueue_rewatch_pids(): prunes dead PIDs, re-subscribes
 * to the remaining ones, and synthesizes a cgroup-empty event if nothing is left to watch. */
static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
        Unit *u = userdata;

        assert(s);
        assert(u);

        unit_tidy_watch_pids(u);
        unit_watch_all_pids(u);

        /* If the PID set is empty now, then let's finish this off. */
        unit_synthesize_cgroup_empty_event(u);

        return 0;
}
2928
/* Schedules a deferred, idle-priority pass over the unit's watched PIDs (see
 * on_rewatch_pids_event()). Only needed on legacy (non-unified) cgroup setups; on cgroup v2 proper
 * notifications make this unnecessary. Returns 0 on success or if not needed, negative errno on
 * failure (-ENOENT if the unit has no cgroup). */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                /* The event source is cached on the unit so subsequent calls just re-enable it */
                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
2969
/* Cancels a pending PID-rewatch pass, if any, and releases the cached event source. */
void unit_dequeue_rewatch_pids(Unit *u) {
        int r;
        assert(u);

        if (!u->rewatch_pids_event_source)
                return;

        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
        if (r < 0)
                log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");

        u->rewatch_pids_event_source = sd_event_source_disable_unref(u->rewatch_pids_event_source);
}
2983
/* Returns whether it makes sense at all to enqueue a job of type 'j' for this unit. */
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
        case JOB_NOP:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueuing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                /* Restarting requires both stopping and starting to be possible */
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached();
        }
}
3018
/* Registers dependency 'd' from 'u' onto 'other', plus the matching inverse dependency on 'other'
 * (per inverse_table), and optionally a References=/ReferencedBy= pair. 'mask' is passed through
 * to unit_add_dependency_hashmap(). Returns 0 on success (including when the request is refused
 * as a harmless no-op), negative errno on error. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS] = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_UPHELD_BY] = UNIT_UPHOLDS,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_SUCCESS] = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF] = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE] = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF] = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO] = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM] = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_IN_SLICE] = UNIT_SLICE_OF,
                [UNIT_SLICE_OF] = UNIT_IN_SLICE,
        };
        Unit *original_u = u, *original_other = other; /* kept for warning messages, pre-merge */
        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        bool noop;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                unit_maybe_warn_about_dependency(original_u, original_other->id, d);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows to start its job
         * running timeout at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        /* Record the forward dependency; r > 0 presumably means "newly added" — see the noop logic */
        r = unit_add_dependency_hashmap(&u->dependencies, d, other, mask, 0);
        if (r < 0)
                return r;
        noop = !r;

        /* Record the inverse dependency on the other unit, unless symmetric with the forward one */
        if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
                r = unit_add_dependency_hashmap(&other->dependencies, inverse_table[d], u, 0, mask);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;
        }

        if (add_reference) {
                r = unit_add_dependency_hashmap(&u->dependencies, UNIT_REFERENCES, other, mask, 0);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;

                r = unit_add_dependency_hashmap(&other->dependencies, UNIT_REFERENCED_BY, u, 0, mask);
                if (r < 0)
                        return r;
                if (r)
                        noop = false;
        }

        if (!noop)
                unit_add_to_dbus_queue(u);

        return 0;
}
3151
3152 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3153 int r;
3154
3155 assert(u);
3156
3157 r = unit_add_dependency(u, d, other, add_reference, mask);
3158 if (r < 0)
3159 return r;
3160
3161 return unit_add_dependency(u, e, other, add_reference, mask);
3162 }
3163
3164 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3165 int r;
3166
3167 assert(u);
3168 assert(name);
3169 assert(buf);
3170 assert(ret);
3171
3172 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3173 *buf = NULL;
3174 *ret = name;
3175 return 0;
3176 }
3177
3178 if (u->instance)
3179 r = unit_name_replace_instance(name, u->instance, buf);
3180 else {
3181 _cleanup_free_ char *i = NULL;
3182
3183 r = unit_name_to_prefix(u->id, &i);
3184 if (r < 0)
3185 return r;
3186
3187 r = unit_name_replace_instance(name, i, buf);
3188 }
3189 if (r < 0)
3190 return r;
3191
3192 *ret = *buf;
3193 return 0;
3194 }
3195
3196 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3197 _cleanup_free_ char *buf = NULL;
3198 Unit *other;
3199 int r;
3200
3201 assert(u);
3202 assert(name);
3203
3204 r = resolve_template(u, name, &buf, &name);
3205 if (r < 0)
3206 return r;
3207
3208 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3209 return 0;
3210
3211 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3212 if (r < 0)
3213 return r;
3214
3215 return unit_add_dependency(u, d, other, add_reference, mask);
3216 }
3217
3218 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3219 _cleanup_free_ char *buf = NULL;
3220 Unit *other;
3221 int r;
3222
3223 assert(u);
3224 assert(name);
3225
3226 r = resolve_template(u, name, &buf, &name);
3227 if (r < 0)
3228 return r;
3229
3230 if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
3231 return 0;
3232
3233 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3234 if (r < 0)
3235 return r;
3236
3237 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3238 }
3239
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes: points the manager at an alternative unit search path via
         * the environment. Returns 0 on success, a negative errno on failure. */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3244
3245 char *unit_dbus_path(Unit *u) {
3246 assert(u);
3247
3248 if (!u->id)
3249 return NULL;
3250
3251 return unit_dbus_path_from_name(u->id);
3252 }
3253
3254 char *unit_dbus_path_invocation_id(Unit *u) {
3255 assert(u);
3256
3257 if (sd_id128_is_null(u->invocation_id))
3258 return NULL;
3259
3260 return unit_dbus_path_from_name(u->invocation_id_string);
3261 }
3262
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old hashmap registration before installing the new ID. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "unset": clear our fields and report success. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        /* The key is the embedded invocation_id field, hence stays valid as long as the unit lives. */
        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* Requested unset, or a failure along the way: either way, leave the unit without an ID. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3299
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Refuse reparenting units that are already running. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is special: it must stay directly below the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u) && u->cgroup_realized)
                return -EBUSY;

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        /* Returns > 0 to indicate that the slice was actually changed. */
        return 1;
}
3343
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        /* Places the unit in a suitable default slice, unless one was assigned already. */

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        /* NOTE(review): strjoina() allocates on the stack, but the allocation lives until function
         * return, so using slice_name after leaving the branch above is fine. */
        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3395
3396 const char *unit_slice_name(Unit *u) {
3397 Unit *slice;
3398 assert(u);
3399
3400 slice = UNIT_GET_SLICE(u);
3401 if (!slice)
3402 return NULL;
3403
3404 return slice->id;
3405 }
3406
3407 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3408 _cleanup_free_ char *t = NULL;
3409 int r;
3410
3411 assert(u);
3412 assert(type);
3413 assert(_found);
3414
3415 r = unit_name_change_suffix(u->id, type, &t);
3416 if (r < 0)
3417 return r;
3418 if (unit_has_name(u, t))
3419 return -EINVAL;
3420
3421 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3422 assert(r < 0 || *_found != u);
3423 return r;
3424 }
3425
3426 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3427 const char *new_owner;
3428 Unit *u = userdata;
3429 int r;
3430
3431 assert(message);
3432 assert(u);
3433
3434 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3435 if (r < 0) {
3436 bus_log_parse_error(r);
3437 return 0;
3438 }
3439
3440 if (UNIT_VTABLE(u)->bus_name_owner_change)
3441 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3442
3443 return 0;
3444 }
3445
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        /* Reply callback for the GetNameOwner() query issued by unit_install_bus_match(): tells the
         * unit type who currently owns the watched bus name (NULL if nobody does). */

        /* One-shot call: release the slot so another watch may be installed later. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "NameHasNoOwner" just means the name is currently unowned; anything else is
                 * unexpected and worth logging. In both cases we report "no owner" downstream. */
                if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner")) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3480
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        /* Starts watching ownership of the bus name 'name': installs a NameOwnerChanged match and
         * issues an initial GetNameOwner() query so we also learn the current owner. */

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
        if (r < 0)
                return r;

        r = sd_bus_call_method_async(
                        bus,
                        &u->get_name_owner_slot,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner",
                        get_name_owner_handler,
                        u,
                        "s", name);
        if (r < 0) {
                /* Roll back the match installed above, so a failure leaves no watch behind. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3521
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        /* NOTE(review): 'name' is stored as the hashmap key without copying — callers appear to be
         * expected to pass a string that outlives the registration; confirm against callers. */
        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Undo the match installed above, so we fail cleanly. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3548
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        /* Reverse of unit_watch_bus_name(): drop the hashmap registration and release both bus slots. */

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}
3557
3558 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3559 _cleanup_free_ char *e = NULL;
3560 Unit *device;
3561 int r;
3562
3563 assert(u);
3564
3565 /* Adds in links to the device node that this unit is based on */
3566 if (isempty(what))
3567 return 0;
3568
3569 if (!is_device_path(what))
3570 return 0;
3571
3572 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3573 if (!unit_type_supported(UNIT_DEVICE))
3574 return 0;
3575
3576 r = unit_name_from_path(what, ".device", &e);
3577 if (r < 0)
3578 return r;
3579
3580 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3581 if (r < 0)
3582 return r;
3583
3584 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3585 dep = UNIT_BINDS_TO;
3586
3587 return unit_add_two_dependencies(u, UNIT_AFTER,
3588 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3589 device, true, mask);
3590 }
3591
3592 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3593 _cleanup_free_ char *escaped = NULL, *target = NULL;
3594 int r;
3595
3596 assert(u);
3597
3598 if (isempty(what))
3599 return 0;
3600
3601 if (!path_startswith(what, "/dev/"))
3602 return 0;
3603
3604 /* If we don't support devices, then also don't bother with blockdev@.target */
3605 if (!unit_type_supported(UNIT_DEVICE))
3606 return 0;
3607
3608 r = unit_name_path_escape(what, &escaped);
3609 if (r < 0)
3610 return r;
3611
3612 r = unit_name_build("blockdev", escaped, ".target", &target);
3613 if (r < 0)
3614 return r;
3615
3616 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3617 }
3618
int unit_coldplug(Unit *u) {
        int r = 0, q;

        assert(u);

        /* After deserialization, brings the unit back into its previously recorded state. The first
         * error is remembered in 'r', but all steps are still attempted. */

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-establish the bus name tracking we serialized earlier. */
        STRV_FOREACH(i, u->deserialized_refs) {
                q = bus_unit_track_add_name(u, *i);
                if (q < 0 && r >= 0)
                        r = q;
        }
        u->deserialized_refs = strv_free(u->deserialized_refs);

        /* Let the unit type restore its own state. */
        if (UNIT_VTABLE(u)->coldplug) {
                q = UNIT_VTABLE(u)->coldplug(u);
                if (q < 0 && r >= 0)
                        r = q;
        }

        /* Also revive any jobs pending on this unit. */
        if (u->job) {
                q = job_coldplug(u->job);
                if (q < 0 && r >= 0)
                        r = q;
        }
        if (u->nop_job) {
                q = job_coldplug(u->nop_job);
                if (q < 0 && r >= 0)
                        r = q;
        }

        return r;
}
3656
void unit_catchup(Unit *u) {
        assert(u);

        /* Runs after coldplugging: gives the unit type and the cgroup logic a chance to process
         * anything that happened while we were not watching. */

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
3665
3666 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3667 struct stat st;
3668
3669 if (!path)
3670 return false;
3671
3672 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3673 * are never out-of-date. */
3674 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3675 return false;
3676
3677 if (stat(path, &st) < 0)
3678 /* What, cannot access this anymore? */
3679 return true;
3680
3681 if (path_masked)
3682 /* For masked files check if they are still so */
3683 return !null_or_empty(&st);
3684 else
3685 /* For non-empty files check the mtime */
3686 return timespec_load(&st.st_mtim) > mtime;
3687
3688 return false;
3689 }
3690
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;

        assert(u);

        /* Checks whether any file this unit was loaded from changed on disk since loading, i.e.
         * whether a daemon reload would yield different configuration. */

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the drop-in set we'd find now with the one we loaded; a difference (files added or
         * removed) requires a reload. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3717
void unit_reset_failed(Unit *u) {
        assert(u);

        /* Clears the unit's "failed" state (including type-specific result fields) and lifts the start
         * rate limit, so the unit may be started again right away. */

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3727
3728 Unit *unit_following(Unit *u) {
3729 assert(u);
3730
3731 if (UNIT_VTABLE(u)->following)
3732 return UNIT_VTABLE(u)->following(u);
3733
3734 return NULL;
3735 }
3736
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* Returns true if a stop job is queued for this unit. */

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3748
3749 bool unit_inactive_or_pending(Unit *u) {
3750 assert(u);
3751
3752 /* Returns true if the unit is inactive or going down */
3753
3754 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3755 return true;
3756
3757 if (unit_stop_pending(u))
3758 return true;
3759
3760 return false;
3761 }
3762
3763 bool unit_active_or_pending(Unit *u) {
3764 assert(u);
3765
3766 /* Returns true if the unit is active or going up */
3767
3768 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3769 return true;
3770
3771 if (u->job &&
3772 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3773 return true;
3774
3775 return false;
3776 }
3777
bool unit_will_restart_default(Unit *u) {
        assert(u);

        /* Default implementation of the will_restart() hook: the unit is considered about to restart
         * when a start job is queued for it. */

        return unit_has_job_type(u, JOB_START);
}
3783
3784 bool unit_will_restart(Unit *u) {
3785 assert(u);
3786
3787 if (!UNIT_VTABLE(u)->will_restart)
3788 return false;
3789
3790 return UNIT_VTABLE(u)->will_restart(u);
3791 }
3792
3793 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3794 assert(u);
3795 assert(w >= 0 && w < _KILL_WHO_MAX);
3796 assert(SIGNAL_VALID(signo));
3797
3798 if (!UNIT_VTABLE(u)->kill)
3799 return -EOPNOTSUPP;
3800
3801 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3802 }
3803
3804 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3805 _cleanup_set_free_ Set *pid_set = NULL;
3806 int r;
3807
3808 pid_set = set_new(NULL);
3809 if (!pid_set)
3810 return NULL;
3811
3812 /* Exclude the main/control pids from being killed via the cgroup */
3813 if (main_pid > 0) {
3814 r = set_put(pid_set, PID_TO_PTR(main_pid));
3815 if (r < 0)
3816 return NULL;
3817 }
3818
3819 if (control_pid > 0) {
3820 r = set_put(pid_set, PID_TO_PTR(control_pid));
3821 if (r < 0)
3822 return NULL;
3823 }
3824
3825 return TAKE_PTR(pid_set);
3826 }
3827
static int kill_common_log(pid_t pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = userdata;

        assert(u);

        /* Per-PID callback passed to cg_kill_recursive(): announces each auxiliary process we are
         * about to signal. Returns 1 so that the kill proceeds. */

        (void) get_process_comm(pid, &comm);
        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid, strna(comm));

        return 1;
}
3840
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {

        int r = 0;
        bool killed = false;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        /* A negative PID means the unit type has no such process at all; zero means it could have one
         * but currently doesn't. Both are errors when that process was explicitly targeted. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        /* Signal the control process when requested, directly or via KILL_ALL*. */
        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        if (kill(control_pid, signo) < 0) {
                                /* Report this failure both to the logs and to the client */
                                sd_bus_error_set_errnof(
                                                error, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), control_pid, strna(comm));
                                killed = true;
                        }
                }

        /* Signal the main process when requested. Note the 'r == 0' guard: only the first failure is
         * turned into a bus error for the client; later ones still go to the logs and update 'r'. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        if (kill(main_pid, signo) < 0) {
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, errno,
                                                        "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
                                                        signal_to_string(signo), main_pid, strna(comm));

                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), main_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), main_pid, strna(comm));
                                killed = true;
                        }
                }

        /* For KILL_ALL*, additionally signal everything else in the unit's cgroup. */
        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return log_oom();

                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (q < 0) {
                        /* An empty or already-removed cgroup is not an error here. */
                        if (!IN_SET(q, -ESRCH, -ENOENT)) {
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, q,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));

                                r = log_unit_warning_errno(
                                                u, q,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));
                        }
                } else
                        killed = true;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return r;
}
3948
3949 int unit_following_set(Unit *u, Set **s) {
3950 assert(u);
3951 assert(s);
3952
3953 if (UNIT_VTABLE(u)->following_set)
3954 return UNIT_VTABLE(u)->following_set(u, s);
3955
3956 *s = NULL;
3957 return 0;
3958 }
3959
3960 UnitFileState unit_get_unit_file_state(Unit *u) {
3961 int r;
3962
3963 assert(u);
3964
3965 if (u->unit_file_state < 0 && u->fragment_path) {
3966 r = unit_file_get_state(
3967 u->manager->unit_file_scope,
3968 NULL,
3969 u->id,
3970 &u->unit_file_state);
3971 if (r < 0)
3972 u->unit_file_state = UNIT_FILE_BAD;
3973 }
3974
3975 return u->unit_file_state;
3976 }
3977
3978 int unit_get_unit_file_preset(Unit *u) {
3979 assert(u);
3980
3981 if (u->unit_file_preset < 0 && u->fragment_path)
3982 u->unit_file_preset = unit_file_query_preset(
3983 u->manager->unit_file_scope,
3984 NULL,
3985 basename(u->fragment_path),
3986 NULL);
3987
3988 return u->unit_file_preset;
3989 }
3990
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        /* Turns 'ref' into a tracked reference from 'source' to 'target', replacing whatever it
         * pointed to before. The reference is linked into the target's refs_by_target list so it can
         * be found when the target goes away. Returns the target for convenience. */

        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}
4004
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        /* Drops the reference again; a no-op when it is not set. */

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
4018
4019 static int user_from_unit_name(Unit *u, char **ret) {
4020
4021 static const uint8_t hash_key[] = {
4022 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4023 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4024 };
4025
4026 _cleanup_free_ char *n = NULL;
4027 int r;
4028
4029 r = unit_name_to_prefix(u->id, &n);
4030 if (r < 0)
4031 return r;
4032
4033 if (valid_user_group_name(n, 0)) {
4034 *ret = TAKE_PTR(n);
4035 return 0;
4036 }
4037
4038 /* If we can't use the unit name as a user name, then let's hash it and use that */
4039 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4040 return -ENOMEM;
4041
4042 return 0;
4043 }
4044
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User services default to running in the user's home directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Drop the capabilities that would allow the service to circumvent the respective
                 * sandboxing option. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* Derive user/group from the unit name when not configured explicitly. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= implies a closed device policy, unless one was configured explicitly. */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                if ((ec->root_image || !LIST_IS_EMPTY(ec->mount_images)) &&
                    (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
                        const char *p;

                        /* When RootImage= or MountImages= is specified, the following devices are touched. */
                        FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                r = cgroup_add_device_allow(cc, p, "rw");
                                if (r < 0)
                                        return r;
                        }
                        FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                r = cgroup_add_device_allow(cc, p, "rwm");
                                if (r < 0)
                                        return r;
                        }

                        /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                         * Same for mapper and verity. */
                        FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                if (r < 0)
                                        return r;
                        }
                }

                /* ProtectClock= needs read access to the RTC device. */
                if (ec->protect_clock) {
                        r = cgroup_add_device_allow(cc, "char-rtc", "r");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4161
4162 ExecContext *unit_get_exec_context(const Unit *u) {
4163 size_t offset;
4164 assert(u);
4165
4166 if (u->type < 0)
4167 return NULL;
4168
4169 offset = UNIT_VTABLE(u)->exec_context_offset;
4170 if (offset <= 0)
4171 return NULL;
4172
4173 return (ExecContext*) ((uint8_t*) u + offset);
4174 }
4175
4176 KillContext *unit_get_kill_context(Unit *u) {
4177 size_t offset;
4178 assert(u);
4179
4180 if (u->type < 0)
4181 return NULL;
4182
4183 offset = UNIT_VTABLE(u)->kill_context_offset;
4184 if (offset <= 0)
4185 return NULL;
4186
4187 return (KillContext*) ((uint8_t*) u + offset);
4188 }
4189
4190 CGroupContext *unit_get_cgroup_context(Unit *u) {
4191 size_t offset;
4192
4193 if (u->type < 0)
4194 return NULL;
4195
4196 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4197 if (offset <= 0)
4198 return NULL;
4199
4200 return (CGroupContext*) ((uint8_t*) u + offset);
4201 }
4202
4203 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4204 size_t offset;
4205
4206 if (u->type < 0)
4207 return NULL;
4208
4209 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4210 if (offset <= 0)
4211 return NULL;
4212
4213 return *(ExecRuntime**) ((uint8_t*) u + offset);
4214 }
4215
static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
        assert(u);

        /* Picks the directory a settings drop-in for this unit shall be written to, based on the write
         * flags. Returns NULL if nothing shall be written. Note the priority order: transient units
         * always win, then persistent, then runtime. */

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return NULL;

        if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
                return u->manager->lookup_paths.transient;

        if (flags & UNIT_PERSISTENT)
                return u->manager->lookup_paths.persistent_control;

        if (flags & UNIT_RUNTIME)
                return u->manager->lookup_paths.runtime_control;

        return NULL;
}
4233
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* C-escape the (possibly already specifier-escaped) string; the intermediate buffer,
                 * if any, was copied by cescape() and can be released. */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No output buffer supplied: always hand back an allocation the caller owns. */
        return ret ?: strdup(s);
}
4273
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                /* Append the entry, double-quoted, at the current end of the buffer. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* NUL-terminate; note that an empty list yields the empty string, not NULL. */
        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4313
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        /* Persists a single setting for the unit: either appended to the transient unit file currently being
         * written, or stored as a drop-in file created via "systemctl set-property" semantics. Returns 0 on
         * success, negative errno-style error otherwise. */

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        /* Escape the payload as requested; 'escaped' owns memory only if escaping actually allocated */
        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private < 0 means "no section written yet" */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        /* 'p' becomes the drop-in directory, 'q' the drop-in file path */
        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership of 'q' transferred to u->dropin_paths, don't free it */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4399
4400 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4401 _cleanup_free_ char *p = NULL;
4402 va_list ap;
4403 int r;
4404
4405 assert(u);
4406 assert(name);
4407 assert(format);
4408
4409 if (UNIT_WRITE_FLAGS_NOOP(flags))
4410 return 0;
4411
4412 va_start(ap, format);
4413 r = vasprintf(&p, format, ap);
4414 va_end(ap);
4415
4416 if (r < 0)
4417 return -ENOMEM;
4418
4419 return unit_write_setting(u, flags, name, p);
4420 }
4421
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        /* Converts the unit into a transient one: opens a fresh unit file in the transient lookup path and
         * resets all on-disk state (fragment, source, drop-ins) so the unit is reloaded from that file.
         * Returns 0 on success, -EOPNOTSUPP if the unit type doesn't support transient units, negative
         * errno otherwise. */

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        /* The transient file is now this unit's fragment */
        free_and_replace(u->fragment_path, path);

        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Force a reload from the new fragment on the next load */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4467
4468 static int log_kill(pid_t pid, int sig, void *userdata) {
4469 _cleanup_free_ char *comm = NULL;
4470
4471 (void) get_process_comm(pid, &comm);
4472
4473 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4474 only, like for example systemd's own PAM stub process. */
4475 if (comm && comm[0] == '(')
4476 return 0;
4477
4478 log_unit_notice(userdata,
4479 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4480 pid,
4481 strna(comm),
4482 signal_to_string(sig));
4483
4484 return 1;
4485 }
4486
4487 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4488 assert(c);
4489
4490 switch (k) {
4491
4492 case KILL_TERMINATE:
4493 case KILL_TERMINATE_AND_LOG:
4494 *noteworthy = false;
4495 return c->kill_signal;
4496
4497 case KILL_RESTART:
4498 *noteworthy = false;
4499 return restart_kill_signal(c);
4500
4501 case KILL_KILL:
4502 *noteworthy = true;
4503 return c->final_kill_signal;
4504
4505 case KILL_WATCHDOG:
4506 *noteworthy = true;
4507 return c->watchdog_signal;
4508
4509 default:
4510 assert_not_reached();
4511 }
4512 }
4513
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill; /* log each PID we kill when the operation is noteworthy */

        /* Only follow up with SIGHUP on regular termination, and only if the main signal wasn't SIGHUP already */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Don't wait for foreign ("alien") main processes we didn't fork off ourselves */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* Kill the rest of the cgroup, if configured: always for KILL_CONTROL_GROUP, and for KILL_MIXED
         * only during the final kill */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(u->cgroup_path));

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set, since cg_kill_recursive() consumed entries from it */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                         SIGHUP,
                                                         CGROUP_IGNORE_SELF,
                                                         pid_set,
                                                         NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4632
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(u->requires_mounts_for, path)) /* Exit quickly if the path is already covered. */
                return 0;

        _cleanup_free_ char *p = strdup(path);
        if (!p)
                return -ENOMEM;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        path = path_simplify(p);

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask
        };

        r = hashmap_ensure_put(&u->requires_mounts_for, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        assert(r > 0);
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        /* Register this unit in the manager-wide prefix table, once per path prefix */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership moved into the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4707
4708 int unit_setup_exec_runtime(Unit *u) {
4709 ExecRuntime **rt;
4710 size_t offset;
4711 Unit *other;
4712 int r;
4713
4714 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4715 assert(offset > 0);
4716
4717 /* Check if there already is an ExecRuntime for this unit? */
4718 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4719 if (*rt)
4720 return 0;
4721
4722 /* Try to get it from somebody else */
4723 UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_JOINS_NAMESPACE_OF) {
4724 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4725 if (r == 1)
4726 return 1;
4727 }
4728
4729 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4730 }
4731
4732 int unit_setup_dynamic_creds(Unit *u) {
4733 ExecContext *ec;
4734 DynamicCreds *dcreds;
4735 size_t offset;
4736
4737 assert(u);
4738
4739 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4740 assert(offset > 0);
4741 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4742
4743 ec = unit_get_exec_context(u);
4744 assert(ec);
4745
4746 if (!ec->dynamic_user)
4747 return 0;
4748
4749 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4750 }
4751
4752 bool unit_type_supported(UnitType t) {
4753 if (_unlikely_(t < 0))
4754 return false;
4755 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4756 return false;
4757
4758 if (!unit_vtable[t]->supported)
4759 return true;
4760
4761 return unit_vtable[t]->supported();
4762 }
4763
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        /* Logs a structured notice if the directory we are about to mount over is not empty. Best-effort:
         * silently does nothing if the check fails with ENOTDIR or the log level is suppressed. */

        assert(u);
        assert(where);

        if (!unit_log_level_test(u, LOG_NOTICE))
                return;

        r = dir_is_empty(where);
        if (r > 0 || r == -ENOTDIR) /* empty, or not a directory at all: nothing to warn about */
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_unit_struct(u, LOG_NOTICE,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                        "WHERE=%s", where);
}
4787
int unit_fail_if_noncanonical(Unit *u, const char* where) {
        _cleanup_free_ char *canonical_where = NULL;
        int r;

        /* Returns -ELOOP (after logging) if 'where' contains a symlink, 0 otherwise. A failure to resolve
         * the path is ignored (returns 0), as we can't tell whether it is canonical then. */

        assert(u);
        assert(where);

        r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
                return 0;
        }

        /* We will happily ignore a trailing slash (or any redundant slashes) */
        if (path_equal(where, canonical_where))
                return 0;

        /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
        log_unit_struct(u, LOG_ERR,
                        "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
                        "WHERE=%s", where);

        return -ELOOP;
}
4814
4815 bool unit_is_pristine(Unit *u) {
4816 assert(u);
4817
4818 /* Check if the unit already exists or is already around,
4819 * in a number of different ways. Note that to cater for unit
4820 * types such as slice, we are generally fine with units that
4821 * are marked UNIT_LOADED even though nothing was actually
4822 * loaded, as those unit types don't require a file on disk. */
4823
4824 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4825 u->fragment_path ||
4826 u->source_path ||
4827 !strv_isempty(u->dropin_paths) ||
4828 u->job ||
4829 u->merged_into);
4830 }
4831
4832 pid_t unit_control_pid(Unit *u) {
4833 assert(u);
4834
4835 if (UNIT_VTABLE(u)->control_pid)
4836 return UNIT_VTABLE(u)->control_pid(u);
4837
4838 return 0;
4839 }
4840
4841 pid_t unit_main_pid(Unit *u) {
4842 assert(u);
4843
4844 if (UNIT_VTABLE(u)->main_pid)
4845 return UNIT_VTABLE(u)->main_pid(u);
4846
4847 return 0;
4848 }
4849
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid)) /* no reference held, nothing to drop */
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* mark the reference as released */
}
4874
/* Drops the unit's UID reference, optionally destroying the UID's IPC objects right away. */
static void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4878
/* Drops the unit's GID reference; relies on uid_t/gid_t being the same type (see unit_unref_uid_internal()). */
static void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4882
/* Drops both the UID and GID references of the unit in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4889
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero.
         *
         * Returns 1 if a new reference was taken, 0 if we already held a reference on this exact UID/GID,
         * -EBUSY if a different UID/GID is already referenced, or another negative error. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
4927
/* Takes a reference on the given UID for this unit. See unit_ref_uid_internal() for return values. */
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4931
/* Takes a reference on the given GID for this unit; relies on uid_t/gid_t being the same type. */
static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4935
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither.
         * Returns > 0 if at least one new reference was taken, 0 if nothing changed, negative on error. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, so this stays all-or-nothing */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
4961
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        /* Public wrapper: references UID+GID, honouring the unit's RemoveIPC= setting (if it has an exec
         * context). Failures are logged and returned, but callers generally proceed regardless. */

        assert(u);

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        return r;
}
4976
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0) /* only bother D-Bus if a new reference was actually taken */
                unit_add_to_dbus_queue(u);
}
4990
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        /* Generates a fresh random invocation ID and installs it on the unit. Returns 0 on success,
         * negative errno-style error (logged) otherwise. */

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        unit_add_to_dbus_queue(u);
        return 0;
}
5008
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        /* Fills in the ExecParameters structure from both manager-wide and unit-specific state, in
         * preparation for spawning a process for this unit. */

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials = u->manager->received_credentials;

        return 0;
}
5033
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0) /* parent (r > 0), or fork failure (r < 0) */
                return r;

        /* --- from here on we are in the child --- */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        /* Make sure we die when the parent (the manager) goes away */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(u->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5064
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
        pid_t pid;
        int r;

        /* Forks off a child (inside the unit's cgroup) that recursively removes the given paths, and
         * registers the child PID with the unit so its exit is tracked. */

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* --- child: remove all paths, then exit with an aggregate status --- */
                int ret = EXIT_SUCCESS;

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        r = unit_watch_pid(u, pid, true);
        if (r < 0)
                return r;

        *ret_pid = pid;
        return 0;
}
5096
/* Stores the updated dependency-origin masks for 'other' in the deps hashmap, or removes the entry
 * entirely when no mask bits remain. */
static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
        assert(deps);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0)
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(deps, other));
        else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(deps, other, di.data) == 0);
}
5108
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                /* Restart the inner iteration after every modification, since
                 * unit_update_dependency_mask() may remove entries and thereby invalidate the
                 * iterator. 'done' is only true once a full pass made no changes. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                /* Skip entries that have no bits within 'mask' set */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5161
/* Builds the path of the unit's invocation-ID symlink under /run (system manager) or the user's runtime
 * directory (user manager). On success stores a newly allocated string in *ret and returns 0. */
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;
                r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
                if (r < 0)
                        return r;
                p = strjoin(user_path, u->id);
        }

        if (!p)
                return -ENOMEM;

        *ret = p;
        return 0;
}
5185
/* Exports the unit's invocation ID as a symlink for journald to pick up. Idempotent: does nothing if
 * already exported or no invocation ID is set. */
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlink_atomic_label(u->invocation_id_string, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
5209
/* Exports the unit's LogLevelMax= setting as a symlink whose target is the single-digit level, so
 * journald can read it cheaply. Idempotent; no-op if the setting is unset. */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_level_max)
                return 0;

        if (c->log_level_max < 0) /* unset */
                return 0;

        assert(c->log_level_max <= 7); /* syslog levels are 0..7, hence a single digit suffices */

        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5237
/* Exports the unit's LogExtraFields= entries into a regular file under /run/systemd/units/, serialized
 * as (little-endian 64-bit length, data) pairs, written atomically via a temp file + rename. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        int r;

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Two iovecs per field: one for the length prefix, one for the payload */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (size_t i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        /* Atomically replace any previous version */
        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
5290
/* Exports the unit's LogRateLimitIntervalSec= setting (in µs) as a symlink for journald. Idempotent;
 * no-op when the setting is zero (i.e. unset). */
static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_interval)
                return 0;

        if (c->log_ratelimit_interval_usec == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_ratelimit_interval = true;
        return 0;
}
5317
/* Exports the unit's LogRateLimitBurst= setting as a symlink for journald. Idempotent; no-op when the
 * setting is zero (i.e. unset). */
static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_burst)
                return 0;

        if (c->log_ratelimit_burst == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_ratelimit_burst = true;
        return 0;
}
5344
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't
         * really apply to communication between the journal and systemd, as we assume that these two daemons live in
         * the same namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The remaining properties are only exported by the system manager */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
5382
5383 void unit_unlink_state_files(Unit *u) {
5384 const char *p;
5385
5386 assert(u);
5387
5388 if (!u->id)
5389 return;
5390
5391 /* Undoes the effect of unit_export_state() */
5392
5393 if (u->exported_invocation_id) {
5394 _cleanup_free_ char *invocation_path = NULL;
5395 int r = unit_get_invocation_path(u, &invocation_path);
5396 if (r >= 0) {
5397 (void) unlink(invocation_path);
5398 u->exported_invocation_id = false;
5399 }
5400 }
5401
5402 if (!MANAGER_IS_SYSTEM(u->manager))
5403 return;
5404
5405 if (u->exported_log_level_max) {
5406 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5407 (void) unlink(p);
5408
5409 u->exported_log_level_max = false;
5410 }
5411
5412 if (u->exported_log_extra_fields) {
5413 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5414 (void) unlink(p);
5415
5416 u->exported_log_extra_fields = false;
5417 }
5418
5419 if (u->exported_log_ratelimit_interval) {
5420 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5421 (void) unlink(p);
5422
5423 u->exported_log_ratelimit_interval = false;
5424 }
5425
5426 if (u->exported_log_ratelimit_burst) {
5427 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5428 (void) unlink(p);
5429
5430 u->exported_log_ratelimit_burst = false;
5431 }
5432 }
5433
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they exist and are actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5466
static bool ignore_leftover_process(const char *comm) {
        /* Processes whose comm field starts with '(' are most likely our own helper processes (PAM?),
         * hence not worth reporting as left-overs. A NULL comm (unknown) is reported. */
        if (!comm)
                return false;

        return comm[0] == '(';
}
5470
5471 int unit_log_leftover_process_start(pid_t pid, int sig, void *userdata) {
5472 _cleanup_free_ char *comm = NULL;
5473
5474 (void) get_process_comm(pid, &comm);
5475
5476 if (ignore_leftover_process(comm))
5477 return 0;
5478
5479 /* During start we print a warning */
5480
5481 log_unit_warning(userdata,
5482 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5483 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5484 pid, strna(comm));
5485
5486 return 1;
5487 }
5488
5489 int unit_log_leftover_process_stop(pid_t pid, int sig, void *userdata) {
5490 _cleanup_free_ char *comm = NULL;
5491
5492 (void) get_process_comm(pid, &comm);
5493
5494 if (ignore_leftover_process(comm))
5495 return 0;
5496
5497 /* During stop we only print an informational message */
5498
5499 log_unit_info(userdata,
5500 "Unit process " PID_FMT " (%s) remains running after unit stopped.",
5501 pid, strna(comm));
5502
5503 return 1;
5504 }
5505
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        /* Reports (via log_func, e.g. unit_log_leftover_process_start/stop) any processes still present
         * in the unit's cgroup. Returns 0 if the unit has no cgroup path, otherwise the result of
         * cg_kill_recursive(). */

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return 0;

        /* sig=0, flags=0: presumably this only enumerates the processes and invokes log_func on each,
         * without delivering any signal — confirm against cg_kill_recursive() in cgroup-util.c */
        return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_func, u);
}
5516
5517 bool unit_needs_console(Unit *u) {
5518 ExecContext *ec;
5519 UnitActiveState state;
5520
5521 assert(u);
5522
5523 state = unit_active_state(u);
5524
5525 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5526 return false;
5527
5528 if (UNIT_VTABLE(u)->needs_console)
5529 return UNIT_VTABLE(u)->needs_console(u);
5530
5531 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5532 ec = unit_get_exec_context(u);
5533 if (!ec)
5534 return false;
5535
5536 return exec_context_may_touch_console(ec);
5537 }
5538
const char *unit_label_path(const Unit *u) {
        const char *p;

        assert(u);

        /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
         * when validating access checks. Returns NULL if there is no relevant file. */

        if (IN_SET(u->load_state, UNIT_MASKED, UNIT_NOT_FOUND, UNIT_MERGED))
                return NULL; /* Shortcut things if we know there is no real, relevant unit file around */

        /* Prefer the source path over the fragment path, if both are set */
        p = u->source_path ?: u->fragment_path;
        if (!p)
                return NULL;

        if (IN_SET(u->load_state, UNIT_LOADED, UNIT_BAD_SETTING, UNIT_ERROR))
                return p; /* Shortcut things, if we successfully loaded at least some stuff from the unit file */

        /* Not loaded yet, we need to go to disk */
        assert(u->load_state == UNIT_STUB);

        /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
        if (null_or_empty_path(p) > 0)
                return NULL;

        return p;
}
5566
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either. Returns 0 if the PID is attachable, and otherwise a negative errno with
         * 'error' set to a descriptive D-Bus error. */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check: refuse PID 1 and our own PID */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5594
void unit_log_success(Unit *u) {
        assert(u);

        /* Logs a structured "Deactivated successfully." message for this unit.
         *
         * Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */
        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}
5607
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Logs a structured message about the unit having failed, carrying the failure 'result'
         * identifier both in the human-readable message and as UNIT_RESULT= journal field. */
        log_unit_struct(u, LOG_WARNING,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5618
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        /* Logs a structured message about the unit having been skipped, carrying the 'result'
         * identifier both in the human-readable message and as UNIT_RESULT= journal field. */
        log_unit_struct(u, LOG_INFO,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        "UNIT_RESULT=%s", result);
}
5629
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* Logs the exit of one of the unit's processes in structured form.
         *
         * kind:    human-readable description of which process exited, included verbatim in the message
         * command: command line of the process, may be NULL (emitted as COMMAND= journal field)
         * success: whether the exit is considered successful (selects DEBUG log level)
         * code:    SIGCHLD-style code (CLD_EXITED, CLD_KILLED, …), see sigchld_code_to_string()
         * status:  exit status if code == CLD_EXITED, otherwise the signal number */

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        "EXIT_CODE=%s", sigchld_code_to_string(code),
                        "EXIT_STATUS=%i", status,
                        "COMMAND=%s", strna(command),
                        LOG_UNIT_INVOCATION_ID(u));
}
5668
5669 int unit_exit_status(Unit *u) {
5670 assert(u);
5671
5672 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5673 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5674 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5675 * service process has exited abnormally (signal/coredump). */
5676
5677 if (!UNIT_VTABLE(u)->exit_status)
5678 return -EOPNOTSUPP;
5679
5680 return UNIT_VTABLE(u)->exit_status(u);
5681 }
5682
5683 int unit_failure_action_exit_status(Unit *u) {
5684 int r;
5685
5686 assert(u);
5687
5688 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5689
5690 if (u->failure_action_exit_status >= 0)
5691 return u->failure_action_exit_status;
5692
5693 r = unit_exit_status(u);
5694 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5695 return 255;
5696
5697 return r;
5698 }
5699
5700 int unit_success_action_exit_status(Unit *u) {
5701 int r;
5702
5703 assert(u);
5704
5705 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5706
5707 if (u->success_action_exit_status >= 0)
5708 return u->success_action_exit_status;
5709
5710 r = unit_exit_status(u);
5711 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5712 return 255;
5713
5714 return r;
5715 }
5716
5717 int unit_test_trigger_loaded(Unit *u) {
5718 Unit *trigger;
5719
5720 /* Tests whether the unit to trigger is loaded */
5721
5722 trigger = UNIT_TRIGGER(u);
5723 if (!trigger)
5724 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5725 "Refusing to start, no unit to trigger.");
5726 if (trigger->load_state != UNIT_LOADED)
5727 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5728 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5729
5730 return 0;
5731 }
5732
void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
        assert(u);
        assert(context);

        /* Removes the unit's runtime directory, unless it is configured to be preserved — either
         * unconditionally, or across restarts while a restart is currently pending. */
        if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
            (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        /* Credentials are destroyed unconditionally */
        exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
}
5743
5744 int unit_clean(Unit *u, ExecCleanMask mask) {
5745 UnitActiveState state;
5746
5747 assert(u);
5748
5749 /* Special return values:
5750 *
5751 * -EOPNOTSUPP → cleaning not supported for this unit type
5752 * -EUNATCH → cleaning not defined for this resource type
5753 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5754 * a job queued or similar
5755 */
5756
5757 if (!UNIT_VTABLE(u)->clean)
5758 return -EOPNOTSUPP;
5759
5760 if (mask == 0)
5761 return -EUNATCH;
5762
5763 if (u->load_state != UNIT_LOADED)
5764 return -EBUSY;
5765
5766 if (u->job)
5767 return -EBUSY;
5768
5769 state = unit_active_state(u);
5770 if (!IN_SET(state, UNIT_INACTIVE))
5771 return -EBUSY;
5772
5773 return UNIT_VTABLE(u)->clean(u, mask);
5774 }
5775
5776 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5777 assert(u);
5778
5779 if (!UNIT_VTABLE(u)->clean ||
5780 u->load_state != UNIT_LOADED) {
5781 *ret = 0;
5782 return 0;
5783 }
5784
5785 /* When the clean() method is set, can_clean() really should be set too */
5786 assert(UNIT_VTABLE(u)->can_clean);
5787
5788 return UNIT_VTABLE(u)->can_clean(u, ret);
5789 }
5790
bool unit_can_freeze(Unit *u) {
        assert(u);

        /* Checks whether this unit supports freezing: prefer the type-specific can_freeze() check if
         * implemented, otherwise assume freezability from the presence of a freeze() method. */
        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freeze;
}
5799
void unit_frozen(Unit *u) {
        assert(u);

        /* Called when the unit has reached the frozen state: records it and flushes out any pending
         * freezer-related bus message. */
        u->freezer_state = FREEZER_FROZEN;

        bus_unit_send_pending_freezer_message(u);
}
5807
void unit_thawed(Unit *u) {
        assert(u);

        /* Called when the unit has been thawed and is running again: records it and flushes out any
         * pending freezer-related bus message. */
        u->freezer_state = FREEZER_RUNNING;

        bus_unit_send_pending_freezer_message(u);
}
5815
static int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int (*method)(Unit*);
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        /* Common implementation of unit_freeze() and unit_thaw(): validates that the operation makes
         * sense right now, then dispatches to the unit type's freeze/thaw method. Passes through the
         * method's return value when it is <= 0 and normalizes positive returns to 1. */

        method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
        if (!method || !cg_freezer_supported())
                return -EOPNOTSUPP;

        /* Refuse while a job is pending for the unit */
        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        /* Only active units can be frozen or thawed */
        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        /* Refuse if a freezer state change is already in progress */
        if (IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING))
                return -EALREADY;

        r = method(u);
        if (r <= 0)
                return r;

        return 1;
}
5847
/* Public entry point for freezing a unit; see unit_freezer_action() for return conventions. */
int unit_freeze(Unit *u) {
        return unit_freezer_action(u, FREEZER_FREEZE);
}
5851
/* Public entry point for thawing a frozen unit; see unit_freezer_action() for return conventions. */
int unit_thaw(Unit *u) {
        return unit_freezer_action(u, FREEZER_THAW);
}
5855
/* Wrappers around low-level cgroup freezer operations common for service and scope units,
 * suitable for use as the freeze/thaw vtable methods of those unit types. */
int unit_freeze_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
}
5860
/* Counterpart to unit_freeze_vtable_common(), see above. */
int unit_thaw_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_THAW);
}
5864
5865 Condition *unit_find_failed_condition(Unit *u) {
5866 Condition *failed_trigger = NULL;
5867 bool has_succeeded_trigger = false;
5868
5869 if (u->condition_result)
5870 return NULL;
5871
5872 LIST_FOREACH(conditions, c, u->conditions)
5873 if (c->trigger) {
5874 if (c->result == CONDITION_SUCCEEDED)
5875 has_succeeded_trigger = true;
5876 else if (!failed_trigger)
5877 failed_trigger = c;
5878 } else if (c->result != CONDITION_SUCCEEDED)
5879 return c;
5880
5881 return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
5882 }
5883
/* String table for CollectMode (CollectMode= unit setting); DEFINE_STRING_TABLE_LOOKUP() below
 * generates the corresponding from_string/to_string converter functions. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
5890
5891 Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
5892 Unit *i;
5893
5894 assert(u);
5895
5896 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
5897 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
5898 * is NULL the first entry found), or NULL if not found. */
5899
5900 UNIT_FOREACH_DEPENDENCY(i, u, atom)
5901 if (!other || other == i)
5902 return i;
5903
5904 return NULL;
5905 }
5906
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated.
         *
         * Returns the number of entries (ownership of the array passes to the caller), or -ENOMEM. The
         * array is not NULL-terminated; if no dependency matches, 0 is returned and *ret_array may be
         * NULL. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}