/* Source: src/core/unit.c (systemd), captured from the git.ipfire.org gitweb
 * mirror (thirdparty/systemd.git), at merge of pull request #10753
 * ("keszybz/pager-no-interrupt"). */
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
16 #include "bus-util.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "id128-util.h"
28 #include "io-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
31 #include "log.h"
32 #include "macro.h"
33 #include "missing.h"
34 #include "mkdir.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
39 #include "set.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
42 #include "special.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
48 #include "strv.h"
49 #include "umask-util.h"
50 #include "unit-name.h"
51 #include "unit.h"
52 #include "user-util.h"
53 #include "virt.h"
54
/* Dispatch table mapping each concrete unit type to the vtable that
 * implements it. Indexed by UnitType. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
68
69 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
70
71 Unit *unit_new(Manager *m, size_t size) {
72 Unit *u;
73
74 assert(m);
75 assert(size >= sizeof(Unit));
76
77 u = malloc0(size);
78 if (!u)
79 return NULL;
80
81 u->names = set_new(&string_hash_ops);
82 if (!u->names)
83 return mfree(u);
84
85 u->manager = m;
86 u->type = _UNIT_TYPE_INVALID;
87 u->default_dependencies = true;
88 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
89 u->unit_file_preset = -1;
90 u->on_failure_job_mode = JOB_REPLACE;
91 u->cgroup_inotify_wd = -1;
92 u->job_timeout = USEC_INFINITY;
93 u->job_running_timeout = USEC_INFINITY;
94 u->ref_uid = UID_INVALID;
95 u->ref_gid = GID_INVALID;
96 u->cpu_usage_last = NSEC_INFINITY;
97 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
98
99 u->ip_accounting_ingress_map_fd = -1;
100 u->ip_accounting_egress_map_fd = -1;
101 u->ipv4_allow_map_fd = -1;
102 u->ipv6_allow_map_fd = -1;
103 u->ipv4_deny_map_fd = -1;
104 u->ipv6_deny_map_fd = -1;
105
106 u->last_section_private = -1;
107
108 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
109 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
110
111 return u;
112 }
113
114 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
115 _cleanup_(unit_freep) Unit *u = NULL;
116 int r;
117
118 u = unit_new(m, size);
119 if (!u)
120 return -ENOMEM;
121
122 r = unit_add_name(u, name);
123 if (r < 0)
124 return r;
125
126 *ret = TAKE_PTR(u);
127
128 return r;
129 }
130
131 bool unit_has_name(Unit *u, const char *name) {
132 assert(u);
133 assert(name);
134
135 return set_contains(u->names, (char*) name);
136 }
137
138 static void unit_init(Unit *u) {
139 CGroupContext *cc;
140 ExecContext *ec;
141 KillContext *kc;
142
143 assert(u);
144 assert(u->manager);
145 assert(u->type >= 0);
146
147 cc = unit_get_cgroup_context(u);
148 if (cc) {
149 cgroup_context_init(cc);
150
151 /* Copy in the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
154
155 cc->cpu_accounting = u->manager->default_cpu_accounting;
156 cc->io_accounting = u->manager->default_io_accounting;
157 cc->ip_accounting = u->manager->default_ip_accounting;
158 cc->blockio_accounting = u->manager->default_blockio_accounting;
159 cc->memory_accounting = u->manager->default_memory_accounting;
160 cc->tasks_accounting = u->manager->default_tasks_accounting;
161 cc->ip_accounting = u->manager->default_ip_accounting;
162
163 if (u->type != UNIT_SLICE)
164 cc->tasks_max = u->manager->default_tasks_max;
165 }
166
167 ec = unit_get_exec_context(u);
168 if (ec) {
169 exec_context_init(ec);
170
171 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
172 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
173 }
174
175 kc = unit_get_kill_context(u);
176 if (kc)
177 kill_context_init(kc);
178
179 if (UNIT_VTABLE(u)->init)
180 UNIT_VTABLE(u)->init(u);
181 }
182
/* Registers an additional name (alias) for the unit, both in the unit's own
 * name set and in the manager's global name → unit table. If the unit had no
 * type yet (fresh stub), this also locks in its type, primary id and instance
 * string and runs type-specific initialization.
 *
 * Returns 0 on success (or if the name was already present), or negative
 * errno: -EINVAL for invalid or mismatched names, -EEXIST if the name is
 * owned by another unit or the type does not allow aliases, -E2BIG when the
 * manager's name limit is reached. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be resolved if we already know our instance */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Taken by some other unit? Refuse. */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of one unit must agree on the unit type */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both tables stay in sync */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s; /* u->id just points into the name set, which owns the string */
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' was transferred to u->names above; disarm the cleanup handler */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
265
/* Switches the unit's primary id (u->id) to one of its existing names.
 * Template names are first resolved against the unit's instance. Returns
 * -ENOENT if 'name' is not among the unit's names, -EINVAL for an
 * unresolvable template. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                /* Cannot resolve a template without knowing our instance */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s; /* points into u->names, which keeps ownership of the string */

        /* Swap in the instance string extracted from the new id */
        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
305
306 int unit_set_description(Unit *u, const char *description) {
307 int r;
308
309 assert(u);
310
311 r = free_and_strdup(&u->description, empty_to_null(description));
312 if (r < 0)
313 return r;
314 if (r > 0)
315 unit_add_to_dbus_queue(u);
316
317 return 0;
318 }
319
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Queued jobs pin the unit */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected */
        if (u->perpetual)
                return false;

        /* Bus clients still tracking the unit pin it too */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* Treat "not empty" and "could not determine" the same: keep the unit */
                if (r <= 0)
                        return false;
        }

        /* Finally, the unit type itself gets a veto */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
388
389 void unit_add_to_load_queue(Unit *u) {
390 assert(u);
391 assert(u->type != _UNIT_TYPE_INVALID);
392
393 if (u->load_state != UNIT_STUB || u->in_load_queue)
394 return;
395
396 LIST_PREPEND(load_queue, u->manager->load_queue, u);
397 u->in_load_queue = true;
398 }
399
400 void unit_add_to_cleanup_queue(Unit *u) {
401 assert(u);
402
403 if (u->in_cleanup_queue)
404 return;
405
406 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
407 u->in_cleanup_queue = true;
408 }
409
410 void unit_add_to_gc_queue(Unit *u) {
411 assert(u);
412
413 if (u->in_gc_queue || u->in_cleanup_queue)
414 return;
415
416 if (!unit_may_gc(u))
417 return;
418
419 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
420 u->in_gc_queue = true;
421 }
422
423 void unit_add_to_dbus_queue(Unit *u) {
424 assert(u);
425 assert(u->type != _UNIT_TYPE_INVALID);
426
427 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
428 return;
429
430 /* Shortcut things if nobody cares */
431 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
432 sd_bus_track_count(u->bus_track) <= 0 &&
433 set_isempty(u->manager->private_buses)) {
434 u->sent_dbus_new_signal = true;
435 return;
436 }
437
438 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
439 u->in_dbus_queue = true;
440 }
441
442 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
443 assert(u);
444
445 if (u->in_stop_when_unneeded_queue)
446 return;
447
448 if (!u->stop_when_unneeded)
449 return;
450
451 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
452 return;
453
454 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
455 u->in_stop_when_unneeded_queue = true;
456 }
457
458 static void bidi_set_free(Unit *u, Hashmap *h) {
459 Unit *other;
460 Iterator i;
461 void *v;
462
463 assert(u);
464
465 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
466
467 HASHMAP_FOREACH_KEY(v, other, h, i) {
468 UnitDependency d;
469
470 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
471 hashmap_remove(other->dependencies[d], u);
472
473 unit_add_to_gc_queue(other);
474 }
475
476 hashmap_free(h);
477 }
478
479 static void unit_remove_transient(Unit *u) {
480 char **i;
481
482 assert(u);
483
484 if (!u->transient)
485 return;
486
487 if (u->fragment_path)
488 (void) unlink(u->fragment_path);
489
490 STRV_FOREACH(i, u->dropin_paths) {
491 _cleanup_free_ char *p = NULL, *pp = NULL;
492
493 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
494 if (!p)
495 continue;
496
497 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
498 if (!pp)
499 continue;
500
501 /* Only drop transient drop-ins */
502 if (!path_equal(u->manager->lookup_paths.transient, pp))
503 continue;
504
505 (void) unlink(*i);
506 (void) rmdir(p);
507 }
508 }
509
/* Removes the unit from the manager's units_requiring_mounts_for index (for
 * every path it registered, including all path prefixes) and releases the
 * unit's own requires_mounts_for hashmap. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Pop one registered path at a time; the key string is ours now */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk the path and each of its prefixes, dropping us from the
                         * per-prefix unit sets */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit for this prefix? Drop the whole entry; 'y' is
                                 * the key string owned by the manager's hashmap, so free it
                                 * explicitly. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
543
544 static void unit_done(Unit *u) {
545 ExecContext *ec;
546 CGroupContext *cc;
547
548 assert(u);
549
550 if (u->type < 0)
551 return;
552
553 if (UNIT_VTABLE(u)->done)
554 UNIT_VTABLE(u)->done(u);
555
556 ec = unit_get_exec_context(u);
557 if (ec)
558 exec_context_done(ec);
559
560 cc = unit_get_cgroup_context(u);
561 if (cc)
562 cgroup_context_done(cc);
563 }
564
/* Destroys a unit object: detaches it from all manager tables and queues,
 * cancels its jobs, releases cgroup/BPF resources and frees all memory.
 * Safe to call with NULL. The teardown order below matters; do not reorder
 * casually. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* During a daemon reload units get serialized and re-created, so keep
         * transient unit files around in that case */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop all of our names from the manager's global unit table */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall and free any pending jobs */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Drop forward dependency maps and, via bidi_set_free(), the
         * back-references other units hold on us */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Clear references we hold on others and that others hold on us */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove the unit from every manager queue it may sit in */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close the BPF map fds (initialized to -1 in unit_new() when unset) */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id points into this set (see unit_add_name()); freeing the set
         * with its strings frees the id too */
        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
690
691 UnitActiveState unit_active_state(Unit *u) {
692 assert(u);
693
694 if (u->load_state == UNIT_MERGED)
695 return unit_active_state(unit_follow_merge(u));
696
697 /* After a reload it might happen that a unit is not correctly
698 * loaded but still has a process around. That's why we won't
699 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
700
701 return UNIT_VTABLE(u)->active_state(u);
702 }
703
/* Returns the type-specific sub-state string by delegating to the unit
 * type's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
709
710 static int set_complete_move(Set **s, Set **other) {
711 assert(s);
712 assert(other);
713
714 if (!other)
715 return 0;
716
717 if (*s)
718 return set_move(*s, *other);
719 else
720 *s = TAKE_PTR(*other);
721
722 return 0;
723 }
724
725 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
726 assert(s);
727 assert(other);
728
729 if (!*other)
730 return 0;
731
732 if (*s)
733 return hashmap_move(*s, *other);
734 else
735 *s = TAKE_PTR(*other);
736
737 return 0;
738 }
739
/* Transfers all names of 'other' over to 'u' and repoints the manager's
 * name → unit entries for each of u's names at 'u'. Afterwards 'other' is
 * left without names and without an id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Free whatever remains in other's name set; if the set object itself
         * was transferred above this is NULL and a no-op */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL; /* pointed into the set just freed */

        /* Re-bind every name to 'u' in the global table; each name is expected
         * to already have an entry (hence the == 0 assertion on replace) */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
761
762 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
763 unsigned n_reserve;
764
765 assert(u);
766 assert(other);
767 assert(d < _UNIT_DEPENDENCY_MAX);
768
769 /*
770 * If u does not have this dependency set allocated, there is no need
771 * to reserve anything. In that case other's set will be transferred
772 * as a whole to u by complete_move().
773 */
774 if (!u->dependencies[d])
775 return 0;
776
777 /* merge_dependencies() will skip a u-on-u dependency */
778 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
779
780 return hashmap_reserve(u->dependencies[d], n_reserve);
781 }
782
/* Merges all dependencies of type 'd' of the unit 'other' into the deps of
 * the unit 'u', fixing up the back-references third-party units hold so they
 * point at 'u' instead of 'other'. 'other_id' is a stable copy of other's
 * original id, used only for log messages. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Cannot fail thanks to the reservation made by the caller,
                                 * but log and assert just in case */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
846
/* Merges the unit 'other' into 'u': all names, references and dependencies
 * of 'other' are transferred to 'u', and 'other' is left in UNIT_MERGED
 * state pointing at 'u' and queued for cleanup. Merging is only possible
 * while 'other' is an unloaded stub (or not-found) with no jobs and no
 * activity. Returns 0 on success, -EINVAL for incompatible units, -EEXIST
 * when 'other' is already in use. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both units must be instanced, or both not */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of other's id for log messages; merge_names() clears it */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was set to UNIT_MERGED just above, so this
         * condition can never see UNIT_STUB and is always true — looks
         * vestigial; confirm intent before simplifying. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
924
925 int unit_merge_by_name(Unit *u, const char *name) {
926 _cleanup_free_ char *s = NULL;
927 Unit *other;
928 int r;
929
930 assert(u);
931 assert(name);
932
933 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
934 if (!u->instance)
935 return -EINVAL;
936
937 r = unit_name_replace_instance(name, u->instance, &s);
938 if (r < 0)
939 return r;
940
941 name = s;
942 }
943
944 other = manager_get_unit(u->manager, name);
945 if (other)
946 return unit_merge(u, other);
947
948 return unit_add_name(u, name);
949 }
950
951 Unit* unit_follow_merge(Unit *u) {
952 assert(u);
953
954 while (u->load_state == UNIT_MERGED)
955 assert_se(u = u->merged_into);
956
957 return u;
958 }
959
/* Adds implicit dependencies derived from the unit's ExecContext 'c': mount
 * dependencies for the working/root directories and configured runtime
 * directories, plus — for the system manager only — ordering dependencies on
 * tmpfiles setup (for PrivateTmp=) and on journald (when stdout/stderr go to
 * the journal, kmsg or syslog). Returns 0 on success, negative errno on
 * failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Configured runtime/state/... directories need their prefix mounted */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p; /* assigned immediately below */

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only apply to the system instance */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1039
1040 const char *unit_description(Unit *u) {
1041 assert(u);
1042
1043 if (u->description)
1044 return u->description;
1045
1046 return strna(u->id);
1047 }
1048
1049 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1050 const struct {
1051 UnitDependencyMask mask;
1052 const char *name;
1053 } table[] = {
1054 { UNIT_DEPENDENCY_FILE, "file" },
1055 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1056 { UNIT_DEPENDENCY_DEFAULT, "default" },
1057 { UNIT_DEPENDENCY_UDEV, "udev" },
1058 { UNIT_DEPENDENCY_PATH, "path" },
1059 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1060 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1061 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1062 };
1063 size_t i;
1064
1065 assert(f);
1066 assert(kind);
1067 assert(space);
1068
1069 for (i = 0; i < ELEMENTSOF(table); i++) {
1070
1071 if (mask == 0)
1072 break;
1073
1074 if (FLAGS_SET(mask, table[i].mask)) {
1075 if (*space)
1076 fputc(' ', f);
1077 else
1078 *space = true;
1079
1080 fputs(kind, f);
1081 fputs("-", f);
1082 fputs(table[i].name, f);
1083
1084 mask &= ~table[i].mask;
1085 }
1086 }
1087
1088 assert(mask == 0);
1089 }
1090
1091 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1092 char *t, **j;
1093 UnitDependency d;
1094 Iterator i;
1095 const char *prefix2;
1096 char
1097 timestamp0[FORMAT_TIMESTAMP_MAX],
1098 timestamp1[FORMAT_TIMESTAMP_MAX],
1099 timestamp2[FORMAT_TIMESTAMP_MAX],
1100 timestamp3[FORMAT_TIMESTAMP_MAX],
1101 timestamp4[FORMAT_TIMESTAMP_MAX],
1102 timespan[FORMAT_TIMESPAN_MAX];
1103 Unit *following;
1104 _cleanup_set_free_ Set *following_set = NULL;
1105 const char *n;
1106 CGroupMask m;
1107 int r;
1108
1109 assert(u);
1110 assert(u->type >= 0);
1111
1112 prefix = strempty(prefix);
1113 prefix2 = strjoina(prefix, "\t");
1114
1115 fprintf(f,
1116 "%s-> Unit %s:\n"
1117 "%s\tDescription: %s\n"
1118 "%s\tInstance: %s\n"
1119 "%s\tUnit Load State: %s\n"
1120 "%s\tUnit Active State: %s\n"
1121 "%s\tState Change Timestamp: %s\n"
1122 "%s\tInactive Exit Timestamp: %s\n"
1123 "%s\tActive Enter Timestamp: %s\n"
1124 "%s\tActive Exit Timestamp: %s\n"
1125 "%s\tInactive Enter Timestamp: %s\n"
1126 "%s\tMay GC: %s\n"
1127 "%s\tNeed Daemon Reload: %s\n"
1128 "%s\tTransient: %s\n"
1129 "%s\tPerpetual: %s\n"
1130 "%s\tGarbage Collection Mode: %s\n"
1131 "%s\tSlice: %s\n"
1132 "%s\tCGroup: %s\n"
1133 "%s\tCGroup realized: %s\n",
1134 prefix, u->id,
1135 prefix, unit_description(u),
1136 prefix, strna(u->instance),
1137 prefix, unit_load_state_to_string(u->load_state),
1138 prefix, unit_active_state_to_string(unit_active_state(u)),
1139 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1140 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1141 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1142 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1143 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1144 prefix, yes_no(unit_may_gc(u)),
1145 prefix, yes_no(unit_need_daemon_reload(u)),
1146 prefix, yes_no(u->transient),
1147 prefix, yes_no(u->perpetual),
1148 prefix, collect_mode_to_string(u->collect_mode),
1149 prefix, strna(unit_slice_name(u)),
1150 prefix, strna(u->cgroup_path),
1151 prefix, yes_no(u->cgroup_realized));
1152
1153 if (u->cgroup_realized_mask != 0) {
1154 _cleanup_free_ char *s = NULL;
1155 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1156 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1157 }
1158 if (u->cgroup_enabled_mask != 0) {
1159 _cleanup_free_ char *s = NULL;
1160 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1161 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1162 }
1163 m = unit_get_own_mask(u);
1164 if (m != 0) {
1165 _cleanup_free_ char *s = NULL;
1166 (void) cg_mask_to_string(m, &s);
1167 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1168 }
1169 m = unit_get_members_mask(u);
1170 if (m != 0) {
1171 _cleanup_free_ char *s = NULL;
1172 (void) cg_mask_to_string(m, &s);
1173 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1174 }
1175
1176 SET_FOREACH(t, u->names, i)
1177 fprintf(f, "%s\tName: %s\n", prefix, t);
1178
1179 if (!sd_id128_is_null(u->invocation_id))
1180 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1181 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1182
1183 STRV_FOREACH(j, u->documentation)
1184 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1185
1186 following = unit_following(u);
1187 if (following)
1188 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1189
1190 r = unit_following_set(u, &following_set);
1191 if (r >= 0) {
1192 Unit *other;
1193
1194 SET_FOREACH(other, following_set, i)
1195 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1196 }
1197
1198 if (u->fragment_path)
1199 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1200
1201 if (u->source_path)
1202 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1203
1204 STRV_FOREACH(j, u->dropin_paths)
1205 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1206
1207 if (u->failure_action != EMERGENCY_ACTION_NONE)
1208 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1209 if (u->success_action != EMERGENCY_ACTION_NONE)
1210 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1211
1212 if (u->job_timeout != USEC_INFINITY)
1213 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1214
1215 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1216 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1217
1218 if (u->job_timeout_reboot_arg)
1219 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1220
1221 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1222 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1223
1224 if (dual_timestamp_is_set(&u->condition_timestamp))
1225 fprintf(f,
1226 "%s\tCondition Timestamp: %s\n"
1227 "%s\tCondition Result: %s\n",
1228 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1229 prefix, yes_no(u->condition_result));
1230
1231 if (dual_timestamp_is_set(&u->assert_timestamp))
1232 fprintf(f,
1233 "%s\tAssert Timestamp: %s\n"
1234 "%s\tAssert Result: %s\n",
1235 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1236 prefix, yes_no(u->assert_result));
1237
1238 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1239 UnitDependencyInfo di;
1240 Unit *other;
1241
1242 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1243 bool space = false;
1244
1245 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1246
1247 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1248 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1249
1250 fputs(")\n", f);
1251 }
1252 }
1253
1254 if (!hashmap_isempty(u->requires_mounts_for)) {
1255 UnitDependencyInfo di;
1256 const char *path;
1257
1258 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1259 bool space = false;
1260
1261 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1262
1263 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1264 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1265
1266 fputs(")\n", f);
1267 }
1268 }
1269
1270 if (u->load_state == UNIT_LOADED) {
1271
1272 fprintf(f,
1273 "%s\tStopWhenUnneeded: %s\n"
1274 "%s\tRefuseManualStart: %s\n"
1275 "%s\tRefuseManualStop: %s\n"
1276 "%s\tDefaultDependencies: %s\n"
1277 "%s\tOnFailureJobMode: %s\n"
1278 "%s\tIgnoreOnIsolate: %s\n",
1279 prefix, yes_no(u->stop_when_unneeded),
1280 prefix, yes_no(u->refuse_manual_start),
1281 prefix, yes_no(u->refuse_manual_stop),
1282 prefix, yes_no(u->default_dependencies),
1283 prefix, job_mode_to_string(u->on_failure_job_mode),
1284 prefix, yes_no(u->ignore_on_isolate));
1285
1286 if (UNIT_VTABLE(u)->dump)
1287 UNIT_VTABLE(u)->dump(u, f, prefix2);
1288
1289 } else if (u->load_state == UNIT_MERGED)
1290 fprintf(f,
1291 "%s\tMerged into: %s\n",
1292 prefix, u->merged_into->id);
1293 else if (u->load_state == UNIT_ERROR)
1294 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1295
1296 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1297 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1298
1299 if (u->job)
1300 job_dump(u->job, f, prefix2);
1301
1302 if (u->nop_job)
1303 job_dump(u->nop_job, f, prefix2);
1304 }
1305
1306 /* Common implementation for multiple backends */
1307 int unit_load_fragment_and_dropin(Unit *u) {
1308 int r;
1309
1310 assert(u);
1311
1312 /* Load a .{service,socket,...} file */
1313 r = unit_load_fragment(u);
1314 if (r < 0)
1315 return r;
1316
1317 if (u->load_state == UNIT_STUB)
1318 return -ENOENT;
1319
1320 /* Load drop-in directory data. If u is an alias, we might be reloading the
1321 * target unit needlessly. But we cannot be sure which drops-ins have already
1322 * been loaded and which not, at least without doing complicated book-keeping,
1323 * so let's always reread all drop-ins. */
1324 return unit_load_dropin(unit_follow_merge(u));
1325 }
1326
1327 /* Common implementation for multiple backends */
1328 int unit_load_fragment_and_dropin_optional(Unit *u) {
1329 int r;
1330
1331 assert(u);
1332
1333 /* Same as unit_load_fragment_and_dropin(), but whether
1334 * something can be loaded or not doesn't matter. */
1335
1336 /* Load a .service/.socket/.slice/… file */
1337 r = unit_load_fragment(u);
1338 if (r < 0)
1339 return r;
1340
1341 if (u->load_state == UNIT_STUB)
1342 u->load_state = UNIT_LOADED;
1343
1344 /* Load drop-in directory data */
1345 return unit_load_dropin(unit_follow_merge(u));
1346 }
1347
1348 void unit_add_to_target_deps_queue(Unit *u) {
1349 Manager *m = u->manager;
1350
1351 assert(u);
1352
1353 if (u->in_target_deps_queue)
1354 return;
1355
1356 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1357 u->in_target_deps_queue = true;
1358 }
1359
1360 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1361 assert(u);
1362 assert(target);
1363
1364 if (target->type != UNIT_TARGET)
1365 return 0;
1366
1367 /* Only add the dependency if both units are loaded, so that
1368 * that loop check below is reliable */
1369 if (u->load_state != UNIT_LOADED ||
1370 target->load_state != UNIT_LOADED)
1371 return 0;
1372
1373 /* If either side wants no automatic dependencies, then let's
1374 * skip this */
1375 if (!u->default_dependencies ||
1376 !target->default_dependencies)
1377 return 0;
1378
1379 /* Don't create loops */
1380 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1381 return 0;
1382
1383 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1384 }
1385
1386 static int unit_add_slice_dependencies(Unit *u) {
1387 UnitDependencyMask mask;
1388 assert(u);
1389
1390 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1391 return 0;
1392
1393 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1394 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1395 relationship). */
1396 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1397
1398 if (UNIT_ISSET(u->slice))
1399 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1400
1401 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1402 return 0;
1403
1404 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1405 }
1406
/* For every path in u->requires_mounts_for, add After= (and, for units backed by a
 * fragment, Requires=) dependencies on the mount units covering that path and all of
 * its parent directories. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];  /* VLA: large enough for any prefix of 'path' */

                /* Walk all prefixes of the path (e.g. /, /a, /a/b for /a/b) and look for a
                 * mount unit responsible for each of them. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Order ourselves after the mount unit, propagating the origin mask of
                         * whoever recorded the RequiresMountsFor= entry. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only pull the mount in (Requires=) if it is backed by a unit file. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1456
1457 static int unit_add_startup_units(Unit *u) {
1458 CGroupContext *c;
1459 int r;
1460
1461 c = unit_get_cgroup_context(u);
1462 if (!c)
1463 return 0;
1464
1465 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1466 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1467 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1468 return 0;
1469
1470 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1471 if (r < 0)
1472 return r;
1473
1474 return set_put(u->manager->startup_units, u);
1475 }
1476
/* Load the unit's configuration and, on success, register the follow-up work
 * (dependencies, queues). On failure the load state is set to UNIT_NOT_FOUND,
 * UNIT_BAD_SETTING or UNIT_ERROR as appropriate and a negative errno is returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading it right now, hence drop it from the load queue if queued. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Anything but a stub is already loaded (or merged/failed earlier) — nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient unit: flush the serialized settings to disk before loading them back. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Let the per-type loader do the heavy lifting. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after loading? Then no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: exactly the merged units carry a merged_into pointer. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1561
/* Evaluate a list of Condition*=/Assert*= entries. Non-trigger conditions are
 * AND-ed; trigger ("|") conditions are OR-ed among themselves. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1;  /* -1: no trigger condition seen yet; 0/1: result so far */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        /* Could not evaluate the condition at all; treated as failed below. */
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failing (or unevaluable) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* For trigger conditions, remember whether at least one held so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* Fail only if trigger conditions were present and none of them held. */
        return triggered != 0;
}
1605
1606 static bool unit_condition_test(Unit *u) {
1607 assert(u);
1608
1609 dual_timestamp_get(&u->condition_timestamp);
1610 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1611
1612 return u->condition_result;
1613 }
1614
1615 static bool unit_assert_test(Unit *u) {
1616 assert(u);
1617
1618 dual_timestamp_get(&u->assert_timestamp);
1619 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1620
1621 return u->assert_result;
1622 }
1623
/* Print a console status line for this unit, substituting the unit's description
 * into the (non-literal, hence the warning suppression) format string. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1629
1630 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1631 const char *format;
1632 const UnitStatusMessageFormats *format_table;
1633
1634 assert(u);
1635 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1636
1637 if (t != JOB_RELOAD) {
1638 format_table = &UNIT_VTABLE(u)->status_message_formats;
1639 if (format_table) {
1640 format = format_table->starting_stopping[t == JOB_STOP];
1641 if (format)
1642 return format;
1643 }
1644 }
1645
1646 /* Return generic strings */
1647 if (t == JOB_START)
1648 return "Starting %s.";
1649 else if (t == JOB_STOP)
1650 return "Stopping %s.";
1651 else
1652 return "Reloading %s.";
1653 }
1654
1655 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1656 const char *format;
1657
1658 assert(u);
1659
1660 /* Reload status messages have traditionally not been printed to console. */
1661 if (!IN_SET(t, JOB_START, JOB_STOP))
1662 return;
1663
1664 format = unit_get_status_message_format(u, t);
1665
1666 DISABLE_WARNING_FORMAT_NONLITERAL;
1667 unit_status_printf(u, "", format);
1668 REENABLE_WARNING;
1669 }
1670
/* Write a structured journal record for a start/stop/reload transition, carrying
 * the appropriate MESSAGE_ID plus the unit and invocation identifiers. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* Not when the log output already goes to the console. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* The format string is selected at runtime, hence the warning suppression. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the catalog message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
                              "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid);
}
1707
1708 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1709 assert(u);
1710 assert(t >= 0);
1711 assert(t < _JOB_TYPE_MAX);
1712
1713 unit_status_log_starting_stopping_reloading(u, t);
1714 unit_status_print_starting_stopping(u, t);
1715 }
1716
1717 int unit_start_limit_test(Unit *u) {
1718 const char *reason;
1719
1720 assert(u);
1721
1722 if (ratelimit_below(&u->start_limit)) {
1723 u->start_limit_hit = false;
1724 return 0;
1725 }
1726
1727 log_unit_warning(u, "Start request repeated too quickly.");
1728 u->start_limit_hit = true;
1729
1730 reason = strjoina("unit ", u->id, " failed");
1731
1732 return emergency_action(u->manager, u->start_limit_action,
1733 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1734 u->reboot_arg, reason);
1735 }
1736
/* Whether a confirmation question should be asked before spawning processes for
 * this unit. */
bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reasons units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        /* NOTE(review): unit_get_exec_context(u) is dereferenced without a NULL check —
         * presumably callers only invoke this for units that carry an exec context;
         * confirm against the call sites. */
        return !unit_get_exec_context(u)->same_pgrp;
}
1748
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Only consider BindsTo= units we are also ordered After=. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                /* The bound-to unit must be up (or reloading) for the check to pass. */
                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1774
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1861
1862 bool unit_can_start(Unit *u) {
1863 assert(u);
1864
1865 if (u->load_state != UNIT_LOADED)
1866 return false;
1867
1868 if (!unit_supported(u))
1869 return false;
1870
1871 /* Scope units may be started only once */
1872 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1873 return false;
1874
1875 return !!UNIT_VTABLE(u)->start;
1876 }
1877
1878 bool unit_can_isolate(Unit *u) {
1879 assert(u);
1880
1881 return unit_can_start(u) &&
1882 u->allow_isolate;
1883 }
1884
1885 /* Errors:
1886 * -EBADR: This unit type does not support stopping.
1887 * -EALREADY: Unit is already stopped.
1888 * -EAGAIN: An operation is already in progress. Retry later.
1889 */
1890 int unit_stop(Unit *u) {
1891 UnitActiveState state;
1892 Unit *following;
1893
1894 assert(u);
1895
1896 state = unit_active_state(u);
1897 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1898 return -EALREADY;
1899
1900 following = unit_following(u);
1901 if (following) {
1902 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1903 return unit_stop(following);
1904 }
1905
1906 if (!UNIT_VTABLE(u)->stop)
1907 return -EBADR;
1908
1909 unit_add_to_dbus_queue(u);
1910
1911 return UNIT_VTABLE(u)->stop(u);
1912 }
1913
1914 bool unit_can_stop(Unit *u) {
1915 assert(u);
1916
1917 if (!unit_supported(u))
1918 return false;
1919
1920 if (u->perpetual)
1921 return false;
1922
1923 return !!UNIT_VTABLE(u)->stop;
1924 }
1925
1926 /* Errors:
1927 * -EBADR: This unit type does not support reloading.
1928 * -ENOEXEC: Unit is not started.
1929 * -EAGAIN: An operation is already in progress. Retry later.
1930 */
1931 int unit_reload(Unit *u) {
1932 UnitActiveState state;
1933 Unit *following;
1934
1935 assert(u);
1936
1937 if (u->load_state != UNIT_LOADED)
1938 return -EINVAL;
1939
1940 if (!unit_can_reload(u))
1941 return -EBADR;
1942
1943 state = unit_active_state(u);
1944 if (state == UNIT_RELOADING)
1945 return -EALREADY;
1946
1947 if (state != UNIT_ACTIVE) {
1948 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1949 return -ENOEXEC;
1950 }
1951
1952 following = unit_following(u);
1953 if (following) {
1954 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1955 return unit_reload(following);
1956 }
1957
1958 unit_add_to_dbus_queue(u);
1959
1960 if (!UNIT_VTABLE(u)->reload) {
1961 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1962 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1963 return 0;
1964 }
1965
1966 return UNIT_VTABLE(u)->reload(u);
1967 }
1968
1969 bool unit_can_reload(Unit *u) {
1970 assert(u);
1971
1972 if (UNIT_VTABLE(u)->can_reload)
1973 return UNIT_VTABLE(u)->can_reload(u);
1974
1975 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1976 return true;
1977
1978 return UNIT_VTABLE(u)->reload;
1979 }
1980
/* Whether a StopWhenUnneeded= unit is currently unneeded, i.e. active with no
 * active/queued/restarting unit depending on it. */
bool unit_is_unneeded(Unit *u) {
        /* The reverse dependency types that keep a unit "needed". */
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}
2023
2024 static void check_unneeded_dependencies(Unit *u) {
2025
2026 static const UnitDependency deps[] = {
2027 UNIT_REQUIRES,
2028 UNIT_REQUISITE,
2029 UNIT_WANTS,
2030 UNIT_BINDS_TO,
2031 };
2032 size_t j;
2033
2034 assert(u);
2035
2036 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2037
2038 for (j = 0; j < ELEMENTSOF(deps); j++) {
2039 Unit *other;
2040 Iterator i;
2041 void *v;
2042
2043 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2044 unit_submit_to_stop_when_unneeded_queue(other);
2045 }
2046 }
2047
/* If this active unit is bound (BindsTo=) to a unit that went down, enqueue a stop
 * job for it — rate-limited to avoid stop loops. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A queued job will handle the state change anyway. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        /* Look for a BindsTo= unit that is conclusively down. Note: 'other' keeps
         * pointing at the offending unit after the loop breaks. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2098
/* The unit became active outside of regular job processing: start its pull-in
 * dependencies that are not also ordered After= it (and not already up), and stop
 * conflicting units. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= and BindsTo= deps are started with JOB_REPLACE… */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* …while the weaker Wants= deps use JOB_FAIL. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units that aren't already going down get stopped. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2130
2131 static void retroactively_stop_dependencies(Unit *u) {
2132 Unit *other;
2133 Iterator i;
2134 void *v;
2135
2136 assert(u);
2137 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2138
2139 /* Pull down units which are bound to us recursively if enabled */
2140 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2141 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2142 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2143 }
2144
2145 void unit_start_on_failure(Unit *u) {
2146 Unit *other;
2147 Iterator i;
2148 void *v;
2149 int r;
2150
2151 assert(u);
2152
2153 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2154 return;
2155
2156 log_unit_info(u, "Triggering OnFailure= dependencies.");
2157
2158 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2159 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2160
2161 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2162 if (r < 0)
2163 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2164 }
2165 }
2166
2167 void unit_trigger_notify(Unit *u) {
2168 Unit *other;
2169 Iterator i;
2170 void *v;
2171
2172 assert(u);
2173
2174 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2175 if (UNIT_VTABLE(other)->trigger_notify)
2176 UNIT_VTABLE(other)->trigger_notify(other, u);
2177 }
2178
static int unit_log_resources(Unit *u) {
        /* 1 MESSAGE= + one field per IP metric + 4 fixed trailing fields (see below). */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        bool any_traffic = false, have_ip_accounting = false;
        _cleanup_free_ char *igress = NULL, *egress = NULL;
        size_t n_message_parts = 0, n_iovec = 0;
        char* message_parts[3 + 1], *t;  /* at most CPU + ingress + egress (or "no IP traffic"), plus NULL */
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES]    = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS]  = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                t = strjoin("consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        /* Collect the per-metric IP accounting counters, if available. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                have_ip_accounting = true;
                if (value > 0)
                        any_traffic = true;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES) {
                        assert(!igress);
                        igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!igress) {
                                r = log_oom();
                                goto finish;
                        }
                } else if (m == CGROUP_IP_EGRESS_BYTES) {
                        assert(!egress);
                        egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
                        if (!egress) {
                                r = log_oom();
                                goto finish;
                        }
                }
        }

        /* Fold the IP summary into the human-readable message: either the traffic
         * strings built above, or an explicit "no IP traffic" note. */
        if (have_ip_accounting) {
                if (any_traffic) {
                        if (igress)
                                message_parts[n_message_parts++] = TAKE_PTR(igress);
                        if (egress)
                                message_parts[n_message_parts++] = TAKE_PTR(egress);

                } else {
                        char *k;

                        k = strdup("no IP traffic");
                        if (!k) {
                                r = log_oom();
                                goto finish;
                        }

                        message_parts[n_message_parts++] = k;
                }
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        /* Build the MESSAGE= field from the collected parts (strjoina allocates on
         * the stack, hence nothing to free for 't' here). */
        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed.");
        else {
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                joined[0] = ascii_toupper(joined[0]);
                t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries: message parts and the first n_iovec
         * iovec payloads (the four trailing fields are stack/static, see above). */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2331
2332 static void unit_update_on_console(Unit *u) {
2333 bool b;
2334
2335 assert(u);
2336
2337 b = unit_needs_console(u);
2338 if (u->on_console == b)
2339 return;
2340
2341 u->on_console = b;
2342 if (b)
2343 manager_ref_console(u->manager);
2344 else
2345 manager_unref_console(u->manager);
2346 }
2347
2348 static void unit_emit_audit_start(Unit *u) {
2349 assert(u);
2350
2351 if (u->type != UNIT_SERVICE)
2352 return;
2353
2354 /* Write audit record if we have just finished starting up */
2355 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2356 u->in_audit = true;
2357 }
2358
2359 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2360 assert(u);
2361
2362 if (u->type != UNIT_SERVICE)
2363 return;
2364
2365 if (u->in_audit) {
2366 /* Write audit record if we have just finished shutting down */
2367 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2368 u->in_audit = false;
2369 } else {
2370 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2371 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2372
2373 if (state == UNIT_INACTIVE)
2374 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2375 }
2376 }
2377
/* Central state-change hook: every low-level unit state transition funnels
 * through here. Updates timestamps, finishes or invalidates jobs, triggers
 * retroactive dependency start/stop, emits audit/plymouth/log events, and
 * schedules follow-up queue work. Note that ns == os is a legitimate call
 * (e.g. a remount maps to the same high-level state). */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected;
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes, but not while deserializing: the
         * timestamps were serialized and must not be overwritten. */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        /* Work out whether this state change was requested by a pending job
         * ("expected") or happened behind our back ("unexpected"). */
        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* Still supposed to be starting, but the unit went elsewhere. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        /* Reload finished; UNIT_NOTIFY_RELOAD_FAILURE tells us whether it worked. */
                                        job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* OnFailure= is suppressed when the unit is about to be auto-restarted anyway. */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                /* Honour FailureAction=/SuccessAction= on this transition. */
                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        (void) emergency_action(m, u->failure_action, 0, u->reboot_arg, reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        (void) emergency_action(m, u->success_action, 0, u->reboot_arg, reason);
                }
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2554
/* Start watching a specific PID for this unit. The manager's watch_pids
 * hashmap uses two key spaces: the positive "pid" key maps to a single Unit,
 * while the negated "-pid" key maps to a NULL-terminated Unit* array for the
 * case where several units watch the same PID.
 *
 * Returns 0 on success (including when already watching), negative errno on
 * failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array, one slot larger plus the NULL terminator. */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* Copy the old entries (n is the old element count), then append ourselves. */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also remember the PID in our own per-unit set, for unwatching later. */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2619
/* Stop watching a PID for this unit: drop it from both key spaces of the
 * manager's watch_pids hashmap (see unit_watch_pid()) and from our own set. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                size_t n, m = 0;

                /* Let's iterate through the array, dropping our own entry */
                for (n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        /* NOTE(review): the hashmap_remove() side effect lives inside assert();
                         * this relies on the project never being compiled with NDEBUG — confirm. */
                        assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2649
2650 void unit_unwatch_all_pids(Unit *u) {
2651 assert(u);
2652
2653 while (!set_isempty(u->pids))
2654 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2655
2656 u->pids = set_free(u->pids);
2657 }
2658
/* Drop watched PIDs that no longer refer to live (or unwaited-for) processes,
 * keeping the unit's main and control PIDs regardless. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        /* NOTE(review): unit_unwatch_pid() removes entries from u->pids while we
         * iterate it; this relies on SET_FOREACH tolerating removal of the
         * current entry — confirm against the set implementation. */
        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2681
2682 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2683 Unit *u = userdata;
2684
2685 assert(s);
2686 assert(u);
2687
2688 unit_tidy_watch_pids(u);
2689 unit_watch_all_pids(u);
2690
2691 /* If the PID set is empty now, then let's finish this off. */
2692 unit_synthesize_cgroup_empty_event(u);
2693
2694 return 0;
2695 }
2696
2697 int unit_enqueue_rewatch_pids(Unit *u) {
2698 int r;
2699
2700 assert(u);
2701
2702 if (!u->cgroup_path)
2703 return -ENOENT;
2704
2705 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2706 if (r < 0)
2707 return r;
2708 if (r > 0) /* On unified we can use proper notifications */
2709 return 0;
2710
2711 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2712 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2713 * involves issuing kill(pid, 0) on all processes we watch. */
2714
2715 if (!u->rewatch_pids_event_source) {
2716 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2717
2718 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2719 if (r < 0)
2720 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2721
2722 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2723 if (r < 0)
2724 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2725
2726 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2727
2728 u->rewatch_pids_event_source = TAKE_PTR(s);
2729 }
2730
2731 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2732 if (r < 0)
2733 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2734
2735 return 0;
2736 }
2737
2738 void unit_dequeue_rewatch_pids(Unit *u) {
2739 int r;
2740 assert(u);
2741
2742 if (!u->rewatch_pids_event_source)
2743 return;
2744
2745 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2746 if (r < 0)
2747 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2748
2749 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2750 }
2751
2752 bool unit_job_is_applicable(Unit *u, JobType j) {
2753 assert(u);
2754 assert(j >= 0 && j < _JOB_TYPE_MAX);
2755
2756 switch (j) {
2757
2758 case JOB_VERIFY_ACTIVE:
2759 case JOB_START:
2760 case JOB_NOP:
2761 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2762 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2763 * jobs for it. */
2764 return true;
2765
2766 case JOB_STOP:
2767 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2768 * external events), hence it makes no sense to permit enqueing such a request either. */
2769 return !u->perpetual;
2770
2771 case JOB_RESTART:
2772 case JOB_TRY_RESTART:
2773 return unit_can_stop(u) && unit_can_start(u);
2774
2775 case JOB_RELOAD:
2776 case JOB_TRY_RELOAD:
2777 return unit_can_reload(u);
2778
2779 case JOB_RELOAD_OR_START:
2780 return unit_can_reload(u) && unit_can_start(u);
2781
2782 default:
2783 assert_not_reached("Invalid job type");
2784 }
2785 }
2786
2787 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2788 assert(u);
2789
2790 /* Only warn about some unit types */
2791 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2792 return;
2793
2794 if (streq_ptr(u->id, other))
2795 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2796 else
2797 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2798 }
2799
2800 static int unit_add_dependency_hashmap(
2801 Hashmap **h,
2802 Unit *other,
2803 UnitDependencyMask origin_mask,
2804 UnitDependencyMask destination_mask) {
2805
2806 UnitDependencyInfo info;
2807 int r;
2808
2809 assert(h);
2810 assert(other);
2811 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2812 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2813 assert(origin_mask > 0 || destination_mask > 0);
2814
2815 r = hashmap_ensure_allocated(h, NULL);
2816 if (r < 0)
2817 return r;
2818
2819 assert_cc(sizeof(void*) == sizeof(info));
2820
2821 info.data = hashmap_get(*h, other);
2822 if (info.data) {
2823 /* Entry already exists. Add in our mask. */
2824
2825 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2826 FLAGS_SET(destination_mask, info.destination_mask))
2827 return 0; /* NOP */
2828
2829 info.origin_mask |= origin_mask;
2830 info.destination_mask |= destination_mask;
2831
2832 r = hashmap_update(*h, other, info.data);
2833 } else {
2834 info = (UnitDependencyInfo) {
2835 .origin_mask = origin_mask,
2836 .destination_mask = destination_mask,
2837 };
2838
2839 r = hashmap_put(*h, other, info.data);
2840 }
2841 if (r < 0)
2842 return r;
2843
2844 return 1;
2845 }
2846
2847 int unit_add_dependency(
2848 Unit *u,
2849 UnitDependency d,
2850 Unit *other,
2851 bool add_reference,
2852 UnitDependencyMask mask) {
2853
2854 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2855 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2856 [UNIT_WANTS] = UNIT_WANTED_BY,
2857 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2858 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2859 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2860 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2861 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2862 [UNIT_WANTED_BY] = UNIT_WANTS,
2863 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2864 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2865 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2866 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2867 [UNIT_BEFORE] = UNIT_AFTER,
2868 [UNIT_AFTER] = UNIT_BEFORE,
2869 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2870 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2871 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2872 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2873 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2874 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2875 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2876 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2877 };
2878 Unit *original_u = u, *original_other = other;
2879 int r;
2880
2881 assert(u);
2882 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2883 assert(other);
2884
2885 u = unit_follow_merge(u);
2886 other = unit_follow_merge(other);
2887
2888 /* We won't allow dependencies on ourselves. We will not
2889 * consider them an error however. */
2890 if (u == other) {
2891 maybe_warn_about_dependency(original_u, original_other->id, d);
2892 return 0;
2893 }
2894
2895 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2896 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2897 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2898 return 0;
2899 }
2900
2901 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2902 if (r < 0)
2903 return r;
2904
2905 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2906 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2907 if (r < 0)
2908 return r;
2909 }
2910
2911 if (add_reference) {
2912 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2913 if (r < 0)
2914 return r;
2915
2916 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2917 if (r < 0)
2918 return r;
2919 }
2920
2921 unit_add_to_dbus_queue(u);
2922 return 0;
2923 }
2924
2925 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2926 int r;
2927
2928 assert(u);
2929
2930 r = unit_add_dependency(u, d, other, add_reference, mask);
2931 if (r < 0)
2932 return r;
2933
2934 return unit_add_dependency(u, e, other, add_reference, mask);
2935 }
2936
2937 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2938 int r;
2939
2940 assert(u);
2941 assert(name);
2942 assert(buf);
2943 assert(ret);
2944
2945 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2946 *buf = NULL;
2947 *ret = name;
2948 return 0;
2949 }
2950
2951 if (u->instance)
2952 r = unit_name_replace_instance(name, u->instance, buf);
2953 else {
2954 _cleanup_free_ char *i = NULL;
2955
2956 r = unit_name_to_prefix(u->id, &i);
2957 if (r < 0)
2958 return r;
2959
2960 r = unit_name_replace_instance(name, i, buf);
2961 }
2962 if (r < 0)
2963 return r;
2964
2965 *ret = *buf;
2966 return 0;
2967 }
2968
2969 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2970 _cleanup_free_ char *buf = NULL;
2971 Unit *other;
2972 int r;
2973
2974 assert(u);
2975 assert(name);
2976
2977 r = resolve_template(u, name, &buf, &name);
2978 if (r < 0)
2979 return r;
2980
2981 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2982 if (r < 0)
2983 return r;
2984
2985 return unit_add_dependency(u, d, other, add_reference, mask);
2986 }
2987
2988 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2989 _cleanup_free_ char *buf = NULL;
2990 Unit *other;
2991 int r;
2992
2993 assert(u);
2994 assert(name);
2995
2996 r = resolve_template(u, name, &buf, &name);
2997 if (r < 0)
2998 return r;
2999
3000 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3001 if (r < 0)
3002 return r;
3003
3004 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3005 }
3006
/* Debug helper: point the manager at an alternate unit search path by
 * exporting SYSTEMD_UNIT_PATH. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        int r;

        r = setenv("SYSTEMD_UNIT_PATH", p, 1);
        return r < 0 ? -errno : 0;
}
3014
3015 char *unit_dbus_path(Unit *u) {
3016 assert(u);
3017
3018 if (!u->id)
3019 return NULL;
3020
3021 return unit_dbus_path_from_name(u->id);
3022 }
3023
3024 char *unit_dbus_path_invocation_id(Unit *u) {
3025 assert(u);
3026
3027 if (sd_id128_is_null(u->invocation_id))
3028 return NULL;
3029
3030 return unit_dbus_path_from_name(u->invocation_id_string);
3031 }
3032
3033 int unit_set_slice(Unit *u, Unit *slice) {
3034 assert(u);
3035 assert(slice);
3036
3037 /* Sets the unit slice if it has not been set before. Is extra
3038 * careful, to only allow this for units that actually have a
3039 * cgroup context. Also, we don't allow to set this for slices
3040 * (since the parent slice is derived from the name). Make
3041 * sure the unit we set is actually a slice. */
3042
3043 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3044 return -EOPNOTSUPP;
3045
3046 if (u->type == UNIT_SLICE)
3047 return -EINVAL;
3048
3049 if (unit_active_state(u) != UNIT_INACTIVE)
3050 return -EBUSY;
3051
3052 if (slice->type != UNIT_SLICE)
3053 return -EINVAL;
3054
3055 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3056 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3057 return -EPERM;
3058
3059 if (UNIT_DEREF(u->slice) == slice)
3060 return 0;
3061
3062 /* Disallow slice changes if @u is already bound to cgroups */
3063 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3064 return -EBUSY;
3065
3066 unit_ref_set(&u->slice, u, slice);
3067 return 1;
3068 }
3069
3070 int unit_set_default_slice(Unit *u) {
3071 _cleanup_free_ char *b = NULL;
3072 const char *slice_name;
3073 Unit *slice;
3074 int r;
3075
3076 assert(u);
3077
3078 if (UNIT_ISSET(u->slice))
3079 return 0;
3080
3081 if (u->instance) {
3082 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3083
3084 /* Implicitly place all instantiated units in their
3085 * own per-template slice */
3086
3087 r = unit_name_to_prefix(u->id, &prefix);
3088 if (r < 0)
3089 return r;
3090
3091 /* The prefix is already escaped, but it might include
3092 * "-" which has a special meaning for slice units,
3093 * hence escape it here extra. */
3094 escaped = unit_name_escape(prefix);
3095 if (!escaped)
3096 return -ENOMEM;
3097
3098 if (MANAGER_IS_SYSTEM(u->manager))
3099 b = strjoin("system-", escaped, ".slice");
3100 else
3101 b = strappend(escaped, ".slice");
3102 if (!b)
3103 return -ENOMEM;
3104
3105 slice_name = b;
3106 } else
3107 slice_name =
3108 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3109 ? SPECIAL_SYSTEM_SLICE
3110 : SPECIAL_ROOT_SLICE;
3111
3112 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3113 if (r < 0)
3114 return r;
3115
3116 return unit_set_slice(u, slice);
3117 }
3118
3119 const char *unit_slice_name(Unit *u) {
3120 assert(u);
3121
3122 if (!UNIT_ISSET(u->slice))
3123 return NULL;
3124
3125 return UNIT_DEREF(u->slice)->id;
3126 }
3127
3128 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3129 _cleanup_free_ char *t = NULL;
3130 int r;
3131
3132 assert(u);
3133 assert(type);
3134 assert(_found);
3135
3136 r = unit_name_change_suffix(u->id, type, &t);
3137 if (r < 0)
3138 return r;
3139 if (unit_has_name(u, t))
3140 return -EINVAL;
3141
3142 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3143 assert(r < 0 || *_found != u);
3144 return r;
3145 }
3146
3147 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3148 const char *name, *old_owner, *new_owner;
3149 Unit *u = userdata;
3150 int r;
3151
3152 assert(message);
3153 assert(u);
3154
3155 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3156 if (r < 0) {
3157 bus_log_parse_error(r);
3158 return 0;
3159 }
3160
3161 old_owner = empty_to_null(old_owner);
3162 new_owner = empty_to_null(new_owner);
3163
3164 if (UNIT_VTABLE(u)->bus_name_owner_change)
3165 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3166
3167 return 0;
3168 }
3169
3170 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3171 const char *match;
3172
3173 assert(u);
3174 assert(bus);
3175 assert(name);
3176
3177 if (u->match_bus_slot)
3178 return -EBUSY;
3179
3180 match = strjoina("type='signal',"
3181 "sender='org.freedesktop.DBus',"
3182 "path='/org/freedesktop/DBus',"
3183 "interface='org.freedesktop.DBus',"
3184 "member='NameOwnerChanged',"
3185 "arg0='", name, "'");
3186
3187 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3188 }
3189
3190 int unit_watch_bus_name(Unit *u, const char *name) {
3191 int r;
3192
3193 assert(u);
3194 assert(name);
3195
3196 /* Watch a specific name on the bus. We only support one unit
3197 * watching each name for now. */
3198
3199 if (u->manager->api_bus) {
3200 /* If the bus is already available, install the match directly.
3201 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3202 r = unit_install_bus_match(u, u->manager->api_bus, name);
3203 if (r < 0)
3204 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3205 }
3206
3207 r = hashmap_put(u->manager->watch_bus, name, u);
3208 if (r < 0) {
3209 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3210 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3211 }
3212
3213 return 0;
3214 }
3215
3216 void unit_unwatch_bus_name(Unit *u, const char *name) {
3217 assert(u);
3218 assert(name);
3219
3220 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3221 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3222 }
3223
3224 bool unit_can_serialize(Unit *u) {
3225 assert(u);
3226
3227 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3228 }
3229
3230 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3231 _cleanup_free_ char *s = NULL;
3232 int r;
3233
3234 assert(f);
3235 assert(key);
3236
3237 if (mask == 0)
3238 return 0;
3239
3240 r = cg_mask_to_string(mask, &s);
3241 if (r < 0)
3242 return log_error_errno(r, "Failed to format cgroup mask: %m");
3243
3244 return serialize_item(f, key, s);
3245 }
3246
/* Serialization keys for the per-unit IP accounting counters, indexed by
 * CGroupIPAccountingMetric. Must stay in sync with unit_deserialize(). */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3253
/* Serialize the unit's runtime state to f (fds collects any file descriptors
 * the type-specific serializer wants to pass across reexec/reload). The
 * key=value format and the keys used here must match unit_deserialize(). */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Type-specific state first (service, socket, ...), if supported. */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);

        (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
        (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
        (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
        (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);

        /* The results are only meaningful if the corresponding check ever ran. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                (void) serialize_bool(f, "condition-result", u->condition_result);

        if (dual_timestamp_is_set(&u->assert_timestamp))
                (void) serialize_bool(f, "assert-result", u->assert_result);

        (void) serialize_bool(f, "transient", u->transient);
        (void) serialize_bool(f, "in-audit", u->in_audit);

        (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
        (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
        (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
        (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
        (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);

        (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                (void) serialize_item(f, "cgroup", u->cgroup_path);

        (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
        (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);

        if (uid_is_valid(u->ref_uid))
                (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* IP accounting counters, keyed via ip_accounting_metric_field[]. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fputs("job\n", f);
                        job_serialize(u->job, f);
                }

                /* NOTE(review): the nop_job is written under the same "job" key as the
                 * regular job — confirm the deserializer distinguishes the two. */
                if (u->nop_job) {
                        fputs("job\n", f);
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3339
/* Restores a unit's runtime state previously written by unit_serialize(), reading "key=value"
 * lines from f until the empty-line end marker or EOF. Unknown keys are handed to the unit
 * type's deserialize_item() callback. Returns 0 on success, negative errno on fatal errors
 * (read failure, OOM, job deserialization failure); unparsable values are logged and skipped. */
int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
        int r;

        assert(u);
        assert(f);
        assert(fds);

        for (;;) {
                _cleanup_free_ char *line = NULL;
                CGroupIPAccountingMetric m;
                char *l, *v;
                size_t k;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0) /* eof */
                        break;

                l = strstrip(line);
                if (isempty(l)) /* End marker */
                        break;

                /* Split "key=value"; a line without '=' yields v pointing at the trailing NUL,
                 * i.e. an empty value. */
                k = strcspn(l, "=");

                if (l[k] == '=') {
                        l[k] = 0;
                        v = l+k+1;
                } else
                        v = l+k;

                if (streq(l, "job")) {
                        if (v[0] == '\0') {
                                /* new-style serialized job: the job's own fields follow on subsequent lines */
                                Job *j;

                                j = job_new_raw(u);
                                if (!j)
                                        return log_oom();

                                r = job_deserialize(j, f);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
                                if (r < 0) {
                                        job_free(j);
                                        return r;
                                }

                                r = job_install_deserialized(j);
                                if (r < 0) {
                                        /* Undo the hashmap registration before freeing the job */
                                        hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
                                        job_free(j);
                                        return r;
                                }
                        } else  /* legacy for pre-44 */
                                log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
                        continue;
                } else if (streq(l, "state-change-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
                        continue;
                } else if (streq(l, "inactive-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
                        continue;
                } else if (streq(l, "active-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
                        continue;
                } else if (streq(l, "active-exit-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
                        continue;
                } else if (streq(l, "inactive-enter-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
                        continue;
                } else if (streq(l, "condition-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
                        continue;
                } else if (streq(l, "assert-timestamp")) {
                        (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
                        continue;
                } else if (streq(l, "condition-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
                        else
                                u->condition_result = r;

                        continue;

                } else if (streq(l, "assert-result")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
                        else
                                u->assert_result = r;

                        continue;

                } else if (streq(l, "transient")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
                        else
                                u->transient = r;

                        continue;

                } else if (streq(l, "in-audit")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
                        else
                                u->in_audit = r;

                        continue;

                } else if (streq(l, "exported-invocation-id")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
                        else
                                u->exported_invocation_id = r;

                        continue;

                } else if (streq(l, "exported-log-level-max")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
                        else
                                u->exported_log_level_max = r;

                        continue;

                } else if (streq(l, "exported-log-extra-fields")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
                        else
                                u->exported_log_extra_fields = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-interval")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_interval = r;

                        continue;

                } else if (streq(l, "exported-log-rate-limit-burst")) {

                        r = parse_boolean(v);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
                        else
                                u->exported_log_rate_limit_burst = r;

                        continue;

                } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
                        /* "cpuacct-usage-base" is the compat spelling written by older versions */

                        r = safe_atou64(v, &u->cpu_usage_base);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cpu-usage-last")) {

                        r = safe_atou64(v, &u->cpu_usage_last);
                        if (r < 0)
                                log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);

                        continue;

                } else if (streq(l, "cgroup")) {

                        r = unit_set_cgroup_path(u, v);
                        if (r < 0)
                                log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);

                        /* Re-establish inotify watches on the restored cgroup path */
                        (void) unit_watch_cgroup(u);

                        continue;
                } else if (streq(l, "cgroup-realized")) {
                        int b;

                        b = parse_boolean(v);
                        if (b < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
                        else
                                u->cgroup_realized = b;

                        continue;

                } else if (streq(l, "cgroup-realized-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_realized_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-enabled-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "cgroup-invalidated-mask")) {

                        r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
                        continue;

                } else if (streq(l, "ref-uid")) {
                        uid_t uid;

                        r = parse_uid(v, &uid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, uid, GID_INVALID);

                        continue;

                } else if (streq(l, "ref-gid")) {
                        gid_t gid;

                        r = parse_gid(v, &gid);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
                        else
                                unit_ref_uid_gid(u, UID_INVALID, gid);

                        continue;

                } else if (streq(l, "ref")) {
                        /* Bus names referencing this unit; re-registered later in unit_coldplug() */

                        r = strv_extend(&u->deserialized_refs, v);
                        if (r < 0)
                                return log_oom();

                        continue;
                } else if (streq(l, "invocation-id")) {
                        sd_id128_t id;

                        r = sd_id128_from_string(v, &id);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
                        else {
                                r = unit_set_invocation_id(u, id);
                                if (r < 0)
                                        log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
                        }

                        continue;
                }

                /* Check if this is an IP accounting metric serialization field */
                for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
                        if (streq(l, ip_accounting_metric_field[m]))
                                break;
                if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
                        uint64_t c;

                        r = safe_atou64(v, &c);
                        if (r < 0)
                                log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
                        else
                                u->ip_accounting_extra[m] = c;
                        continue;
                }

                /* Anything else belongs to the unit type's own serialization */
                if (unit_can_serialize(u)) {
                        r = exec_runtime_deserialize_compat(u, l, v, fds);
                        if (r < 0) {
                                log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
                                continue;
                        }

                        /* Returns positive if key was handled by the call */
                        if (r > 0)
                                continue;

                        r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
                        if (r < 0)
                                log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
                }
        }

        /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
         * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
         * before 228 where the base for timeouts was not persistent across reboots. */

        if (!dual_timestamp_is_set(&u->state_change_timestamp))
                dual_timestamp_get(&u->state_change_timestamp);

        /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
         * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
        unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
        unit_invalidate_cgroup_bpf(u);

        return 0;
}
3659
3660 int unit_deserialize_skip(FILE *f) {
3661 int r;
3662 assert(f);
3663
3664 /* Skip serialized data for this unit. We don't know what it is. */
3665
3666 for (;;) {
3667 _cleanup_free_ char *line = NULL;
3668 char *l;
3669
3670 r = read_line(f, LONG_LINE_MAX, &line);
3671 if (r < 0)
3672 return log_error_errno(r, "Failed to read serialization line: %m");
3673 if (r == 0)
3674 return 0;
3675
3676 l = strstrip(line);
3677
3678 /* End marker */
3679 if (isempty(l))
3680 return 1;
3681 }
3682 }
3683
3684 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3685 Unit *device;
3686 _cleanup_free_ char *e = NULL;
3687 int r;
3688
3689 assert(u);
3690
3691 /* Adds in links to the device node that this unit is based on */
3692 if (isempty(what))
3693 return 0;
3694
3695 if (!is_device_path(what))
3696 return 0;
3697
3698 /* When device units aren't supported (such as in a
3699 * container), don't create dependencies on them. */
3700 if (!unit_type_supported(UNIT_DEVICE))
3701 return 0;
3702
3703 r = unit_name_from_path(what, ".device", &e);
3704 if (r < 0)
3705 return r;
3706
3707 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3708 if (r < 0)
3709 return r;
3710
3711 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3712 dep = UNIT_BINDS_TO;
3713
3714 r = unit_add_two_dependencies(u, UNIT_AFTER,
3715 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3716 device, true, mask);
3717 if (r < 0)
3718 return r;
3719
3720 if (wants) {
3721 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3722 if (r < 0)
3723 return r;
3724 }
3725
3726 return 0;
3727 }
3728
3729 int unit_coldplug(Unit *u) {
3730 int r = 0, q;
3731 char **i;
3732
3733 assert(u);
3734
3735 /* Make sure we don't enter a loop, when coldplugging recursively. */
3736 if (u->coldplugged)
3737 return 0;
3738
3739 u->coldplugged = true;
3740
3741 STRV_FOREACH(i, u->deserialized_refs) {
3742 q = bus_unit_track_add_name(u, *i);
3743 if (q < 0 && r >= 0)
3744 r = q;
3745 }
3746 u->deserialized_refs = strv_free(u->deserialized_refs);
3747
3748 if (UNIT_VTABLE(u)->coldplug) {
3749 q = UNIT_VTABLE(u)->coldplug(u);
3750 if (q < 0 && r >= 0)
3751 r = q;
3752 }
3753
3754 if (u->job) {
3755 q = job_coldplug(u->job);
3756 if (q < 0 && r >= 0)
3757 r = q;
3758 }
3759
3760 return r;
3761 }
3762
3763 void unit_catchup(Unit *u) {
3764 assert(u);
3765
3766 if (UNIT_VTABLE(u)->catchup)
3767 UNIT_VTABLE(u)->catchup(u);
3768 }
3769
3770 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3771 struct stat st;
3772
3773 if (!path)
3774 return false;
3775
3776 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3777 * are never out-of-date. */
3778 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3779 return false;
3780
3781 if (stat(path, &st) < 0)
3782 /* What, cannot access this anymore? */
3783 return true;
3784
3785 if (path_masked)
3786 /* For masked files check if they are still so */
3787 return !null_or_empty(&st);
3788 else
3789 /* For non-empty files check the mtime */
3790 return timespec_load(&st.st_mtim) > mtime;
3791
3792 return false;
3793 }
3794
/* Returns true if the on-disk configuration backing this unit changed since it was loaded,
 * i.e. a daemon-reload is needed to bring the unit up to date. Checks the fragment, the
 * source path, the set of drop-in files, and the drop-ins' mtimes. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;
        char **path;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the drop-in list currently on disk with the one we loaded with; a difference
         * (including t staying NULL because the unit isn't loaded) means a reload is needed. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3822
3823 void unit_reset_failed(Unit *u) {
3824 assert(u);
3825
3826 if (UNIT_VTABLE(u)->reset_failed)
3827 UNIT_VTABLE(u)->reset_failed(u);
3828
3829 RATELIMIT_RESET(u->start_limit);
3830 u->start_limit_hit = false;
3831 }
3832
3833 Unit *unit_following(Unit *u) {
3834 assert(u);
3835
3836 if (UNIT_VTABLE(u)->following)
3837 return UNIT_VTABLE(u)->following(u);
3838
3839 return NULL;
3840 }
3841
3842 bool unit_stop_pending(Unit *u) {
3843 assert(u);
3844
3845 /* This call does check the current state of the unit. It's
3846 * hence useful to be called from state change calls of the
3847 * unit itself, where the state isn't updated yet. This is
3848 * different from unit_inactive_or_pending() which checks both
3849 * the current state and for a queued job. */
3850
3851 return u->job && u->job->type == JOB_STOP;
3852 }
3853
3854 bool unit_inactive_or_pending(Unit *u) {
3855 assert(u);
3856
3857 /* Returns true if the unit is inactive or going down */
3858
3859 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3860 return true;
3861
3862 if (unit_stop_pending(u))
3863 return true;
3864
3865 return false;
3866 }
3867
3868 bool unit_active_or_pending(Unit *u) {
3869 assert(u);
3870
3871 /* Returns true if the unit is active or going up */
3872
3873 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3874 return true;
3875
3876 if (u->job &&
3877 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3878 return true;
3879
3880 return false;
3881 }
3882
3883 bool unit_will_restart(Unit *u) {
3884 assert(u);
3885
3886 if (!UNIT_VTABLE(u)->will_restart)
3887 return false;
3888
3889 return UNIT_VTABLE(u)->will_restart(u);
3890 }
3891
3892 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3893 assert(u);
3894 assert(w >= 0 && w < _KILL_WHO_MAX);
3895 assert(SIGNAL_VALID(signo));
3896
3897 if (!UNIT_VTABLE(u)->kill)
3898 return -EOPNOTSUPP;
3899
3900 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3901 }
3902
3903 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3904 _cleanup_set_free_ Set *pid_set = NULL;
3905 int r;
3906
3907 pid_set = set_new(NULL);
3908 if (!pid_set)
3909 return NULL;
3910
3911 /* Exclude the main/control pids from being killed via the cgroup */
3912 if (main_pid > 0) {
3913 r = set_put(pid_set, PID_TO_PTR(main_pid));
3914 if (r < 0)
3915 return NULL;
3916 }
3917
3918 if (control_pid > 0) {
3919 r = set_put(pid_set, PID_TO_PTR(control_pid));
3920 if (r < 0)
3921 return NULL;
3922 }
3923
3924 return TAKE_PTR(pid_set);
3925 }
3926
/* Shared implementation of the Kill() bus call for unit types that track a main and/or control
 * process. 'who' selects which processes to signal; the *_FAIL variants additionally turn
 * "nothing was signalled" into -ESRCH. A negative PID parameter means "this unit type has no
 * such process concept"; zero means "no such process right now". Returns 0 on success or a
 * negative errno (first kill failure wins). */
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {

        int r = 0;
        bool killed = false;

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                else if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                else if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        if (kill(control_pid, signo) < 0)
                                r = -errno;
                        else
                                killed = true;
                }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        if (kill(main_pid, signo) < 0)
                                r = -errno;
                        else
                                killed = true;
                }

        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
                if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
                        r = q;
                else
                        killed = true;
        }

        /* NOTE(review): KILL_MAIN_FAIL is absent from this set — for KILL_MAIN_FAIL the early checks
         * above guarantee main_pid > 0, so a kill attempt always sets either r or killed and this
         * branch is unreachable for it. Confirm before relying on that invariant elsewhere. */
        if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
                return -ESRCH;

        return r;
}
3989
3990 int unit_following_set(Unit *u, Set **s) {
3991 assert(u);
3992 assert(s);
3993
3994 if (UNIT_VTABLE(u)->following_set)
3995 return UNIT_VTABLE(u)->following_set(u, s);
3996
3997 *s = NULL;
3998 return 0;
3999 }
4000
4001 UnitFileState unit_get_unit_file_state(Unit *u) {
4002 int r;
4003
4004 assert(u);
4005
4006 if (u->unit_file_state < 0 && u->fragment_path) {
4007 r = unit_file_get_state(
4008 u->manager->unit_file_scope,
4009 NULL,
4010 u->id,
4011 &u->unit_file_state);
4012 if (r < 0)
4013 u->unit_file_state = UNIT_FILE_BAD;
4014 }
4015
4016 return u->unit_file_state;
4017 }
4018
4019 int unit_get_unit_file_preset(Unit *u) {
4020 assert(u);
4021
4022 if (u->unit_file_preset < 0 && u->fragment_path)
4023 u->unit_file_preset = unit_file_query_preset(
4024 u->manager->unit_file_scope,
4025 NULL,
4026 basename(u->fragment_path));
4027
4028 return u->unit_file_preset;
4029 }
4030
4031 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4032 assert(ref);
4033 assert(source);
4034 assert(target);
4035
4036 if (ref->target)
4037 unit_ref_unset(ref);
4038
4039 ref->source = source;
4040 ref->target = target;
4041 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4042 return target;
4043 }
4044
4045 void unit_ref_unset(UnitRef *ref) {
4046 assert(ref);
4047
4048 if (!ref->target)
4049 return;
4050
4051 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4052 * be unreferenced now. */
4053 unit_add_to_gc_queue(ref->target);
4054
4055 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4056 ref->source = ref->target = NULL;
4057 }
4058
4059 static int user_from_unit_name(Unit *u, char **ret) {
4060
4061 static const uint8_t hash_key[] = {
4062 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4063 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4064 };
4065
4066 _cleanup_free_ char *n = NULL;
4067 int r;
4068
4069 r = unit_name_to_prefix(u->id, &n);
4070 if (r < 0)
4071 return r;
4072
4073 if (valid_user_group_name(n)) {
4074 *ret = TAKE_PTR(n);
4075 return 0;
4076 }
4077
4078 /* If we can't use the unit name as a user name, then let's hash it and use that */
4079 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4080 return -ENOMEM;
4081
4082 return 0;
4083 }
4084
/* Patches manager-level defaults and derived hardening settings into the unit's exec and cgroup
 * contexts. Must run _after_ the unit's own settings have been loaded, so explicit settings win.
 * Returns 0 on success, negative errno on allocation/lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to $HOME */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities to create/raw-access devices */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                /* PrivateDevices= tightens a default ("auto") cgroup device policy to "closed" */
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;

                if (ec->root_image &&
                    (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {

                        /* When RootImage= is specified, the following devices are touched. */
                        r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-loop", "rwm");
                        if (r < 0)
                                return r;

                        r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
4176
4177 ExecContext *unit_get_exec_context(Unit *u) {
4178 size_t offset;
4179 assert(u);
4180
4181 if (u->type < 0)
4182 return NULL;
4183
4184 offset = UNIT_VTABLE(u)->exec_context_offset;
4185 if (offset <= 0)
4186 return NULL;
4187
4188 return (ExecContext*) ((uint8_t*) u + offset);
4189 }
4190
4191 KillContext *unit_get_kill_context(Unit *u) {
4192 size_t offset;
4193 assert(u);
4194
4195 if (u->type < 0)
4196 return NULL;
4197
4198 offset = UNIT_VTABLE(u)->kill_context_offset;
4199 if (offset <= 0)
4200 return NULL;
4201
4202 return (KillContext*) ((uint8_t*) u + offset);
4203 }
4204
4205 CGroupContext *unit_get_cgroup_context(Unit *u) {
4206 size_t offset;
4207
4208 if (u->type < 0)
4209 return NULL;
4210
4211 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4212 if (offset <= 0)
4213 return NULL;
4214
4215 return (CGroupContext*) ((uint8_t*) u + offset);
4216 }
4217
4218 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4219 size_t offset;
4220
4221 if (u->type < 0)
4222 return NULL;
4223
4224 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4225 if (offset <= 0)
4226 return NULL;
4227
4228 return *(ExecRuntime**) ((uint8_t*) u + offset);
4229 }
4230
4231 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4232 assert(u);
4233
4234 if (UNIT_WRITE_FLAGS_NOOP(flags))
4235 return NULL;
4236
4237 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4238 return u->manager->lookup_paths.transient;
4239
4240 if (flags & UNIT_PERSISTENT)
4241 return u->manager->lookup_paths.persistent_control;
4242
4243 if (flags & UNIT_RUNTIME)
4244 return u->manager->lookup_paths.runtime_control;
4245
4246 return NULL;
4247 }
4248
/* Escapes a setting value for writing into a unit file, per the UNIT_ESCAPE_* bits in 'flags'.
 * See the ownership contract in the comment below. */
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                /* C-escape on top of (possibly) specifier-escaped text; free the intermediate buffer */
                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No out-buffer: always hand back an allocation, copying the input if nothing was escaped */
        return ret ?: strdup(s);
}
4288
/* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
 * way suitable for ExecStart= stanzas. Each entry is wrapped in double quotes and entries are
 * separated by single spaces. Returns a newly allocated string, or NULL on OOM. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                /* Bytes this entry adds: separating space (if not first) + " + entry + " */
                a = (n > 0) + 1 + strlen(p) + 1;
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append in place at the current end of the buffer */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* NUL-terminate (also yields "" for an empty/NULL input list) */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4329
/* Writes one setting for the unit: while a transient unit file is being created the line goes
 * into that file (prefixing a section header only when the section changes); otherwise a
 * 50-<name>.conf drop-in is created in the directory selected by 'flags'. 'data' is escaped
 * per the UNIT_ESCAPE_* bits. Returns 0 on success, negative errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private < 0 means no section written yet; == 0 means the previous
                 * section was [Unit], so a blank line plus the private header is needed */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Track the new drop-in path on the unit; ownership of q moves into the strv */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        /* Record when we last modified drop-ins, for unit_need_daemon_reload() */
        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4406
4407 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4408 _cleanup_free_ char *p = NULL;
4409 va_list ap;
4410 int r;
4411
4412 assert(u);
4413 assert(name);
4414 assert(format);
4415
4416 if (UNIT_WRITE_FLAGS_NOOP(flags))
4417 return 0;
4418
4419 va_start(ap, format);
4420 r = vasprintf(&p, format, ap);
4421 va_end(ap);
4422
4423 if (r < 0)
4424 return -ENOMEM;
4425
4426 return unit_write_setting(u, flags, name, p);
4427 }
4428
/* Turns the unit into a transient one: opens a fresh unit file in the transient lookup
 * directory (kept open in u->transient_file until unit_load() closes it), drops any previous
 * fragment/source/drop-in state, and resets the load state so the unit is re-loaded from the
 * transient file. Returns 0 on success, -EOPNOTSUPP if the type can't be transient, or a
 * negative errno on I/O failure. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget any previously loaded configuration sources */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Force a reload from the new transient file */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4474
4475 static void log_kill(pid_t pid, int sig, void *userdata) {
4476 _cleanup_free_ char *comm = NULL;
4477
4478 (void) get_process_comm(pid, &comm);
4479
4480 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4481 only, like for example systemd's own PAM stub process. */
4482 if (comm && comm[0] == '(')
4483 return;
4484
4485 log_unit_notice(userdata,
4486 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4487 pid,
4488 strna(comm),
4489 signal_to_string(sig));
4490 }
4491
4492 static int operation_to_signal(KillContext *c, KillOperation k) {
4493 assert(c);
4494
4495 switch (k) {
4496
4497 case KILL_TERMINATE:
4498 case KILL_TERMINATE_AND_LOG:
4499 return c->kill_signal;
4500
4501 case KILL_KILL:
4502 return c->final_kill_signal;
4503
4504 case KILL_WATCHDOG:
4505 return c->watchdog_signal;
4506
4507 default:
4508 assert_not_reached("KillOperation unknown");
4509 }
4510 }
4511
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SIGHUP is only sent as a follow-up for regular termination requests, and only if the main
         * signal isn't SIGHUP already. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed PID, except for the quiet initial-SIGTERM case */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for the main process if it is actually ours */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        /* -ESRCH means the process is already gone, no point in SIGHUP then */
                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        /* EAGAIN/ESRCH/ENOENT are expected races (cgroup gone/empty), don't warn */
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* NOTE(review): pid_set is rebuilt here — presumably cg_kill_recursive()
                                 * modified it above; confirm against cgroup-util. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4629
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a copy we own, so it may be simplified in place */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, false);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL; /* ownership of the string passed to the hashmap */

        /* Now register this unit in the per-manager prefix table, for every prefix of the path */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership passed to the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4707
4708 int unit_setup_exec_runtime(Unit *u) {
4709 ExecRuntime **rt;
4710 size_t offset;
4711 Unit *other;
4712 Iterator i;
4713 void *v;
4714 int r;
4715
4716 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4717 assert(offset > 0);
4718
4719 /* Check if there already is an ExecRuntime for this unit? */
4720 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4721 if (*rt)
4722 return 0;
4723
4724 /* Try to get it from somebody else */
4725 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4726 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4727 if (r == 1)
4728 return 1;
4729 }
4730
4731 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4732 }
4733
4734 int unit_setup_dynamic_creds(Unit *u) {
4735 ExecContext *ec;
4736 DynamicCreds *dcreds;
4737 size_t offset;
4738
4739 assert(u);
4740
4741 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4742 assert(offset > 0);
4743 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4744
4745 ec = unit_get_exec_context(u);
4746 assert(ec);
4747
4748 if (!ec->dynamic_user)
4749 return 0;
4750
4751 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4752 }
4753
4754 bool unit_type_supported(UnitType t) {
4755 if (_unlikely_(t < 0))
4756 return false;
4757 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4758 return false;
4759
4760 if (!unit_vtable[t]->supported)
4761 return true;
4762
4763 return unit_vtable[t]->supported();
4764 }
4765
4766 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4767 int r;
4768
4769 assert(u);
4770 assert(where);
4771
4772 r = dir_is_empty(where);
4773 if (r > 0 || r == -ENOTDIR)
4774 return;
4775 if (r < 0) {
4776 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4777 return;
4778 }
4779
4780 log_struct(LOG_NOTICE,
4781 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4782 LOG_UNIT_ID(u),
4783 LOG_UNIT_INVOCATION_ID(u),
4784 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4785 "WHERE=%s", where);
4786 }
4787
4788 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4789 _cleanup_free_ char *canonical_where;
4790 int r;
4791
4792 assert(u);
4793 assert(where);
4794
4795 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4796 if (r < 0) {
4797 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4798 return 0;
4799 }
4800
4801 /* We will happily ignore a trailing slash (or any redundant slashes) */
4802 if (path_equal(where, canonical_where))
4803 return 0;
4804
4805 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4806 log_struct(LOG_ERR,
4807 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4808 LOG_UNIT_ID(u),
4809 LOG_UNIT_INVOCATION_ID(u),
4810 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4811 "WHERE=%s", where);
4812
4813 return -ELOOP;
4814 }
4815
4816 bool unit_is_pristine(Unit *u) {
4817 assert(u);
4818
4819 /* Check if the unit already exists or is already around,
4820 * in a number of different ways. Note that to cater for unit
4821 * types such as slice, we are generally fine with units that
4822 * are marked UNIT_LOADED even though nothing was actually
4823 * loaded, as those unit types don't require a file on disk. */
4824
4825 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4826 u->fragment_path ||
4827 u->source_path ||
4828 !strv_isempty(u->dropin_paths) ||
4829 u->job ||
4830 u->merged_into);
4831 }
4832
4833 pid_t unit_control_pid(Unit *u) {
4834 assert(u);
4835
4836 if (UNIT_VTABLE(u)->control_pid)
4837 return UNIT_VTABLE(u)->control_pid(u);
4838
4839 return 0;
4840 }
4841
4842 pid_t unit_main_pid(Unit *u) {
4843 assert(u);
4844
4845 if (UNIT_VTABLE(u)->main_pid)
4846 return UNIT_VTABLE(u)->main_pid(u);
4847
4848 return 0;
4849 }
4850
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference currently held? Then there's nothing to drop. */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* mark as unset again */
}
4875
/* Drops the unit's UID reference; see unit_unref_uid_internal() for semantics. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4879
/* Drops the unit's GID reference; reuses the UID code path since uid_t and gid_t are compatible. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4883
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Same UID referenced already? Nothing to do. */
        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1; /* > 0 indicates a new reference was actually taken */
}
4921
/* Takes a UID reference for this unit; see unit_ref_uid_internal() for semantics. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4925
/* Takes a GID reference; reuses the UID code path since uid_t and gid_t are compatible. */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4929
4930 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4931 int r = 0, q = 0;
4932
4933 assert(u);
4934
4935 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4936
4937 if (uid_is_valid(uid)) {
4938 r = unit_ref_uid(u, uid, clean_ipc);
4939 if (r < 0)
4940 return r;
4941 }
4942
4943 if (gid_is_valid(gid)) {
4944 q = unit_ref_gid(u, gid, clean_ipc);
4945 if (q < 0) {
4946 if (r > 0)
4947 unit_unref_uid(u, false);
4948
4949 return q;
4950 }
4951 }
4952
4953 return r > 0 || q > 0;
4954 }
4955
4956 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4957 ExecContext *c;
4958 int r;
4959
4960 assert(u);
4961
4962 c = unit_get_exec_context(u);
4963
4964 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4965 if (r < 0)
4966 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4967
4968 return r;
4969 }
4970
/* Drops both the unit's UID and GID references, if taken. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4977
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4991
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Deregister the old ID from the manager's lookup table first, if any */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID simply clears the invocation ID */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit clearing) wipe both the ID and its string form */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
5028
5029 int unit_acquire_invocation_id(Unit *u) {
5030 sd_id128_t id;
5031 int r;
5032
5033 assert(u);
5034
5035 r = sd_id128_randomize(&id);
5036 if (r < 0)
5037 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5038
5039 r = unit_set_invocation_id(u, id);
5040 if (r < 0)
5041 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5042
5043 return 0;
5044 }
5045
/* Fills in an ExecParameters structure from manager-wide and unit-specific settings. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        return 0;
}
5068
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r;

        /* Everything below runs in the child only */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        /* Make sure the child is killed when the manager goes away */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
5099
5100 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5101 assert(u);
5102 assert(d >= 0);
5103 assert(d < _UNIT_DEPENDENCY_MAX);
5104 assert(other);
5105
5106 if (di.origin_mask == 0 && di.destination_mask == 0) {
5107 /* No bit set anymore, let's drop the whole entry */
5108 assert_se(hashmap_remove(u->dependencies[d], other));
5109 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5110 } else
5111 /* Mask was reduced, let's update the entry */
5112 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5113 }
5114
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask has no overlap with the bits to drop */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The hashmap was modified while iterating; restart the scan of this
                                 * dependency type from the top via the outer do/while loop. */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5168
5169 static int unit_export_invocation_id(Unit *u) {
5170 const char *p;
5171 int r;
5172
5173 assert(u);
5174
5175 if (u->exported_invocation_id)
5176 return 0;
5177
5178 if (sd_id128_is_null(u->invocation_id))
5179 return 0;
5180
5181 p = strjoina("/run/systemd/units/invocation:", u->id);
5182 r = symlink_atomic(u->invocation_id_string, p);
5183 if (r < 0)
5184 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5185
5186 u->exported_invocation_id = true;
5187 return 0;
5188 }
5189
5190 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5191 const char *p;
5192 char buf[2];
5193 int r;
5194
5195 assert(u);
5196 assert(c);
5197
5198 if (u->exported_log_level_max)
5199 return 0;
5200
5201 if (c->log_level_max < 0)
5202 return 0;
5203
5204 assert(c->log_level_max <= 7);
5205
5206 buf[0] = '0' + c->log_level_max;
5207 buf[1] = 0;
5208
5209 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5210 r = symlink_atomic(buf, p);
5211 if (r < 0)
5212 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5213
5214 u->exported_log_level_max = true;
5215 return 0;
5216 }
5217
/* Exports LogExtraFields= as a binary file under /run/systemd/units/: a sequence of
 * (little-endian 64-bit length, field data) pairs, written atomically via a temp file + rename. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;
        const char *p;
        char *pattern;
        le64_t *sizes;
        ssize_t n;
        size_t i;
        int r;

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        /* Build an interleaved iovec: length prefix, then the field itself, for each extra field */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        /* NOTE(review): only n < 0 is treated as an error here — a short write (n smaller than the
         * total iovec size) would go undetected; confirm whether that is acceptable. */
        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        /* Atomically replace any previous file */
        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
5271
5272 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5273 _cleanup_free_ char *buf = NULL;
5274 const char *p;
5275 int r;
5276
5277 assert(u);
5278 assert(c);
5279
5280 if (u->exported_log_rate_limit_interval)
5281 return 0;
5282
5283 if (c->log_rate_limit_interval_usec == 0)
5284 return 0;
5285
5286 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5287
5288 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5289 return log_oom();
5290
5291 r = symlink_atomic(buf, p);
5292 if (r < 0)
5293 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5294
5295 u->exported_log_rate_limit_interval = true;
5296 return 0;
5297 }
5298
5299 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5300 _cleanup_free_ char *buf = NULL;
5301 const char *p;
5302 int r;
5303
5304 assert(u);
5305 assert(c);
5306
5307 if (u->exported_log_rate_limit_burst)
5308 return 0;
5309
5310 if (c->log_rate_limit_burst == 0)
5311 return 0;
5312
5313 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5314
5315 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5316 return log_oom();
5317
5318 r = symlink_atomic(buf, p);
5319 if (r < 0)
5320 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5321
5322 u->exported_log_rate_limit_burst = true;
5323 return 0;
5324 }
5325
5326 void unit_export_state_files(Unit *u) {
5327 const ExecContext *c;
5328
5329 assert(u);
5330
5331 if (!u->id)
5332 return;
5333
5334 if (!MANAGER_IS_SYSTEM(u->manager))
5335 return;
5336
5337 if (MANAGER_IS_TEST_RUN(u->manager))
5338 return;
5339
5340 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5341 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5342 * the IPC system itself and PID 1 also log to the journal.
5343 *
5344 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5345 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5346 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5347 * namespace at least.
5348 *
5349 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5350 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5351 * them with one. */
5352
5353 (void) unit_export_invocation_id(u);
5354
5355 c = unit_get_exec_context(u);
5356 if (c) {
5357 (void) unit_export_log_level_max(u, c);
5358 (void) unit_export_log_extra_fields(u, c);
5359 (void) unit_export_log_rate_limit_interval(u, c);
5360 (void) unit_export_log_rate_limit_burst(u, c);
5361 }
5362 }
5363
5364 void unit_unlink_state_files(Unit *u) {
5365 const char *p;
5366
5367 assert(u);
5368
5369 if (!u->id)
5370 return;
5371
5372 if (!MANAGER_IS_SYSTEM(u->manager))
5373 return;
5374
5375 /* Undoes the effect of unit_export_state() */
5376
5377 if (u->exported_invocation_id) {
5378 p = strjoina("/run/systemd/units/invocation:", u->id);
5379 (void) unlink(p);
5380
5381 u->exported_invocation_id = false;
5382 }
5383
5384 if (u->exported_log_level_max) {
5385 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5386 (void) unlink(p);
5387
5388 u->exported_log_level_max = false;
5389 }
5390
5391 if (u->exported_log_extra_fields) {
5392 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5393 (void) unlink(p);
5394
5395 u->exported_log_extra_fields = false;
5396 }
5397
5398 if (u->exported_log_rate_limit_interval) {
5399 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5400 (void) unlink(p);
5401
5402 u->exported_log_rate_limit_interval = false;
5403 }
5404
5405 if (u->exported_log_rate_limit_burst) {
5406 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5407 (void) unlink(p);
5408
5409 u->exported_log_rate_limit_burst = false;
5410 }
5411 }
5412
5413 int unit_prepare_exec(Unit *u) {
5414 int r;
5415
5416 assert(u);
5417
5418 /* Prepares everything so that we can fork of a process for this unit */
5419
5420 (void) unit_realize_cgroup(u);
5421
5422 if (u->reset_accounting) {
5423 (void) unit_reset_cpu_accounting(u);
5424 (void) unit_reset_ip_accounting(u);
5425 u->reset_accounting = false;
5426 }
5427
5428 unit_export_state_files(u);
5429
5430 r = unit_setup_exec_runtime(u);
5431 if (r < 0)
5432 return r;
5433
5434 r = unit_setup_dynamic_creds(u);
5435 if (r < 0)
5436 return r;
5437
5438 return 0;
5439 }
5440
5441 static void log_leftover(pid_t pid, int sig, void *userdata) {
5442 _cleanup_free_ char *comm = NULL;
5443
5444 (void) get_process_comm(pid, &comm);
5445
5446 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5447 return;
5448
5449 log_unit_warning(userdata,
5450 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5451 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5452 pid, strna(comm));
5453 }
5454
/* Logs a warning for every process still found in the unit's cgroup. */
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        /* sig=0: presumably this only enumerates (does not kill), with log_leftover() invoked per
         * PID — confirm against cg_kill_recursive() in cgroup-util. */
        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5465
5466 bool unit_needs_console(Unit *u) {
5467 ExecContext *ec;
5468 UnitActiveState state;
5469
5470 assert(u);
5471
5472 state = unit_active_state(u);
5473
5474 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5475 return false;
5476
5477 if (UNIT_VTABLE(u)->needs_console)
5478 return UNIT_VTABLE(u)->needs_console(u);
5479
5480 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5481 ec = unit_get_exec_context(u);
5482 if (!ec)
5483 return false;
5484
5485 return exec_context_may_touch_console(ec);
5486 }
5487
5488 const char *unit_label_path(Unit *u) {
5489 const char *p;
5490
5491 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5492 * when validating access checks. */
5493
5494 p = u->source_path ?: u->fragment_path;
5495 if (!p)
5496 return NULL;
5497
5498 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5499 if (path_equal(p, "/dev/null"))
5500 return NULL;
5501
5502 return p;
5503 }
5504
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        /* All checks passed: the PID is attachable */
        return 0;
}
5532
/* String names for CollectMode values; DEFINE_STRING_TABLE_LOOKUP generates the corresponding
 * collect_mode_to_string()/collect_mode_from_string() helpers from this table. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);