/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "io-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);

Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}
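
/* Illustrative sketch, not part of the original source: a type-specific
 * constructor is expected to pass the size of its derived struct here, e.g.
 * a hypothetical caller might do
 *
 *         Unit *u;
 *         int r = unit_new_for_name(m, sizeof(Service), "foo.service", &u);
 *         if (r < 0)
 *                 return r;
 *
 * unit_new() only zero-allocates and presets defaults; unit_add_name()
 * below is what registers the name with the manager. */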

bool unit_has_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
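
/* Illustrative note, not part of the original source: the template branch
 * above means that adding the name "getty@.service" to a unit whose
 * instance is "tty1" actually registers "getty@tty1.service", while adding
 * a template name to a unit without an instance fails with -EINVAL. */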

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive or failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}

static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
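
/* Illustrative note, not part of the original source: PATH_FOREACH_PREFIX_MORE
 * visits the path itself and each of its ancestor prefixes, e.g. for
 * "/var/lib/foo" it covers "/var/lib/foo", "/var/lib" and "/var", which is
 * why the loop above can drop this unit from every per-prefix set in
 * units_requiring_mounts_for. */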

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int set_complete_move(Set **s, Set **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return set_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return hashmap_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}
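
/* Illustrative note, not part of the original source: the *_complete_move()
 * helpers either hand the whole container over (when the destination is
 * still unallocated) or merge entry by entry. E.g., with Set *a = NULL and
 * a populated Set *b, set_complete_move(&a, &b) makes a take ownership and
 * leaves b NULL; with both allocated, b's entries are moved into a and b is
 * left empty. */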

static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}

static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}

static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependent unit, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
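
/* Illustrative note, not part of the original source: UnitDependencyInfo
 * packs the origin and destination bit masks into the hashmap value, and
 * the merge above ORs them. E.g., if "back" depended on "other" with
 * origin_mask=UNIT_DEPENDENCY_FILE and already on "u" with
 * origin_mask=UNIT_DEPENDENCY_DEFAULT, the merged edge carries
 * UNIT_DEPENDENCY_FILE|UNIT_DEPENDENCY_DEFAULT, so the record of why each
 * dependency exists survives the merge. */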

int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
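
/* Illustrative note, not part of the original source: for a system unit
 * with, say, WorkingDirectory=/srv/app and PrivateTmp=yes, the function
 * above adds mount requirements on /srv/app, /tmp and /var/tmp and orders
 * the unit After= the tmpfiles setup service, so the needed mounts are in
 * place before the unit starts. */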

const char *unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (mask == 0)
                        break;

                if (FLAGS_SET(mask, table[i].mask)) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}
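
/* Illustrative note, not part of the original source: a dependency edge
 * with origin_mask=UNIT_DEPENDENCY_FILE and
 * destination_mask=UNIT_DEPENDENCY_DEFAULT is rendered by the helper above
 * as "origin-file destination-default" inside the parentheses of the dump
 * output below. */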

void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char
                timestamp0[FORMAT_TIMESTAMP_MAX],
                timestamp1[FORMAT_TIMESTAMP_MAX],
                timestamp2[FORMAT_TIMESTAMP_MAX],
                timestamp3[FORMAT_TIMESTAMP_MAX],
                timestamp4[FORMAT_TIMESTAMP_MAX],
                timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

        fprintf(f,
                "%s-> Unit %s:\n"
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tMay GC: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, u->id,
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }
        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }
        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }
        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        SET_FOREACH(t, u->names, i)
                fprintf(f, "%s\tName: %s\n", prefix, t);

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                return -ENOENT;

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin_optional(Unit *u) {
        int r;

        assert(u);

        /* Same as unit_load_fragment_and_dropin(), but whether
         * something can be loaded or not doesn't matter. */

        /* Load a .service/.socket/.slice/… file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                u->load_state = UNIT_LOADED;

        /* Load drop-in directory data */
        return unit_load_dropin(unit_follow_merge(u));
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
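
/* Illustrative note, not part of the original source: for a recorded
 * RequiresMountsFor path such as "/var/lib/foo", unit_name_from_path() maps
 * each prefix to a mount unit name ("/var" -> "var.mount",
 * "/var/lib" -> "var-lib.mount", ...), and the loop above orders this unit
 * After= each such mount unit, adding Requires= as well when the mount unit
 * has a fragment file of its own. */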

static int unit_add_startup_units(Unit *u) {
        CGroupContext *c;
        int r;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
            c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
            c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
                return 0;

        r = set_ensure_allocated(&u->manager->startup_units, NULL);
        if (r < 0)
                return r;

        return set_put(u->manager->startup_units, u);
}

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1;

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none), we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                if (!c->trigger && r <= 0)
                        return false;

                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        return triggered != 0;
}
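
/* Illustrative note, not part of the original source: with the semantics
 * above, a condition list like
 *
 *         ConditionPathExists=/etc/foo.conf
 *         ConditionPathExists=|/run/bar
 *         ConditionPathExists=|!/run/baz
 *
 * requires the plain (non-trigger) condition to hold AND at least one of
 * the two "|" trigger conditions to hold; "!" negates the individual
 * check. An empty list is always true. */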

static bool unit_condition_test(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);

        return u->condition_result;
}

static bool unit_assert_test(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);

        return u->assert_result;
}

void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}

int unit_start_limit_test(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_limit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        return emergency_action(u->manager, u->start_limit_action,
                                EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                                u->reboot_arg, reason);
}
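
/* Illustrative note, not part of the original source: start_limit is the
 * rate limit initialized in unit_new() from the manager's
 * default_start_limit_interval/burst, i.e. DefaultStartLimitIntervalSec=
 * and DefaultStartLimitBurst= (upstream defaults are believed to be 10 s
 * and 5 starts). ratelimit_below() returns true while starts stay within
 * that budget; once it is exceeded, the configured StartLimitAction=
 * emergency action is taken. */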

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason, units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -ECOMM: Condition failed.
 * -EPROTO: Assert failed.
 * -EINVAL: Unit not loaded.
 * -EOPNOTSUPP: Unit type not supported.
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -ECOMM;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
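
/* Illustrative sketch, not part of the original source: a hypothetical
 * caller of unit_start() would treat several of the codes documented above
 * as non-fatal, e.g.:
 *
 *         r = unit_start(u);
 *         if (r == -EALREADY)
 *                 ;  (already active, nothing to do)
 *         else if (r == -ECOMM)
 *                 ;  (condition failed, skip quietly)
 *         else if (r < 0)
 *                 return r;
 *
 * In systemd itself the job engine is the main caller and maps these codes
 * to job results. */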
1775
1776 bool unit_can_start(Unit *u) {
1777 assert(u);
1778
1779 if (u->load_state != UNIT_LOADED)
1780 return false;
1781
1782 if (!unit_supported(u))
1783 return false;
1784
1785 /* Scope units may be started only once */
1786 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1787 return false;
1788
1789 return !!UNIT_VTABLE(u)->start;
1790 }
1791
1792 bool unit_can_isolate(Unit *u) {
1793 assert(u);
1794
1795 return unit_can_start(u) &&
1796 u->allow_isolate;
1797 }
1798
1799 /* Errors:
1800 * -EBADR: This unit type does not support stopping.
1801 * -EALREADY: Unit is already stopped.
1802 * -EAGAIN: An operation is already in progress. Retry later.
1803 */
1804 int unit_stop(Unit *u) {
1805 UnitActiveState state;
1806 Unit *following;
1807
1808 assert(u);
1809
1810 state = unit_active_state(u);
1811 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1812 return -EALREADY;
1813
1814 following = unit_following(u);
1815 if (following) {
1816 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1817 return unit_stop(following);
1818 }
1819
1820 if (!UNIT_VTABLE(u)->stop)
1821 return -EBADR;
1822
1823 unit_add_to_dbus_queue(u);
1824
1825 return UNIT_VTABLE(u)->stop(u);
1826 }
1827
1828 bool unit_can_stop(Unit *u) {
1829 assert(u);
1830
1831 if (!unit_supported(u))
1832 return false;
1833
1834 if (u->perpetual)
1835 return false;
1836
1837 return !!UNIT_VTABLE(u)->stop;
1838 }
1839
1840 /* Errors:
1841 * -EBADR: This unit type does not support reloading.
1842 * -ENOEXEC: Unit is not started.
1843 * -EAGAIN: An operation is already in progress. Retry later.
1844 */
1845 int unit_reload(Unit *u) {
1846 UnitActiveState state;
1847 Unit *following;
1848
1849 assert(u);
1850
1851 if (u->load_state != UNIT_LOADED)
1852 return -EINVAL;
1853
1854 if (!unit_can_reload(u))
1855 return -EBADR;
1856
1857 state = unit_active_state(u);
1858 if (state == UNIT_RELOADING)
1859 return -EALREADY;
1860
1861 if (state != UNIT_ACTIVE) {
1862 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1863 return -ENOEXEC;
1864 }
1865
1866 following = unit_following(u);
1867 if (following) {
1868 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1869 return unit_reload(following);
1870 }
1871
1872 unit_add_to_dbus_queue(u);
1873
1874 if (!UNIT_VTABLE(u)->reload) {
1875 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1876 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1877 return 0;
1878 }
1879
1880 return UNIT_VTABLE(u)->reload(u);
1881 }
1882
1883 bool unit_can_reload(Unit *u) {
1884 assert(u);
1885
1886 if (UNIT_VTABLE(u)->can_reload)
1887 return UNIT_VTABLE(u)->can_reload(u);
1888
1889 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1890 return true;
1891
1892 return UNIT_VTABLE(u)->reload;
1893 }
1894
1895 bool unit_is_unneeded(Unit *u) {
1896 static const UnitDependency deps[] = {
1897 UNIT_REQUIRED_BY,
1898 UNIT_REQUISITE_OF,
1899 UNIT_WANTED_BY,
1900 UNIT_BOUND_BY,
1901 };
1902 size_t j;
1903
1904 assert(u);
1905
1906 if (!u->stop_when_unneeded)
1907 return false;
1908
1909 /* Don't clean up while the unit is transitioning or is even inactive. */
1910 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1911 return false;
1912 if (u->job)
1913 return false;
1914
1915 for (j = 0; j < ELEMENTSOF(deps); j++) {
1916 Unit *other;
1917 Iterator i;
1918 void *v;
1919
1920 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1921 * restart, then don't clean this one up. */
1922
1923 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1924 if (other->job)
1925 return false;
1926
1927 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1928 return false;
1929
1930 if (unit_will_restart(other))
1931 return false;
1932 }
1933 }
1934
1935 return true;
1936 }
1937
1938 static void check_unneeded_dependencies(Unit *u) {
1939
1940 static const UnitDependency deps[] = {
1941 UNIT_REQUIRES,
1942 UNIT_REQUISITE,
1943 UNIT_WANTS,
1944 UNIT_BINDS_TO,
1945 };
1946 size_t j;
1947
1948 assert(u);
1949
1950 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1951
1952 for (j = 0; j < ELEMENTSOF(deps); j++) {
1953 Unit *other;
1954 Iterator i;
1955 void *v;
1956
1957 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
1958 unit_submit_to_stop_when_unneeded_queue(other);
1959 }
1960 }
1961
1962 static void unit_check_binds_to(Unit *u) {
1963 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1964 bool stop = false;
1965 Unit *other;
1966 Iterator i;
1967 void *v;
1968 int r;
1969
1970 assert(u);
1971
1972 if (u->job)
1973 return;
1974
1975 if (unit_active_state(u) != UNIT_ACTIVE)
1976 return;
1977
1978 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
1979 if (other->job)
1980 continue;
1981
1982 if (!other->coldplugged)
1983 /* We might yet create a job for the other unit… */
1984 continue;
1985
1986 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1987 continue;
1988
1989 stop = true;
1990 break;
1991 }
1992
1993 if (!stop)
1994 return;
1995
1996 /* If stopping a unit fails continuously we might enter a stop
1997 * loop here, hence stop acting on the service being
1998 * unnecessary after a while. */
1999 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2000 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2001 return;
2002 }
2003
2004 assert(other);
2005 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2006
2007 /* A unit we need to run is gone. Sniff. Let's stop this. */
2008 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2009 if (r < 0)
2010 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2011 }
2012
2013 static void retroactively_start_dependencies(Unit *u) {
2014 Iterator i;
2015 Unit *other;
2016 void *v;
2017
2018 assert(u);
2019 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2020
2021 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2022 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2023 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2024 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2025
2026 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2027 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2028 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2029 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2030
2031 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2032 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2033 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2034 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2035
2036 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2037 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2038 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2039
2040 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2041 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2042 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2043 }
2044
2045 static void retroactively_stop_dependencies(Unit *u) {
2046 Unit *other;
2047 Iterator i;
2048 void *v;
2049
2050 assert(u);
2051 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2052
2053 /* Pull down units which are bound to us recursively if enabled */
2054 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2055 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2056 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2057 }
2058
2059 void unit_start_on_failure(Unit *u) {
2060 Unit *other;
2061 Iterator i;
2062 void *v;
2063 int r;
2064
2065 assert(u);
2066
2067 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2068 return;
2069
2070 log_unit_info(u, "Triggering OnFailure= dependencies.");
2071
2072 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2073 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2074
2075 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, &error, NULL);
2076 if (r < 0)
2077 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2078 }
2079 }
2080
2081 void unit_trigger_notify(Unit *u) {
2082 Unit *other;
2083 Iterator i;
2084 void *v;
2085
2086 assert(u);
2087
2088 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2089 if (UNIT_VTABLE(other)->trigger_notify)
2090 UNIT_VTABLE(other)->trigger_notify(other, u);
2091 }
2092
2093 static int unit_log_resources(Unit *u) {
2094 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2095 bool any_traffic = false, have_ip_accounting = false;
2096 _cleanup_free_ char *ingress = NULL, *egress = NULL;
2097 size_t n_message_parts = 0, n_iovec = 0;
2098 char* message_parts[3 + 1], *t;
2099 nsec_t nsec = NSEC_INFINITY;
2100 CGroupIPAccountingMetric m;
2101 size_t i;
2102 int r;
2103 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2104 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2105 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2106 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2107 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2108 };
2109
2110 assert(u);
2111
2112 /* Invoked whenever a unit enters a failed or dead state. Logs information about consumed resources if resource
2113 * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2114 * information and the complete data in structured fields. */
2115
2116 (void) unit_get_cpu_usage(u, &nsec);
2117 if (nsec != NSEC_INFINITY) {
2118 char buf[FORMAT_TIMESPAN_MAX] = "";
2119
2120 /* Format the CPU time for inclusion in the structured log message */
2121 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2122 r = log_oom();
2123 goto finish;
2124 }
2125 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2126
2127 /* Format the CPU time for inclusion in the human language message string */
2128 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2129 t = strjoin("consumed ", buf, " CPU time");
2130 if (!t) {
2131 r = log_oom();
2132 goto finish;
2133 }
2134
2135 message_parts[n_message_parts++] = t;
2136 }
2137
2138 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2139 char buf[FORMAT_BYTES_MAX] = "";
2140 uint64_t value = UINT64_MAX;
2141
2142 assert(ip_fields[m]);
2143
2144 (void) unit_get_ip_accounting(u, m, &value);
2145 if (value == UINT64_MAX)
2146 continue;
2147
2148 have_ip_accounting = true;
2149 if (value > 0)
2150 any_traffic = true;
2151
2152 /* Format IP accounting data for inclusion in the structured log message */
2153 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2154 r = log_oom();
2155 goto finish;
2156 }
2157 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2158
2159 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2160 * bytes counters (and not for the packets counters) */
2161 if (m == CGROUP_IP_INGRESS_BYTES) {
2162 assert(!ingress);
2163 ingress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2164 if (!ingress) {
2165 r = log_oom();
2166 goto finish;
2167 }
2168 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2169 assert(!egress);
2170 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2171 if (!egress) {
2172 r = log_oom();
2173 goto finish;
2174 }
2175 }
2176 }
2177
2178 if (have_ip_accounting) {
2179 if (any_traffic) {
2180 if (ingress)
2181 message_parts[n_message_parts++] = TAKE_PTR(ingress);
2182 if (egress)
2183 message_parts[n_message_parts++] = TAKE_PTR(egress);
2184
2185 } else {
2186 char *k;
2187
2188 k = strdup("no IP traffic");
2189 if (!k) {
2190 r = log_oom();
2191 goto finish;
2192 }
2193
2194 message_parts[n_message_parts++] = k;
2195 }
2196 }
2197
2198 /* Is there any accounting data available at all? */
2199 if (n_iovec == 0) {
2200 r = 0;
2201 goto finish;
2202 }
2203
2204 if (n_message_parts == 0)
2205 t = strjoina("MESSAGE=", u->id, ": Completed.");
2206 else {
2207 _cleanup_free_ char *joined = NULL;
2208
2209 message_parts[n_message_parts] = NULL;
2210
2211 joined = strv_join(message_parts, ", ");
2212 if (!joined) {
2213 r = log_oom();
2214 goto finish;
2215 }
2216
2217 joined[0] = ascii_toupper(joined[0]);
2218 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2219 }
2220
2221 /* The following four fields are allocated on the stack or are static strings; we hence don't want to
2222 * free them, and hence don't increase n_iovec for them */
2223 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2224 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2225
2226 t = strjoina(u->manager->unit_log_field, u->id);
2227 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2228
2229 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2230 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2231
2232 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2233 r = 0;
2234
2235 finish:
2236 for (i = 0; i < n_message_parts; i++)
2237 free(message_parts[i]);
2238
2239 for (i = 0; i < n_iovec; i++)
2240 free(iovec[i].iov_base);
2241
2242 return r;
2243
2244 }
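/* Example output (hypothetical values, for illustration only): for a unit "foo.service" with CPU and
 * IP accounting enabled, the code above emits one structured journal entry along the lines of
 *
 *     MESSAGE=foo.service: Consumed 1.504s CPU time, received 1.2M IP traffic, sent 346.0K IP traffic.
 *     MESSAGE_ID=<SD_MESSAGE_UNIT_RESOURCES>
 *     CPU_USAGE_NSEC=1504123000
 *     IP_METRIC_INGRESS_BYTES=1258291
 *     IP_METRIC_EGRESS_BYTES=354304
 *
 * i.e. the human-readable MESSAGE= summary assembled from message_parts[], plus the raw counters as
 * separate structured fields. */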
2245
2246 static void unit_update_on_console(Unit *u) {
2247 bool b;
2248
2249 assert(u);
2250
2251 b = unit_needs_console(u);
2252 if (u->on_console == b)
2253 return;
2254
2255 u->on_console = b;
2256 if (b)
2257 manager_ref_console(u->manager);
2258 else
2259 manager_unref_console(u->manager);
2260 }
2261
2262 static void unit_emit_audit_start(Unit *u) {
2263 assert(u);
2264
2265 if (u->type != UNIT_SERVICE)
2266 return;
2267
2268 /* Write audit record if we have just finished starting up */
2269 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2270 u->in_audit = true;
2271 }
2272
2273 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2274 assert(u);
2275
2276 if (u->type != UNIT_SERVICE)
2277 return;
2278
2279 if (u->in_audit) {
2280 /* Write audit record if we have just finished shutting down */
2281 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2282 u->in_audit = false;
2283 } else {
2284 /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */
2285 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2286
2287 if (state == UNIT_INACTIVE)
2288 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2289 }
2290 }
2291
2292 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2293 bool unexpected;
2294 const char *reason;
2295 Manager *m;
2296
2297 assert(u);
2298 assert(os < _UNIT_ACTIVE_STATE_MAX);
2299 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2300
2301 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2302 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2303 * remounted this function will be called too! */
2304
2305 m = u->manager;
2306
2307 /* Update timestamps for state changes */
2308 if (!MANAGER_IS_RELOADING(m)) {
2309 dual_timestamp_get(&u->state_change_timestamp);
2310
2311 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2312 u->inactive_exit_timestamp = u->state_change_timestamp;
2313 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2314 u->inactive_enter_timestamp = u->state_change_timestamp;
2315
2316 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2317 u->active_enter_timestamp = u->state_change_timestamp;
2318 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2319 u->active_exit_timestamp = u->state_change_timestamp;
2320 }
2321
2322 /* Keep track of failed units */
2323 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2324
2325 /* Make sure the cgroup and state files are always removed when we become inactive */
2326 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2327 unit_prune_cgroup(u);
2328 unit_unlink_state_files(u);
2329 }
2330
2331 unit_update_on_console(u);
2332
2333 if (u->job) {
2334 unexpected = false;
2335
2336 if (u->job->state == JOB_WAITING)
2337
2338 /* So we reached a different state for this
2339 * job. Let's see if we can run it now, in case
2340 * it previously failed due to EAGAIN. */
2341 job_add_to_run_queue(u->job);
2342
2343 /* Let's check whether this state change constitutes a
2344 * finished job, or maybe contradicts a running job and
2345 * hence needs to invalidate jobs. */
2346
2347 switch (u->job->type) {
2348
2349 case JOB_START:
2350 case JOB_VERIFY_ACTIVE:
2351
2352 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2353 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2354 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2355 unexpected = true;
2356
2357 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2358 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2359 }
2360
2361 break;
2362
2363 case JOB_RELOAD:
2364 case JOB_RELOAD_OR_START:
2365 case JOB_TRY_RELOAD:
2366
2367 if (u->job->state == JOB_RUNNING) {
2368 if (ns == UNIT_ACTIVE)
2369 job_finish_and_invalidate(u->job, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2370 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2371 unexpected = true;
2372
2373 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2374 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2375 }
2376 }
2377
2378 break;
2379
2380 case JOB_STOP:
2381 case JOB_RESTART:
2382 case JOB_TRY_RESTART:
2383
2384 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2385 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2386 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2387 unexpected = true;
2388 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2389 }
2390
2391 break;
2392
2393 default:
2394 assert_not_reached("Job type unknown");
2395 }
2396
2397 } else
2398 unexpected = true;
2399
2400 if (!MANAGER_IS_RELOADING(m)) {
2401
2402 /* If this state change happened without being
2403 * requested by a job, then let's retroactively start
2404 * or stop dependencies. We skip that step when
2405 * deserializing, since we don't want to create any
2406 * additional jobs just because something is already
2407 * activated. */
2408
2409 if (unexpected) {
2410 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2411 retroactively_start_dependencies(u);
2412 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2413 retroactively_stop_dependencies(u);
2414 }
2415
2416 /* Stop unneeded units regardless of whether going down was expected or not */
2417 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2418 check_unneeded_dependencies(u);
2419
2420 if (ns != os && ns == UNIT_FAILED) {
2421 log_unit_debug(u, "Unit entered failed state.");
2422
2423 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2424 unit_start_on_failure(u);
2425 }
2426
2427 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2428 /* This unit just finished starting up */
2429
2430 unit_emit_audit_start(u);
2431 manager_send_unit_plymouth(m, u);
2432 }
2433
2434 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2435 /* This unit just stopped/failed. */
2436
2437 unit_emit_audit_stop(u, ns);
2438 unit_log_resources(u);
2439 }
2440 }
2441
2442 manager_recheck_journal(m);
2443 manager_recheck_dbus(m);
2444
2445 unit_trigger_notify(u);
2446
2447 if (!MANAGER_IS_RELOADING(m)) {
2448 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2449 unit_submit_to_stop_when_unneeded_queue(u);
2450
2451 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2452 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2453 * without ever entering started.) */
2454 unit_check_binds_to(u);
2455
2456 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2457 reason = strjoina("unit ", u->id, " failed");
2458 (void) emergency_action(m, u->failure_action, 0, u->reboot_arg, reason);
2459 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2460 reason = strjoina("unit ", u->id, " succeeded");
2461 (void) emergency_action(m, u->success_action, 0, u->reboot_arg, reason);
2462 }
2463 }
2464
2465 unit_add_to_dbus_queue(u);
2466 unit_add_to_gc_queue(u);
2467 }
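/* Worked example (illustrative): a Type=oneshot service without RemainAfterExit= goes straight from
 * UNIT_ACTIVATING to UNIT_INACTIVE. On that single edge the function above finishes the start job,
 * re-checks StopWhenUnneeded= and BindsTo= constraints, emits the audit and resource records, and
 * may trigger the configured success_action -- which is why unit_check_binds_to() is run here rather
 * than only on explicit stop events. */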
2468
2469 int unit_watch_pid(Unit *u, pid_t pid) {
2470 int r;
2471
2472 assert(u);
2473 assert(pid_is_valid(pid));
2474
2475 /* Watch a specific PID */
2476
2477 r = set_ensure_allocated(&u->pids, NULL);
2478 if (r < 0)
2479 return r;
2480
2481 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2482 if (r < 0)
2483 return r;
2484
2485 /* First try, let's add the unit keyed by "pid". */
2486 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2487 if (r == -EEXIST) {
2488 Unit **array;
2489 bool found = false;
2490 size_t n = 0;
2491
2492 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2493 * to an array of Units rather than just a Unit) lists us already. */
2494
2495 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2496 if (array)
2497 for (; array[n]; n++)
2498 if (array[n] == u)
2499 found = true;
2500
2501 if (found) /* Found it already? If so, do nothing */
2502 r = 0;
2503 else {
2504 Unit **new_array;
2505
2506 /* Allocate a new array */
2507 new_array = new(Unit*, n + 2);
2508 if (!new_array)
2509 return -ENOMEM;
2510
2511 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2512 new_array[n] = u;
2513 new_array[n+1] = NULL;
2514
2515 /* Add or replace the old array */
2516 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2517 if (r < 0) {
2518 free(new_array);
2519 return r;
2520 }
2521
2522 free(array);
2523 }
2524 } else if (r < 0)
2525 return r;
2526
2527 r = set_put(u->pids, PID_TO_PTR(pid));
2528 if (r < 0)
2529 return r;
2530
2531 return 0;
2532 }
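/* Sketch of the resulting map layout (illustrative): watch_pids is keyed both ways. After a second
 * unit starts watching the same PID 42 the map holds, conceptually:
 *
 *     watch_pids[PID_TO_PTR(42)]  -> Unit *a                (first watcher, plain entry)
 *     watch_pids[PID_TO_PTR(-42)] -> (Unit*[]){ b, NULL }   (later watchers, NULL-terminated array)
 *
 * Consumers hence have to look up both the positive and the negative key to find all watchers. */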
2533
2534 void unit_unwatch_pid(Unit *u, pid_t pid) {
2535 Unit **array;
2536
2537 assert(u);
2538 assert(pid_is_valid(pid));
2539
2540 /* First let's drop the unit in case it's keyed as "pid". */
2541 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2542
2543 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2544 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2545 if (array) {
2546 size_t n, m = 0;
2547
2548 /* Let's iterate through the array, dropping our own entry */
2549 for (n = 0; array[n]; n++)
2550 if (array[n] != u)
2551 array[m++] = array[n];
2552 array[m] = NULL;
2553
2554 if (m == 0) {
2555 /* The array is now empty, remove the entire entry */
2556 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2557 free(array);
2558 }
2559 }
2560
2561 (void) set_remove(u->pids, PID_TO_PTR(pid));
2562 }
2563
2564 void unit_unwatch_all_pids(Unit *u) {
2565 assert(u);
2566
2567 while (!set_isempty(u->pids))
2568 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2569
2570 u->pids = set_free(u->pids);
2571 }
2572
2573 static void unit_tidy_watch_pids(Unit *u) {
2574 pid_t except1, except2;
2575 Iterator i;
2576 void *e;
2577
2578 assert(u);
2579
2580 /* Cleans dead PIDs from our list */
2581
2582 except1 = unit_main_pid(u);
2583 except2 = unit_control_pid(u);
2584
2585 SET_FOREACH(e, u->pids, i) {
2586 pid_t pid = PTR_TO_PID(e);
2587
2588 if (pid == except1 || pid == except2)
2589 continue;
2590
2591 if (!pid_is_unwaited(pid))
2592 unit_unwatch_pid(u, pid);
2593 }
2594 }
2595
2596 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2597 Unit *u = userdata;
2598
2599 assert(s);
2600 assert(u);
2601
2602 unit_tidy_watch_pids(u);
2603 unit_watch_all_pids(u);
2604
2605 /* If the PID set is empty now, then let's finish this off. */
2606 unit_synthesize_cgroup_empty_event(u);
2607
2608 return 0;
2609 }
2610
2611 int unit_enqueue_rewatch_pids(Unit *u) {
2612 int r;
2613
2614 assert(u);
2615
2616 if (!u->cgroup_path)
2617 return -ENOENT;
2618
2619 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2620 if (r < 0)
2621 return r;
2622 if (r > 0) /* On unified we can use proper notifications */
2623 return 0;
2624
2625 /* Enqueues a low-priority work item that will clean up dead PIDs from our list of PIDs to watch and subscribe
2626 * to new PIDs that might have appeared. We do this in a deferred event source because the work might be quite
2627 * slow, as it involves issuing kill(pid, 0) on all processes we watch. */
2628
2629 if (!u->rewatch_pids_event_source) {
2630 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2631
2632 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2633 if (r < 0)
2634 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2635
2636 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2637 if (r < 0)
2638 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2639
2640 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2641
2642 u->rewatch_pids_event_source = TAKE_PTR(s);
2643 }
2644
2645 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2646 if (r < 0)
2647 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2648
2649 return 0;
2650 }
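/* Rationale (illustrative summary): on the legacy cgroup hierarchy there is no reliable notification
 * when one of our cgroups becomes empty, so the manager polls instead. The idle-priority deferred
 * event source set up above runs unit_tidy_watch_pids() and unit_watch_all_pids() off the hot path,
 * since probing every watched PID can be slow for big cgroups. */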
2651
2652 void unit_dequeue_rewatch_pids(Unit *u) {
2653 int r;
2654 assert(u);
2655
2656 if (!u->rewatch_pids_event_source)
2657 return;
2658
2659 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2660 if (r < 0)
2661 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2662
2663 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2664 }
2665
2666 bool unit_job_is_applicable(Unit *u, JobType j) {
2667 assert(u);
2668 assert(j >= 0 && j < _JOB_TYPE_MAX);
2669
2670 switch (j) {
2671
2672 case JOB_VERIFY_ACTIVE:
2673 case JOB_START:
2674 case JOB_NOP:
2675 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2676 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2677 * jobs for them. */
2678 return true;
2679
2680 case JOB_STOP:
2681 /* Similarly to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2682 * external events), hence it makes no sense to permit enqueuing such a request either. */
2683 return !u->perpetual;
2684
2685 case JOB_RESTART:
2686 case JOB_TRY_RESTART:
2687 return unit_can_stop(u) && unit_can_start(u);
2688
2689 case JOB_RELOAD:
2690 case JOB_TRY_RELOAD:
2691 return unit_can_reload(u);
2692
2693 case JOB_RELOAD_OR_START:
2694 return unit_can_reload(u) && unit_can_start(u);
2695
2696 default:
2697 assert_not_reached("Invalid job type");
2698 }
2699 }
2700
2701 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2702 assert(u);
2703
2704 /* Only warn about some unit types */
2705 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2706 return;
2707
2708 if (streq_ptr(u->id, other))
2709 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2710 else
2711 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2712 }
2713
2714 static int unit_add_dependency_hashmap(
2715 Hashmap **h,
2716 Unit *other,
2717 UnitDependencyMask origin_mask,
2718 UnitDependencyMask destination_mask) {
2719
2720 UnitDependencyInfo info;
2721 int r;
2722
2723 assert(h);
2724 assert(other);
2725 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2726 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2727 assert(origin_mask > 0 || destination_mask > 0);
2728
2729 r = hashmap_ensure_allocated(h, NULL);
2730 if (r < 0)
2731 return r;
2732
2733 assert_cc(sizeof(void*) == sizeof(info));
2734
2735 info.data = hashmap_get(*h, other);
2736 if (info.data) {
2737 /* Entry already exists. Add in our mask. */
2738
2739 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2740 FLAGS_SET(destination_mask, info.destination_mask))
2741 return 0; /* NOP */
2742
2743 info.origin_mask |= origin_mask;
2744 info.destination_mask |= destination_mask;
2745
2746 r = hashmap_update(*h, other, info.data);
2747 } else {
2748 info = (UnitDependencyInfo) {
2749 .origin_mask = origin_mask,
2750 .destination_mask = destination_mask,
2751 };
2752
2753 r = hashmap_put(*h, other, info.data);
2754 }
2755 if (r < 0)
2756 return r;
2757
2758 return 1;
2759 }
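/* Note on the UnitDependencyInfo trick above (illustrative): origin_mask and destination_mask are
 * packed into a single machine word, so the hashmap can store them directly as its value pointer
 * without any separate allocation -- the assert_cc() verifies that sizeof(void*) == sizeof(info)
 * actually holds on the target architecture. */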
2760
2761 int unit_add_dependency(
2762 Unit *u,
2763 UnitDependency d,
2764 Unit *other,
2765 bool add_reference,
2766 UnitDependencyMask mask) {
2767
2768 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2769 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2770 [UNIT_WANTS] = UNIT_WANTED_BY,
2771 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2772 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2773 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2774 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2775 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2776 [UNIT_WANTED_BY] = UNIT_WANTS,
2777 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2778 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2779 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2780 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2781 [UNIT_BEFORE] = UNIT_AFTER,
2782 [UNIT_AFTER] = UNIT_BEFORE,
2783 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2784 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2785 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2786 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2787 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2788 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2789 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2790 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2791 };
2792 Unit *original_u = u, *original_other = other;
2793 int r;
2794
2795 assert(u);
2796 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2797 assert(other);
2798
2799 u = unit_follow_merge(u);
2800 other = unit_follow_merge(other);
2801
2802 /* We won't allow dependencies on ourselves. However, we
2803 * will not consider them an error. */
2804 if (u == other) {
2805 maybe_warn_about_dependency(original_u, original_other->id, d);
2806 return 0;
2807 }
2808
2809 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2810 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2811 log_unit_warning(u, "Dependency %s=%s ignored (.device units cannot be delayed)", unit_dependency_to_string(d), other->id);
2812 return 0;
2813 }
2814
2815 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2816 if (r < 0)
2817 return r;
2818
2819 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2820 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2821 if (r < 0)
2822 return r;
2823 }
2824
2825 if (add_reference) {
2826 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2827 if (r < 0)
2828 return r;
2829
2830 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2831 if (r < 0)
2832 return r;
2833 }
2834
2835 unit_add_to_dbus_queue(u);
2836 return 0;
2837 }
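/* Usage sketch (hypothetical call, not part of the original source): registering a Wants=
 * dependency originating from a unit file would look roughly like
 *
 *     r = unit_add_dependency(u, UNIT_WANTS, other, true, UNIT_DEPENDENCY_FILE);
 *
 * which records UNIT_WANTS in u->dependencies, the inverse UNIT_WANTED_BY in other->dependencies
 * and, because add_reference is true, a UNIT_REFERENCES/UNIT_REFERENCED_BY pair as well. */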
2838
2839 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2840 int r;
2841
2842 assert(u);
2843
2844 r = unit_add_dependency(u, d, other, add_reference, mask);
2845 if (r < 0)
2846 return r;
2847
2848 return unit_add_dependency(u, e, other, add_reference, mask);
2849 }
2850
2851 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2852 int r;
2853
2854 assert(u);
2855 assert(name);
2856 assert(buf);
2857 assert(ret);
2858
2859 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2860 *buf = NULL;
2861 *ret = name;
2862 return 0;
2863 }
2864
2865 if (u->instance)
2866 r = unit_name_replace_instance(name, u->instance, buf);
2867 else {
2868 _cleanup_free_ char *i = NULL;
2869
2870 r = unit_name_to_prefix(u->id, &i);
2871 if (r < 0)
2872 return r;
2873
2874 r = unit_name_replace_instance(name, i, buf);
2875 }
2876 if (r < 0)
2877 return r;
2878
2879 *ret = *buf;
2880 return 0;
2881 }
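/* Example (illustrative): for an instantiated unit "getty@tty1.service" that names the template
 * "autovt@.service", this resolves to "autovt@tty1.service" -- the caller's instance is copied into
 * the template. A non-template name is passed through unchanged, with *buf left NULL. */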
2882
2883 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2884 _cleanup_free_ char *buf = NULL;
2885 Unit *other;
2886 int r;
2887
2888 assert(u);
2889 assert(name);
2890
2891 r = resolve_template(u, name, &buf, &name);
2892 if (r < 0)
2893 return r;
2894
2895 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2896 if (r < 0)
2897 return r;
2898
2899 return unit_add_dependency(u, d, other, add_reference, mask);
2900 }
2901
2902 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2903 _cleanup_free_ char *buf = NULL;
2904 Unit *other;
2905 int r;
2906
2907 assert(u);
2908 assert(name);
2909
2910 r = resolve_template(u, name, &buf, &name);
2911 if (r < 0)
2912 return r;
2913
2914 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2915 if (r < 0)
2916 return r;
2917
2918 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2919 }
2920
2921 int set_unit_path(const char *p) {
2922 /* This is mostly for debug purposes */
2923 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2924 return -errno;
2925
2926 return 0;
2927 }
2928
2929 char *unit_dbus_path(Unit *u) {
2930 assert(u);
2931
2932 if (!u->id)
2933 return NULL;
2934
2935 return unit_dbus_path_from_name(u->id);
2936 }
2937
2938 char *unit_dbus_path_invocation_id(Unit *u) {
2939 assert(u);
2940
2941 if (sd_id128_is_null(u->invocation_id))
2942 return NULL;
2943
2944 return unit_dbus_path_from_name(u->invocation_id_string);
2945 }
2946
2947 int unit_set_slice(Unit *u, Unit *slice) {
2948 assert(u);
2949 assert(slice);
2950
2951 /* Sets the unit slice if it has not been set before. Is extra
2952 * careful to only allow this for units that actually have a
2953 * cgroup context. Also, we don't allow setting this for slices
2954 * (since the parent slice is derived from the name). Make
2955 * sure the unit we set is actually a slice. */
2956
2957 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2958 return -EOPNOTSUPP;
2959
2960 if (u->type == UNIT_SLICE)
2961 return -EINVAL;
2962
2963 if (unit_active_state(u) != UNIT_INACTIVE)
2964 return -EBUSY;
2965
2966 if (slice->type != UNIT_SLICE)
2967 return -EINVAL;
2968
2969 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2970 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2971 return -EPERM;
2972
2973 if (UNIT_DEREF(u->slice) == slice)
2974 return 0;
2975
2976 /* Disallow slice changes if @u is already bound to cgroups */
2977 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2978 return -EBUSY;
2979
2980 unit_ref_set(&u->slice, u, slice);
2981 return 1;
2982 }
2983
2984 int unit_set_default_slice(Unit *u) {
2985 _cleanup_free_ char *b = NULL;
2986 const char *slice_name;
2987 Unit *slice;
2988 int r;
2989
2990 assert(u);
2991
2992 if (UNIT_ISSET(u->slice))
2993 return 0;
2994
2995 if (u->instance) {
2996 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2997
2998 /* Implicitly place all instantiated units in their
2999 * own per-template slice */
3000
3001 r = unit_name_to_prefix(u->id, &prefix);
3002 if (r < 0)
3003 return r;
3004
3005 /* The prefix is already escaped, but it might include
3006 * "-", which has a special meaning for slice units,
3007 * hence escape it once more here. */
3008 escaped = unit_name_escape(prefix);
3009 if (!escaped)
3010 return -ENOMEM;
3011
3012 if (MANAGER_IS_SYSTEM(u->manager))
3013 b = strjoin("system-", escaped, ".slice");
3014 else
3015 b = strappend(escaped, ".slice");
3016 if (!b)
3017 return -ENOMEM;
3018
3019 slice_name = b;
3020 } else
3021 slice_name =
3022 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3023 ? SPECIAL_SYSTEM_SLICE
3024 : SPECIAL_ROOT_SLICE;
3025
3026 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3027 if (r < 0)
3028 return r;
3029
3030 return unit_set_slice(u, slice);
3031 }
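/* Example (illustrative): in the system manager, an instantiated "getty@tty1.service" has the prefix
 * "getty", which escapes unchanged, so it is placed in "system-getty.slice". A plain "foo.service"
 * without an instance lands in SPECIAL_SYSTEM_SLICE (or SPECIAL_ROOT_SLICE for the user manager and
 * the init scope). */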
3032
3033 const char *unit_slice_name(Unit *u) {
3034 assert(u);
3035
3036 if (!UNIT_ISSET(u->slice))
3037 return NULL;
3038
3039 return UNIT_DEREF(u->slice)->id;
3040 }
3041
3042 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3043 _cleanup_free_ char *t = NULL;
3044 int r;
3045
3046 assert(u);
3047 assert(type);
3048 assert(_found);
3049
3050 r = unit_name_change_suffix(u->id, type, &t);
3051 if (r < 0)
3052 return r;
3053 if (unit_has_name(u, t))
3054 return -EINVAL;
3055
3056 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3057 assert(r < 0 || *_found != u);
3058 return r;
3059 }
3060
3061 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3062 const char *name, *old_owner, *new_owner;
3063 Unit *u = userdata;
3064 int r;
3065
3066 assert(message);
3067 assert(u);
3068
3069 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3070 if (r < 0) {
3071 bus_log_parse_error(r);
3072 return 0;
3073 }
3074
3075 old_owner = empty_to_null(old_owner);
3076 new_owner = empty_to_null(new_owner);
3077
3078 if (UNIT_VTABLE(u)->bus_name_owner_change)
3079 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3080
3081 return 0;
3082 }
3083
3084 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3085 const char *match;
3086
3087 assert(u);
3088 assert(bus);
3089 assert(name);
3090
3091 if (u->match_bus_slot)
3092 return -EBUSY;
3093
3094 match = strjoina("type='signal',"
3095 "sender='org.freedesktop.DBus',"
3096 "path='/org/freedesktop/DBus',"
3097 "interface='org.freedesktop.DBus',"
3098 "member='NameOwnerChanged',"
3099 "arg0='", name, "'");
3100
3101 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3102 }
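/* Example (hypothetical bus name, for illustration): for name "org.example.Foo" the match installed
 * above expands to
 *
 *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.example.Foo'
 *
 * so that signal_name_owner_changed() fires whenever ownership of that name changes. */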
3103
3104 int unit_watch_bus_name(Unit *u, const char *name) {
3105 int r;
3106
3107 assert(u);
3108 assert(name);
3109
3110 /* Watch a specific name on the bus. We only support one unit
3111 * watching each name for now. */
3112
3113 if (u->manager->api_bus) {
3114 /* If the bus is already available, install the match directly.
3115 * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
3116 r = unit_install_bus_match(u, u->manager->api_bus, name);
3117 if (r < 0)
3118 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3119 }
3120
3121 r = hashmap_put(u->manager->watch_bus, name, u);
3122 if (r < 0) {
3123 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3124 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3125 }
3126
3127 return 0;
3128 }
3129
3130 void unit_unwatch_bus_name(Unit *u, const char *name) {
3131 assert(u);
3132 assert(name);
3133
3134 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3135 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3136 }
3137
3138 bool unit_can_serialize(Unit *u) {
3139 assert(u);
3140
3141 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3142 }
3143
3144 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3145 _cleanup_free_ char *s = NULL;
3146 int r;
3147
3148 assert(f);
3149 assert(key);
3150
3151 if (mask == 0)
3152 return 0;
3153
3154 r = cg_mask_to_string(mask, &s);
3155 if (r < 0)
3156 return log_error_errno(r, "Failed to format cgroup mask: %m");
3157
3158 return serialize_item(f, key, s);
3159 }
3160
3161 static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3162 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3163 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3164 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3165 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3166 };
3167
3168 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3169 CGroupIPAccountingMetric m;
3170 int r;
3171
3172 assert(u);
3173 assert(f);
3174 assert(fds);
3175
3176 if (unit_can_serialize(u)) {
3177 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3178 if (r < 0)
3179 return r;
3180 }
3181
3182 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3183
3184 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3185 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3186 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3187 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3188
3189 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3190 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3191
3192 if (dual_timestamp_is_set(&u->condition_timestamp))
3193 (void) serialize_bool(f, "condition-result", u->condition_result);
3194
3195 if (dual_timestamp_is_set(&u->assert_timestamp))
3196 (void) serialize_bool(f, "assert-result", u->assert_result);
3197
3198 (void) serialize_bool(f, "transient", u->transient);
3199 (void) serialize_bool(f, "in-audit", u->in_audit);
3200
3201 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3202 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3203 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3204 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3205 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3206
3207 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3208 if (u->cpu_usage_last != NSEC_INFINITY)
3209 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3210
3211 if (u->cgroup_path)
3212 (void) serialize_item(f, "cgroup", u->cgroup_path);
3213
3214 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3215 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3216 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3217 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3218
3219 if (uid_is_valid(u->ref_uid))
3220 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3221 if (gid_is_valid(u->ref_gid))
3222 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3223
3224 if (!sd_id128_is_null(u->invocation_id))
3225 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3226
3227 bus_track_serialize(u->bus_track, f, "ref");
3228
3229 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3230 uint64_t v;
3231
3232 r = unit_get_ip_accounting(u, m, &v);
3233 if (r >= 0)
3234 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3235 }
3236
3237 if (serialize_jobs) {
3238 if (u->job) {
3239 fputs("job\n", f);
3240 job_serialize(u->job, f);
3241 }
3242
3243 if (u->nop_job) {
3244 fputs("job\n", f);
3245 job_serialize(u->nop_job, f);
3246 }
3247 }
3248
3249 /* End marker */
3250 fputc('\n', f);
3251 return 0;
3252 }
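/* Example of the resulting stream (abridged, hypothetical values): the serialization written above
 * is a plain "key=value" text format terminated by an empty line, e.g. for a running service:
 *
 *     state-change-timestamp=1547390444123456 8437529876
 *     cpu-usage-base=0
 *     cgroup=/system.slice/foo.service
 *     cgroup-realized=yes
 *     invocation-id=0e43f1e6c2bb4ee8ae57db5de24cc401
 *
 * unit_deserialize() below parses these keys back, handing anything it does not recognize to the
 * unit type's own deserialize_item(). */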
3253
3254 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3255 int r;
3256
3257 assert(u);
3258 assert(f);
3259 assert(fds);
3260
3261 for (;;) {
3262 _cleanup_free_ char *line = NULL;
3263 CGroupIPAccountingMetric m;
3264 char *l, *v;
3265 size_t k;
3266
3267 r = read_line(f, LONG_LINE_MAX, &line);
3268 if (r < 0)
3269 return log_error_errno(r, "Failed to read serialization line: %m");
3270 if (r == 0) /* eof */
3271 break;
3272
3273 l = strstrip(line);
3274 if (isempty(l)) /* End marker */
3275 break;
3276
3277 k = strcspn(l, "=");
3278
3279 if (l[k] == '=') {
3280 l[k] = 0;
3281 v = l+k+1;
3282 } else
3283 v = l+k;
3284
3285 if (streq(l, "job")) {
3286 if (v[0] == '\0') {
3287 /* new-style serialized job */
3288 Job *j;
3289
3290 j = job_new_raw(u);
3291 if (!j)
3292 return log_oom();
3293
3294 r = job_deserialize(j, f);
3295 if (r < 0) {
3296 job_free(j);
3297 return r;
3298 }
3299
3300 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3301 if (r < 0) {
3302 job_free(j);
3303 return r;
3304 }
3305
3306 r = job_install_deserialized(j);
3307 if (r < 0) {
3308 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3309 job_free(j);
3310 return r;
3311 }
3312 } else /* legacy for pre-44 */
3313 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3314 continue;
3315 } else if (streq(l, "state-change-timestamp")) {
3316 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3317 continue;
3318 } else if (streq(l, "inactive-exit-timestamp")) {
3319 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3320 continue;
3321 } else if (streq(l, "active-enter-timestamp")) {
3322 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3323 continue;
3324 } else if (streq(l, "active-exit-timestamp")) {
3325 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3326 continue;
3327 } else if (streq(l, "inactive-enter-timestamp")) {
3328 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3329 continue;
3330 } else if (streq(l, "condition-timestamp")) {
3331 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3332 continue;
3333 } else if (streq(l, "assert-timestamp")) {
3334 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3335 continue;
3336 } else if (streq(l, "condition-result")) {
3337
3338 r = parse_boolean(v);
3339 if (r < 0)
3340 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3341 else
3342 u->condition_result = r;
3343
3344 continue;
3345
3346 } else if (streq(l, "assert-result")) {
3347
3348 r = parse_boolean(v);
3349 if (r < 0)
3350 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3351 else
3352 u->assert_result = r;
3353
3354 continue;
3355
3356 } else if (streq(l, "transient")) {
3357
3358 r = parse_boolean(v);
3359 if (r < 0)
3360 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3361 else
3362 u->transient = r;
3363
3364 continue;
3365
3366 } else if (streq(l, "in-audit")) {
3367
3368 r = parse_boolean(v);
3369 if (r < 0)
3370 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3371 else
3372 u->in_audit = r;
3373
3374 continue;
3375
3376 } else if (streq(l, "exported-invocation-id")) {
3377
3378 r = parse_boolean(v);
3379 if (r < 0)
3380 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3381 else
3382 u->exported_invocation_id = r;
3383
3384 continue;
3385
3386 } else if (streq(l, "exported-log-level-max")) {
3387
3388 r = parse_boolean(v);
3389 if (r < 0)
3390 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3391 else
3392 u->exported_log_level_max = r;
3393
3394 continue;
3395
3396 } else if (streq(l, "exported-log-extra-fields")) {
3397
3398 r = parse_boolean(v);
3399 if (r < 0)
3400 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3401 else
3402 u->exported_log_extra_fields = r;
3403
3404 continue;
3405
3406 } else if (streq(l, "exported-log-rate-limit-interval")) {
3407
3408 r = parse_boolean(v);
3409 if (r < 0)
3410 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3411 else
3412 u->exported_log_rate_limit_interval = r;
3413
3414 continue;
3415
3416 } else if (streq(l, "exported-log-rate-limit-burst")) {
3417
3418 r = parse_boolean(v);
3419 if (r < 0)
3420 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3421 else
3422 u->exported_log_rate_limit_burst = r;
3423
3424 continue;
3425
3426 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3427
3428 r = safe_atou64(v, &u->cpu_usage_base);
3429 if (r < 0)
3430 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3431
3432 continue;
3433
3434 } else if (streq(l, "cpu-usage-last")) {
3435
3436 r = safe_atou64(v, &u->cpu_usage_last);
3437 if (r < 0)
3438 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3439
3440 continue;
3441
3442 } else if (streq(l, "cgroup")) {
3443
3444 r = unit_set_cgroup_path(u, v);
3445 if (r < 0)
3446 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3447
3448 (void) unit_watch_cgroup(u);
3449
3450 continue;
3451 } else if (streq(l, "cgroup-realized")) {
3452 int b;
3453
3454 b = parse_boolean(v);
3455 if (b < 0)
3456 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3457 else
3458 u->cgroup_realized = b;
3459
3460 continue;
3461
3462 } else if (streq(l, "cgroup-realized-mask")) {
3463
3464 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3465 if (r < 0)
3466 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3467 continue;
3468
3469 } else if (streq(l, "cgroup-enabled-mask")) {
3470
3471 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3472 if (r < 0)
3473 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3474 continue;
3475
3476 } else if (streq(l, "cgroup-invalidated-mask")) {
3477
3478 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3479 if (r < 0)
3480 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3481 continue;
3482
3483 } else if (streq(l, "ref-uid")) {
3484 uid_t uid;
3485
3486 r = parse_uid(v, &uid);
3487 if (r < 0)
3488 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3489 else
3490 unit_ref_uid_gid(u, uid, GID_INVALID);
3491
3492 continue;
3493
3494 } else if (streq(l, "ref-gid")) {
3495 gid_t gid;
3496
3497 r = parse_gid(v, &gid);
3498 if (r < 0)
3499 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3500 else
3501 unit_ref_uid_gid(u, UID_INVALID, gid);
3502
3503 continue;
3504
3505 } else if (streq(l, "ref")) {
3506
3507 r = strv_extend(&u->deserialized_refs, v);
3508 if (r < 0)
3509 return log_oom();
3510
3511 continue;
3512 } else if (streq(l, "invocation-id")) {
3513 sd_id128_t id;
3514
3515 r = sd_id128_from_string(v, &id);
3516 if (r < 0)
3517 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3518 else {
3519 r = unit_set_invocation_id(u, id);
3520 if (r < 0)
3521 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3522 }
3523
3524 continue;
3525 }
3526
3527 /* Check if this is an IP accounting metric serialization field */
3528 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3529 if (streq(l, ip_accounting_metric_field[m]))
3530 break;
3531 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3532 uint64_t c;
3533
3534 r = safe_atou64(v, &c);
3535 if (r < 0)
3536 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3537 else
3538 u->ip_accounting_extra[m] = c;
3539 continue;
3540 }
3541
3542 if (unit_can_serialize(u)) {
3543 r = exec_runtime_deserialize_compat(u, l, v, fds);
3544 if (r < 0) {
3545 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3546 continue;
3547 }
3548
3549 /* Returns positive if key was handled by the call */
3550 if (r > 0)
3551 continue;
3552
3553 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3554 if (r < 0)
3555 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3556 }
3557 }
3558
3559 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3560 * useful so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3561 * before 228, where the base for timeouts was not persistent across reboots. */
3562
3563 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3564 dual_timestamp_get(&u->state_change_timestamp);
3565
3566 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3567 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3568 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3569 unit_invalidate_cgroup_bpf(u);
3570
3571 return 0;
3572 }
3573
3574 int unit_deserialize_skip(FILE *f) {
3575 int r;
3576 assert(f);
3577
3578 /* Skip serialized data for this unit. We don't know what it is. */
3579
3580 for (;;) {
3581 _cleanup_free_ char *line = NULL;
3582 char *l;
3583
3584 r = read_line(f, LONG_LINE_MAX, &line);
3585 if (r < 0)
3586 return log_error_errno(r, "Failed to read serialization line: %m");
3587 if (r == 0)
3588 return 0;
3589
3590 l = strstrip(line);
3591
3592 /* End marker */
3593 if (isempty(l))
3594 return 1;
3595 }
3596 }
3597
3598 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3599 Unit *device;
3600 _cleanup_free_ char *e = NULL;
3601 int r;
3602
3603 assert(u);
3604
3605 /* Adds in links to the device node that this unit is based on */
3606 if (isempty(what))
3607 return 0;
3608
3609 if (!is_device_path(what))
3610 return 0;
3611
3612 /* When device units aren't supported (such as in a
3613 * container), don't create dependencies on them. */
3614 if (!unit_type_supported(UNIT_DEVICE))
3615 return 0;
3616
3617 r = unit_name_from_path(what, ".device", &e);
3618 if (r < 0)
3619 return r;
3620
3621 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3622 if (r < 0)
3623 return r;
3624
3625 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3626 dep = UNIT_BINDS_TO;
3627
3628 r = unit_add_two_dependencies(u, UNIT_AFTER,
3629 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3630 device, true, mask);
3631 if (r < 0)
3632 return r;
3633
3634 if (wants) {
3635 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3636 if (r < 0)
3637 return r;
3638 }
3639
3640 return 0;
3641 }
3642
3643 int unit_coldplug(Unit *u) {
3644 int r = 0, q;
3645 char **i;
3646
3647 assert(u);
3648
3649 /* Make sure we don't enter a loop when coldplugging recursively. */
3650 if (u->coldplugged)
3651 return 0;
3652
3653 u->coldplugged = true;
3654
3655 STRV_FOREACH(i, u->deserialized_refs) {
3656 q = bus_unit_track_add_name(u, *i);
3657 if (q < 0 && r >= 0)
3658 r = q;
3659 }
3660 u->deserialized_refs = strv_free(u->deserialized_refs);
3661
3662 if (UNIT_VTABLE(u)->coldplug) {
3663 q = UNIT_VTABLE(u)->coldplug(u);
3664 if (q < 0 && r >= 0)
3665 r = q;
3666 }
3667
3668 if (u->job) {
3669 q = job_coldplug(u->job);
3670 if (q < 0 && r >= 0)
3671 r = q;
3672 }
3673
3674 return r;
3675 }
3676
3677 void unit_catchup(Unit *u) {
3678 assert(u);
3679
3680 if (UNIT_VTABLE(u)->catchup)
3681 UNIT_VTABLE(u)->catchup(u);
3682 }
3683
3684 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3685 struct stat st;
3686
3687 if (!path)
3688 return false;
3689
3690 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3691 * are never out-of-date. */
3692 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3693 return false;
3694
3695 if (stat(path, &st) < 0)
3696 /* What, cannot access this anymore? */
3697 return true;
3698
3699 if (path_masked)
3700 /* For masked files check if they are still so */
3701 return !null_or_empty(&st);
3702 else
3703 /* For non-empty files check the mtime */
3704 return timespec_load(&st.st_mtim) > mtime;
3707 }
3708
3709 bool unit_need_daemon_reload(Unit *u) {
3710 _cleanup_strv_free_ char **t = NULL;
3711 char **path;
3712
3713 assert(u);
3714
3715 /* For unit files, we allow masking… */
3716 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3717 u->load_state == UNIT_MASKED))
3718 return true;
3719
3720 /* Source paths should not be masked… */
3721 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3722 return true;
3723
3724 if (u->load_state == UNIT_LOADED)
3725 (void) unit_find_dropin_paths(u, &t);
3726 if (!strv_equal(u->dropin_paths, t))
3727 return true;
3728
3729 /* … any drop-ins that are masked are simply omitted from the list. */
3730 STRV_FOREACH(path, u->dropin_paths)
3731 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3732 return true;
3733
3734 return false;
3735 }
3736
3737 void unit_reset_failed(Unit *u) {
3738 assert(u);
3739
3740 if (UNIT_VTABLE(u)->reset_failed)
3741 UNIT_VTABLE(u)->reset_failed(u);
3742
3743 RATELIMIT_RESET(u->start_limit);
3744 u->start_limit_hit = false;
3745 }
3746
3747 Unit *unit_following(Unit *u) {
3748 assert(u);
3749
3750 if (UNIT_VTABLE(u)->following)
3751 return UNIT_VTABLE(u)->following(u);
3752
3753 return NULL;
3754 }
3755
3756 bool unit_stop_pending(Unit *u) {
3757 assert(u);
3758
3759 /* This call does not check the current state of the unit. It's
3760 * hence useful to be called from state change calls of the
3761 * unit itself, where the state isn't updated yet. This is
3762 * different from unit_inactive_or_pending() which checks both
3763 * the current state and for a queued job. */
3764
3765 return u->job && u->job->type == JOB_STOP;
3766 }
3767
3768 bool unit_inactive_or_pending(Unit *u) {
3769 assert(u);
3770
3771 /* Returns true if the unit is inactive or going down */
3772
3773 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3774 return true;
3775
3776 if (unit_stop_pending(u))
3777 return true;
3778
3779 return false;
3780 }
3781
3782 bool unit_active_or_pending(Unit *u) {
3783 assert(u);
3784
3785 /* Returns true if the unit is active or going up */
3786
3787 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3788 return true;
3789
3790 if (u->job &&
3791 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3792 return true;
3793
3794 return false;
3795 }
3796
3797 bool unit_will_restart(Unit *u) {
3798 assert(u);
3799
3800 if (!UNIT_VTABLE(u)->will_restart)
3801 return false;
3802
3803 return UNIT_VTABLE(u)->will_restart(u);
3804 }
3805
3806 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3807 assert(u);
3808 assert(w >= 0 && w < _KILL_WHO_MAX);
3809 assert(SIGNAL_VALID(signo));
3810
3811 if (!UNIT_VTABLE(u)->kill)
3812 return -EOPNOTSUPP;
3813
3814 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3815 }
3816
3817 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3818 _cleanup_set_free_ Set *pid_set = NULL;
3819 int r;
3820
3821 pid_set = set_new(NULL);
3822 if (!pid_set)
3823 return NULL;
3824
3825 /* Exclude the main/control pids from being killed via the cgroup */
3826 if (main_pid > 0) {
3827 r = set_put(pid_set, PID_TO_PTR(main_pid));
3828 if (r < 0)
3829 return NULL;
3830 }
3831
3832 if (control_pid > 0) {
3833 r = set_put(pid_set, PID_TO_PTR(control_pid));
3834 if (r < 0)
3835 return NULL;
3836 }
3837
3838 return TAKE_PTR(pid_set);
3839 }
3840
3841 int unit_kill_common(
3842 Unit *u,
3843 KillWho who,
3844 int signo,
3845 pid_t main_pid,
3846 pid_t control_pid,
3847 sd_bus_error *error) {
3848
3849 int r = 0;
3850 bool killed = false;
3851
3852 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3853 if (main_pid < 0)
3854 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3855 else if (main_pid == 0)
3856 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3857 }
3858
3859 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3860 if (control_pid < 0)
3861 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3862 else if (control_pid == 0)
3863 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3864 }
3865
3866 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3867 if (control_pid > 0) {
3868 if (kill(control_pid, signo) < 0)
3869 r = -errno;
3870 else
3871 killed = true;
3872 }
3873
3874 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3875 if (main_pid > 0) {
3876 if (kill(main_pid, signo) < 0)
3877 r = -errno;
3878 else
3879 killed = true;
3880 }
3881
3882 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3883 _cleanup_set_free_ Set *pid_set = NULL;
3884 int q;
3885
3886 /* Exclude the main/control pids from being killed via the cgroup */
3887 pid_set = unit_pid_set(main_pid, control_pid);
3888 if (!pid_set)
3889 return -ENOMEM;
3890
3891 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3892 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3893 r = q;
3894 else
3895 killed = true;
3896 }
3897
3898 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3899 return -ESRCH;
3900
3901 return r;
3902 }
3903
3904 int unit_following_set(Unit *u, Set **s) {
3905 assert(u);
3906 assert(s);
3907
3908 if (UNIT_VTABLE(u)->following_set)
3909 return UNIT_VTABLE(u)->following_set(u, s);
3910
3911 *s = NULL;
3912 return 0;
3913 }
3914
3915 UnitFileState unit_get_unit_file_state(Unit *u) {
3916 int r;
3917
3918 assert(u);
3919
3920 if (u->unit_file_state < 0 && u->fragment_path) {
3921 r = unit_file_get_state(
3922 u->manager->unit_file_scope,
3923 NULL,
3924 u->id,
3925 &u->unit_file_state);
3926 if (r < 0)
3927 u->unit_file_state = UNIT_FILE_BAD;
3928 }
3929
3930 return u->unit_file_state;
3931 }
3932
3933 int unit_get_unit_file_preset(Unit *u) {
3934 assert(u);
3935
3936 if (u->unit_file_preset < 0 && u->fragment_path)
3937 u->unit_file_preset = unit_file_query_preset(
3938 u->manager->unit_file_scope,
3939 NULL,
3940 basename(u->fragment_path));
3941
3942 return u->unit_file_preset;
3943 }
3944
3945 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3946 assert(ref);
3947 assert(source);
3948 assert(target);
3949
3950 if (ref->target)
3951 unit_ref_unset(ref);
3952
3953 ref->source = source;
3954 ref->target = target;
3955 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3956 return target;
3957 }
3958
3959 void unit_ref_unset(UnitRef *ref) {
3960 assert(ref);
3961
3962 if (!ref->target)
3963 return;
3964
3965 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3966 * be unreferenced now. */
3967 unit_add_to_gc_queue(ref->target);
3968
3969 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3970 ref->source = ref->target = NULL;
3971 }
3972
3973 static int user_from_unit_name(Unit *u, char **ret) {
3974
3975 static const uint8_t hash_key[] = {
3976 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3977 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3978 };
3979
3980 _cleanup_free_ char *n = NULL;
3981 int r;
3982
3983 r = unit_name_to_prefix(u->id, &n);
3984 if (r < 0)
3985 return r;
3986
3987 if (valid_user_group_name(n)) {
3988 *ret = TAKE_PTR(n);
3989 return 0;
3990 }
3991
3992 /* If we can't use the unit name as a user name, then let's hash it and use that */
3993 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3994 return -ENOMEM;
3995
3996 return 0;
3997 }
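/* Example (illustrative): a unit "foo.service" yields the dynamic user name "foo"; a unit whose
 * prefix is not a valid user/group name instead falls back to a hashed name of the form
 * "_du0123456789abcdef", i.e. "_du" followed by the 16 hex digits of the siphash24() of the prefix
 * under the fixed key above. */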
3998
3999 int unit_patch_contexts(Unit *u) {
4000 CGroupContext *cc;
4001 ExecContext *ec;
4002 unsigned i;
4003 int r;
4004
4005 assert(u);
4006
4007 /* Patch in the manager defaults into the exec and cgroup
4008 * contexts, _after_ the rest of the settings have been
4009 * initialized */
4010
4011 ec = unit_get_exec_context(u);
4012 if (ec) {
4013 /* This only copies in the ones that need memory */
4014 for (i = 0; i < _RLIMIT_MAX; i++)
4015 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4016 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4017 if (!ec->rlimit[i])
4018 return -ENOMEM;
4019 }
4020
4021 if (MANAGER_IS_USER(u->manager) &&
4022 !ec->working_directory) {
4023
4024 r = get_home_dir(&ec->working_directory);
4025 if (r < 0)
4026 return r;
4027
4028 /* Allow user services to run, even if the
4029 * home directory is missing */
4030 ec->working_directory_missing_ok = true;
4031 }
4032
4033 if (ec->private_devices)
4034 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4035
4036 if (ec->protect_kernel_modules)
4037 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4038
4039 if (ec->dynamic_user) {
4040 if (!ec->user) {
4041 r = user_from_unit_name(u, &ec->user);
4042 if (r < 0)
4043 return r;
4044 }
4045
4046 if (!ec->group) {
4047 ec->group = strdup(ec->user);
4048 if (!ec->group)
4049 return -ENOMEM;
4050 }
4051
4052 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4053 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4054
4055 ec->private_tmp = true;
4056 ec->remove_ipc = true;
4057 ec->protect_system = PROTECT_SYSTEM_STRICT;
4058 if (ec->protect_home == PROTECT_HOME_NO)
4059 ec->protect_home = PROTECT_HOME_READ_ONLY;
4060 }
4061 }
4062
4063 cc = unit_get_cgroup_context(u);
4064 if (cc && ec) {
4065
4066 if (ec->private_devices &&
4067 cc->device_policy == CGROUP_AUTO)
4068 cc->device_policy = CGROUP_CLOSED;
4069
4070 if (ec->root_image &&
4071 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4072
4073 /* When RootImage= is specified, the following devices are touched. */
4074 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4075 if (r < 0)
4076 return r;
4077
4078 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4079 if (r < 0)
4080 return r;
4081
4082 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4083 if (r < 0)
4084 return r;
4085 }
4086 }
4087
4088 return 0;
4089 }
4090
4091 ExecContext *unit_get_exec_context(Unit *u) {
4092 size_t offset;
4093 assert(u);
4094
4095 if (u->type < 0)
4096 return NULL;
4097
4098 offset = UNIT_VTABLE(u)->exec_context_offset;
4099 if (offset <= 0)
4100 return NULL;
4101
4102 return (ExecContext*) ((uint8_t*) u + offset);
4103 }
4104
4105 KillContext *unit_get_kill_context(Unit *u) {
4106 size_t offset;
4107 assert(u);
4108
4109 if (u->type < 0)
4110 return NULL;
4111
4112 offset = UNIT_VTABLE(u)->kill_context_offset;
4113 if (offset <= 0)
4114 return NULL;
4115
4116 return (KillContext*) ((uint8_t*) u + offset);
4117 }
4118
4119 CGroupContext *unit_get_cgroup_context(Unit *u) {
4120 size_t offset;
4121
4122 if (u->type < 0)
4123 return NULL;
4124
4125 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4126 if (offset <= 0)
4127 return NULL;
4128
4129 return (CGroupContext*) ((uint8_t*) u + offset);
4130 }
4131
4132 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4133 size_t offset;
4134
4135 if (u->type < 0)
4136 return NULL;
4137
4138 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4139 if (offset <= 0)
4140 return NULL;
4141
4142 return *(ExecRuntime**) ((uint8_t*) u + offset);
4143 }
4144
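/* The four accessors above share one pattern: each unit type's vtable stores the byte
 * offset of the respective context structure within its concrete unit struct, with 0
 * meaning "this type has no such context". A vtable typically fills these in via
 * offsetof(), along these lines (sketch modeled on the service type; exact member
 * names may differ):
 *
 *     const UnitVTable service_vtable = {
 *         .exec_context_offset = offsetof(Service, exec_context),
 *         .cgroup_context_offset = offsetof(Service, cgroup_context),
 *         .kill_context_offset = offsetof(Service, kill_context),
 *         .exec_runtime_offset = offsetof(Service, exec_runtime),
 *         ...
 *     };
 */
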
4145 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4146 assert(u);
4147
4148 if (UNIT_WRITE_FLAGS_NOOP(flags))
4149 return NULL;
4150
4151 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4152 return u->manager->lookup_paths.transient;
4153
4154 if (flags & UNIT_PERSISTENT)
4155 return u->manager->lookup_paths.persistent_control;
4156
4157 if (flags & UNIT_RUNTIME)
4158 return u->manager->lookup_paths.runtime_control;
4159
4160 return NULL;
4161 }
4162
4163 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4164 char *ret = NULL;
4165
4166 if (!s)
4167 return NULL;
4168
4169 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4170 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4171 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4172 * escaped version, but *buf, if passed, only contains a pointer if an allocation was necessary. If 'buf' is
4173 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4174 * allocations. */
4175
4176 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4177 ret = specifier_escape(s);
4178 if (!ret)
4179 return NULL;
4180
4181 s = ret;
4182 }
4183
4184 if (flags & UNIT_ESCAPE_C) {
4185 char *a;
4186
4187 a = cescape(s);
4188 free(ret);
4189 if (!a)
4190 return NULL;
4191
4192 ret = a;
4193 }
4194
4195 if (buf) {
4196 *buf = ret;
4197 return ret ?: (char*) s;
4198 }
4199
4200 return ret ?: strdup(s);
4201 }
4202
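/* Usage sketch (hypothetical caller): passing 'buf' lets the caller skip an allocation
 * when no escaping was necessary:
 *
 *     _cleanup_free_ char *buf = NULL;
 *     const char *e;
 *
 *     e = unit_escape_setting(setting, UNIT_ESCAPE_SPECIFIERS, &buf);
 *     if (!e)
 *         return -ENOMEM;
 *
 * Afterwards 'e' either points into the original 'setting' string (and buf stays NULL),
 * or it equals the freshly allocated 'buf', which the cleanup handler frees. */
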
4203 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4204 _cleanup_free_ char *result = NULL;
4205 size_t n = 0, allocated = 0;
4206 char **i;
4207
4208 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4209 * way suitable for ExecStart= stanzas */
4210
4211 STRV_FOREACH(i, l) {
4212 _cleanup_free_ char *buf = NULL;
4213 const char *p;
4214 size_t a;
4215 char *q;
4216
4217 p = unit_escape_setting(*i, flags, &buf);
4218 if (!p)
4219 return NULL;
4220
4221 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4222 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4223 return NULL;
4224
4225 q = result + n;
4226 if (n > 0)
4227 *(q++) = ' ';
4228
4229 *(q++) = '"';
4230 q = stpcpy(q, p);
4231 *(q++) = '"';
4232
4233 n += a;
4234 }
4235
4236 if (!GREEDY_REALLOC(result, allocated, n + 1))
4237 return NULL;
4238
4239 result[n] = 0;
4240
4241 return TAKE_PTR(result);
4242 }
4243
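/* Example (sketch): for l = { "/usr/bin/foo", "--arg", "a b" } and flags == 0 the
 * function returns the single string
 *
 *     "/usr/bin/foo" "--arg" "a b"
 *
 * i.e. every entry double-quoted and separated by spaces, matching the quoting that
 * ExecStart= lines expect. */
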
4244 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4245 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4246 const char *dir, *wrapped;
4247 int r;
4248
4249 assert(u);
4250 assert(name);
4251 assert(data);
4252
4253 if (UNIT_WRITE_FLAGS_NOOP(flags))
4254 return 0;
4255
4256 data = unit_escape_setting(data, flags, &escaped);
4257 if (!data)
4258 return -ENOMEM;
4259
4260 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4261 * previous section header is the same. */
4262
4263 if (flags & UNIT_PRIVATE) {
4264 if (!UNIT_VTABLE(u)->private_section)
4265 return -EINVAL;
4266
4267 if (!u->transient_file || u->last_section_private < 0)
4268 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4269 else if (u->last_section_private == 0)
4270 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4271 } else {
4272 if (!u->transient_file || u->last_section_private < 0)
4273 data = strjoina("[Unit]\n", data);
4274 else if (u->last_section_private > 0)
4275 data = strjoina("\n[Unit]\n", data);
4276 }
4277
4278 if (u->transient_file) {
4279 /* When this is a transient unit file that is still being created, let's not create a new drop-in
4280 * but instead write to the transient unit file. */
4281 fputs(data, u->transient_file);
4282
4283 if (!endswith(data, "\n"))
4284 fputc('\n', u->transient_file);
4285
4286 /* Remember which section we wrote this entry to */
4287 u->last_section_private = !!(flags & UNIT_PRIVATE);
4288 return 0;
4289 }
4290
4291 dir = unit_drop_in_dir(u, flags);
4292 if (!dir)
4293 return -EINVAL;
4294
4295 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4296 "# or an equivalent operation. Do not edit.\n",
4297 data,
4298 "\n");
4299
4300 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4301 if (r < 0)
4302 return r;
4303
4304 (void) mkdir_p_label(p, 0755);
4305 r = write_string_file_atomic_label(q, wrapped);
4306 if (r < 0)
4307 return r;
4308
4309 r = strv_push(&u->dropin_paths, q);
4310 if (r < 0)
4311 return r;
4312 q = NULL;
4313
4314 strv_uniq(u->dropin_paths);
4315
4316 u->dropin_mtime = now(CLOCK_REALTIME);
4317
4318 return 0;
4319 }
4320
4321 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4322 _cleanup_free_ char *p = NULL;
4323 va_list ap;
4324 int r;
4325
4326 assert(u);
4327 assert(name);
4328 assert(format);
4329
4330 if (UNIT_WRITE_FLAGS_NOOP(flags))
4331 return 0;
4332
4333 va_start(ap, format);
4334 r = vasprintf(&p, format, ap);
4335 va_end(ap);
4336
4337 if (r < 0)
4338 return -ENOMEM;
4339
4340 return unit_write_setting(u, flags, name, p);
4341 }
4342
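/* Usage sketch (hypothetical caller; the property and variable names are made up): the
 * typical pattern for persisting a property change as a drop-in or transient setting:
 *
 *     r = unit_write_settingf(u, flags | UNIT_ESCAPE_SPECIFIERS, "Description",
 *                             "Description=%s", d);
 *     if (r < 0)
 *         return r;
 */
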
4343 int unit_make_transient(Unit *u) {
4344 _cleanup_free_ char *path = NULL;
4345 FILE *f;
4346
4347 assert(u);
4348
4349 if (!UNIT_VTABLE(u)->can_transient)
4350 return -EOPNOTSUPP;
4351
4352 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4353
4354 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
4355 if (!path)
4356 return -ENOMEM;
4357
4358 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4359 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4360
4361 RUN_WITH_UMASK(0022) {
4362 f = fopen(path, "we");
4363 if (!f)
4364 return -errno;
4365 }
4366
4367 safe_fclose(u->transient_file);
4368 u->transient_file = f;
4369
4370 free_and_replace(u->fragment_path, path);
4371
4372 u->source_path = mfree(u->source_path);
4373 u->dropin_paths = strv_free(u->dropin_paths);
4374 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4375
4376 u->load_state = UNIT_STUB;
4377 u->load_error = 0;
4378 u->transient = true;
4379
4380 unit_add_to_dbus_queue(u);
4381 unit_add_to_gc_queue(u);
4382
4383 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4384 u->transient_file);
4385
4386 return 0;
4387 }
4388
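/* Lifecycle sketch (simplified, illustrative): how a transient unit file comes together.
 *
 *     r = unit_make_transient(u);      // opens the file, writes the header comment
 *     if (r < 0)
 *         return r;
 *
 *     // properties are appended while u->transient_file is still open:
 *     r = unit_write_settingf(u, UNIT_RUNTIME, "Description", "Description=%s", d);
 *
 *     // later, unit_load() closes u->transient_file and parses the completed file
 */
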
4389 static void log_kill(pid_t pid, int sig, void *userdata) {
4390 _cleanup_free_ char *comm = NULL;
4391
4392 (void) get_process_comm(pid, &comm);
4393
4394 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4395 only, like for example systemd's own PAM stub process. */
4396 if (comm && comm[0] == '(')
4397 return;
4398
4399 log_unit_notice(userdata,
4400 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4401 pid,
4402 strna(comm),
4403 signal_to_string(sig));
4404 }
4405
4406 static int operation_to_signal(KillContext *c, KillOperation k) {
4407 assert(c);
4408
4409 switch (k) {
4410
4411 case KILL_TERMINATE:
4412 case KILL_TERMINATE_AND_LOG:
4413 return c->kill_signal;
4414
4415 case KILL_KILL:
4416 return c->final_kill_signal;
4417
4418 case KILL_WATCHDOG:
4419 return c->watchdog_signal;
4420
4421 default:
4422 assert_not_reached("KillOperation unknown");
4423 }
4424 }
4425
4426 int unit_kill_context(
4427 Unit *u,
4428 KillContext *c,
4429 KillOperation k,
4430 pid_t main_pid,
4431 pid_t control_pid,
4432 bool main_pid_alien) {
4433
4434 bool wait_for_exit = false, send_sighup;
4435 cg_kill_log_func_t log_func = NULL;
4436 int sig, r;
4437
4438 assert(u);
4439 assert(c);
4440
4441 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4442 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4443
4444 if (c->kill_mode == KILL_NONE)
4445 return 0;
4446
4447 sig = operation_to_signal(c, k);
4448
4449 send_sighup =
4450 c->send_sighup &&
4451 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4452 sig != SIGHUP;
4453
4454 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4455 log_func = log_kill;
4456
4457 if (main_pid > 0) {
4458 if (log_func)
4459 log_func(main_pid, sig, u);
4460
4461 r = kill_and_sigcont(main_pid, sig);
4462 if (r < 0 && r != -ESRCH) {
4463 _cleanup_free_ char *comm = NULL;
4464 (void) get_process_comm(main_pid, &comm);
4465
4466 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4467 } else {
4468 if (!main_pid_alien)
4469 wait_for_exit = true;
4470
4471 if (r != -ESRCH && send_sighup)
4472 (void) kill(main_pid, SIGHUP);
4473 }
4474 }
4475
4476 if (control_pid > 0) {
4477 if (log_func)
4478 log_func(control_pid, sig, u);
4479
4480 r = kill_and_sigcont(control_pid, sig);
4481 if (r < 0 && r != -ESRCH) {
4482 _cleanup_free_ char *comm = NULL;
4483 (void) get_process_comm(control_pid, &comm);
4484
4485 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4486 } else {
4487 wait_for_exit = true;
4488
4489 if (r != -ESRCH && send_sighup)
4490 (void) kill(control_pid, SIGHUP);
4491 }
4492 }
4493
4494 if (u->cgroup_path &&
4495 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4496 _cleanup_set_free_ Set *pid_set = NULL;
4497
4498 /* Exclude the main/control pids from being killed via the cgroup */
4499 pid_set = unit_pid_set(main_pid, control_pid);
4500 if (!pid_set)
4501 return -ENOMEM;
4502
4503 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4504 sig,
4505 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4506 pid_set,
4507 log_func, u);
4508 if (r < 0) {
4509 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4510 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4511
4512 } else if (r > 0) {
4513
4514 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4515 * we are running in a container or if this is a delegation unit, simply because cgroup
4516 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4517 * of containers it can be confused easily by left-over directories in the cgroup — which
4518 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4519 * there we get proper events. Hence rely on them. */
4520
4521 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4522 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4523 wait_for_exit = true;
4524
4525 if (send_sighup) {
4526 set_free(pid_set);
4527
4528 pid_set = unit_pid_set(main_pid, control_pid);
4529 if (!pid_set)
4530 return -ENOMEM;
4531
4532 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4533 SIGHUP,
4534 CGROUP_IGNORE_SELF,
4535 pid_set,
4536 NULL, NULL);
4537 }
4538 }
4539 }
4540
4541 return wait_for_exit;
4542 }
4543
4544 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4545 _cleanup_free_ char *p = NULL;
4546 char *prefix;
4547 UnitDependencyInfo di;
4548 int r;
4549
4550 assert(u);
4551 assert(path);
4552
4553 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4554 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4555 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4556 * determine which units to make themselves a dependency of. */
4557
4558 if (!path_is_absolute(path))
4559 return -EINVAL;
4560
4561 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4562 if (r < 0)
4563 return r;
4564
4565 p = strdup(path);
4566 if (!p)
4567 return -ENOMEM;
4568
4569 path = path_simplify(p, false);
4570
4571 if (!path_is_normalized(path))
4572 return -EPERM;
4573
4574 if (hashmap_contains(u->requires_mounts_for, path))
4575 return 0;
4576
4577 di = (UnitDependencyInfo) {
4578 .origin_mask = mask
4579 };
4580
4581 r = hashmap_put(u->requires_mounts_for, path, di.data);
4582 if (r < 0)
4583 return r;
4584 p = NULL;
4585
4586 prefix = alloca(strlen(path) + 1);
4587 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4588 Set *x;
4589
4590 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4591 if (!x) {
4592 _cleanup_free_ char *q = NULL;
4593
4594 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4595 if (r < 0)
4596 return r;
4597
4598 q = strdup(prefix);
4599 if (!q)
4600 return -ENOMEM;
4601
4602 x = set_new(NULL);
4603 if (!x)
4604 return -ENOMEM;
4605
4606 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4607 if (r < 0) {
4608 set_free(x);
4609 return r;
4610 }
4611 q = NULL;
4612 }
4613
4614 r = set_put(x, u);
4615 if (r < 0)
4616 return r;
4617 }
4618
4619 return 0;
4620 }
4621
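/* Example (sketch): unit_require_mounts_for(u, "/var/lib/foo", mask) stores
 * "/var/lib/foo" in u->requires_mounts_for and registers u in the manager-wide table
 * under every prefix that PATH_FOREACH_PREFIX_MORE visits:
 *
 *     "/", "/var", "/var/lib", "/var/lib/foo"
 *
 * A mount unit that later appears for any of these paths can thus find all units that
 * need it with a single hashmap lookup. */
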
4622 int unit_setup_exec_runtime(Unit *u) {
4623 ExecRuntime **rt;
4624 size_t offset;
4625 Unit *other;
4626 Iterator i;
4627 void *v;
4628 int r;
4629
4630 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4631 assert(offset > 0);
4632
4633 /* Is there already an ExecRuntime for this unit? */
4634 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4635 if (*rt)
4636 return 0;
4637
4638 /* Try to get it from somebody else */
4639 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4640 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4641 if (r == 1)
4642 return 1;
4643 }
4644
4645 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4646 }
4647
4648 int unit_setup_dynamic_creds(Unit *u) {
4649 ExecContext *ec;
4650 DynamicCreds *dcreds;
4651 size_t offset;
4652
4653 assert(u);
4654
4655 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4656 assert(offset > 0);
4657 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4658
4659 ec = unit_get_exec_context(u);
4660 assert(ec);
4661
4662 if (!ec->dynamic_user)
4663 return 0;
4664
4665 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4666 }
4667
4668 bool unit_type_supported(UnitType t) {
4669 if (_unlikely_(t < 0))
4670 return false;
4671 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4672 return false;
4673
4674 if (!unit_vtable[t]->supported)
4675 return true;
4676
4677 return unit_vtable[t]->supported();
4678 }
4679
4680 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4681 int r;
4682
4683 assert(u);
4684 assert(where);
4685
4686 r = dir_is_empty(where);
4687 if (r > 0 || r == -ENOTDIR)
4688 return;
4689 if (r < 0) {
4690 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4691 return;
4692 }
4693
4694 log_struct(LOG_NOTICE,
4695 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4696 LOG_UNIT_ID(u),
4697 LOG_UNIT_INVOCATION_ID(u),
4698 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4699 "WHERE=%s", where);
4700 }
4701
4702 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4703 _cleanup_free_ char *canonical_where = NULL;
4704 int r;
4705
4706 assert(u);
4707 assert(where);
4708
4709 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4710 if (r < 0) {
4711 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4712 return 0;
4713 }
4714
4715 /* We will happily ignore a trailing slash (or any redundant slashes) */
4716 if (path_equal(where, canonical_where))
4717 return 0;
4718
4719 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4720 log_struct(LOG_ERR,
4721 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4722 LOG_UNIT_ID(u),
4723 LOG_UNIT_INVOCATION_ID(u),
4724 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4725 "WHERE=%s", where);
4726
4727 return -ELOOP;
4728 }
4729
4730 bool unit_is_pristine(Unit *u) {
4731 assert(u);
4732
4733 /* Check if the unit already exists or is already around,
4734 * in a number of different ways. Note that to cater for unit
4735 * types such as slice, we are generally fine with units that
4736 * are marked UNIT_LOADED even though nothing was actually
4737 * loaded, as those unit types don't require a file on disk. */
4738
4739 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
4740 !u->fragment_path &&
4741 !u->source_path &&
4742 strv_isempty(u->dropin_paths) &&
4743 !u->job &&
4744 !u->merged_into;
4745 }
4746
4747 pid_t unit_control_pid(Unit *u) {
4748 assert(u);
4749
4750 if (UNIT_VTABLE(u)->control_pid)
4751 return UNIT_VTABLE(u)->control_pid(u);
4752
4753 return 0;
4754 }
4755
4756 pid_t unit_main_pid(Unit *u) {
4757 assert(u);
4758
4759 if (UNIT_VTABLE(u)->main_pid)
4760 return UNIT_VTABLE(u)->main_pid(u);
4761
4762 return 0;
4763 }
4764
4765 static void unit_unref_uid_internal(
4766 Unit *u,
4767 uid_t *ref_uid,
4768 bool destroy_now,
4769 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4770
4771 assert(u);
4772 assert(ref_uid);
4773 assert(_manager_unref_uid);
4774
4775 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4776 * gid_t are actually the same type, with the same validity rules.
4777 *
4778 * Drops a reference to UID/GID from a unit. */
4779
4780 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4781 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4782
4783 if (!uid_is_valid(*ref_uid))
4784 return;
4785
4786 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4787 *ref_uid = UID_INVALID;
4788 }
4789
4790 void unit_unref_uid(Unit *u, bool destroy_now) {
4791 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4792 }
4793
4794 void unit_unref_gid(Unit *u, bool destroy_now) {
4795 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4796 }
4797
4798 static int unit_ref_uid_internal(
4799 Unit *u,
4800 uid_t *ref_uid,
4801 uid_t uid,
4802 bool clean_ipc,
4803 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4804
4805 int r;
4806
4807 assert(u);
4808 assert(ref_uid);
4809 assert(uid_is_valid(uid));
4810 assert(_manager_ref_uid);
4811
4812 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4813 * are actually the same type, and have the same validity rules.
4814 *
4815 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4816 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4817 * drops to zero. */
4818
4819 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4820 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4821
4822 if (*ref_uid == uid)
4823 return 0;
4824
4825 if (uid_is_valid(*ref_uid)) /* Already set? */
4826 return -EBUSY;
4827
4828 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4829 if (r < 0)
4830 return r;
4831
4832 *ref_uid = uid;
4833 return 1;
4834 }
4835
4836 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4837 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4838 }
4839
4840 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4841 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4842 }
4843
4844 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4845 int r = 0, q = 0;
4846
4847 assert(u);
4848
4849 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4850
4851 if (uid_is_valid(uid)) {
4852 r = unit_ref_uid(u, uid, clean_ipc);
4853 if (r < 0)
4854 return r;
4855 }
4856
4857 if (gid_is_valid(gid)) {
4858 q = unit_ref_gid(u, gid, clean_ipc);
4859 if (q < 0) {
4860 if (r > 0)
4861 unit_unref_uid(u, false);
4862
4863 return q;
4864 }
4865 }
4866
4867 return r > 0 || q > 0;
4868 }
4869
4870 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4871 ExecContext *c;
4872 int r;
4873
4874 assert(u);
4875
4876 c = unit_get_exec_context(u);
4877
4878 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4879 if (r < 0)
4880 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4881
4882 return r;
4883 }
4884
4885 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4886 assert(u);
4887
4888 unit_unref_uid(u, destroy_now);
4889 unit_unref_gid(u, destroy_now);
4890 }
4891
4892 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4893 int r;
4894
4895 assert(u);
4896
4897 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group name
4898 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4899 * objects when no service references the UID/GID anymore. */
4900
4901 r = unit_ref_uid_gid(u, uid, gid);
4902 if (r > 0)
4903 bus_unit_send_change_signal(u);
4904 }
4905
4906 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4907 int r;
4908
4909 assert(u);
4910
4911 /* Set the invocation ID for this unit. If this fails, it does not roll back but resets the whole thing. */
4912
4913 if (sd_id128_equal(u->invocation_id, id))
4914 return 0;
4915
4916 if (!sd_id128_is_null(u->invocation_id))
4917 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4918
4919 if (sd_id128_is_null(id)) {
4920 r = 0;
4921 goto reset;
4922 }
4923
4924 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4925 if (r < 0)
4926 goto reset;
4927
4928 u->invocation_id = id;
4929 sd_id128_to_string(id, u->invocation_id_string);
4930
4931 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4932 if (r < 0)
4933 goto reset;
4934
4935 return 0;
4936
4937 reset:
4938 u->invocation_id = SD_ID128_NULL;
4939 u->invocation_id_string[0] = 0;
4940 return r;
4941 }
4942
4943 int unit_acquire_invocation_id(Unit *u) {
4944 sd_id128_t id;
4945 int r;
4946
4947 assert(u);
4948
4949 r = sd_id128_randomize(&id);
4950 if (r < 0)
4951 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4952
4953 r = unit_set_invocation_id(u, id);
4954 if (r < 0)
4955 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4956
4957 return 0;
4958 }
4959
4960 int unit_set_exec_params(Unit *u, ExecParameters *p) {
4961 int r;
4962
4963 assert(u);
4964 assert(p);
4965
4966 /* Copy parameters from manager */
4967 r = manager_get_effective_environment(u->manager, &p->environment);
4968 if (r < 0)
4969 return r;
4970
4971 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
4972 p->cgroup_supported = u->manager->cgroup_supported;
4973 p->prefix = u->manager->prefix;
4974 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
4975
4976 /* Copy parameters from unit */
4977 p->cgroup_path = u->cgroup_path;
4978 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
4979
4980 return 0;
4981 }
4982
4983 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
4984 int r;
4985
4986 assert(u);
4987 assert(ret);
4988
4989 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
4990 * and > 0 in the parent. The 'ret' parameter is always filled in with the child's PID. */
4991
4992 (void) unit_realize_cgroup(u);
4993
4994 r = safe_fork(name, FORK_REOPEN_LOG, ret);
4995 if (r != 0)
4996 return r;
4997
4998 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
4999 (void) ignore_signals(SIGPIPE, -1);
5000
5001 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5002
5003 if (u->cgroup_path) {
5004 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5005 if (r < 0) {
5006 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5007 _exit(EXIT_CGROUP);
5008 }
5009 }
5010
5011 return 0;
5012 }
5013
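/* Usage sketch (hypothetical caller): the child/parent split callers are expected to
 * implement around this helper:
 *
 *     pid_t pid;
 *
 *     r = unit_fork_helper_process(u, "(sd-example)", &pid);
 *     if (r < 0)
 *         return r;
 *     if (r == 0) {
 *         // child, already moved into the unit's cgroup: do the work, then _exit()
 *         _exit(EXIT_SUCCESS);
 *     }
 *     // parent: 'pid' refers to the helper, e.g. to pass to unit_watch_pid()
 */
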
5014 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5015 assert(u);
5016 assert(d >= 0);
5017 assert(d < _UNIT_DEPENDENCY_MAX);
5018 assert(other);
5019
5020 if (di.origin_mask == 0 && di.destination_mask == 0) {
5021 /* No bit set anymore, let's drop the whole entry */
5022 assert_se(hashmap_remove(u->dependencies[d], other));
5023 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5024 } else
5025 /* Mask was reduced, let's update the entry */
5026 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5027 }
5028
5029 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5030 UnitDependency d;
5031
5032 assert(u);
5033
5034 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5035
5036 if (mask == 0)
5037 return;
5038
5039 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5040 bool done;
5041
5042 do {
5043 UnitDependencyInfo di;
5044 Unit *other;
5045 Iterator i;
5046
5047 done = true;
5048
5049 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5050 UnitDependency q;
5051
5052 if ((di.origin_mask & ~mask) == di.origin_mask)
5053 continue;
5054 di.origin_mask &= ~mask;
5055 unit_update_dependency_mask(u, d, other, di);
5056
5057 /* We updated the dependency from our unit to the other unit now. But most dependencies
5058 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5059 * all dependency types on the other unit and delete all those which point to us and
5060 * have the right mask set. */
5061
5062 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5063 UnitDependencyInfo dj;
5064
5065 dj.data = hashmap_get(other->dependencies[q], u);
5066 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5067 continue;
5068 dj.destination_mask &= ~mask;
5069
5070 unit_update_dependency_mask(other, q, u, dj);
5071 }
5072
5073 unit_add_to_gc_queue(other);
5074
5075 done = false;
5076 break;
5077 }
5078
5079 } while (!done);
5080 }
5081 }
5082
5083 static int unit_export_invocation_id(Unit *u) {
5084 const char *p;
5085 int r;
5086
5087 assert(u);
5088
5089 if (u->exported_invocation_id)
5090 return 0;
5091
5092 if (sd_id128_is_null(u->invocation_id))
5093 return 0;
5094
5095 p = strjoina("/run/systemd/units/invocation:", u->id);
5096 r = symlink_atomic(u->invocation_id_string, p);
5097 if (r < 0)
5098 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5099
5100 u->exported_invocation_id = true;
5101 return 0;
5102 }
5103
5104 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5105 const char *p;
5106 char buf[2];
5107 int r;
5108
5109 assert(u);
5110 assert(c);
5111
5112 if (u->exported_log_level_max)
5113 return 0;
5114
5115 if (c->log_level_max < 0)
5116 return 0;
5117
5118 assert(c->log_level_max <= 7);
5119
5120 buf[0] = '0' + c->log_level_max;
5121 buf[1] = 0;
5122
5123 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5124 r = symlink_atomic(buf, p);
5125 if (r < 0)
5126 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5127
5128 u->exported_log_level_max = true;
5129 return 0;
5130 }
5131
5132 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5133 _cleanup_close_ int fd = -1;
5134 struct iovec *iovec;
5135 const char *p;
5136 char *pattern;
5137 le64_t *sizes;
5138 ssize_t n;
5139 size_t i;
5140 int r;
5141
5142 if (u->exported_log_extra_fields)
5143 return 0;
5144
5145 if (c->n_log_extra_fields <= 0)
5146 return 0;
5147
5148 sizes = newa(le64_t, c->n_log_extra_fields);
5149 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5150
5151 for (i = 0; i < c->n_log_extra_fields; i++) {
5152 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5153
5154 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5155 iovec[i*2+1] = c->log_extra_fields[i];
5156 }
5157
5158 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5159 pattern = strjoina(p, ".XXXXXX");
5160
5161 fd = mkostemp_safe(pattern);
5162 if (fd < 0)
5163 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5164
5165 n = writev(fd, iovec, c->n_log_extra_fields*2);
5166 if (n < 0) {
5167 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5168 goto fail;
5169 }
5170
5171 (void) fchmod(fd, 0644);
5172
5173 if (rename(pattern, p) < 0) {
5174 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5175 goto fail;
5176 }
5177
5178 u->exported_log_extra_fields = true;
5179 return 0;
5180
5181 fail:
5182 (void) unlink(pattern);
5183 return r;
5184 }
5185
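/* On-disk format sketch of the file written above: a flat sequence of records, each a
 * little-endian 64-bit length immediately followed by that many bytes of payload:
 *
 *     le64(len0) "FIELD0=value0" le64(len1) "FIELD1=value1" ...
 *
 * This is what the interleaved sizes[]/iovec[] arrays produce via writev(), and it lets
 * a reader split the file back into the original field iovecs. */
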
5186 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5187 _cleanup_free_ char *buf = NULL;
5188 const char *p;
5189 int r;
5190
5191 assert(u);
5192 assert(c);
5193
5194 if (u->exported_log_rate_limit_interval)
5195 return 0;
5196
5197 if (c->log_rate_limit_interval_usec == 0)
5198 return 0;
5199
5200 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5201
5202 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5203 return log_oom();
5204
5205 r = symlink_atomic(buf, p);
5206 if (r < 0)
5207 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5208
5209 u->exported_log_rate_limit_interval = true;
5210 return 0;
5211 }
5212
5213 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5214 _cleanup_free_ char *buf = NULL;
5215 const char *p;
5216 int r;
5217
5218 assert(u);
5219 assert(c);
5220
5221 if (u->exported_log_rate_limit_burst)
5222 return 0;
5223
5224 if (c->log_rate_limit_burst == 0)
5225 return 0;
5226
5227 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5228
5229 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5230 return log_oom();
5231
5232 r = symlink_atomic(buf, p);
5233 if (r < 0)
5234 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5235
5236 u->exported_log_rate_limit_burst = true;
5237 return 0;
5238 }
5239
5240 void unit_export_state_files(Unit *u) {
5241 const ExecContext *c;
5242
5243 assert(u);
5244
5245 if (!u->id)
5246 return;
5247
5248 if (!MANAGER_IS_SYSTEM(u->manager))
5249 return;
5250
5251 if (MANAGER_IS_TEST_RUN(u->manager))
5252 return;
5253
5254 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5255 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5256 * the IPC system itself and PID 1 also log to the journal.
5257 *
5258 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system
5259 * as an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5260 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5261 * namespace at least.
5262 *
5263 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5264 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5265 * them with one. */
5266
5267 (void) unit_export_invocation_id(u);
5268
5269 c = unit_get_exec_context(u);
5270 if (c) {
5271 (void) unit_export_log_level_max(u, c);
5272 (void) unit_export_log_extra_fields(u, c);
5273 (void) unit_export_log_rate_limit_interval(u, c);
5274 (void) unit_export_log_rate_limit_burst(u, c);
5275 }
5276 }
5277
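/* For reference, the files and symlinks managed above all live under
 * /run/systemd/units/ and are keyed by unit name, e.g. for foo.service:
 *
 *     invocation:foo.service              symlink, target is the 128-bit invocation ID
 *     log-level-max:foo.service           symlink, target is a single digit 0..7
 *     log-extra-fields:foo.service        regular file, format described above
 *     log-rate-limit-interval:foo.service symlink, target is the interval in usec
 *     log-rate-limit-burst:foo.service    symlink, target is the burst count
 */
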
5278 void unit_unlink_state_files(Unit *u) {
5279 const char *p;
5280
5281 assert(u);
5282
5283 if (!u->id)
5284 return;
5285
5286 if (!MANAGER_IS_SYSTEM(u->manager))
5287 return;
5288
5289 /* Undoes the effect of unit_export_state_files() */
5290
5291 if (u->exported_invocation_id) {
5292 p = strjoina("/run/systemd/units/invocation:", u->id);
5293 (void) unlink(p);
5294
5295 u->exported_invocation_id = false;
5296 }
5297
5298 if (u->exported_log_level_max) {
5299 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5300 (void) unlink(p);
5301
5302 u->exported_log_level_max = false;
5303 }
5304
5305 if (u->exported_log_extra_fields) {
5306 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5307 (void) unlink(p);
5308
5309 u->exported_log_extra_fields = false;
5310 }
5311
5312 if (u->exported_log_rate_limit_interval) {
5313 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5314 (void) unlink(p);
5315
5316 u->exported_log_rate_limit_interval = false;
5317 }
5318
5319 if (u->exported_log_rate_limit_burst) {
5320 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5321 (void) unlink(p);
5322
5323 u->exported_log_rate_limit_burst = false;
5324 }
5325 }
5326
5327 int unit_prepare_exec(Unit *u) {
5328 int r;
5329
5330 assert(u);
5331
5332 /* Prepares everything so that we can fork off a process for this unit */
5333
5334 (void) unit_realize_cgroup(u);
5335
5336 if (u->reset_accounting) {
5337 (void) unit_reset_cpu_accounting(u);
5338 (void) unit_reset_ip_accounting(u);
5339 u->reset_accounting = false;
5340 }
5341
5342 unit_export_state_files(u);
5343
5344 r = unit_setup_exec_runtime(u);
5345 if (r < 0)
5346 return r;
5347
5348 r = unit_setup_dynamic_creds(u);
5349 if (r < 0)
5350 return r;
5351
5352 return 0;
5353 }
5354
5355 static void log_leftover(pid_t pid, int sig, void *userdata) {
5356 _cleanup_free_ char *comm = NULL;
5357
5358 (void) get_process_comm(pid, &comm);
5359
5360 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5361 return;
5362
5363 log_unit_warning(userdata,
5364 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5365 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5366 pid, strna(comm));
5367 }
5368
5369 void unit_warn_leftover_processes(Unit *u) {
5370 assert(u);
5371
5372 (void) unit_pick_cgroup_path(u);
5373
5374 if (!u->cgroup_path)
5375 return;
5376
5377 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5378 }
5379
5380 bool unit_needs_console(Unit *u) {
5381 ExecContext *ec;
5382 UnitActiveState state;
5383
5384 assert(u);
5385
5386 state = unit_active_state(u);
5387
5388 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5389 return false;
5390
5391 if (UNIT_VTABLE(u)->needs_console)
5392 return UNIT_VTABLE(u)->needs_console(u);
5393
5394 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5395 ec = unit_get_exec_context(u);
5396 if (!ec)
5397 return false;
5398
5399 return exec_context_may_touch_console(ec);
5400 }
5401
5402 const char *unit_label_path(Unit *u) {
5403 const char *p;
5404
5405 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5406 * when validating access checks. */
5407
5408 p = u->source_path ?: u->fragment_path;
5409 if (!p)
5410 return NULL;
5411
5412 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5413 if (path_equal(p, "/dev/null"))
5414 return NULL;
5415
5416 return p;
5417 }
5418
5419 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5420 int r;
5421
5422 assert(u);
5423
5424 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5425 * and not a kernel thread either */
5426
5427 /* First, a simple range check */
5428 if (!pid_is_valid(pid))
5429 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5430
5431 /* Some extra safety check */
5432 if (pid == 1 || pid == getpid_cached())
5433 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5434
5435 /* Don't even begin to bother with kernel threads */
5436 r = is_kernel_thread(pid);
5437 if (r == -ESRCH)
5438 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5439 if (r < 0)
5440 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5441 if (r > 0)
5442 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5443
5444 return 0;
5445 }
5446
5447 void unit_log_success(Unit *u) {
5448 assert(u);
5449
5450 log_struct(LOG_INFO,
5451 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5452 LOG_UNIT_ID(u),
5453 LOG_UNIT_INVOCATION_ID(u),
5454 LOG_UNIT_MESSAGE(u, "Succeeded."));
5455 }
5456
5457 void unit_log_failure(Unit *u, const char *result) {
5458 assert(u);
5459 assert(result);
5460
5461 log_struct(LOG_WARNING,
5462 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5463 LOG_UNIT_ID(u),
5464 LOG_UNIT_INVOCATION_ID(u),
5465 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5466 "UNIT_RESULT=%s", result);
5467 }
5468
5469 void unit_log_process_exit(
5470 Unit *u,
5471 int level,
5472 const char *kind,
5473 const char *command,
5474 int code,
5475 int status) {
5476
5477 assert(u);
5478 assert(kind);
5479
5480 if (code != CLD_EXITED)
5481 level = LOG_WARNING;
5482
5483 log_struct(level,
5484 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5485 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5486 kind,
5487 sigchld_code_to_string(code), status,
5488 strna(code == CLD_EXITED
5489 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5490 : signal_to_string(status))),
5491 "EXIT_CODE=%s", sigchld_code_to_string(code),
5492 "EXIT_STATUS=%i", status,
5493 "COMMAND=%s", strna(command),
5494 LOG_UNIT_ID(u),
5495 LOG_UNIT_INVOCATION_ID(u));
5496 }
5497
5498 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5499 [COLLECT_INACTIVE] = "inactive",
5500 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5501 };
5502
5503 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);