]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
tree-wide: drop license boilerplate
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6 ***/
7
8 #include <errno.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <sys/prctl.h>
12 #include <sys/stat.h>
13 #include <unistd.h>
14
15 #include "sd-id128.h"
16 #include "sd-messages.h"
17
18 #include "alloc-util.h"
19 #include "bus-common-errors.h"
20 #include "bus-util.h"
21 #include "cgroup-util.h"
22 #include "dbus-unit.h"
23 #include "dbus.h"
24 #include "dropin.h"
25 #include "escape.h"
26 #include "execute.h"
27 #include "fd-util.h"
28 #include "fileio-label.h"
29 #include "format-util.h"
30 #include "fs-util.h"
31 #include "id128-util.h"
32 #include "io-util.h"
33 #include "load-dropin.h"
34 #include "load-fragment.h"
35 #include "log.h"
36 #include "macro.h"
37 #include "missing.h"
38 #include "mkdir.h"
39 #include "parse-util.h"
40 #include "path-util.h"
41 #include "process-util.h"
42 #include "set.h"
43 #include "signal-util.h"
44 #include "sparse-endian.h"
45 #include "special.h"
46 #include "specifier.h"
47 #include "stat-util.h"
48 #include "stdio-util.h"
49 #include "string-table.h"
50 #include "string-util.h"
51 #include "strv.h"
52 #include "umask-util.h"
53 #include "unit-name.h"
54 #include "unit.h"
55 #include "user-util.h"
56 #include "virt.h"
57
/* Dispatch table mapping each concrete unit type to its implementation vtable,
 * indexed by UnitType. Looked up through the UNIT_VTABLE() macro used below. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
71
72 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
73
/* Allocates and zero-initializes a new Unit object of 'size' bytes (the concrete
 * type's struct, at least sizeof(Unit) per the assert below) and sets every field
 * whose "unset" value is not zero. Returns NULL on allocation failure. The unit is
 * not yet registered under any name; see unit_add_name(). */
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        /* Fields whose initial value differs from all-zeroes. */
        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;

        /* Accounting/BPF map file descriptors start out invalid. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* The start rate limit comes from manager defaults; the auto-stop
         * rate limit is fixed at 16 per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
116
117 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
118 _cleanup_(unit_freep) Unit *u = NULL;
119 int r;
120
121 u = unit_new(m, size);
122 if (!u)
123 return -ENOMEM;
124
125 r = unit_add_name(u, name);
126 if (r < 0)
127 return r;
128
129 *ret = TAKE_PTR(u);
130
131 return r;
132 }
133
/* Returns true if 'name' is among the names registered in this unit's name set. */
bool unit_has_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}
140
141 static void unit_init(Unit *u) {
142 CGroupContext *cc;
143 ExecContext *ec;
144 KillContext *kc;
145
146 assert(u);
147 assert(u->manager);
148 assert(u->type >= 0);
149
150 cc = unit_get_cgroup_context(u);
151 if (cc) {
152 cgroup_context_init(cc);
153
154 /* Copy in the manager defaults into the cgroup
155 * context, _before_ the rest of the settings have
156 * been initialized */
157
158 cc->cpu_accounting = u->manager->default_cpu_accounting;
159 cc->io_accounting = u->manager->default_io_accounting;
160 cc->ip_accounting = u->manager->default_ip_accounting;
161 cc->blockio_accounting = u->manager->default_blockio_accounting;
162 cc->memory_accounting = u->manager->default_memory_accounting;
163 cc->tasks_accounting = u->manager->default_tasks_accounting;
164 cc->ip_accounting = u->manager->default_ip_accounting;
165
166 if (u->type != UNIT_SLICE)
167 cc->tasks_max = u->manager->default_tasks_max;
168 }
169
170 ec = unit_get_exec_context(u);
171 if (ec) {
172 exec_context_init(ec);
173
174 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
175 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
176 }
177
178 kc = unit_get_kill_context(u);
179 if (kc)
180 kill_context_init(kc);
181
182 if (UNIT_VTABLE(u)->init)
183 UNIT_VTABLE(u)->init(u);
184 }
185
/* Registers an additional name for the unit, both in the unit's own name set and in
 * the manager's global unit table. If the unit does not have a type yet (freshly
 * allocated), the first successfully added name also determines its type, id and
 * instance, and triggers unit_init(). Returns 0 on success (also when the name was
 * already set on this unit), negative errno on failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name can only be instantiated if we know our instance string. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Then there's nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Name taken by some other unit? Refuse. */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* All names of a unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        /* Instantiated names are only acceptable for template-capable unit types. */
        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        /* Second and further names are aliases; only some unit types permit them. */
        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both containers stay consistent. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name added: it defines the unit's type, id and instance. */
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        /* Ownership of 's' now rests with u->names (and possibly u->id); don't free it. */
        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
268
269 int unit_choose_id(Unit *u, const char *name) {
270 _cleanup_free_ char *t = NULL;
271 char *s, *i;
272 int r;
273
274 assert(u);
275 assert(name);
276
277 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
278
279 if (!u->instance)
280 return -EINVAL;
281
282 r = unit_name_replace_instance(name, u->instance, &t);
283 if (r < 0)
284 return r;
285
286 name = t;
287 }
288
289 /* Selects one of the names of this unit as the id */
290 s = set_get(u->names, (char*) name);
291 if (!s)
292 return -ENOENT;
293
294 /* Determine the new instance from the new id */
295 r = unit_name_to_instance(s, &i);
296 if (r < 0)
297 return r;
298
299 u->id = s;
300
301 free(u->instance);
302 u->instance = i;
303
304 unit_add_to_dbus_queue(u);
305
306 return 0;
307 }
308
309 int unit_set_description(Unit *u, const char *description) {
310 int r;
311
312 assert(u);
313
314 r = free_and_strdup(&u->description, empty_to_null(description));
315 if (r < 0)
316 return r;
317 if (r > 0)
318 unit_add_to_dbus_queue(u);
319
320 return 0;
321 }
322
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* A pending or no-op job always keeps the unit around. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units are never collected. */
        if (u->perpetual)
                return false;

        /* Live bus references keep the unit loaded. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                /* Note that an error (r < 0) also falls into this branch: when in doubt,
                 * keep the unit rather than collect it. */
                if (r <= 0)
                        return false;
        }

        /* Finally, give the unit type a chance to veto collection. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
391
392 void unit_add_to_load_queue(Unit *u) {
393 assert(u);
394 assert(u->type != _UNIT_TYPE_INVALID);
395
396 if (u->load_state != UNIT_STUB || u->in_load_queue)
397 return;
398
399 LIST_PREPEND(load_queue, u->manager->load_queue, u);
400 u->in_load_queue = true;
401 }
402
403 void unit_add_to_cleanup_queue(Unit *u) {
404 assert(u);
405
406 if (u->in_cleanup_queue)
407 return;
408
409 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
410 u->in_cleanup_queue = true;
411 }
412
413 void unit_add_to_gc_queue(Unit *u) {
414 assert(u);
415
416 if (u->in_gc_queue || u->in_cleanup_queue)
417 return;
418
419 if (!unit_may_gc(u))
420 return;
421
422 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
423 u->in_gc_queue = true;
424 }
425
426 void unit_add_to_dbus_queue(Unit *u) {
427 assert(u);
428 assert(u->type != _UNIT_TYPE_INVALID);
429
430 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
431 return;
432
433 /* Shortcut things if nobody cares */
434 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
435 sd_bus_track_count(u->bus_track) <= 0 &&
436 set_isempty(u->manager->private_buses)) {
437 u->sent_dbus_new_signal = true;
438 return;
439 }
440
441 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
442 u->in_dbus_queue = true;
443 }
444
445 static void bidi_set_free(Unit *u, Hashmap *h) {
446 Unit *other;
447 Iterator i;
448 void *v;
449
450 assert(u);
451
452 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
453
454 HASHMAP_FOREACH_KEY(v, other, h, i) {
455 UnitDependency d;
456
457 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
458 hashmap_remove(other->dependencies[d], u);
459
460 unit_add_to_gc_queue(other);
461 }
462
463 hashmap_free(h);
464 }
465
466 static void unit_remove_transient(Unit *u) {
467 char **i;
468
469 assert(u);
470
471 if (!u->transient)
472 return;
473
474 if (u->fragment_path)
475 (void) unlink(u->fragment_path);
476
477 STRV_FOREACH(i, u->dropin_paths) {
478 _cleanup_free_ char *p = NULL, *pp = NULL;
479
480 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
481 if (!p)
482 continue;
483
484 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
485 if (!pp)
486 continue;
487
488 /* Only drop transient drop-ins */
489 if (!path_equal(u->manager->lookup_paths.transient, pp))
490 continue;
491
492 (void) unlink(*i);
493 (void) rmdir(p);
494 }
495 }
496
/* Drops this unit from the manager's units_requiring_mounts_for index and frees the
 * unit's own requires_mounts_for hashmap, including the path strings it owns as keys. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* We own the key strings; steal them one at a time so each gets freed
                 * on scope exit. */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk every prefix of the path and drop us from the per-prefix
                         * unit set in the manager's index. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* We were the last unit for this prefix: remove the whole
                                 * entry, including the manager-owned key string 'y'. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
530
531 static void unit_done(Unit *u) {
532 ExecContext *ec;
533 CGroupContext *cc;
534
535 assert(u);
536
537 if (u->type < 0)
538 return;
539
540 if (UNIT_VTABLE(u)->done)
541 UNIT_VTABLE(u)->done(u);
542
543 ec = unit_get_exec_context(u);
544 if (ec)
545 exec_context_done(ec);
546
547 cc = unit_get_cgroup_context(u);
548 if (cc)
549 cgroup_context_done(cc);
550 }
551
/* Destroys a unit object: detaches it from the manager (name tables, queues,
 * per-type lists), cancels its jobs, drops dependencies in both directions,
 * releases cgroup/bus/BPF resources, and finally frees all owned memory.
 * Safe to call with NULL. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        u->transient_file = safe_fclose(u->transient_file);

        /* While the manager is reloading, keep the transient unit files around. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Remove every one of our names from the manager's global unit table. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Cancel and free any pending jobs. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Drop all dependency sets, fixing up inverse pointers in the other units. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        /* Like the transient file above, state files survive a reload. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        /* Drop the reference we hold on our slice, and the references others hold on us. */
        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Unlink from every manager list/queue we might be enqueued on. */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        /* Close accounting/BPF map fds; these were initialized to -1 in unit_new(),
         * which safe_close() presumably tolerates — systemd convention. */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        /* Finally release owned memory. Note that u->id is not freed separately: it
         * points at one of the strings owned by u->names (see unit_add_name()) and is
         * released by set_free_free() below. */
        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}
671
672 UnitActiveState unit_active_state(Unit *u) {
673 assert(u);
674
675 if (u->load_state == UNIT_MERGED)
676 return unit_active_state(unit_follow_merge(u));
677
678 /* After a reload it might happen that a unit is not correctly
679 * loaded but still has a process around. That's why we won't
680 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
681
682 return UNIT_VTABLE(u)->active_state(u);
683 }
684
/* Returns the unit's fine-grained, type-specific substate string, as provided by
 * the unit type's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
690
691 static int set_complete_move(Set **s, Set **other) {
692 assert(s);
693 assert(other);
694
695 if (!other)
696 return 0;
697
698 if (*s)
699 return set_move(*s, *other);
700 else
701 *s = TAKE_PTR(*other);
702
703 return 0;
704 }
705
706 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
707 assert(s);
708 assert(other);
709
710 if (!*other)
711 return 0;
712
713 if (*s)
714 return hashmap_move(*s, *other);
715 else
716 *s = TAKE_PTR(*other);
717
718 return 0;
719 }
720
/* Moves all names of 'other' into 'u' and repoints the manager's global unit table
 * so each of those names resolves to 'u'. Afterwards 'other' has no names and no id. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Free whatever is left behind in other's set (set_move() may leave entries
         * that already existed in u->names) together with the set itself. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Update the global table; the assert_se() requires each replace to succeed
         * with 0, i.e. every name is expected to be registered already. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
742
743 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
744 unsigned n_reserve;
745
746 assert(u);
747 assert(other);
748 assert(d < _UNIT_DEPENDENCY_MAX);
749
750 /*
751 * If u does not have this dependency set allocated, there is no need
752 * to reserve anything. In that case other's set will be transferred
753 * as a whole to u by complete_move().
754 */
755 if (!u->dependencies[d])
756 return 0;
757
758 /* merge_dependencies() will skip a u-on-u dependency */
759 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
760
761 return hashmap_reserve(u->dependencies[d], n_reserve);
762 }
763
/* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u'.
 * The caller must have reserved space in u's set beforehand (see reserve_dependencies());
 * the final move is asserted not to fail. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
827
/* Merges unit 'other' into 'u': all of other's names become aliases of u, references
 * and dependencies pointing at other are redirected to u, and other is marked
 * UNIT_MERGED and queued for cleanup. Fails with -EINVAL/-EEXIST when the units
 * have different types or incompatible instance-ness, when other is already loaded,
 * active or has jobs, or when the type does not permit aliases. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both must be instanced, or both non-instanced. */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Keep a stack copy of the id for logging: merge_names() below clears other's names. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        /* NOTE(review): load_state was set to UNIT_MERGED just above, so this check is
         * always true; presumably it was meant to test the pre-merge state — confirm
         * the intent before changing it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
905
906 int unit_merge_by_name(Unit *u, const char *name) {
907 _cleanup_free_ char *s = NULL;
908 Unit *other;
909 int r;
910
911 assert(u);
912 assert(name);
913
914 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
915 if (!u->instance)
916 return -EINVAL;
917
918 r = unit_name_replace_instance(name, u->instance, &s);
919 if (r < 0)
920 return r;
921
922 name = s;
923 }
924
925 other = manager_get_unit(u->manager, name);
926 if (other)
927 return unit_merge(u, other);
928
929 return unit_add_name(u, name);
930 }
931
932 Unit* unit_follow_merge(Unit *u) {
933 assert(u);
934
935 while (u->load_state == UNIT_MERGED)
936 assert_se(u = u->merged_into);
937
938 return u;
939 }
940
/* Adds the implicit dependencies a unit needs because of its ExecContext 'c':
 * mount dependencies for the configured working/root directories and per-type
 * directories, and — for the system manager only — ordering against
 * tmpfiles-setup (when PrivateTmp= is on) and journald's socket (when output
 * goes to the journal/kmsg/syslog). Returns 0 on success, negative errno on
 * failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Directories from c->directories[dt] live below a per-type prefix taken
         * from the manager. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies are only added by the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Neither stdout nor stderr goes to the journal/kmsg/syslog? Then we're done. */
        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1020
1021 const char *unit_description(Unit *u) {
1022 assert(u);
1023
1024 if (u->description)
1025 return u->description;
1026
1027 return strna(u->id);
1028 }
1029
1030 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1031 const struct {
1032 UnitDependencyMask mask;
1033 const char *name;
1034 } table[] = {
1035 { UNIT_DEPENDENCY_FILE, "file" },
1036 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1037 { UNIT_DEPENDENCY_DEFAULT, "default" },
1038 { UNIT_DEPENDENCY_UDEV, "udev" },
1039 { UNIT_DEPENDENCY_PATH, "path" },
1040 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1041 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1042 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1043 };
1044 size_t i;
1045
1046 assert(f);
1047 assert(kind);
1048 assert(space);
1049
1050 for (i = 0; i < ELEMENTSOF(table); i++) {
1051
1052 if (mask == 0)
1053 break;
1054
1055 if ((mask & table[i].mask) == table[i].mask) {
1056 if (*space)
1057 fputc(' ', f);
1058 else
1059 *space = true;
1060
1061 fputs(kind, f);
1062 fputs("-", f);
1063 fputs(table[i].name, f);
1064
1065 mask &= ~table[i].mask;
1066 }
1067 }
1068
1069 assert(mask == 0);
1070 }
1071
1072 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1073 char *t, **j;
1074 UnitDependency d;
1075 Iterator i;
1076 const char *prefix2;
1077 char
1078 timestamp0[FORMAT_TIMESTAMP_MAX],
1079 timestamp1[FORMAT_TIMESTAMP_MAX],
1080 timestamp2[FORMAT_TIMESTAMP_MAX],
1081 timestamp3[FORMAT_TIMESTAMP_MAX],
1082 timestamp4[FORMAT_TIMESTAMP_MAX],
1083 timespan[FORMAT_TIMESPAN_MAX];
1084 Unit *following;
1085 _cleanup_set_free_ Set *following_set = NULL;
1086 const char *n;
1087 CGroupMask m;
1088 int r;
1089
1090 assert(u);
1091 assert(u->type >= 0);
1092
1093 prefix = strempty(prefix);
1094 prefix2 = strjoina(prefix, "\t");
1095
1096 fprintf(f,
1097 "%s-> Unit %s:\n"
1098 "%s\tDescription: %s\n"
1099 "%s\tInstance: %s\n"
1100 "%s\tUnit Load State: %s\n"
1101 "%s\tUnit Active State: %s\n"
1102 "%s\tState Change Timestamp: %s\n"
1103 "%s\tInactive Exit Timestamp: %s\n"
1104 "%s\tActive Enter Timestamp: %s\n"
1105 "%s\tActive Exit Timestamp: %s\n"
1106 "%s\tInactive Enter Timestamp: %s\n"
1107 "%s\tMay GC: %s\n"
1108 "%s\tNeed Daemon Reload: %s\n"
1109 "%s\tTransient: %s\n"
1110 "%s\tPerpetual: %s\n"
1111 "%s\tGarbage Collection Mode: %s\n"
1112 "%s\tSlice: %s\n"
1113 "%s\tCGroup: %s\n"
1114 "%s\tCGroup realized: %s\n",
1115 prefix, u->id,
1116 prefix, unit_description(u),
1117 prefix, strna(u->instance),
1118 prefix, unit_load_state_to_string(u->load_state),
1119 prefix, unit_active_state_to_string(unit_active_state(u)),
1120 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1121 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1122 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1123 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1124 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1125 prefix, yes_no(unit_may_gc(u)),
1126 prefix, yes_no(unit_need_daemon_reload(u)),
1127 prefix, yes_no(u->transient),
1128 prefix, yes_no(u->perpetual),
1129 prefix, collect_mode_to_string(u->collect_mode),
1130 prefix, strna(unit_slice_name(u)),
1131 prefix, strna(u->cgroup_path),
1132 prefix, yes_no(u->cgroup_realized));
1133
1134 if (u->cgroup_realized_mask != 0) {
1135 _cleanup_free_ char *s = NULL;
1136 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1137 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1138 }
1139 if (u->cgroup_enabled_mask != 0) {
1140 _cleanup_free_ char *s = NULL;
1141 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1142 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1143 }
1144 m = unit_get_own_mask(u);
1145 if (m != 0) {
1146 _cleanup_free_ char *s = NULL;
1147 (void) cg_mask_to_string(m, &s);
1148 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1149 }
1150 m = unit_get_members_mask(u);
1151 if (m != 0) {
1152 _cleanup_free_ char *s = NULL;
1153 (void) cg_mask_to_string(m, &s);
1154 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1155 }
1156
1157 SET_FOREACH(t, u->names, i)
1158 fprintf(f, "%s\tName: %s\n", prefix, t);
1159
1160 if (!sd_id128_is_null(u->invocation_id))
1161 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1162 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1163
1164 STRV_FOREACH(j, u->documentation)
1165 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1166
1167 following = unit_following(u);
1168 if (following)
1169 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1170
1171 r = unit_following_set(u, &following_set);
1172 if (r >= 0) {
1173 Unit *other;
1174
1175 SET_FOREACH(other, following_set, i)
1176 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1177 }
1178
1179 if (u->fragment_path)
1180 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1181
1182 if (u->source_path)
1183 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1184
1185 STRV_FOREACH(j, u->dropin_paths)
1186 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1187
1188 if (u->failure_action != EMERGENCY_ACTION_NONE)
1189 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1190 if (u->success_action != EMERGENCY_ACTION_NONE)
1191 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1192
1193 if (u->job_timeout != USEC_INFINITY)
1194 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1195
1196 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1197 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1198
1199 if (u->job_timeout_reboot_arg)
1200 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1201
1202 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1203 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1204
1205 if (dual_timestamp_is_set(&u->condition_timestamp))
1206 fprintf(f,
1207 "%s\tCondition Timestamp: %s\n"
1208 "%s\tCondition Result: %s\n",
1209 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1210 prefix, yes_no(u->condition_result));
1211
1212 if (dual_timestamp_is_set(&u->assert_timestamp))
1213 fprintf(f,
1214 "%s\tAssert Timestamp: %s\n"
1215 "%s\tAssert Result: %s\n",
1216 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1217 prefix, yes_no(u->assert_result));
1218
1219 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1220 UnitDependencyInfo di;
1221 Unit *other;
1222
1223 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1224 bool space = false;
1225
1226 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1227
1228 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1229 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1230
1231 fputs(")\n", f);
1232 }
1233 }
1234
1235 if (!hashmap_isempty(u->requires_mounts_for)) {
1236 UnitDependencyInfo di;
1237 const char *path;
1238
1239 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1240 bool space = false;
1241
1242 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1243
1244 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1245 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1246
1247 fputs(")\n", f);
1248 }
1249 }
1250
1251 if (u->load_state == UNIT_LOADED) {
1252
1253 fprintf(f,
1254 "%s\tStopWhenUnneeded: %s\n"
1255 "%s\tRefuseManualStart: %s\n"
1256 "%s\tRefuseManualStop: %s\n"
1257 "%s\tDefaultDependencies: %s\n"
1258 "%s\tOnFailureJobMode: %s\n"
1259 "%s\tIgnoreOnIsolate: %s\n",
1260 prefix, yes_no(u->stop_when_unneeded),
1261 prefix, yes_no(u->refuse_manual_start),
1262 prefix, yes_no(u->refuse_manual_stop),
1263 prefix, yes_no(u->default_dependencies),
1264 prefix, job_mode_to_string(u->on_failure_job_mode),
1265 prefix, yes_no(u->ignore_on_isolate));
1266
1267 if (UNIT_VTABLE(u)->dump)
1268 UNIT_VTABLE(u)->dump(u, f, prefix2);
1269
1270 } else if (u->load_state == UNIT_MERGED)
1271 fprintf(f,
1272 "%s\tMerged into: %s\n",
1273 prefix, u->merged_into->id);
1274 else if (u->load_state == UNIT_ERROR)
1275 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1276
1277 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1278 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1279
1280 if (u->job)
1281 job_dump(u->job, f, prefix2);
1282
1283 if (u->nop_job)
1284 job_dump(u->nop_job, f, prefix2);
1285 }
1286
1287 /* Common implementation for multiple backends */
1288 int unit_load_fragment_and_dropin(Unit *u) {
1289 int r;
1290
1291 assert(u);
1292
1293 /* Load a .{service,socket,...} file */
1294 r = unit_load_fragment(u);
1295 if (r < 0)
1296 return r;
1297
1298 if (u->load_state == UNIT_STUB)
1299 return -ENOENT;
1300
1301 /* Load drop-in directory data. If u is an alias, we might be reloading the
1302 * target unit needlessly. But we cannot be sure which drops-ins have already
1303 * been loaded and which not, at least without doing complicated book-keeping,
1304 * so let's always reread all drop-ins. */
1305 return unit_load_dropin(unit_follow_merge(u));
1306 }
1307
1308 /* Common implementation for multiple backends */
1309 int unit_load_fragment_and_dropin_optional(Unit *u) {
1310 int r;
1311
1312 assert(u);
1313
1314 /* Same as unit_load_fragment_and_dropin(), but whether
1315 * something can be loaded or not doesn't matter. */
1316
1317 /* Load a .service file */
1318 r = unit_load_fragment(u);
1319 if (r < 0)
1320 return r;
1321
1322 if (u->load_state == UNIT_STUB)
1323 u->load_state = UNIT_LOADED;
1324
1325 /* Load drop-in directory data */
1326 return unit_load_dropin(unit_follow_merge(u));
1327 }
1328
1329 void unit_add_to_target_deps_queue(Unit *u) {
1330 Manager *m = u->manager;
1331
1332 assert(u);
1333
1334 if (u->in_target_deps_queue)
1335 return;
1336
1337 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1338 u->in_target_deps_queue = true;
1339 }
1340
1341 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1342 assert(u);
1343 assert(target);
1344
1345 if (target->type != UNIT_TARGET)
1346 return 0;
1347
1348 /* Only add the dependency if both units are loaded, so that
1349 * that loop check below is reliable */
1350 if (u->load_state != UNIT_LOADED ||
1351 target->load_state != UNIT_LOADED)
1352 return 0;
1353
1354 /* If either side wants no automatic dependencies, then let's
1355 * skip this */
1356 if (!u->default_dependencies ||
1357 !target->default_dependencies)
1358 return 0;
1359
1360 /* Don't create loops */
1361 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1362 return 0;
1363
1364 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1365 }
1366
1367 static int unit_add_slice_dependencies(Unit *u) {
1368 UnitDependencyMask mask;
1369 assert(u);
1370
1371 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1372 return 0;
1373
1374 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1375 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1376 relationship). */
1377 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1378
1379 if (UNIT_ISSET(u->slice))
1380 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1381
1382 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1383 return 0;
1384
1385 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1386 }
1387
/* For each path listed in RequiresMountsFor=, add After= — and, when the mount
 * is backed by a unit file, Requires= — dependencies on the .mount units that
 * cover the path and each of its prefixes. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                /* Walk every prefix of the path, including the path itself. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* Never depend on ourselves. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Ordering dependency in all cases... */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* ...but a requirement dependency only for mounts backed by a unit file. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1437
1438 static int unit_add_startup_units(Unit *u) {
1439 CGroupContext *c;
1440 int r;
1441
1442 c = unit_get_cgroup_context(u);
1443 if (!c)
1444 return 0;
1445
1446 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1447 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1448 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1449 return 0;
1450
1451 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1452 if (r < 0)
1453 return r;
1454
1455 return set_put(u->manager->startup_units, u);
1456 }
1457
/* Load the unit's configuration (fragment, drop-ins, type-specific data) and
 * add the dependencies implied by it. On failure the unit is moved to
 * UNIT_NOT_FOUND or UNIT_ERROR and the error code is stored in u->load_error. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading the unit right now, so drop it from the load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/failed earlier)? Then there is nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* A transient unit's configuration was written to an in-memory file:
         * flush and close it so the type-specific load can read it back. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Let the unit type's own implementation do the actual loading. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* Still a stub after loading → no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* Isolation replaces everything else, hence it cannot be combined
                 * with multiple OnFailure= units. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: exactly the merged units carry a merged_into pointer. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* No configuration found → NOT_FOUND, anything else → ERROR; remember
         * the error code so it can be reported later. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1538
/* Evaluate a list of conditions (or asserts): every non-trigger condition must
 * hold, and — if any trigger ("|") conditions exist — at least one of them must
 * hold. An empty list counts as true. to_string() is used only for logging. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        int triggered = -1; /* -1 = no trigger condition seen yet */

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        /* Evaluation error: log it; the condition is treated as failed below. */
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* Any failed (or erroring) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Remember whether any trigger condition has held so far. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and none of them held. */
        return triggered != 0;
}
1582
1583 static bool unit_condition_test(Unit *u) {
1584 assert(u);
1585
1586 dual_timestamp_get(&u->condition_timestamp);
1587 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1588
1589 return u->condition_result;
1590 }
1591
1592 static bool unit_assert_test(Unit *u) {
1593 assert(u);
1594
1595 dual_timestamp_get(&u->assert_timestamp);
1596 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1597
1598 return u->assert_result;
1599 }
1600
/* Print a console status line for u via the manager, filling the unit's
 * description into the (runtime-selected, hence non-literal) format string.
 * The warning pragmas must bracket exactly this one call. */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1606
1607 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1608 const char *format;
1609 const UnitStatusMessageFormats *format_table;
1610
1611 assert(u);
1612 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1613
1614 if (t != JOB_RELOAD) {
1615 format_table = &UNIT_VTABLE(u)->status_message_formats;
1616 if (format_table) {
1617 format = format_table->starting_stopping[t == JOB_STOP];
1618 if (format)
1619 return format;
1620 }
1621 }
1622
1623 /* Return generic strings */
1624 if (t == JOB_START)
1625 return "Starting %s.";
1626 else if (t == JOB_STOP)
1627 return "Stopping %s.";
1628 else
1629 return "Reloading %s.";
1630 }
1631
/* Print a console status message for a unit that is starting or stopping. */
static void unit_status_print_starting_stopping(Unit *u, JobType t) {
        const char *format;

        assert(u);

        /* Reload status messages have traditionally not been printed to console. */
        if (!IN_SET(t, JOB_START, JOB_STOP))
                return;

        format = unit_get_status_message_format(u, t);

        /* The format string is selected at runtime, hence non-literal by design. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, "", format);
        REENABLE_WARNING;
}
1647
/* Emit a structured journal entry announcing that a unit is starting,
 * stopping or reloading. */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* Skipped when logging goes to the console — presumably to avoid
         * duplicating the console status output; confirm against callers. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* format is chosen at runtime, hence non-literal by design. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the catalog message ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP  ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
                               "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid,
                   NULL);
}
1685
1686 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1687 assert(u);
1688 assert(t >= 0);
1689 assert(t < _JOB_TYPE_MAX);
1690
1691 unit_status_log_starting_stopping_reloading(u, t);
1692 unit_status_print_starting_stopping(u, t);
1693 }
1694
1695 int unit_start_limit_test(Unit *u) {
1696 assert(u);
1697
1698 if (ratelimit_test(&u->start_limit)) {
1699 u->start_limit_hit = false;
1700 return 0;
1701 }
1702
1703 log_unit_warning(u, "Start request repeated too quickly.");
1704 u->start_limit_hit = true;
1705
1706 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1707 }
1708
1709 bool unit_shall_confirm_spawn(Unit *u) {
1710 assert(u);
1711
1712 if (manager_is_confirm_spawn_disabled(u->manager))
1713 return false;
1714
1715 /* For some reasons units remaining in the same process group
1716 * as PID 1 fail to acquire the console even if it's not used
1717 * by any process. So skip the confirmation question for them. */
1718 return !unit_get_exec_context(u)->same_pgrp;
1719 }
1720
/* Verify that every BindsTo= dependency combined with After= is actually
 * active; returns false (refuse start) otherwise. */
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                /* Only BindsTo= that is also ordered After= can be checked without races. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1746
/* Start a unit (or redirect the request to the unit it follows).
 *
 * Errors:
 *         -EBADR:      This unit type does not support starting.
 *         -EALREADY:   Unit is already started.
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *         -ECANCELED:  Too many requests for now.
 *         -EPROTO:     Assert failed
 *         -EINVAL:     Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1828
1829 bool unit_can_start(Unit *u) {
1830 assert(u);
1831
1832 if (u->load_state != UNIT_LOADED)
1833 return false;
1834
1835 if (!unit_supported(u))
1836 return false;
1837
1838 return !!UNIT_VTABLE(u)->start;
1839 }
1840
1841 bool unit_can_isolate(Unit *u) {
1842 assert(u);
1843
1844 return unit_can_start(u) &&
1845 u->allow_isolate;
1846 }
1847
1848 /* Errors:
1849 * -EBADR: This unit type does not support stopping.
1850 * -EALREADY: Unit is already stopped.
1851 * -EAGAIN: An operation is already in progress. Retry later.
1852 */
1853 int unit_stop(Unit *u) {
1854 UnitActiveState state;
1855 Unit *following;
1856
1857 assert(u);
1858
1859 state = unit_active_state(u);
1860 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1861 return -EALREADY;
1862
1863 following = unit_following(u);
1864 if (following) {
1865 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1866 return unit_stop(following);
1867 }
1868
1869 if (!UNIT_VTABLE(u)->stop)
1870 return -EBADR;
1871
1872 unit_add_to_dbus_queue(u);
1873
1874 return UNIT_VTABLE(u)->stop(u);
1875 }
1876
1877 bool unit_can_stop(Unit *u) {
1878 assert(u);
1879
1880 if (!unit_supported(u))
1881 return false;
1882
1883 if (u->perpetual)
1884 return false;
1885
1886 return !!UNIT_VTABLE(u)->stop;
1887 }
1888
1889 /* Errors:
1890 * -EBADR: This unit type does not support reloading.
1891 * -ENOEXEC: Unit is not started.
1892 * -EAGAIN: An operation is already in progress. Retry later.
1893 */
1894 int unit_reload(Unit *u) {
1895 UnitActiveState state;
1896 Unit *following;
1897
1898 assert(u);
1899
1900 if (u->load_state != UNIT_LOADED)
1901 return -EINVAL;
1902
1903 if (!unit_can_reload(u))
1904 return -EBADR;
1905
1906 state = unit_active_state(u);
1907 if (state == UNIT_RELOADING)
1908 return -EALREADY;
1909
1910 if (state != UNIT_ACTIVE) {
1911 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1912 return -ENOEXEC;
1913 }
1914
1915 following = unit_following(u);
1916 if (following) {
1917 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1918 return unit_reload(following);
1919 }
1920
1921 unit_add_to_dbus_queue(u);
1922
1923 if (!UNIT_VTABLE(u)->reload) {
1924 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1925 unit_notify(u, unit_active_state(u), unit_active_state(u), true);
1926 return 0;
1927 }
1928
1929 return UNIT_VTABLE(u)->reload(u);
1930 }
1931
1932 bool unit_can_reload(Unit *u) {
1933 assert(u);
1934
1935 if (UNIT_VTABLE(u)->can_reload)
1936 return UNIT_VTABLE(u)->can_reload(u);
1937
1938 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1939 return true;
1940
1941 return UNIT_VTABLE(u)->reload;
1942 }
1943
/* If the unit has StopWhenUnneeded= set and nothing requires, wants, binds to
 * or is requisite of it anymore, enqueue a stop job for it (rate-limited to
 * avoid stop loops). */
static void unit_check_unneeded(Unit *u) {

        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        /* The dependency types that count as "somebody still needs us". */
        static const UnitDependency needed_dependencies[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        unsigned j;
        int r;

        assert(u);

        /* If this service shall be shut down when unneeded then do
         * so. */

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return;

        /* Bail out as soon as any dependent unit is still (or about to become) active. */
        for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
                        if (unit_active_or_pending(other) || unit_will_restart(other))
                                return;
        }

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
                return;
        }

        log_unit_info(u, "Unit not needed anymore. Stopping.");

        /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
1994
/* If the unit is active but one of the units it BindsTo= has gone down (with
 * no pending job that might bring it back), enqueue a stop job for this unit
 * too (rate-limited to avoid stop loops). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A queued job will resolve the situation on its own. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                /* A pending job on the other unit may yet change its state. */
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* NOTE(review): "other" is read after this loop (log messages
                 * below); it is guaranteed set only because stop==true implies
                 * the break above was taken. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2045
/* The unit just became active outside of a regular job (e.g. it showed up on
 * its own): enqueue start jobs for its requirement-style dependencies and stop
 * jobs for its conflicts. Dependencies this unit is ordered After= are skipped
 * — presumably because they were supposed to be up already; confirm against
 * the job engine. Job enqueue errors are deliberately ignored here. */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= pulls in hard, failing the others' start is their problem. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= is best-effort, hence JOB_FAIL rather than JOB_REPLACE. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units get stopped. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2077
2078 static void retroactively_stop_dependencies(Unit *u) {
2079 Unit *other;
2080 Iterator i;
2081 void *v;
2082
2083 assert(u);
2084 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2085
2086 /* Pull down units which are bound to us recursively if enabled */
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2089 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2090 }
2091
2092 static void check_unneeded_dependencies(Unit *u) {
2093 Unit *other;
2094 Iterator i;
2095 void *v;
2096
2097 assert(u);
2098 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2099
2100 /* Garbage collect services that might not be needed anymore, if enabled */
2101 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2102 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 unit_check_unneeded(other);
2104 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2106 unit_check_unneeded(other);
2107 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2108 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2109 unit_check_unneeded(other);
2110 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2111 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2112 unit_check_unneeded(other);
2113 }
2114
2115 void unit_start_on_failure(Unit *u) {
2116 Unit *other;
2117 Iterator i;
2118 void *v;
2119
2120 assert(u);
2121
2122 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2123 return;
2124
2125 log_unit_info(u, "Triggering OnFailure= dependencies.");
2126
2127 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2128 int r;
2129
2130 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2131 if (r < 0)
2132 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2133 }
2134 }
2135
2136 void unit_trigger_notify(Unit *u) {
2137 Unit *other;
2138 Iterator i;
2139 void *v;
2140
2141 assert(u);
2142
2143 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2144 if (UNIT_VTABLE(other)->trigger_notify)
2145 UNIT_VTABLE(other)->trigger_notify(other, u);
2146 }
2147
/* Log the resources (CPU time, IP traffic) a unit consumed, both as a human
 * readable message and as structured journal fields. */
static int unit_log_resources(Unit *u) {

        /* 1 slot per IP metric + CPU time, plus 4 fixed trailing fields (filled in below). */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        size_t n_message_parts = 0, n_iovec = 0;
        /* At most 3 human-readable fragments (CPU, ingress, egress) + NULL terminator. */
        char* message_parts[3 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "received " : "Received ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else if (m == CGROUP_IP_EGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else
                        continue;
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed");
        else {
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                t = strjoina("MESSAGE=", u->id, ": ", joined);
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries (the first n_message_parts/n_iovec). */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2275
2276 static void unit_update_on_console(Unit *u) {
2277 bool b;
2278
2279 assert(u);
2280
2281 b = unit_needs_console(u);
2282 if (u->on_console == b)
2283 return;
2284
2285 u->on_console = b;
2286 if (b)
2287 manager_ref_console(u->manager);
2288 else
2289 manager_unref_console(u->manager);
2290
2291 }
2292
/* Called by unit type implementations on every low-level state change. Updates timestamps, finishes or
 * invalidates jobs, retroactively starts/stops dependencies, emits audit/plymouth/log records, and queues
 * bookkeeping work. "os" is the old high-level state, "ns" the new one; "reload_success" reports the outcome
 * of a reload operation (only relevant for reload-type jobs). */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        bool unexpected;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        unit_update_on_console(u);

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this
                         * job. Let's see if we can run it now if it
                         * failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a
                 * finished job, or maybe contradicts a running job and
                 * hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                /* The unit went somewhere other than "activating" while a start job was
                                 * running — the change didn't come from the job. */
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                /* No job installed — any state change is by definition not job-driven. */
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being
                 * requested by a job, then let's retroactively start
                 * or stop dependencies. We skip that step when
                 * deserializing, since we don't want to create any
                 * additional jobs just because something is already
                 * activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written
                                 * write it now, so that we always have a nice
                                 * pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED)
                        (void) emergency_action(u->manager, u->failure_action, u->reboot_arg, "unit failed");
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE)
                        (void) emergency_action(u->manager, u->success_action, u->reboot_arg, "unit succeeded");
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2491
/* Registers "pid" as belonging to this unit, so that SIGCHLD/cgroup events for it can be routed back to us.
 * The manager's watch_pids table has two kinds of keys: "pid" maps to a single Unit, and "-pid" maps to a
 * NULL-terminated Unit* array used when multiple units watch the same PID. Returns 0 on success, negative
 * errno on failure. */
int unit_watch_pid(Unit *u, pid_t pid) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array: n old entries + ourselves + NULL terminator. */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Also record the PID in our own per-unit set. */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2556
2557 void unit_unwatch_pid(Unit *u, pid_t pid) {
2558 Unit **array;
2559
2560 assert(u);
2561 assert(pid_is_valid(pid));
2562
2563 /* First let's drop the unit in case it's keyed as "pid". */
2564 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2565
2566 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2567 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2568 if (array) {
2569 size_t n, m = 0;
2570
2571 /* Let's iterate through the array, dropping our own entry */
2572 for (n = 0; array[n]; n++)
2573 if (array[n] != u)
2574 array[m++] = array[n];
2575 array[m] = NULL;
2576
2577 if (m == 0) {
2578 /* The array is now empty, remove the entire entry */
2579 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2580 free(array);
2581 }
2582 }
2583
2584 (void) set_remove(u->pids, PID_TO_PTR(pid));
2585 }
2586
2587 void unit_unwatch_all_pids(Unit *u) {
2588 assert(u);
2589
2590 while (!set_isempty(u->pids))
2591 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2592
2593 u->pids = set_free(u->pids);
2594 }
2595
/* Drops watch entries for processes that no longer exist. The two "except" PIDs (callers pass e.g. the
 * unit's main and control PID, or 0) are always kept, even if gone. */
void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
        Iterator i;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        SET_FOREACH(e, u->pids, i) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* pid_is_unwaited() is true while the process still exists; drop entries whose process is
                 * gone. NOTE(review): unit_unwatch_pid() removes the entry from u->pids while SET_FOREACH is
                 * iterating it — presumably the set iterator tolerates removal of the current entry; verify
                 * against the hashmap/set implementation. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2614
2615 bool unit_job_is_applicable(Unit *u, JobType j) {
2616 assert(u);
2617 assert(j >= 0 && j < _JOB_TYPE_MAX);
2618
2619 switch (j) {
2620
2621 case JOB_VERIFY_ACTIVE:
2622 case JOB_START:
2623 case JOB_NOP:
2624 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2625 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2626 * jobs for it. */
2627 return true;
2628
2629 case JOB_STOP:
2630 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2631 * external events), hence it makes no sense to permit enqueing such a request either. */
2632 return !u->perpetual;
2633
2634 case JOB_RESTART:
2635 case JOB_TRY_RESTART:
2636 return unit_can_stop(u) && unit_can_start(u);
2637
2638 case JOB_RELOAD:
2639 case JOB_TRY_RELOAD:
2640 return unit_can_reload(u);
2641
2642 case JOB_RELOAD_OR_START:
2643 return unit_can_reload(u) && unit_can_start(u);
2644
2645 default:
2646 assert_not_reached("Invalid job type");
2647 }
2648 }
2649
2650 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2651 assert(u);
2652
2653 /* Only warn about some unit types */
2654 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2655 return;
2656
2657 if (streq_ptr(u->id, other))
2658 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2659 else
2660 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2661 }
2662
/* Records a dependency on "other" in the given dependency hashmap, merging the origin/destination masks
 * into any pre-existing entry. Returns 1 if the map was modified, 0 if nothing needed doing, negative errno
 * on failure. */
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* UnitDependencyInfo packs both bitmask fields into a single pointer-sized value (info.data), so it
         * can be stored directly as the hashmap value without a separate allocation. */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                /* NOTE(review): this tests "existing ⊆ requested" rather than "requested ⊆ existing"; it is
                 * only a correct NOP test because callers pass single-bit (or zero) masks — verify if masks
                 * ever become combined. */
                if ((info.origin_mask & origin_mask) == info.origin_mask &&
                    (info.destination_mask & destination_mask) == info.destination_mask)
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
2709
2710 int unit_add_dependency(
2711 Unit *u,
2712 UnitDependency d,
2713 Unit *other,
2714 bool add_reference,
2715 UnitDependencyMask mask) {
2716
2717 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2718 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2719 [UNIT_WANTS] = UNIT_WANTED_BY,
2720 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2721 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2722 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2723 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2724 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2725 [UNIT_WANTED_BY] = UNIT_WANTS,
2726 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2727 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2728 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2729 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2730 [UNIT_BEFORE] = UNIT_AFTER,
2731 [UNIT_AFTER] = UNIT_BEFORE,
2732 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2733 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2734 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2735 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2736 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2737 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2738 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2739 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2740 };
2741 Unit *original_u = u, *original_other = other;
2742 int r;
2743
2744 assert(u);
2745 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2746 assert(other);
2747
2748 u = unit_follow_merge(u);
2749 other = unit_follow_merge(other);
2750
2751 /* We won't allow dependencies on ourselves. We will not
2752 * consider them an error however. */
2753 if (u == other) {
2754 maybe_warn_about_dependency(original_u, original_other->id, d);
2755 return 0;
2756 }
2757
2758 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2759 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2760 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2761 return 0;
2762 }
2763
2764 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2765 if (r < 0)
2766 return r;
2767
2768 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2769 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2770 if (r < 0)
2771 return r;
2772 }
2773
2774 if (add_reference) {
2775 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2776 if (r < 0)
2777 return r;
2778
2779 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2780 if (r < 0)
2781 return r;
2782 }
2783
2784 unit_add_to_dbus_queue(u);
2785 return 0;
2786 }
2787
2788 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2789 int r;
2790
2791 assert(u);
2792
2793 r = unit_add_dependency(u, d, other, add_reference, mask);
2794 if (r < 0)
2795 return r;
2796
2797 return unit_add_dependency(u, e, other, add_reference, mask);
2798 }
2799
2800 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2801 int r;
2802
2803 assert(u);
2804 assert(name || path);
2805 assert(buf);
2806 assert(ret);
2807
2808 if (!name)
2809 name = basename(path);
2810
2811 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2812 *buf = NULL;
2813 *ret = name;
2814 return 0;
2815 }
2816
2817 if (u->instance)
2818 r = unit_name_replace_instance(name, u->instance, buf);
2819 else {
2820 _cleanup_free_ char *i = NULL;
2821
2822 r = unit_name_to_prefix(u->id, &i);
2823 if (r < 0)
2824 return r;
2825
2826 r = unit_name_replace_instance(name, i, buf);
2827 }
2828 if (r < 0)
2829 return r;
2830
2831 *ret = *buf;
2832 return 0;
2833 }
2834
2835 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2836 _cleanup_free_ char *buf = NULL;
2837 Unit *other;
2838 int r;
2839
2840 assert(u);
2841 assert(name || path);
2842
2843 r = resolve_template(u, name, path, &buf, &name);
2844 if (r < 0)
2845 return r;
2846
2847 r = manager_load_unit(u->manager, name, path, NULL, &other);
2848 if (r < 0)
2849 return r;
2850
2851 return unit_add_dependency(u, d, other, add_reference, mask);
2852 }
2853
2854 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2855 _cleanup_free_ char *buf = NULL;
2856 Unit *other;
2857 int r;
2858
2859 assert(u);
2860 assert(name || path);
2861
2862 r = resolve_template(u, name, path, &buf, &name);
2863 if (r < 0)
2864 return r;
2865
2866 r = manager_load_unit(u->manager, name, path, NULL, &other);
2867 if (r < 0)
2868 return r;
2869
2870 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2871 }
2872
/* Points the unit search logic at an alternative directory via $SYSTEMD_UNIT_PATH.
 * This is mostly for debug purposes. Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) >= 0)
                return 0;

        return -errno;
}
2880
2881 char *unit_dbus_path(Unit *u) {
2882 assert(u);
2883
2884 if (!u->id)
2885 return NULL;
2886
2887 return unit_dbus_path_from_name(u->id);
2888 }
2889
2890 char *unit_dbus_path_invocation_id(Unit *u) {
2891 assert(u);
2892
2893 if (sd_id128_is_null(u->invocation_id))
2894 return NULL;
2895
2896 return unit_dbus_path_from_name(u->invocation_id_string);
2897 }
2898
/* Places the unit into the given slice. Returns 1 on change, 0 if the slice was already set to this value,
 * negative errno if the request is not allowed. */
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra
         * careful, to only allow this for units that actually have a
         * cgroup context. Also, we don't allow to set this for slices
         * (since the parent slice is derived from the name). Make
         * sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        /* Refuse changing the slice once the unit has left the inactive state. */
        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* The special init scope is pinned to the root slice. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_set(&u->slice, u, slice);
        return 1;
}
2935
/* Picks and loads a default slice for the unit if none was configured: instantiated units get a per-template
 * slice (e.g. "system-getty.slice"), everything else goes into system.slice (system manager) or the root
 * slice. NOP if a slice is already set. */
int unit_set_default_slice(Unit *u) {
        _cleanup_free_ char *b = NULL;
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        b = strjoin("system-", escaped, ".slice");
                else
                        b = strappend(escaped, ".slice");
                if (!b)
                        return -ENOMEM;

                slice_name = b;
        } else
                /* The init scope lives directly in the root slice, not in system.slice. */
                slice_name =
                        MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
                        ? SPECIAL_SYSTEM_SLICE
                        : SPECIAL_ROOT_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
2984
2985 const char *unit_slice_name(Unit *u) {
2986 assert(u);
2987
2988 if (!UNIT_ISSET(u->slice))
2989 return NULL;
2990
2991 return UNIT_DEREF(u->slice)->id;
2992 }
2993
2994 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2995 _cleanup_free_ char *t = NULL;
2996 int r;
2997
2998 assert(u);
2999 assert(type);
3000 assert(_found);
3001
3002 r = unit_name_change_suffix(u->id, type, &t);
3003 if (r < 0)
3004 return r;
3005 if (unit_has_name(u, t))
3006 return -EINVAL;
3007
3008 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3009 assert(r < 0 || *_found != u);
3010 return r;
3011 }
3012
/* sd-bus callback for NameOwnerChanged signals: forwards the ownership change to the unit type's handler,
 * if it implements one. Always returns 0 (parse errors are logged and swallowed). */
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *name, *old_owner, *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        /* The bus encodes "no owner" as the empty string; normalize that to NULL for the handler. */
        old_owner = empty_to_null(old_owner);
        new_owner = empty_to_null(new_owner);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);

        return 0;
}
3035
/* Installs a D-Bus signal match so that we are notified when ownership of "name" changes on the given bus.
 * Only one match slot per unit is supported; returns -EBUSY if one is already installed. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* strjoina() allocates on the stack, hence no cleanup is necessary for "match". */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
}
3055
3056 int unit_watch_bus_name(Unit *u, const char *name) {
3057 int r;
3058
3059 assert(u);
3060 assert(name);
3061
3062 /* Watch a specific name on the bus. We only support one unit
3063 * watching each name for now. */
3064
3065 if (u->manager->api_bus) {
3066 /* If the bus is already available, install the match directly.
3067 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3068 r = unit_install_bus_match(u, u->manager->api_bus, name);
3069 if (r < 0)
3070 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3071 }
3072
3073 r = hashmap_put(u->manager->watch_bus, name, u);
3074 if (r < 0) {
3075 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3076 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3077 }
3078
3079 return 0;
3080 }
3081
3082 void unit_unwatch_bus_name(Unit *u, const char *name) {
3083 assert(u);
3084 assert(name);
3085
3086 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3087 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3088 }
3089
3090 bool unit_can_serialize(Unit *u) {
3091 assert(u);
3092
3093 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3094 }
3095
3096 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3097 _cleanup_free_ char *s = NULL;
3098 int r = 0;
3099
3100 assert(f);
3101 assert(key);
3102
3103 if (mask != 0) {
3104 r = cg_mask_to_string(mask, &s);
3105 if (r >= 0) {
3106 fputs(key, f);
3107 fputc('=', f);
3108 fputs(s, f);
3109 fputc('\n', f);
3110 }
3111 }
3112 return r;
3113 }
3114
/* Serialization field names for the per-unit IP accounting counters, indexed by CGroupIPAccountingMetric.
 * These keys are written by unit_serialize() and parsed back by unit_deserialize(). */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3121
/* Serializes the unit's runtime state as "key=value" lines to f (used across daemon reload/re-execution),
 * pushing any file descriptors to keep open into fds. When serialize_jobs is true, the installed job and
 * nop job are serialized as well (both under the "job" key). The record ends with an empty line marker. */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Let the unit type serialize its private state first. */
        if (unit_can_serialize(u)) {
                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results are only meaningful if they were ever evaluated. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* Only serialize IP accounting metrics that could actually be read. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3203
3204 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3205 assert(u);
3206 assert(f);
3207 assert(key);
3208
3209 if (!value)
3210 return 0;
3211
3212 fputs(key, f);
3213 fputc('=', f);
3214 fputs(value, f);
3215 fputc('\n', f);
3216
3217 return 1;
3218 }
3219
3220 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3221 _cleanup_free_ char *c = NULL;
3222
3223 assert(u);
3224 assert(f);
3225 assert(key);
3226
3227 if (!value)
3228 return 0;
3229
3230 c = cescape(value);
3231 if (!c)
3232 return -ENOMEM;
3233
3234 fputs(key, f);
3235 fputc('=', f);
3236 fputs(c, f);
3237 fputc('\n', f);
3238
3239 return 1;
3240 }
3241
3242 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3243 int copy;
3244
3245 assert(u);
3246 assert(f);
3247 assert(key);
3248
3249 if (fd < 0)
3250 return 0;
3251
3252 copy = fdset_put_dup(fds, fd);
3253 if (copy < 0)
3254 return copy;
3255
3256 fprintf(f, "%s=%i\n", key, copy);
3257 return 1;
3258 }
3259
/* Writes a single "key=<formatted value>\n" serialization line, rendering the value printf-style from the
 * variadic arguments. */
void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
        va_list ap;

        assert(u);
        assert(f);
        assert(key);
        assert(format);

        fputs(key, f);
        fputc('=', f);

        va_start(ap, format);
        vfprintf(f, format, ap);
        va_end(ap);

        fputc('\n', f);
}
3277
3278 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3279 int r;
3280
3281 assert(u);
3282 assert(f);
3283 assert(fds);
3284
3285 for (;;) {
3286 char line[LINE_MAX], *l, *v;
3287 CGroupIPAccountingMetric m;
3288 size_t k;
3289
3290 if (!fgets(line, sizeof(line), f)) {
3291 if (feof(f))
3292 return 0;
3293 return -errno;
3294 }
3295
3296 char_array_0(line);
3297 l = strstrip(line);
3298
3299 /* End marker */
3300 if (isempty(l))
3301 break;
3302
3303 k = strcspn(l, "=");
3304
3305 if (l[k] == '=') {
3306 l[k] = 0;
3307 v = l+k+1;
3308 } else
3309 v = l+k;
3310
3311 if (streq(l, "job")) {
3312 if (v[0] == '\0') {
3313 /* new-style serialized job */
3314 Job *j;
3315
3316 j = job_new_raw(u);
3317 if (!j)
3318 return log_oom();
3319
3320 r = job_deserialize(j, f);
3321 if (r < 0) {
3322 job_free(j);
3323 return r;
3324 }
3325
3326 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3327 if (r < 0) {
3328 job_free(j);
3329 return r;
3330 }
3331
3332 r = job_install_deserialized(j);
3333 if (r < 0) {
3334 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3335 job_free(j);
3336 return r;
3337 }
3338 } else /* legacy for pre-44 */
3339 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3340 continue;
3341 } else if (streq(l, "state-change-timestamp")) {
3342 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3343 continue;
3344 } else if (streq(l, "inactive-exit-timestamp")) {
3345 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3346 continue;
3347 } else if (streq(l, "active-enter-timestamp")) {
3348 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3349 continue;
3350 } else if (streq(l, "active-exit-timestamp")) {
3351 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3352 continue;
3353 } else if (streq(l, "inactive-enter-timestamp")) {
3354 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3355 continue;
3356 } else if (streq(l, "condition-timestamp")) {
3357 dual_timestamp_deserialize(v, &u->condition_timestamp);
3358 continue;
3359 } else if (streq(l, "assert-timestamp")) {
3360 dual_timestamp_deserialize(v, &u->assert_timestamp);
3361 continue;
3362 } else if (streq(l, "condition-result")) {
3363
3364 r = parse_boolean(v);
3365 if (r < 0)
3366 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3367 else
3368 u->condition_result = r;
3369
3370 continue;
3371
3372 } else if (streq(l, "assert-result")) {
3373
3374 r = parse_boolean(v);
3375 if (r < 0)
3376 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3377 else
3378 u->assert_result = r;
3379
3380 continue;
3381
3382 } else if (streq(l, "transient")) {
3383
3384 r = parse_boolean(v);
3385 if (r < 0)
3386 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3387 else
3388 u->transient = r;
3389
3390 continue;
3391
3392 } else if (streq(l, "exported-invocation-id")) {
3393
3394 r = parse_boolean(v);
3395 if (r < 0)
3396 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3397 else
3398 u->exported_invocation_id = r;
3399
3400 continue;
3401
3402 } else if (streq(l, "exported-log-level-max")) {
3403
3404 r = parse_boolean(v);
3405 if (r < 0)
3406 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3407 else
3408 u->exported_log_level_max = r;
3409
3410 continue;
3411
3412 } else if (streq(l, "exported-log-extra-fields")) {
3413
3414 r = parse_boolean(v);
3415 if (r < 0)
3416 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3417 else
3418 u->exported_log_extra_fields = r;
3419
3420 continue;
3421
3422 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3423
3424 r = safe_atou64(v, &u->cpu_usage_base);
3425 if (r < 0)
3426 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3427
3428 continue;
3429
3430 } else if (streq(l, "cpu-usage-last")) {
3431
3432 r = safe_atou64(v, &u->cpu_usage_last);
3433 if (r < 0)
3434 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3435
3436 continue;
3437
3438 } else if (streq(l, "cgroup")) {
3439
3440 r = unit_set_cgroup_path(u, v);
3441 if (r < 0)
3442 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3443
3444 (void) unit_watch_cgroup(u);
3445
3446 continue;
3447 } else if (streq(l, "cgroup-realized")) {
3448 int b;
3449
3450 b = parse_boolean(v);
3451 if (b < 0)
3452 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3453 else
3454 u->cgroup_realized = b;
3455
3456 continue;
3457
3458 } else if (streq(l, "cgroup-realized-mask")) {
3459
3460 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3461 if (r < 0)
3462 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3463 continue;
3464
3465 } else if (streq(l, "cgroup-enabled-mask")) {
3466
3467 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3468 if (r < 0)
3469 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3470 continue;
3471
3472 } else if (streq(l, "cgroup-bpf-realized")) {
3473 int i;
3474
3475 r = safe_atoi(v, &i);
3476 if (r < 0)
3477 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3478 else
3479 u->cgroup_bpf_state =
3480 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3481 i > 0 ? UNIT_CGROUP_BPF_ON :
3482 UNIT_CGROUP_BPF_OFF;
3483
3484 continue;
3485
3486 } else if (streq(l, "ref-uid")) {
3487 uid_t uid;
3488
3489 r = parse_uid(v, &uid);
3490 if (r < 0)
3491 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3492 else
3493 unit_ref_uid_gid(u, uid, GID_INVALID);
3494
3495 continue;
3496
3497 } else if (streq(l, "ref-gid")) {
3498 gid_t gid;
3499
3500 r = parse_gid(v, &gid);
3501 if (r < 0)
3502 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3503 else
3504 unit_ref_uid_gid(u, UID_INVALID, gid);
3505
3506 } else if (streq(l, "ref")) {
3507
3508 r = strv_extend(&u->deserialized_refs, v);
3509 if (r < 0)
3510 log_oom();
3511
3512 continue;
3513 } else if (streq(l, "invocation-id")) {
3514 sd_id128_t id;
3515
3516 r = sd_id128_from_string(v, &id);
3517 if (r < 0)
3518 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3519 else {
3520 r = unit_set_invocation_id(u, id);
3521 if (r < 0)
3522 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3523 }
3524
3525 continue;
3526 }
3527
3528 /* Check if this is an IP accounting metric serialization field */
3529 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3530 if (streq(l, ip_accounting_metric_field[m]))
3531 break;
3532 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3533 uint64_t c;
3534
3535 r = safe_atou64(v, &c);
3536 if (r < 0)
3537 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3538 else
3539 u->ip_accounting_extra[m] = c;
3540 continue;
3541 }
3542
3543 if (unit_can_serialize(u)) {
3544 r = exec_runtime_deserialize_compat(u, l, v, fds);
3545 if (r < 0) {
3546 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3547 continue;
3548 }
3549
3550 /* Returns positive if key was handled by the call */
3551 if (r > 0)
3552 continue;
3553
3554 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3555 if (r < 0)
3556 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3557 }
3558 }
3559
3560 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3561 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3562 * before 228 where the base for timeouts was not persistent across reboots. */
3563
3564 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3565 dual_timestamp_get(&u->state_change_timestamp);
3566
3567 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3568 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3569 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3570 unit_invalidate_cgroup_bpf(u);
3571
3572 return 0;
3573 }
3574
3575 void unit_deserialize_skip(FILE *f) {
3576 assert(f);
3577
3578 /* Skip serialized data for this unit. We don't know what it is. */
3579
3580 for (;;) {
3581 char line[LINE_MAX], *l;
3582
3583 if (!fgets(line, sizeof line, f))
3584 return;
3585
3586 char_array_0(line);
3587 l = strstrip(line);
3588
3589 /* End marker */
3590 if (isempty(l))
3591 return;
3592 }
3593 }
3594
3595
3596 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3597 Unit *device;
3598 _cleanup_free_ char *e = NULL;
3599 int r;
3600
3601 assert(u);
3602
3603 /* Adds in links to the device node that this unit is based on */
3604 if (isempty(what))
3605 return 0;
3606
3607 if (!is_device_path(what))
3608 return 0;
3609
3610 /* When device units aren't supported (such as in a
3611 * container), don't create dependencies on them. */
3612 if (!unit_type_supported(UNIT_DEVICE))
3613 return 0;
3614
3615 r = unit_name_from_path(what, ".device", &e);
3616 if (r < 0)
3617 return r;
3618
3619 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3620 if (r < 0)
3621 return r;
3622
3623 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3624 dep = UNIT_BINDS_TO;
3625
3626 r = unit_add_two_dependencies(u, UNIT_AFTER,
3627 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3628 device, true, mask);
3629 if (r < 0)
3630 return r;
3631
3632 if (wants) {
3633 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3634 if (r < 0)
3635 return r;
3636 }
3637
3638 return 0;
3639 }
3640
3641 int unit_coldplug(Unit *u) {
3642 int r = 0, q;
3643 char **i;
3644
3645 assert(u);
3646
3647 /* Make sure we don't enter a loop, when coldplugging
3648 * recursively. */
3649 if (u->coldplugged)
3650 return 0;
3651
3652 u->coldplugged = true;
3653
3654 STRV_FOREACH(i, u->deserialized_refs) {
3655 q = bus_unit_track_add_name(u, *i);
3656 if (q < 0 && r >= 0)
3657 r = q;
3658 }
3659 u->deserialized_refs = strv_free(u->deserialized_refs);
3660
3661 if (UNIT_VTABLE(u)->coldplug) {
3662 q = UNIT_VTABLE(u)->coldplug(u);
3663 if (q < 0 && r >= 0)
3664 r = q;
3665 }
3666
3667 if (u->job) {
3668 q = job_coldplug(u->job);
3669 if (q < 0 && r >= 0)
3670 r = q;
3671 }
3672
3673 return r;
3674 }
3675
3676 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3677 struct stat st;
3678
3679 if (!path)
3680 return false;
3681
3682 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3683 * are never out-of-date. */
3684 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3685 return false;
3686
3687 if (stat(path, &st) < 0)
3688 /* What, cannot access this anymore? */
3689 return true;
3690
3691 if (path_masked)
3692 /* For masked files check if they are still so */
3693 return !null_or_empty(&st);
3694 else
3695 /* For non-empty files check the mtime */
3696 return timespec_load(&st.st_mtim) > mtime;
3697
3698 return false;
3699 }
3700
3701 bool unit_need_daemon_reload(Unit *u) {
3702 _cleanup_strv_free_ char **t = NULL;
3703 char **path;
3704
3705 assert(u);
3706
3707 /* For unit files, we allow masking… */
3708 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3709 u->load_state == UNIT_MASKED))
3710 return true;
3711
3712 /* Source paths should not be masked… */
3713 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3714 return true;
3715
3716 if (u->load_state == UNIT_LOADED)
3717 (void) unit_find_dropin_paths(u, &t);
3718 if (!strv_equal(u->dropin_paths, t))
3719 return true;
3720
3721 /* … any drop-ins that are masked are simply omitted from the list. */
3722 STRV_FOREACH(path, u->dropin_paths)
3723 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3724 return true;
3725
3726 return false;
3727 }
3728
3729 void unit_reset_failed(Unit *u) {
3730 assert(u);
3731
3732 if (UNIT_VTABLE(u)->reset_failed)
3733 UNIT_VTABLE(u)->reset_failed(u);
3734
3735 RATELIMIT_RESET(u->start_limit);
3736 u->start_limit_hit = false;
3737 }
3738
3739 Unit *unit_following(Unit *u) {
3740 assert(u);
3741
3742 if (UNIT_VTABLE(u)->following)
3743 return UNIT_VTABLE(u)->following(u);
3744
3745 return NULL;
3746 }
3747
3748 bool unit_stop_pending(Unit *u) {
3749 assert(u);
3750
3751 /* This call does check the current state of the unit. It's
3752 * hence useful to be called from state change calls of the
3753 * unit itself, where the state isn't updated yet. This is
3754 * different from unit_inactive_or_pending() which checks both
3755 * the current state and for a queued job. */
3756
3757 return u->job && u->job->type == JOB_STOP;
3758 }
3759
3760 bool unit_inactive_or_pending(Unit *u) {
3761 assert(u);
3762
3763 /* Returns true if the unit is inactive or going down */
3764
3765 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3766 return true;
3767
3768 if (unit_stop_pending(u))
3769 return true;
3770
3771 return false;
3772 }
3773
3774 bool unit_active_or_pending(Unit *u) {
3775 assert(u);
3776
3777 /* Returns true if the unit is active or going up */
3778
3779 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3780 return true;
3781
3782 if (u->job &&
3783 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3784 return true;
3785
3786 return false;
3787 }
3788
3789 bool unit_will_restart(Unit *u) {
3790 assert(u);
3791
3792 if (!UNIT_VTABLE(u)->will_restart)
3793 return false;
3794
3795 return UNIT_VTABLE(u)->will_restart(u);
3796 }
3797
3798 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3799 assert(u);
3800 assert(w >= 0 && w < _KILL_WHO_MAX);
3801 assert(SIGNAL_VALID(signo));
3802
3803 if (!UNIT_VTABLE(u)->kill)
3804 return -EOPNOTSUPP;
3805
3806 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3807 }
3808
3809 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3810 Set *pid_set;
3811 int r;
3812
3813 pid_set = set_new(NULL);
3814 if (!pid_set)
3815 return NULL;
3816
3817 /* Exclude the main/control pids from being killed via the cgroup */
3818 if (main_pid > 0) {
3819 r = set_put(pid_set, PID_TO_PTR(main_pid));
3820 if (r < 0)
3821 goto fail;
3822 }
3823
3824 if (control_pid > 0) {
3825 r = set_put(pid_set, PID_TO_PTR(control_pid));
3826 if (r < 0)
3827 goto fail;
3828 }
3829
3830 return pid_set;
3831
3832 fail:
3833 set_free(pid_set);
3834 return NULL;
3835 }
3836
/* Generic backend for the Kill() bus call: delivers 'signo' to the main process, the control
 * process and/or the unit's whole cgroup, depending on 'who'. Returns 0 on success, a negative
 * errno on failure; the *_FAIL variants additionally yield -ESRCH if nothing could be killed. */
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {

        int r = 0;
        bool killed = false;

        /* Asking for the main process specifically is an error if this unit type has no main
         * process concept (pid < 0) or none is currently known (pid == 0). */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                else if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        /* Likewise for the control process */
        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                else if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        if (kill(control_pid, signo) < 0)
                                r = -errno; /* remember the error but keep going */
                        else
                                killed = true;
                }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        if (kill(main_pid, signo) < 0)
                                r = -errno;
                        else
                                killed = true;
                }

        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
                /* An empty or vanished cgroup is not an error for this operation */
                if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
                        r = q;
                else
                        killed = true;
        }

        /* The *_FAIL variants report an error when nothing was killed at all */
        if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
                return -ESRCH;

        return r;
}
3899
3900 int unit_following_set(Unit *u, Set **s) {
3901 assert(u);
3902 assert(s);
3903
3904 if (UNIT_VTABLE(u)->following_set)
3905 return UNIT_VTABLE(u)->following_set(u, s);
3906
3907 *s = NULL;
3908 return 0;
3909 }
3910
3911 UnitFileState unit_get_unit_file_state(Unit *u) {
3912 int r;
3913
3914 assert(u);
3915
3916 if (u->unit_file_state < 0 && u->fragment_path) {
3917 r = unit_file_get_state(
3918 u->manager->unit_file_scope,
3919 NULL,
3920 u->id,
3921 &u->unit_file_state);
3922 if (r < 0)
3923 u->unit_file_state = UNIT_FILE_BAD;
3924 }
3925
3926 return u->unit_file_state;
3927 }
3928
3929 int unit_get_unit_file_preset(Unit *u) {
3930 assert(u);
3931
3932 if (u->unit_file_preset < 0 && u->fragment_path)
3933 u->unit_file_preset = unit_file_query_preset(
3934 u->manager->unit_file_scope,
3935 NULL,
3936 basename(u->fragment_path));
3937
3938 return u->unit_file_preset;
3939 }
3940
3941 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3942 assert(ref);
3943 assert(source);
3944 assert(target);
3945
3946 if (ref->target)
3947 unit_ref_unset(ref);
3948
3949 ref->source = source;
3950 ref->target = target;
3951 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3952 return target;
3953 }
3954
3955 void unit_ref_unset(UnitRef *ref) {
3956 assert(ref);
3957
3958 if (!ref->target)
3959 return;
3960
3961 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3962 * be unreferenced now. */
3963 unit_add_to_gc_queue(ref->target);
3964
3965 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
3966 ref->source = ref->target = NULL;
3967 }
3968
3969 static int user_from_unit_name(Unit *u, char **ret) {
3970
3971 static const uint8_t hash_key[] = {
3972 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3973 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3974 };
3975
3976 _cleanup_free_ char *n = NULL;
3977 int r;
3978
3979 r = unit_name_to_prefix(u->id, &n);
3980 if (r < 0)
3981 return r;
3982
3983 if (valid_user_group_name(n)) {
3984 *ret = TAKE_PTR(n);
3985 return 0;
3986 }
3987
3988 /* If we can't use the unit name as a user name, then let's hash it and use that */
3989 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3990 return -ENOMEM;
3991
3992 return 0;
3993 }
3994
/* Patches manager-wide defaults and cross-setting implications into the unit's exec and
 * cgroup contexts. Must run after all other settings have been parsed. Returns 0 on
 * success, negative errno on allocation or lookup failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User-manager services default to the user's home directory as working directory */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= drops the capabilities for creating device nodes / raw I/O */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* DynamicUser= needs a user name; derive one from the unit name if unset */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        /* The group defaults to the same name as the user */
                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* With PrivateDevices= the default "auto" device policy tightens to "closed" */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4070
4071 ExecContext *unit_get_exec_context(Unit *u) {
4072 size_t offset;
4073 assert(u);
4074
4075 if (u->type < 0)
4076 return NULL;
4077
4078 offset = UNIT_VTABLE(u)->exec_context_offset;
4079 if (offset <= 0)
4080 return NULL;
4081
4082 return (ExecContext*) ((uint8_t*) u + offset);
4083 }
4084
4085 KillContext *unit_get_kill_context(Unit *u) {
4086 size_t offset;
4087 assert(u);
4088
4089 if (u->type < 0)
4090 return NULL;
4091
4092 offset = UNIT_VTABLE(u)->kill_context_offset;
4093 if (offset <= 0)
4094 return NULL;
4095
4096 return (KillContext*) ((uint8_t*) u + offset);
4097 }
4098
4099 CGroupContext *unit_get_cgroup_context(Unit *u) {
4100 size_t offset;
4101
4102 if (u->type < 0)
4103 return NULL;
4104
4105 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4106 if (offset <= 0)
4107 return NULL;
4108
4109 return (CGroupContext*) ((uint8_t*) u + offset);
4110 }
4111
4112 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4113 size_t offset;
4114
4115 if (u->type < 0)
4116 return NULL;
4117
4118 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4119 if (offset <= 0)
4120 return NULL;
4121
4122 return *(ExecRuntime**) ((uint8_t*) u + offset);
4123 }
4124
4125 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4126 assert(u);
4127
4128 if (UNIT_WRITE_FLAGS_NOOP(flags))
4129 return NULL;
4130
4131 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4132 return u->manager->lookup_paths.transient;
4133
4134 if (flags & UNIT_PERSISTENT)
4135 return u->manager->lookup_paths.persistent_control;
4136
4137 if (flags & UNIT_RUNTIME)
4138 return u->manager->lookup_paths.runtime_control;
4139
4140 return NULL;
4141 }
4142
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        /* First pass: specifier escaping, if requested. On success 's' is redirected to the
         * freshly allocated escaped copy so the next pass operates on it. */
        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                s = ret;
        }

        /* Second pass: C-style escaping, if requested. The new buffer supersedes (and hence
         * frees) the intermediate buffer from the first pass. */
        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* *buf carries the allocation (or NULL); the caller frees it, not the return value */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* No 'buf' given: always hand back an allocation the caller owns */
        return ret ?: strdup(s);
}
4182
/* Takes a list of strings, escapes each per 'flags', wraps each in double quotes and joins them
 * with single spaces. Returns a newly allocated string, or NULL on allocation failure. */
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;  /* n: bytes written so far; allocated: buffer capacity */
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                /* (n > 0) accounts for the separating space before every entry but the first */
                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Make sure there is room for the terminating NUL byte */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4223
/* Persists a single configuration setting for the unit: either appended to the transient unit
 * file currently being created, or written as a 50-<name>.conf drop-in in the directory selected
 * by 'flags'. 'data' is escaped per 'flags' and prefixed with the appropriate section header.
 * Returns 0 on success, negative errno on failure. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                /* last_section_private < 0 means no section was written yet; == 0 means the previous
                 * section was [Unit], so a fresh private section header is needed */
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Record the new drop-in path on the unit; ownership of q moves into the strv */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4300
4301 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4302 _cleanup_free_ char *p = NULL;
4303 va_list ap;
4304 int r;
4305
4306 assert(u);
4307 assert(name);
4308 assert(format);
4309
4310 if (UNIT_WRITE_FLAGS_NOOP(flags))
4311 return 0;
4312
4313 va_start(ap, format);
4314 r = vasprintf(&p, format, ap);
4315 va_end(ap);
4316
4317 if (r < 0)
4318 return -ENOMEM;
4319
4320 return unit_write_setting(u, flags, name, p);
4321 }
4322
/* Converts the unit into a transient one: opens a fresh unit file in the transient lookup
 * directory (kept open in u->transient_file for unit_write_setting() to append to, and closed
 * later in unit_load()) and resets all on-disk load state. Returns 0 on success,
 * -EOPNOTSUPP if the unit type cannot be transient, or negative errno on I/O failure. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        /* Replace any previous transient file handle */
        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Discard all other load sources: the transient file is now the only one */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        /* Force a reload from the new fragment */
        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4368
4369 static void log_kill(pid_t pid, int sig, void *userdata) {
4370 _cleanup_free_ char *comm = NULL;
4371
4372 (void) get_process_comm(pid, &comm);
4373
4374 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4375 only, like for example systemd's own PAM stub process. */
4376 if (comm && comm[0] == '(')
4377 return;
4378
4379 log_unit_notice(userdata,
4380 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4381 pid,
4382 strna(comm),
4383 signal_to_string(sig));
4384 }
4385
4386 static int operation_to_signal(KillContext *c, KillOperation k) {
4387 assert(c);
4388
4389 switch (k) {
4390
4391 case KILL_TERMINATE:
4392 case KILL_TERMINATE_AND_LOG:
4393 return c->kill_signal;
4394
4395 case KILL_KILL:
4396 return SIGKILL;
4397
4398 case KILL_ABORT:
4399 return SIGABRT;
4400
4401 default:
4402 assert_not_reached("KillOperation unknown");
4403 }
4404 }
4405
/* Kills the processes belonging to this unit — main process, control process and (depending on
 * KillMode=) the remainder of its cgroup — in preparation for shutting the unit down. The signal
 * is chosen from the KillContext by operation 'k'. Returns > 0 if something worth waiting for was
 * killed, 0 otherwise, negative errno on hard failure (OOM). */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SendSIGHUP= only applies to the terminate operations, and is pointless when the
         * primary signal already is SIGHUP */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process for the forceful/logging operations */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes that are our own children */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        /* KillMode=control-group always sweeps the cgroup; KillMode=mixed only on the final SIGKILL pass */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set before the follow-up SIGHUP sweep */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4523
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        char *prefix;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a mutable copy, normalized in place */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_kill_slashes(p);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        p = NULL; /* ownership of the key string moved into the hashmap */

        /* Enter this unit into the manager-global prefix table for every prefix of the path */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL; /* key ownership moved into the hashmap */
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4601
4602 int unit_setup_exec_runtime(Unit *u) {
4603 ExecRuntime **rt;
4604 size_t offset;
4605 Unit *other;
4606 Iterator i;
4607 void *v;
4608 int r;
4609
4610 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4611 assert(offset > 0);
4612
4613 /* Check if there already is an ExecRuntime for this unit? */
4614 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4615 if (*rt)
4616 return 0;
4617
4618 /* Try to get it from somebody else */
4619 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4620 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4621 if (r == 1)
4622 return 1;
4623 }
4624
4625 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4626 }
4627
4628 int unit_setup_dynamic_creds(Unit *u) {
4629 ExecContext *ec;
4630 DynamicCreds *dcreds;
4631 size_t offset;
4632
4633 assert(u);
4634
4635 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4636 assert(offset > 0);
4637 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4638
4639 ec = unit_get_exec_context(u);
4640 assert(ec);
4641
4642 if (!ec->dynamic_user)
4643 return 0;
4644
4645 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4646 }
4647
4648 bool unit_type_supported(UnitType t) {
4649 if (_unlikely_(t < 0))
4650 return false;
4651 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4652 return false;
4653
4654 if (!unit_vtable[t]->supported)
4655 return true;
4656
4657 return unit_vtable[t]->supported();
4658 }
4659
4660 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4661 int r;
4662
4663 assert(u);
4664 assert(where);
4665
4666 r = dir_is_empty(where);
4667 if (r > 0 || r == -ENOTDIR)
4668 return;
4669 if (r < 0) {
4670 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4671 return;
4672 }
4673
4674 log_struct(LOG_NOTICE,
4675 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4676 LOG_UNIT_ID(u),
4677 LOG_UNIT_INVOCATION_ID(u),
4678 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4679 "WHERE=%s", where,
4680 NULL);
4681 }
4682
4683 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4684 _cleanup_free_ char *canonical_where;
4685 int r;
4686
4687 assert(u);
4688 assert(where);
4689
4690 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4691 if (r < 0) {
4692 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4693 return 0;
4694 }
4695
4696 /* We will happily ignore a trailing slash (or any redundant slashes) */
4697 if (path_equal(where, canonical_where))
4698 return 0;
4699
4700 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4701 log_struct(LOG_ERR,
4702 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4703 LOG_UNIT_ID(u),
4704 LOG_UNIT_INVOCATION_ID(u),
4705 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4706 "WHERE=%s", where,
4707 NULL);
4708
4709 return -ELOOP;
4710 }
4711
4712 bool unit_is_pristine(Unit *u) {
4713 assert(u);
4714
4715 /* Check if the unit already exists or is already around,
4716 * in a number of different ways. Note that to cater for unit
4717 * types such as slice, we are generally fine with units that
4718 * are marked UNIT_LOADED even though nothing was
4719 * actually loaded, as those unit types don't require a file
4720 * on disk to validly load. */
4721
4722 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4723 u->fragment_path ||
4724 u->source_path ||
4725 !strv_isempty(u->dropin_paths) ||
4726 u->job ||
4727 u->merged_into);
4728 }
4729
4730 pid_t unit_control_pid(Unit *u) {
4731 assert(u);
4732
4733 if (UNIT_VTABLE(u)->control_pid)
4734 return UNIT_VTABLE(u)->control_pid(u);
4735
4736 return 0;
4737 }
4738
4739 pid_t unit_main_pid(Unit *u) {
4740 assert(u);
4741
4742 if (UNIT_VTABLE(u)->main_pid)
4743 return UNIT_VTABLE(u)->main_pid(u);
4744
4745 return 0;
4746 }
4747
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Nothing to drop if no reference is currently held. */
        if (!uid_is_valid(*ref_uid))
                return;

        /* Tell the manager, then mark the slot empty so we never drop the same reference twice. */
        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4772
/* Drops the unit's reference on its currently tracked UID, if any. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}

/* Same for the GID; the uid_t* cast is safe since uid_t and gid_t are verified identical above. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4780
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Already holding a reference on this very UID/GID? Then we're done. */
        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        /* Record it only after the manager accepted the reference. Returns 1 to signal a new ref was taken. */
        *ref_uid = uid;
        return 1;
}
4818
/* Takes a reference on the given UID for this unit; see unit_ref_uid_internal() for semantics. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}

/* Same for the GID; the casts are safe since uid_t and gid_t are verified identical above. */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4826
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference we just took, so it's truly all-or-nothing. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        /* Returns > 0 if at least one new reference was actually taken. */
        return r > 0 || q > 0;
}
4852
4853 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4854 ExecContext *c;
4855 int r;
4856
4857 assert(u);
4858
4859 c = unit_get_exec_context(u);
4860
4861 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4862 if (r < 0)
4863 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4864
4865 return r;
4866 }
4867
/* Drops both the UID and GID references of the unit, if any are held. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4874
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        /* Only emit a bus change signal if a new reference was actually taken (> 0). */
        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                bus_unit_send_change_signal(u);
}
4888
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0; /* no change */

        /* Drop the old ID from the manager's lookup table first — its key points into this unit. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID simply clears whatever was set before. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Store the ID before inserting: the hashmap key is the address of u->invocation_id itself. */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit clearing) the unit ends up with no invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4925
/* Generates a fresh random invocation ID and installs it on the unit. Returns 0 on success,
 * logging and propagating any error from ID generation or installation. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
4942
/* Fills in an ExecParameters structure with settings from both the manager and this unit,
 * in preparation for spawning a process. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        /* Copy parameters from manager */
        p->environment = u->manager->environment;
        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        /* These two flags only apply when we are the system instance. */
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
}
4958
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                /* Parent (r > 0) or fork error (r < 0): return straight away. */
                return r;

        /* Everything below runs in the child only. Restore default signal dispositions and make sure
         * we die together with the manager. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        /* Failing to join the cgroup is fatal for the child — exit, don't return. */
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
4989
4990 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
4991 assert(u);
4992 assert(d >= 0);
4993 assert(d < _UNIT_DEPENDENCY_MAX);
4994 assert(other);
4995
4996 if (di.origin_mask == 0 && di.destination_mask == 0) {
4997 /* No bit set anymore, let's drop the whole entry */
4998 assert_se(hashmap_remove(u->dependencies[d], other));
4999 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5000 } else
5001 /* Mask was reduced, let's update the entry */
5002 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5003 }
5004
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin mask doesn't intersect the requested mask. */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* We possibly removed an entry from the hashmap we are iterating, which
                                 * invalidates the iterator — restart the scan from the beginning. */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
5058
5059 static int unit_export_invocation_id(Unit *u) {
5060 const char *p;
5061 int r;
5062
5063 assert(u);
5064
5065 if (u->exported_invocation_id)
5066 return 0;
5067
5068 if (sd_id128_is_null(u->invocation_id))
5069 return 0;
5070
5071 p = strjoina("/run/systemd/units/invocation:", u->id);
5072 r = symlink_atomic(u->invocation_id_string, p);
5073 if (r < 0)
5074 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5075
5076 u->exported_invocation_id = true;
5077 return 0;
5078 }
5079
/* Publishes the unit's maximum log level as a one-character symlink under /run/systemd/units/,
 * for journald to consume. A no-op if already exported or if no maximum is configured. */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
        const char *p;
        char buf[2];
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_level_max)
                return 0;

        /* A negative value means no maximum level is configured. */
        if (c->log_level_max < 0)
                return 0;

        assert(c->log_level_max <= 7);

        /* Encode the single-digit syslog level as a one-character string. */
        buf[0] = '0' + c->log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
5107
5108 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5109 _cleanup_close_ int fd = -1;
5110 struct iovec *iovec;
5111 const char *p;
5112 char *pattern;
5113 le64_t *sizes;
5114 ssize_t n;
5115 size_t i;
5116 int r;
5117
5118 if (u->exported_log_extra_fields)
5119 return 0;
5120
5121 if (c->n_log_extra_fields <= 0)
5122 return 0;
5123
5124 sizes = newa(le64_t, c->n_log_extra_fields);
5125 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5126
5127 for (i = 0; i < c->n_log_extra_fields; i++) {
5128 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5129
5130 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5131 iovec[i*2+1] = c->log_extra_fields[i];
5132 }
5133
5134 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5135 pattern = strjoina(p, ".XXXXXX");
5136
5137 fd = mkostemp_safe(pattern);
5138 if (fd < 0)
5139 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5140
5141 n = writev(fd, iovec, c->n_log_extra_fields*2);
5142 if (n < 0) {
5143 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5144 goto fail;
5145 }
5146
5147 (void) fchmod(fd, 0644);
5148
5149 if (rename(pattern, p) < 0) {
5150 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5151 goto fail;
5152 }
5153
5154 u->exported_log_extra_fields = true;
5155 return 0;
5156
5157 fail:
5158 (void) unlink(pattern);
5159 return r;
5160 }
5161
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        /* Nothing to export for anonymous units. */
        if (!u->id)
                return;

        /* Only the system instance exports to /run/systemd/units/. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5194
5195 void unit_unlink_state_files(Unit *u) {
5196 const char *p;
5197
5198 assert(u);
5199
5200 if (!u->id)
5201 return;
5202
5203 if (!MANAGER_IS_SYSTEM(u->manager))
5204 return;
5205
5206 /* Undoes the effect of unit_export_state() */
5207
5208 if (u->exported_invocation_id) {
5209 p = strjoina("/run/systemd/units/invocation:", u->id);
5210 (void) unlink(p);
5211
5212 u->exported_invocation_id = false;
5213 }
5214
5215 if (u->exported_log_level_max) {
5216 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5217 (void) unlink(p);
5218
5219 u->exported_log_level_max = false;
5220 }
5221
5222 if (u->exported_log_extra_fields) {
5223 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5224 (void) unlink(p);
5225
5226 u->exported_log_extra_fields = false;
5227 }
5228 }
5229
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        /* If a reset was requested, zero the CPU and IP accounting counters before the new run starts. */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5257
5258 static void log_leftover(pid_t pid, int sig, void *userdata) {
5259 _cleanup_free_ char *comm = NULL;
5260
5261 (void) get_process_comm(pid, &comm);
5262
5263 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5264 return;
5265
5266 log_unit_warning(userdata,
5267 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5268 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5269 pid, strna(comm));
5270 }
5271
void unit_warn_leftover_processes(Unit *u) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return;

        /* Signal 0 means nothing is actually killed here; this only enumerates the cgroup's
         * processes and invokes log_leftover() for each one found. */
        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
}
5282
5283 bool unit_needs_console(Unit *u) {
5284 ExecContext *ec;
5285 UnitActiveState state;
5286
5287 assert(u);
5288
5289 state = unit_active_state(u);
5290
5291 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5292 return false;
5293
5294 if (UNIT_VTABLE(u)->needs_console)
5295 return UNIT_VTABLE(u)->needs_console(u);
5296
5297 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5298 ec = unit_get_exec_context(u);
5299 if (!ec)
5300 return false;
5301
5302 return exec_context_may_touch_console(ec);
5303 }
5304
5305 const char *unit_label_path(Unit *u) {
5306 const char *p;
5307
5308 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5309 * when validating access checks. */
5310
5311 p = u->source_path ?: u->fragment_path;
5312 if (!p)
5313 return NULL;
5314
5315 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5316 if (path_equal(p, "/dev/null"))
5317 return NULL;
5318
5319 return p;
5320 }
5321
5322 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5323 int r;
5324
5325 assert(u);
5326
5327 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5328 * and not a kernel thread either */
5329
5330 /* First, a simple range check */
5331 if (!pid_is_valid(pid))
5332 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5333
5334 /* Some extra safety check */
5335 if (pid == 1 || pid == getpid_cached())
5336 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager processs, refusing.", pid);
5337
5338 /* Don't even begin to bother with kernel threads */
5339 r = is_kernel_thread(pid);
5340 if (r == -ESRCH)
5341 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5342 if (r < 0)
5343 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5344 if (r > 0)
5345 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5346
5347 return 0;
5348 }
5349
/* String names for the CollectMode enum; consumed by the generated lookup functions below. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);