src/core/unit.c
1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fd-util.h"
39 #include "fileio-label.h"
40 #include "format-util.h"
41 #include "fs-util.h"
42 #include "id128-util.h"
43 #include "io-util.h"
44 #include "load-dropin.h"
45 #include "load-fragment.h"
46 #include "log.h"
47 #include "macro.h"
48 #include "missing.h"
49 #include "mkdir.h"
50 #include "parse-util.h"
51 #include "path-util.h"
52 #include "process-util.h"
53 #include "set.h"
54 #include "signal-util.h"
55 #include "sparse-endian.h"
56 #include "special.h"
57 #include "stat-util.h"
58 #include "stdio-util.h"
59 #include "string-util.h"
60 #include "strv.h"
61 #include "umask-util.h"
62 #include "unit-name.h"
63 #include "unit.h"
64 #include "user-util.h"
65 #include "virt.h"
66
67 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
68 [UNIT_SERVICE] = &service_vtable,
69 [UNIT_SOCKET] = &socket_vtable,
70 [UNIT_TARGET] = &target_vtable,
71 [UNIT_DEVICE] = &device_vtable,
72 [UNIT_MOUNT] = &mount_vtable,
73 [UNIT_AUTOMOUNT] = &automount_vtable,
74 [UNIT_SWAP] = &swap_vtable,
75 [UNIT_TIMER] = &timer_vtable,
76 [UNIT_PATH] = &path_vtable,
77 [UNIT_SLICE] = &slice_vtable,
78 [UNIT_SCOPE] = &scope_vtable
79 };
80
81 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
82
83 Unit *unit_new(Manager *m, size_t size) {
84 Unit *u;
85
86 assert(m);
87 assert(size >= sizeof(Unit));
88
89 u = malloc0(size);
90 if (!u)
91 return NULL;
92
93 u->names = set_new(&string_hash_ops);
94 if (!u->names)
95 return mfree(u);
96
97 u->manager = m;
98 u->type = _UNIT_TYPE_INVALID;
99 u->default_dependencies = true;
100 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
101 u->unit_file_preset = -1;
102 u->on_failure_job_mode = JOB_REPLACE;
103 u->cgroup_inotify_wd = -1;
104 u->job_timeout = USEC_INFINITY;
105 u->job_running_timeout = USEC_INFINITY;
106 u->ref_uid = UID_INVALID;
107 u->ref_gid = GID_INVALID;
108 u->cpu_usage_last = NSEC_INFINITY;
109
110 u->ip_accounting_ingress_map_fd = -1;
111 u->ip_accounting_egress_map_fd = -1;
112 u->ipv4_allow_map_fd = -1;
113 u->ipv6_allow_map_fd = -1;
114 u->ipv4_deny_map_fd = -1;
115 u->ipv6_deny_map_fd = -1;
116
117 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
118 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
119
120 return u;
121 }
122
123 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
124 Unit *u;
125 int r;
126
127 u = unit_new(m, size);
128 if (!u)
129 return -ENOMEM;
130
131 r = unit_add_name(u, name);
132 if (r < 0) {
133 unit_free(u);
134 return r;
135 }
136
137 *ret = u;
138 return r;
139 }
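
/* Illustrative sketch (editorial, not upstream code): the 'size' argument lets callers allocate
 * the type-specific object that embeds a Unit as its first member, so the same pointer serves as
 * both (unit_new() asserts size >= sizeof(Unit)). Assuming a hypothetical type MyService whose
 * first member is a Unit, allocation would look roughly like:
 *
 *     MyService *s;
 *     Unit *u;
 *     int r;
 *
 *     r = unit_new_for_name(m, sizeof(MyService), "foo.service", &u);
 *     if (r < 0)
 *             return r;
 *     s = (MyService*) u;
 */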
140
141 bool unit_has_name(Unit *u, const char *name) {
142 assert(u);
143 assert(name);
144
145 return set_contains(u->names, (char*) name);
146 }
147
148 static void unit_init(Unit *u) {
149 CGroupContext *cc;
150 ExecContext *ec;
151 KillContext *kc;
152
153 assert(u);
154 assert(u->manager);
155 assert(u->type >= 0);
156
157 cc = unit_get_cgroup_context(u);
158 if (cc) {
159 cgroup_context_init(cc);
160
161                 /* Copy the manager defaults into the cgroup
162 * context, _before_ the rest of the settings have
163 * been initialized */
164
165 cc->cpu_accounting = u->manager->default_cpu_accounting;
166 cc->io_accounting = u->manager->default_io_accounting;
167 cc->ip_accounting = u->manager->default_ip_accounting;
168 cc->blockio_accounting = u->manager->default_blockio_accounting;
169 cc->memory_accounting = u->manager->default_memory_accounting;
170 cc->tasks_accounting = u->manager->default_tasks_accounting;
172
173 if (u->type != UNIT_SLICE)
174 cc->tasks_max = u->manager->default_tasks_max;
175 }
176
177 ec = unit_get_exec_context(u);
178 if (ec) {
179 exec_context_init(ec);
180
181 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
182 EXEC_KEYRING_PRIVATE : EXEC_KEYRING_INHERIT;
183 }
184
185 kc = unit_get_kill_context(u);
186 if (kc)
187 kill_context_init(kc);
188
189 if (UNIT_VTABLE(u)->init)
190 UNIT_VTABLE(u)->init(u);
191 }
192
193 int unit_add_name(Unit *u, const char *text) {
194 _cleanup_free_ char *s = NULL, *i = NULL;
195 UnitType t;
196 int r;
197
198 assert(u);
199 assert(text);
200
201 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
202
203 if (!u->instance)
204 return -EINVAL;
205
206 r = unit_name_replace_instance(text, u->instance, &s);
207 if (r < 0)
208 return r;
209 } else {
210 s = strdup(text);
211 if (!s)
212 return -ENOMEM;
213 }
214
215 if (set_contains(u->names, s))
216 return 0;
217 if (hashmap_contains(u->manager->units, s))
218 return -EEXIST;
219
220 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
221 return -EINVAL;
222
223 t = unit_name_to_type(s);
224 if (t < 0)
225 return -EINVAL;
226
227 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
228 return -EINVAL;
229
230 r = unit_name_to_instance(s, &i);
231 if (r < 0)
232 return r;
233
234 if (i && !unit_type_may_template(t))
235 return -EINVAL;
236
237 /* Ensure that this unit is either instanced or not instanced,
238 * but not both. Note that we do allow names with different
239 * instance names however! */
240 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
241 return -EINVAL;
242
243 if (!unit_type_may_alias(t) && !set_isempty(u->names))
244 return -EEXIST;
245
246 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
247 return -E2BIG;
248
249 r = set_put(u->names, s);
250 if (r < 0)
251 return r;
252 assert(r > 0);
253
254 r = hashmap_put(u->manager->units, s, u);
255 if (r < 0) {
256 (void) set_remove(u->names, s);
257 return r;
258 }
259
260 if (u->type == _UNIT_TYPE_INVALID) {
261 u->type = t;
262 u->id = s;
263 u->instance = i;
264
265 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
266
267 unit_init(u);
268
269 i = NULL;
270 }
271
272 s = NULL;
273
274 unit_add_to_dbus_queue(u);
275 return 0;
276 }
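
/* Example (illustrative, hypothetical names): for a unit whose instance is "tty1", passing the
 * template name
 *
 *     unit_add_name(u, "getty@.service");
 *
 * registers "getty@tty1.service" as an additional name (in u->names and in the manager's unit
 * table), while passing a template to a unit without an instance fails with -EINVAL, as checked
 * above. */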
277
278 int unit_choose_id(Unit *u, const char *name) {
279 _cleanup_free_ char *t = NULL;
280 char *s, *i;
281 int r;
282
283 assert(u);
284 assert(name);
285
286 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
287
288 if (!u->instance)
289 return -EINVAL;
290
291 r = unit_name_replace_instance(name, u->instance, &t);
292 if (r < 0)
293 return r;
294
295 name = t;
296 }
297
298 /* Selects one of the names of this unit as the id */
299 s = set_get(u->names, (char*) name);
300 if (!s)
301 return -ENOENT;
302
303 /* Determine the new instance from the new id */
304 r = unit_name_to_instance(s, &i);
305 if (r < 0)
306 return r;
307
308 u->id = s;
309
310 free(u->instance);
311 u->instance = i;
312
313 unit_add_to_dbus_queue(u);
314
315 return 0;
316 }
317
318 int unit_set_description(Unit *u, const char *description) {
319 int r;
320
321 assert(u);
322
323 r = free_and_strdup(&u->description, empty_to_null(description));
324 if (r < 0)
325 return r;
326 if (r > 0)
327 unit_add_to_dbus_queue(u);
328
329 return 0;
330 }
331
332 bool unit_check_gc(Unit *u) {
333 UnitActiveState state;
334 bool inactive;
335 assert(u);
336
337 if (u->job)
338 return true;
339
340 if (u->nop_job)
341 return true;
342
343 state = unit_active_state(u);
344 inactive = state == UNIT_INACTIVE;
345
346         /* If the unit is inactive or failed, and no job is queued for
347          * it, then release its runtime resources */
348 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
349 UNIT_VTABLE(u)->release_resources)
350 UNIT_VTABLE(u)->release_resources(u, inactive);
351
352 /* But we keep the unit object around for longer when it is
353 * referenced or configured to not be gc'ed */
354 if (!inactive)
355 return true;
356
357 if (u->perpetual)
358 return true;
359
360 if (u->refs)
361 return true;
362
363 if (sd_bus_track_count(u->bus_track) > 0)
364 return true;
365
366 if (UNIT_VTABLE(u)->check_gc)
367 if (UNIT_VTABLE(u)->check_gc(u))
368 return true;
369
370 return false;
371 }
372
373 void unit_add_to_load_queue(Unit *u) {
374 assert(u);
375 assert(u->type != _UNIT_TYPE_INVALID);
376
377 if (u->load_state != UNIT_STUB || u->in_load_queue)
378 return;
379
380 LIST_PREPEND(load_queue, u->manager->load_queue, u);
381 u->in_load_queue = true;
382 }
383
384 void unit_add_to_cleanup_queue(Unit *u) {
385 assert(u);
386
387 if (u->in_cleanup_queue)
388 return;
389
390 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
391 u->in_cleanup_queue = true;
392 }
393
394 void unit_add_to_gc_queue(Unit *u) {
395 assert(u);
396
397 if (u->in_gc_queue || u->in_cleanup_queue)
398 return;
399
400 if (unit_check_gc(u))
401 return;
402
403 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
404 u->in_gc_queue = true;
405 }
406
407 void unit_add_to_dbus_queue(Unit *u) {
408 assert(u);
409 assert(u->type != _UNIT_TYPE_INVALID);
410
411 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
412 return;
413
414 /* Shortcut things if nobody cares */
415 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
416 sd_bus_track_count(u->bus_track) <= 0 &&
417 set_isempty(u->manager->private_buses)) {
418 u->sent_dbus_new_signal = true;
419 return;
420 }
421
422 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
423 u->in_dbus_queue = true;
424 }
425
426 static void bidi_set_free(Unit *u, Hashmap *h) {
427 Unit *other;
428 Iterator i;
429 void *v;
430
431 assert(u);
432
433 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
434
435 HASHMAP_FOREACH_KEY(v, other, h, i) {
436 UnitDependency d;
437
438 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
439 hashmap_remove(other->dependencies[d], u);
440
441 unit_add_to_gc_queue(other);
442 }
443
444 hashmap_free(h);
445 }
446
447 static void unit_remove_transient(Unit *u) {
448 char **i;
449
450 assert(u);
451
452 if (!u->transient)
453 return;
454
455 if (u->fragment_path)
456 (void) unlink(u->fragment_path);
457
458 STRV_FOREACH(i, u->dropin_paths) {
459 _cleanup_free_ char *p = NULL, *pp = NULL;
460
461 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
462 if (!p)
463 continue;
464
465 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
466 if (!pp)
467 continue;
468
469 /* Only drop transient drop-ins */
470 if (!path_equal(u->manager->lookup_paths.transient, pp))
471 continue;
472
473 (void) unlink(*i);
474 (void) rmdir(p);
475 }
476 }
477
478 static void unit_free_requires_mounts_for(Unit *u) {
479 assert(u);
480
481 for (;;) {
482 _cleanup_free_ char *path;
483
484 path = hashmap_steal_first_key(u->requires_mounts_for);
485 if (!path)
486 break;
487 else {
488 char s[strlen(path) + 1];
489
490 PATH_FOREACH_PREFIX_MORE(s, path) {
491 char *y;
492 Set *x;
493
494 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
495 if (!x)
496 continue;
497
498 (void) set_remove(x, u);
499
500 if (set_isempty(x)) {
501 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
502 free(y);
503 set_free(x);
504 }
505 }
506 }
507 }
508
509 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
510 }
511
512 static void unit_done(Unit *u) {
513 ExecContext *ec;
514 CGroupContext *cc;
515
516 assert(u);
517
518 if (u->type < 0)
519 return;
520
521 if (UNIT_VTABLE(u)->done)
522 UNIT_VTABLE(u)->done(u);
523
524 ec = unit_get_exec_context(u);
525 if (ec)
526 exec_context_done(ec);
527
528 cc = unit_get_cgroup_context(u);
529 if (cc)
530 cgroup_context_done(cc);
531 }
532
533 void unit_free(Unit *u) {
534 UnitDependency d;
535 Iterator i;
536 char *t;
537
538 if (!u)
539 return;
540
541 if (u->transient_file)
542 fclose(u->transient_file);
543
544 if (!MANAGER_IS_RELOADING(u->manager))
545 unit_remove_transient(u);
546
547 bus_unit_send_removed_signal(u);
548
549 unit_done(u);
550
551 sd_bus_slot_unref(u->match_bus_slot);
552
553 sd_bus_track_unref(u->bus_track);
554 u->deserialized_refs = strv_free(u->deserialized_refs);
555
556 unit_free_requires_mounts_for(u);
557
558 SET_FOREACH(t, u->names, i)
559 hashmap_remove_value(u->manager->units, t, u);
560
561 if (!sd_id128_is_null(u->invocation_id))
562 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
563
564 if (u->job) {
565 Job *j = u->job;
566 job_uninstall(j);
567 job_free(j);
568 }
569
570 if (u->nop_job) {
571 Job *j = u->nop_job;
572 job_uninstall(j);
573 job_free(j);
574 }
575
576 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
577 bidi_set_free(u, u->dependencies[d]);
578
579 if (u->type != _UNIT_TYPE_INVALID)
580 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
581
582 if (u->in_load_queue)
583 LIST_REMOVE(load_queue, u->manager->load_queue, u);
584
585 if (u->in_dbus_queue)
586 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
587
588 if (u->in_cleanup_queue)
589 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
590
591 if (u->in_gc_queue)
592 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
593
594 if (u->in_cgroup_realize_queue)
595 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
596
597 if (u->in_cgroup_empty_queue)
598 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
599
600 unit_release_cgroup(u);
601
602 if (!MANAGER_IS_RELOADING(u->manager))
603 unit_unlink_state_files(u);
604
605 unit_unref_uid_gid(u, false);
606
607 (void) manager_update_failed_units(u->manager, u, false);
608 set_remove(u->manager->startup_units, u);
609
610 free(u->description);
611 strv_free(u->documentation);
612 free(u->fragment_path);
613 free(u->source_path);
614 strv_free(u->dropin_paths);
615 free(u->instance);
616
617 free(u->job_timeout_reboot_arg);
618
619 set_free_free(u->names);
620
621 unit_unwatch_all_pids(u);
622
623 condition_free_list(u->conditions);
624 condition_free_list(u->asserts);
625
626 free(u->reboot_arg);
627
628 unit_ref_unset(&u->slice);
629
630 while (u->refs)
631 unit_ref_unset(u->refs);
632
633 safe_close(u->ip_accounting_ingress_map_fd);
634 safe_close(u->ip_accounting_egress_map_fd);
635
636 safe_close(u->ipv4_allow_map_fd);
637 safe_close(u->ipv6_allow_map_fd);
638 safe_close(u->ipv4_deny_map_fd);
639 safe_close(u->ipv6_deny_map_fd);
640
641 bpf_program_unref(u->ip_bpf_ingress);
642 bpf_program_unref(u->ip_bpf_egress);
643
644 free(u);
645 }
646
647 UnitActiveState unit_active_state(Unit *u) {
648 assert(u);
649
650 if (u->load_state == UNIT_MERGED)
651 return unit_active_state(unit_follow_merge(u));
652
653 /* After a reload it might happen that a unit is not correctly
654 * loaded but still has a process around. That's why we won't
655 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
656
657 return UNIT_VTABLE(u)->active_state(u);
658 }
659
660 const char* unit_sub_state_to_string(Unit *u) {
661 assert(u);
662
663 return UNIT_VTABLE(u)->sub_state_to_string(u);
664 }
665
666 static int set_complete_move(Set **s, Set **other) {
667 assert(s);
668 assert(other);
669
670         if (!*other)
671 return 0;
672
673 if (*s)
674 return set_move(*s, *other);
675 else {
676 *s = *other;
677 *other = NULL;
678 }
679
680 return 0;
681 }
682
683 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
684 assert(s);
685 assert(other);
686
687 if (!*other)
688 return 0;
689
690 if (*s)
691 return hashmap_move(*s, *other);
692 else {
693 *s = *other;
694 *other = NULL;
695 }
696
697 return 0;
698 }
699
700 static int merge_names(Unit *u, Unit *other) {
701 char *t;
702 Iterator i;
703 int r;
704
705 assert(u);
706 assert(other);
707
708 r = set_complete_move(&u->names, &other->names);
709 if (r < 0)
710 return r;
711
712 set_free_free(other->names);
713 other->names = NULL;
714 other->id = NULL;
715
716 SET_FOREACH(t, u->names, i)
717 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
718
719 return 0;
720 }
721
722 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
723 unsigned n_reserve;
724
725 assert(u);
726 assert(other);
727 assert(d < _UNIT_DEPENDENCY_MAX);
728
729 /*
730 * If u does not have this dependency set allocated, there is no need
731 * to reserve anything. In that case other's set will be transferred
732 * as a whole to u by complete_move().
733 */
734 if (!u->dependencies[d])
735 return 0;
736
737 /* merge_dependencies() will skip a u-on-u dependency */
738 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
739
740 return hashmap_reserve(u->dependencies[d], n_reserve);
741 }
742
743 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
744 Iterator i;
745 Unit *back;
746 void *v;
747 int r;
748
749 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
750
751 assert(u);
752 assert(other);
753 assert(d < _UNIT_DEPENDENCY_MAX);
754
755         /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
756 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
757 UnitDependency k;
758
759                 /* Let's now iterate through the dependencies of that dependent unit, looking for those
760                  * which point back at 'other', and fix them up to point to 'u' instead. */
761
762 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
763 if (back == u) {
764 /* Do not add dependencies between u and itself. */
765 if (hashmap_remove(back->dependencies[k], other))
766 maybe_warn_about_dependency(u, other_id, k);
767 } else {
768 UnitDependencyInfo di_u, di_other, di_merged;
769
770 /* Let's drop this dependency between "back" and "other", and let's create it between
771 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
772 * and any such dependency which might already exist */
773
774 di_other.data = hashmap_get(back->dependencies[k], other);
775 if (!di_other.data)
776 continue; /* dependency isn't set, let's try the next one */
777
778 di_u.data = hashmap_get(back->dependencies[k], u);
779
780 di_merged = (UnitDependencyInfo) {
781 .origin_mask = di_u.origin_mask | di_other.origin_mask,
782 .destination_mask = di_u.destination_mask | di_other.destination_mask,
783 };
784
785 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
786 if (r < 0)
787 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
788 assert(r >= 0);
789
790 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
791 }
792 }
793
794 }
795
796 /* Also do not move dependencies on u to itself */
797 back = hashmap_remove(other->dependencies[d], u);
798 if (back)
799 maybe_warn_about_dependency(u, other_id, d);
800
801 /* The move cannot fail. The caller must have performed a reservation. */
802 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
803
804 other->dependencies[d] = hashmap_free(other->dependencies[d]);
805 }
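
/* Worked example (illustrative annotation): suppose 'other' is being merged into 'u' and
 * d == UNIT_WANTED_BY, with some third unit 'x' present in other->dependencies[UNIT_WANTED_BY].
 * The loop above visits x ("back"), finds the forward pointer x→other in
 * x->dependencies[UNIT_WANTS], and rewrites it to x→u, ORing the origin/destination masks in case
 * x already had a dependency on u. Afterwards other's WANTED_BY set is moved wholesale into u's,
 * which cannot fail because the caller performed a reservation beforehand. */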
806
807 int unit_merge(Unit *u, Unit *other) {
808 UnitDependency d;
809 const char *other_id = NULL;
810 int r;
811
812 assert(u);
813 assert(other);
814 assert(u->manager == other->manager);
815 assert(u->type != _UNIT_TYPE_INVALID);
816
817 other = unit_follow_merge(other);
818
819 if (other == u)
820 return 0;
821
822 if (u->type != other->type)
823 return -EINVAL;
824
825 if (!u->instance != !other->instance)
826 return -EINVAL;
827
828 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
829 return -EEXIST;
830
831 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
832 return -EEXIST;
833
834 if (other->job)
835 return -EEXIST;
836
837 if (other->nop_job)
838 return -EEXIST;
839
840 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
841 return -EEXIST;
842
843 if (other->id)
844 other_id = strdupa(other->id);
845
846 /* Make reservations to ensure merge_dependencies() won't fail */
847 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
848 r = reserve_dependencies(u, other, d);
849 /*
850                  * We don't roll back reservations if we fail. We don't have
851 * a way to undo reservations. A reservation is not a leak.
852 */
853 if (r < 0)
854 return r;
855 }
856
857 /* Merge names */
858 r = merge_names(u, other);
859 if (r < 0)
860 return r;
861
862 /* Redirect all references */
863 while (other->refs)
864 unit_ref_set(other->refs, u);
865
866 /* Merge dependencies */
867 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
868 merge_dependencies(u, other, other_id, d);
869
870 other->load_state = UNIT_MERGED;
871 other->merged_into = u;
872
873 /* If there is still some data attached to the other node, we
874 * don't need it anymore, and can free it. */
875 if (other->load_state != UNIT_STUB)
876 if (UNIT_VTABLE(other)->done)
877 UNIT_VTABLE(other)->done(other);
878
879 unit_add_to_dbus_queue(u);
880 unit_add_to_cleanup_queue(other);
881
882 return 0;
883 }
884
885 int unit_merge_by_name(Unit *u, const char *name) {
886 _cleanup_free_ char *s = NULL;
887 Unit *other;
888 int r;
889
890 assert(u);
891 assert(name);
892
893 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
894 if (!u->instance)
895 return -EINVAL;
896
897 r = unit_name_replace_instance(name, u->instance, &s);
898 if (r < 0)
899 return r;
900
901 name = s;
902 }
903
904 other = manager_get_unit(u->manager, name);
905 if (other)
906 return unit_merge(u, other);
907
908 return unit_add_name(u, name);
909 }
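
/* Usage sketch (illustrative, hypothetical unit name): this is roughly how additional names
 * discovered while loading a unit end up either merged or registered:
 *
 *     r = unit_merge_by_name(u, "foo-alias.service");
 *
 * If a unit by that name already exists it is merged into u via unit_merge(); otherwise the name
 * is simply added to u via unit_add_name(). */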
910
911 Unit* unit_follow_merge(Unit *u) {
912 assert(u);
913
914 while (u->load_state == UNIT_MERGED)
915 assert_se(u = u->merged_into);
916
917 return u;
918 }
919
920 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
921 ExecDirectoryType dt;
922 char **dp;
923 int r;
924
925 assert(u);
926 assert(c);
927
928 if (c->working_directory) {
929 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
930 if (r < 0)
931 return r;
932 }
933
934 if (c->root_directory) {
935 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
936 if (r < 0)
937 return r;
938 }
939
940 if (c->root_image) {
941 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
942 if (r < 0)
943 return r;
944 }
945
946 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
947 if (!u->manager->prefix[dt])
948 continue;
949
950 STRV_FOREACH(dp, c->directories[dt].paths) {
951 _cleanup_free_ char *p;
952
953 p = strjoin(u->manager->prefix[dt], "/", *dp);
954 if (!p)
955 return -ENOMEM;
956
957 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
958 if (r < 0)
959 return r;
960 }
961 }
962
963 if (!MANAGER_IS_SYSTEM(u->manager))
964 return 0;
965
966 if (c->private_tmp) {
967 const char *p;
968
969 FOREACH_STRING(p, "/tmp", "/var/tmp") {
970 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
971 if (r < 0)
972 return r;
973 }
974
975 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
976 if (r < 0)
977 return r;
978 }
979
980 if (!IN_SET(c->std_output,
981 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
982 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
983 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
984 !IN_SET(c->std_error,
985 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
986 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
987 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
988 return 0;
989
990 /* If syslog or kernel logging is requested, make sure our own
991 * logging daemon is run first. */
992
993 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
994 if (r < 0)
995 return r;
996
997 return 0;
998 }
999
1000 const char *unit_description(Unit *u) {
1001 assert(u);
1002
1003 if (u->description)
1004 return u->description;
1005
1006 return strna(u->id);
1007 }
1008
1009 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1010 const struct {
1011 UnitDependencyMask mask;
1012 const char *name;
1013 } table[] = {
1014 { UNIT_DEPENDENCY_FILE, "file" },
1015 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1016 { UNIT_DEPENDENCY_DEFAULT, "default" },
1017 { UNIT_DEPENDENCY_UDEV, "udev" },
1018 { UNIT_DEPENDENCY_PATH, "path" },
1019 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1020 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1021 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1022 };
1023 size_t i;
1024
1025 assert(f);
1026 assert(kind);
1027 assert(space);
1028
1029 for (i = 0; i < ELEMENTSOF(table); i++) {
1030
1031 if (mask == 0)
1032 break;
1033
1034 if ((mask & table[i].mask) == table[i].mask) {
1035 if (*space)
1036 fputc(' ', f);
1037 else
1038 *space = true;
1039
1040 fputs(kind, f);
1041 fputs("-", f);
1042 fputs(table[i].name, f);
1043
1044 mask &= ~table[i].mask;
1045 }
1046 }
1047
1048 assert(mask == 0);
1049 }
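
/* Example output (illustrative): for a dependency recorded both by the unit file and by udev on
 * the origin side, and implicitly on the destination side, the two calls in unit_dump() below
 * would print something like:
 *
 *     origin-file origin-udev destination-implicit
 */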
1050
1051 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1052 char *t, **j;
1053 UnitDependency d;
1054 Iterator i;
1055 const char *prefix2;
1056 char
1057 timestamp0[FORMAT_TIMESTAMP_MAX],
1058 timestamp1[FORMAT_TIMESTAMP_MAX],
1059 timestamp2[FORMAT_TIMESTAMP_MAX],
1060 timestamp3[FORMAT_TIMESTAMP_MAX],
1061 timestamp4[FORMAT_TIMESTAMP_MAX],
1062 timespan[FORMAT_TIMESPAN_MAX];
1063 Unit *following;
1064 _cleanup_set_free_ Set *following_set = NULL;
1065 const char *n;
1066 CGroupMask m;
1067 int r;
1068
1069 assert(u);
1070 assert(u->type >= 0);
1071
1072 prefix = strempty(prefix);
1073 prefix2 = strjoina(prefix, "\t");
1074
1075 fprintf(f,
1076 "%s-> Unit %s:\n"
1077 "%s\tDescription: %s\n"
1078 "%s\tInstance: %s\n"
1079 "%s\tUnit Load State: %s\n"
1080 "%s\tUnit Active State: %s\n"
1081 "%s\tState Change Timestamp: %s\n"
1082 "%s\tInactive Exit Timestamp: %s\n"
1083 "%s\tActive Enter Timestamp: %s\n"
1084 "%s\tActive Exit Timestamp: %s\n"
1085 "%s\tInactive Enter Timestamp: %s\n"
1086 "%s\tGC Check Good: %s\n"
1087 "%s\tNeed Daemon Reload: %s\n"
1088 "%s\tTransient: %s\n"
1089 "%s\tPerpetual: %s\n"
1090 "%s\tSlice: %s\n"
1091 "%s\tCGroup: %s\n"
1092 "%s\tCGroup realized: %s\n",
1093 prefix, u->id,
1094 prefix, unit_description(u),
1095 prefix, strna(u->instance),
1096 prefix, unit_load_state_to_string(u->load_state),
1097 prefix, unit_active_state_to_string(unit_active_state(u)),
1098 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
1099 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
1100 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
1101 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
1102 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
1103 prefix, yes_no(unit_check_gc(u)),
1104 prefix, yes_no(unit_need_daemon_reload(u)),
1105 prefix, yes_no(u->transient),
1106 prefix, yes_no(u->perpetual),
1107 prefix, strna(unit_slice_name(u)),
1108 prefix, strna(u->cgroup_path),
1109 prefix, yes_no(u->cgroup_realized));
1110
1111 if (u->cgroup_realized_mask != 0) {
1112 _cleanup_free_ char *s = NULL;
1113 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1114 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1115 }
1116 if (u->cgroup_enabled_mask != 0) {
1117 _cleanup_free_ char *s = NULL;
1118 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1119 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1120 }
1121 m = unit_get_own_mask(u);
1122 if (m != 0) {
1123 _cleanup_free_ char *s = NULL;
1124 (void) cg_mask_to_string(m, &s);
1125 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1126 }
1127 m = unit_get_members_mask(u);
1128 if (m != 0) {
1129 _cleanup_free_ char *s = NULL;
1130 (void) cg_mask_to_string(m, &s);
1131 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1132 }
1133
1134 SET_FOREACH(t, u->names, i)
1135 fprintf(f, "%s\tName: %s\n", prefix, t);
1136
1137 if (!sd_id128_is_null(u->invocation_id))
1138 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1139 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1140
1141 STRV_FOREACH(j, u->documentation)
1142 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1143
1144 following = unit_following(u);
1145 if (following)
1146 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1147
1148 r = unit_following_set(u, &following_set);
1149 if (r >= 0) {
1150 Unit *other;
1151
1152 SET_FOREACH(other, following_set, i)
1153 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1154 }
1155
1156 if (u->fragment_path)
1157 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1158
1159 if (u->source_path)
1160 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1161
1162 STRV_FOREACH(j, u->dropin_paths)
1163 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1164
1165 if (u->job_timeout != USEC_INFINITY)
1166 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1167
1168 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1169 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1170
1171 if (u->job_timeout_reboot_arg)
1172 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1173
1174 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1175 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1176
1177 if (dual_timestamp_is_set(&u->condition_timestamp))
1178 fprintf(f,
1179 "%s\tCondition Timestamp: %s\n"
1180 "%s\tCondition Result: %s\n",
1181 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1182 prefix, yes_no(u->condition_result));
1183
1184 if (dual_timestamp_is_set(&u->assert_timestamp))
1185 fprintf(f,
1186 "%s\tAssert Timestamp: %s\n"
1187 "%s\tAssert Result: %s\n",
1188 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1189 prefix, yes_no(u->assert_result));
1190
1191 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1192 UnitDependencyInfo di;
1193 Unit *other;
1194
1195 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1196 bool space = false;
1197
1198 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1199
1200 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1201 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1202
1203 fputs(")\n", f);
1204 }
1205 }
1206
1207 if (!hashmap_isempty(u->requires_mounts_for)) {
1208 UnitDependencyInfo di;
1209 const char *path;
1210
1211 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1212 bool space = false;
1213
1214 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1215
1216 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1217 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1218
1219 fputs(")\n", f);
1220 }
1221 }
1222
1223 if (u->load_state == UNIT_LOADED) {
1224
1225 fprintf(f,
1226 "%s\tStopWhenUnneeded: %s\n"
1227 "%s\tRefuseManualStart: %s\n"
1228 "%s\tRefuseManualStop: %s\n"
1229 "%s\tDefaultDependencies: %s\n"
1230 "%s\tOnFailureJobMode: %s\n"
1231 "%s\tIgnoreOnIsolate: %s\n",
1232 prefix, yes_no(u->stop_when_unneeded),
1233 prefix, yes_no(u->refuse_manual_start),
1234 prefix, yes_no(u->refuse_manual_stop),
1235 prefix, yes_no(u->default_dependencies),
1236 prefix, job_mode_to_string(u->on_failure_job_mode),
1237 prefix, yes_no(u->ignore_on_isolate));
1238
1239 if (UNIT_VTABLE(u)->dump)
1240 UNIT_VTABLE(u)->dump(u, f, prefix2);
1241
1242 } else if (u->load_state == UNIT_MERGED)
1243 fprintf(f,
1244 "%s\tMerged into: %s\n",
1245 prefix, u->merged_into->id);
1246 else if (u->load_state == UNIT_ERROR)
1247 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1248
1249 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1250 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1251
1252 if (u->job)
1253 job_dump(u->job, f, prefix2);
1254
1255 if (u->nop_job)
1256 job_dump(u->nop_job, f, prefix2);
1257 }
1258
1259 /* Common implementation for multiple backends */
1260 int unit_load_fragment_and_dropin(Unit *u) {
1261 int r;
1262
1263 assert(u);
1264
1265 /* Load a .{service,socket,...} file */
1266 r = unit_load_fragment(u);
1267 if (r < 0)
1268 return r;
1269
1270 if (u->load_state == UNIT_STUB)
1271 return -ENOENT;
1272
1273 /* Load drop-in directory data. If u is an alias, we might be reloading the
1274          * target unit needlessly. But we cannot be sure which drop-ins have already
1275 * been loaded and which not, at least without doing complicated book-keeping,
1276 * so let's always reread all drop-ins. */
1277 return unit_load_dropin(unit_follow_merge(u));
1278 }
1279
1280 /* Common implementation for multiple backends */
1281 int unit_load_fragment_and_dropin_optional(Unit *u) {
1282 int r;
1283
1284 assert(u);
1285
1286 /* Same as unit_load_fragment_and_dropin(), but whether
1287 * something can be loaded or not doesn't matter. */
1288
1289 /* Load a .service file */
1290 r = unit_load_fragment(u);
1291 if (r < 0)
1292 return r;
1293
1294 if (u->load_state == UNIT_STUB)
1295 u->load_state = UNIT_LOADED;
1296
1297 /* Load drop-in directory data */
1298 return unit_load_dropin(unit_follow_merge(u));
1299 }
1300
1301 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1302 assert(u);
1303 assert(target);
1304
1305 if (target->type != UNIT_TARGET)
1306 return 0;
1307
1308 /* Only add the dependency if both units are loaded, so that
1309          * the loop check below is reliable */
1310 if (u->load_state != UNIT_LOADED ||
1311 target->load_state != UNIT_LOADED)
1312 return 0;
1313
1314 /* If either side wants no automatic dependencies, then let's
1315 * skip this */
1316 if (!u->default_dependencies ||
1317 !target->default_dependencies)
1318 return 0;
1319
1320 /* Don't create loops */
1321 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1322 return 0;
1323
1324 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1325 }
1326
1327 static int unit_add_target_dependencies(Unit *u) {
1328
1329 static const UnitDependency deps[] = {
1330 UNIT_REQUIRED_BY,
1331 UNIT_REQUISITE_OF,
1332 UNIT_WANTED_BY,
1333 UNIT_BOUND_BY
1334 };
1335
1336 unsigned k;
1337 int r = 0;
1338
1339 assert(u);
1340
1341 for (k = 0; k < ELEMENTSOF(deps); k++) {
1342 Unit *target;
1343 Iterator i;
1344 void *v;
1345
1346 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1347 r = unit_add_default_target_dependency(u, target);
1348 if (r < 0)
1349 return r;
1350 }
1351 }
1352
1353 return r;
1354 }
1355
1356 static int unit_add_slice_dependencies(Unit *u) {
1357 UnitDependencyMask mask;
1358 assert(u);
1359
1360 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1361 return 0;
1362
1363 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1364 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1365 relationship). */
1366 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1367
1368 if (UNIT_ISSET(u->slice))
1369 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1370
1371 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1372 return 0;
1373
1374 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1375 }
1376
1377 static int unit_add_mount_dependencies(Unit *u) {
1378 UnitDependencyInfo di;
1379 const char *path;
1380 Iterator i;
1381 int r;
1382
1383 assert(u);
1384
1385 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1386 char prefix[strlen(path) + 1];
1387
1388 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1389 _cleanup_free_ char *p = NULL;
1390 Unit *m;
1391
1392 r = unit_name_from_path(prefix, ".mount", &p);
1393 if (r < 0)
1394 return r;
1395
1396 m = manager_get_unit(u->manager, p);
1397 if (!m) {
1398 /* Make sure to load the mount unit if
1399 * it exists. If so the dependencies
1400 * on this unit will be added later
1401 * during the loading of the mount
1402 * unit. */
1403 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1404 continue;
1405 }
1406 if (m == u)
1407 continue;
1408
1409 if (m->load_state != UNIT_LOADED)
1410 continue;
1411
1412 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1413 if (r < 0)
1414 return r;
1415
1416 if (m->fragment_path) {
1417 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1418 if (r < 0)
1419 return r;
1420 }
1421 }
1422 }
1423
1424 return 0;
1425 }
1426
1427 static int unit_add_startup_units(Unit *u) {
1428 CGroupContext *c;
1429 int r;
1430
1431 c = unit_get_cgroup_context(u);
1432 if (!c)
1433 return 0;
1434
1435 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1436 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1437 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1438 return 0;
1439
1440 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1441 if (r < 0)
1442 return r;
1443
1444 return set_put(u->manager->startup_units, u);
1445 }
1446
1447 int unit_load(Unit *u) {
1448 int r;
1449
1450 assert(u);
1451
1452 if (u->in_load_queue) {
1453 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1454 u->in_load_queue = false;
1455 }
1456
1457 if (u->type == _UNIT_TYPE_INVALID)
1458 return -EINVAL;
1459
1460 if (u->load_state != UNIT_STUB)
1461 return 0;
1462
1463 if (u->transient_file) {
1464 r = fflush_and_check(u->transient_file);
1465 if (r < 0)
1466 goto fail;
1467
1468 fclose(u->transient_file);
1469 u->transient_file = NULL;
1470
1471 u->fragment_mtime = now(CLOCK_REALTIME);
1472 }
1473
1474 if (UNIT_VTABLE(u)->load) {
1475 r = UNIT_VTABLE(u)->load(u);
1476 if (r < 0)
1477 goto fail;
1478 }
1479
1480 if (u->load_state == UNIT_STUB) {
1481 r = -ENOENT;
1482 goto fail;
1483 }
1484
1485 if (u->load_state == UNIT_LOADED) {
1486
1487 r = unit_add_target_dependencies(u);
1488 if (r < 0)
1489 goto fail;
1490
1491 r = unit_add_slice_dependencies(u);
1492 if (r < 0)
1493 goto fail;
1494
1495 r = unit_add_mount_dependencies(u);
1496 if (r < 0)
1497 goto fail;
1498
1499 r = unit_add_startup_units(u);
1500 if (r < 0)
1501 goto fail;
1502
1503 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1504                         log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
1505 r = -EINVAL;
1506 goto fail;
1507 }
1508
1509 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1510 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1511
1512 unit_update_cgroup_members_masks(u);
1513 }
1514
1515 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1516
1517 unit_add_to_dbus_queue(unit_follow_merge(u));
1518 unit_add_to_gc_queue(u);
1519
1520 return 0;
1521
1522 fail:
1523 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1524 u->load_error = r;
1525 unit_add_to_dbus_queue(u);
1526 unit_add_to_gc_queue(u);
1527
1528 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1529
1530 return r;
1531 }
1532
1533 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1534 Condition *c;
1535 int triggered = -1;
1536
1537 assert(u);
1538 assert(to_string);
1539
1540 /* If the condition list is empty, then it is true */
1541 if (!first)
1542 return true;
1543
1544 /* Otherwise, if all of the non-trigger conditions apply and
1545 * if any of the trigger conditions apply (unless there are
1546 * none) we return true */
1547 LIST_FOREACH(conditions, c, first) {
1548 int r;
1549
1550 r = condition_test(c);
1551 if (r < 0)
1552 log_unit_warning(u,
1553 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1554 to_string(c->type),
1555 c->trigger ? "|" : "",
1556 c->negate ? "!" : "",
1557 c->parameter);
1558 else
1559 log_unit_debug(u,
1560 "%s=%s%s%s %s.",
1561 to_string(c->type),
1562 c->trigger ? "|" : "",
1563 c->negate ? "!" : "",
1564 c->parameter,
1565 condition_result_to_string(c->result));
1566
1567 if (!c->trigger && r <= 0)
1568 return false;
1569
1570 if (c->trigger && triggered <= 0)
1571 triggered = r > 0;
1572 }
1573
1574 return triggered != 0;
1575 }
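
/* Semantics sketch (illustrative, made-up paths), mirroring the loop above: non-trigger
 * conditions are ANDed, trigger ("|") conditions are ORed. For a unit with
 *
 *     ConditionPathExists=/etc/foo.conf
 *     ConditionPathExists=|/run/bar
 *     ConditionPathExists=|/run/baz
 *
 * the list evaluates to true only if /etc/foo.conf exists and at least one of /run/bar or
 * /run/baz exists. */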
1576
1577 static bool unit_condition_test(Unit *u) {
1578 assert(u);
1579
1580 dual_timestamp_get(&u->condition_timestamp);
1581 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1582
1583 return u->condition_result;
1584 }
1585
1586 static bool unit_assert_test(Unit *u) {
1587 assert(u);
1588
1589 dual_timestamp_get(&u->assert_timestamp);
1590 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1591
1592 return u->assert_result;
1593 }
1594
1595 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1596 DISABLE_WARNING_FORMAT_NONLITERAL;
1597 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1598 REENABLE_WARNING;
1599 }
1600
1601 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1602 const char *format;
1603 const UnitStatusMessageFormats *format_table;
1604
1605 assert(u);
1606 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1607
1608 if (t != JOB_RELOAD) {
1609 format_table = &UNIT_VTABLE(u)->status_message_formats;
1610 if (format_table) {
1611 format = format_table->starting_stopping[t == JOB_STOP];
1612 if (format)
1613 return format;
1614 }
1615 }
1616
1617 /* Return generic strings */
1618 if (t == JOB_START)
1619 return "Starting %s.";
1620 else if (t == JOB_STOP)
1621 return "Stopping %s.";
1622 else
1623 return "Reloading %s.";
1624 }
1625
1626 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1627 const char *format;
1628
1629 assert(u);
1630
1631 /* Reload status messages have traditionally not been printed to console. */
1632 if (!IN_SET(t, JOB_START, JOB_STOP))
1633 return;
1634
1635 format = unit_get_status_message_format(u, t);
1636
1637 DISABLE_WARNING_FORMAT_NONLITERAL;
1638 unit_status_printf(u, "", format);
1639 REENABLE_WARNING;
1640 }
1641
1642 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1643 const char *format, *mid;
1644 char buf[LINE_MAX];
1645
1646 assert(u);
1647
1648 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1649 return;
1650
1651 if (log_on_console())
1652 return;
1653
1654 /* We log status messages for all units and all operations. */
1655
1656 format = unit_get_status_message_format(u, t);
1657
1658 DISABLE_WARNING_FORMAT_NONLITERAL;
1659 snprintf(buf, sizeof buf, format, unit_description(u));
1660 REENABLE_WARNING;
1661
1662 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1663 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1664 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1665
1666 /* Note that we deliberately use LOG_MESSAGE() instead of
1667 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1668 * closely what is written to screen using the status output,
1669          * which is supposed to be the highest-level, friendliest output
1670 * possible, which means we should avoid the low-level unit
1671 * name. */
1672 log_struct(LOG_INFO,
1673 LOG_MESSAGE("%s", buf),
1674 LOG_UNIT_ID(u),
1675 LOG_UNIT_INVOCATION_ID(u),
1676 mid,
1677 NULL);
1678 }
1679
1680 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1681 assert(u);
1682 assert(t >= 0);
1683 assert(t < _JOB_TYPE_MAX);
1684
1685 unit_status_log_starting_stopping_reloading(u, t);
1686 unit_status_print_starting_stopping(u, t);
1687 }
1688
1689 int unit_start_limit_test(Unit *u) {
1690 assert(u);
1691
1692 if (ratelimit_test(&u->start_limit)) {
1693 u->start_limit_hit = false;
1694 return 0;
1695 }
1696
1697 log_unit_warning(u, "Start request repeated too quickly.");
1698 u->start_limit_hit = true;
1699
1700 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1701 }
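
/* Editorial note (interpretation of the code above): u->start_limit is the rate limit initialized
 * in unit_new() from the manager's default_start_limit_interval/default_start_limit_burst, which
 * correspond to the DefaultStartLimitIntervalSec=/DefaultStartLimitBurst= manager settings;
 * per-unit StartLimitIntervalSec=/StartLimitBurst= can override them. When the limit is hit,
 * u->start_limit_action is handed to emergency_action(). */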
1702
1703 bool unit_shall_confirm_spawn(Unit *u) {
1704 assert(u);
1705
1706 if (manager_is_confirm_spawn_disabled(u->manager))
1707 return false;
1708
1709         /* For some reason units remaining in the same process group
1710 * as PID 1 fail to acquire the console even if it's not used
1711 * by any process. So skip the confirmation question for them. */
1712 return !unit_get_exec_context(u)->same_pgrp;
1713 }
1714
1715 static bool unit_verify_deps(Unit *u) {
1716 Unit *other;
1717 Iterator j;
1718 void *v;
1719
1720 assert(u);
1721
1722 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1723 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1724 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1725 * conjunction with After= as for them any such check would make things entirely racy. */
1726
1727 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1728
1729 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1730 continue;
1731
1732 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1733 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1734 return false;
1735 }
1736 }
1737
1738 return true;
1739 }
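
/* Example (illustrative, hypothetical service name): for a unit with both
 * "BindsTo=postgres.service" and "After=postgres.service", unit_start() below refuses with
 * -ENOLINK while postgres.service is not active or reloading. With BindsTo= alone (no matching
 * After=) the check is skipped, as the comment above explains. */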
1740
1741 /* Errors:
1742 * -EBADR: This unit type does not support starting.
1743 * -EALREADY: Unit is already started.
1744 * -EAGAIN: An operation is already in progress. Retry later.
1745 * -ECANCELED: Too many requests for now.
1746 * -EPROTO: Assert failed
1747 * -EINVAL: Unit not loaded
1748 * -EOPNOTSUPP: Unit type not supported
1749 * -ENOLINK: The necessary dependencies are not fulfilled.
1750 */
1751 int unit_start(Unit *u) {
1752 UnitActiveState state;
1753 Unit *following;
1754
1755 assert(u);
1756
1757 /* If this is already started, then this will succeed. Note
1758 * that this will even succeed if this unit is not startable
1759 * by the user. This is relied on to detect when we need to
1760 * wait for units and when waiting is finished. */
1761 state = unit_active_state(u);
1762 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1763 return -EALREADY;
1764
1765 /* Units that aren't loaded cannot be started */
1766 if (u->load_state != UNIT_LOADED)
1767 return -EINVAL;
1768
1769 /* If the conditions failed, don't do anything at all. If we
1770 * already are activating this call might still be useful to
1771 * speed up activation in case there is some hold-off time,
1772 * but we don't want to recheck the condition in that case. */
1773 if (state != UNIT_ACTIVATING &&
1774 !unit_condition_test(u)) {
1775 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1776 return -EALREADY;
1777 }
1778
1779 /* If the asserts failed, fail the entire job */
1780 if (state != UNIT_ACTIVATING &&
1781 !unit_assert_test(u)) {
1782 log_unit_notice(u, "Starting requested but asserts failed.");
1783 return -EPROTO;
1784 }
1785
1786 /* Units of types that aren't supported cannot be
1787 * started. Note that we do this test only after the condition
1788 * checks, so that we rather return condition check errors
1789 * (which are usually not considered a true failure) than "not
1790 * supported" errors (which are considered a failure).
1791 */
1792 if (!unit_supported(u))
1793 return -EOPNOTSUPP;
1794
1795 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1796 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1797 * effect anymore, due to a reload or due to a failed condition. */
1798 if (!unit_verify_deps(u))
1799 return -ENOLINK;
1800
1801 /* Forward to the main object, if we aren't it. */
1802 following = unit_following(u);
1803 if (following) {
1804 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1805 return unit_start(following);
1806 }
1807
1808 /* If it is stopped, but we cannot start it, then fail */
1809 if (!UNIT_VTABLE(u)->start)
1810 return -EBADR;
1811
1812 /* We don't suppress calls to ->start() here when we are
1813 * already starting, to allow this request to be used as a
1814 * "hurry up" call, for example when the unit is in some "auto
1815 * restart" state where it waits for a holdoff timer to elapse
1816 * before it will start again. */
1817
1818 unit_add_to_dbus_queue(u);
1819
1820 return UNIT_VTABLE(u)->start(u);
1821 }
1822
1823 bool unit_can_start(Unit *u) {
1824 assert(u);
1825
1826 if (u->load_state != UNIT_LOADED)
1827 return false;
1828
1829 if (!unit_supported(u))
1830 return false;
1831
1832 return !!UNIT_VTABLE(u)->start;
1833 }
1834
1835 bool unit_can_isolate(Unit *u) {
1836 assert(u);
1837
1838 return unit_can_start(u) &&
1839 u->allow_isolate;
1840 }
1841
1842 /* Errors:
1843 * -EBADR: This unit type does not support stopping.
1844 * -EALREADY: Unit is already stopped.
1845 * -EAGAIN: An operation is already in progress. Retry later.
1846 */
1847 int unit_stop(Unit *u) {
1848 UnitActiveState state;
1849 Unit *following;
1850
1851 assert(u);
1852
1853 state = unit_active_state(u);
1854 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1855 return -EALREADY;
1856
1857 following = unit_following(u);
1858 if (following) {
1859 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1860 return unit_stop(following);
1861 }
1862
1863 if (!UNIT_VTABLE(u)->stop)
1864 return -EBADR;
1865
1866 unit_add_to_dbus_queue(u);
1867
1868 return UNIT_VTABLE(u)->stop(u);
1869 }
1870
1871 bool unit_can_stop(Unit *u) {
1872 assert(u);
1873
1874 if (!unit_supported(u))
1875 return false;
1876
1877 if (u->perpetual)
1878 return false;
1879
1880 return !!UNIT_VTABLE(u)->stop;
1881 }
1882
1883 /* Errors:
1884 * -EBADR: This unit type does not support reloading.
1885 * -ENOEXEC: Unit is not started.
1886 * -EAGAIN: An operation is already in progress. Retry later.
1887 */
1888 int unit_reload(Unit *u) {
1889 UnitActiveState state;
1890 Unit *following;
1891
1892 assert(u);
1893
1894 if (u->load_state != UNIT_LOADED)
1895 return -EINVAL;
1896
1897 if (!unit_can_reload(u))
1898 return -EBADR;
1899
1900 state = unit_active_state(u);
1901 if (state == UNIT_RELOADING)
1902 return -EALREADY;
1903
1904 if (state != UNIT_ACTIVE) {
1905 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1906 return -ENOEXEC;
1907 }
1908
1909 following = unit_following(u);
1910 if (following) {
1911 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1912 return unit_reload(following);
1913 }
1914
1915 unit_add_to_dbus_queue(u);
1916
1917 if (!UNIT_VTABLE(u)->reload) {
1918 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1919 unit_notify(u, unit_active_state(u), unit_active_state(u), true);
1920 return 0;
1921 }
1922
1923 return UNIT_VTABLE(u)->reload(u);
1924 }
1925
1926 bool unit_can_reload(Unit *u) {
1927 assert(u);
1928
1929 if (UNIT_VTABLE(u)->can_reload)
1930 return UNIT_VTABLE(u)->can_reload(u);
1931
1932 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1933 return true;
1934
1935 return UNIT_VTABLE(u)->reload;
1936 }
1937
1938 static void unit_check_unneeded(Unit *u) {
1939
1940 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1941
1942 static const UnitDependency needed_dependencies[] = {
1943 UNIT_REQUIRED_BY,
1944 UNIT_REQUISITE_OF,
1945 UNIT_WANTED_BY,
1946 UNIT_BOUND_BY,
1947 };
1948
1949 unsigned j;
1950 int r;
1951
1952 assert(u);
1953
1954 /* If this service shall be shut down when unneeded then do
1955 * so. */
1956
1957 if (!u->stop_when_unneeded)
1958 return;
1959
1960 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1961 return;
1962
1963 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
1964 Unit *other;
1965 Iterator i;
1966 void *v;
1967
1968 HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
1969 if (unit_active_or_pending(other))
1970 return;
1971 }
1972
1973         /* If stopping a unit fails continuously we might enter a stop
1974          * loop here, hence after a while stop acting on the unit
1975          * being unneeded. */
1976 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1977 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1978 return;
1979 }
1980
1981 log_unit_info(u, "Unit not needed anymore. Stopping.");
1982
1983 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1984 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1985 if (r < 0)
1986 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1987 }
1988
1989 static void unit_check_binds_to(Unit *u) {
1990 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1991 bool stop = false;
1992 Unit *other;
1993 Iterator i;
1994 void *v;
1995 int r;
1996
1997 assert(u);
1998
1999 if (u->job)
2000 return;
2001
2002 if (unit_active_state(u) != UNIT_ACTIVE)
2003 return;
2004
2005 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2006 if (other->job)
2007 continue;
2008
2009 if (!other->coldplugged)
2010 /* We might yet create a job for the other unit… */
2011 continue;
2012
2013 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2014 continue;
2015
2016 stop = true;
2017 break;
2018 }
2019
2020 if (!stop)
2021 return;
2022
2023         /* If stopping a unit fails continuously we might enter a stop
2024          * loop here, hence after a while stop acting on the bound-to
2025          * unit being inactive. */
2026 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
2027 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2028 return;
2029 }
2030
2031 assert(other);
2032 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2033
2034 /* A unit we need to run is gone. Sniff. Let's stop this. */
2035 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2036 if (r < 0)
2037 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2038 }
2039
2040 static void retroactively_start_dependencies(Unit *u) {
2041 Iterator i;
2042 Unit *other;
2043 void *v;
2044
2045 assert(u);
2046 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2047
2048 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2049 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2050 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2051 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2052
2053 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2054 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2055 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2056 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
2057
2058 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2059 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2060 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2061 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
2062
2063 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2064 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2065 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2066
2067 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2068 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2069 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2070 }
2071
2072 static void retroactively_stop_dependencies(Unit *u) {
2073 Unit *other;
2074 Iterator i;
2075 void *v;
2076
2077 assert(u);
2078 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2079
2080 /* Pull down units which are bound to us recursively if enabled */
2081 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2082 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2083 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2084 }
2085
2086 static void check_unneeded_dependencies(Unit *u) {
2087 Unit *other;
2088 Iterator i;
2089 void *v;
2090
2091 assert(u);
2092 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2093
2094 /* Garbage collect services that might not be needed anymore, if enabled */
2095 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2096 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2097 unit_check_unneeded(other);
2098 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2099 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2100 unit_check_unneeded(other);
2101 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2102 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 unit_check_unneeded(other);
2104 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2106 unit_check_unneeded(other);
2107 }
2108
2109 void unit_start_on_failure(Unit *u) {
2110 Unit *other;
2111 Iterator i;
2112 void *v;
2113
2114 assert(u);
2115
2116 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2117 return;
2118
2119 log_unit_info(u, "Triggering OnFailure= dependencies.");
2120
2121 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2122 int r;
2123
2124 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2125 if (r < 0)
2126 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2127 }
2128 }
2129
2130 void unit_trigger_notify(Unit *u) {
2131 Unit *other;
2132 Iterator i;
2133 void *v;
2134
2135 assert(u);
2136
2137 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2138 if (UNIT_VTABLE(other)->trigger_notify)
2139 UNIT_VTABLE(other)->trigger_notify(other, u);
2140 }
2141
2142 static int unit_log_resources(Unit *u) {
2143
2144 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
2145 size_t n_message_parts = 0, n_iovec = 0;
2146 char* message_parts[3 + 1], *t;
2147 nsec_t nsec = NSEC_INFINITY;
2148 CGroupIPAccountingMetric m;
2149 size_t i;
2150 int r;
2151 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2152 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2153 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2154 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2155 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2156 };
2157
2158 assert(u);
2159
2160 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2161 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2162 * information and the complete data in structured fields. */
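/* The human readable part is assembled from at most three fragments and, roughly, ends up looking
 * like "foo.service: Consumed 2.5s CPU time, received 1.2M IP traffic, sent 304.0K IP traffic"
 * (unit name and values here are purely illustrative). */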
2163
2164 (void) unit_get_cpu_usage(u, &nsec);
2165 if (nsec != NSEC_INFINITY) {
2166 char buf[FORMAT_TIMESPAN_MAX] = "";
2167
2168 /* Format the CPU time for inclusion in the structured log message */
2169 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2170 r = log_oom();
2171 goto finish;
2172 }
2173 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2174
2175 /* Format the CPU time for inclusion in the human language message string */
2176 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2177 t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
2178 if (!t) {
2179 r = log_oom();
2180 goto finish;
2181 }
2182
2183 message_parts[n_message_parts++] = t;
2184 }
2185
2186 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2187 char buf[FORMAT_BYTES_MAX] = "";
2188 uint64_t value = UINT64_MAX;
2189
2190 assert(ip_fields[m]);
2191
2192 (void) unit_get_ip_accounting(u, m, &value);
2193 if (value == UINT64_MAX)
2194 continue;
2195
2196 /* Format IP accounting data for inclusion in the structured log message */
2197 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2198 r = log_oom();
2199 goto finish;
2200 }
2201 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2202
2203 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2204 * bytes counters (and not for the packets counters) */
2205 if (m == CGROUP_IP_INGRESS_BYTES)
2206 t = strjoin(n_message_parts > 0 ? "received " : "Received ",
2207 format_bytes(buf, sizeof(buf), value),
2208 " IP traffic");
2209 else if (m == CGROUP_IP_EGRESS_BYTES)
2210 t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
2211 format_bytes(buf, sizeof(buf), value),
2212 " IP traffic");
2213 else
2214 continue;
2215 if (!t) {
2216 r = log_oom();
2217 goto finish;
2218 }
2219
2220 message_parts[n_message_parts++] = t;
2221 }
2222
2223 /* Is there any accounting data available at all? */
2224 if (n_iovec == 0) {
2225 r = 0;
2226 goto finish;
2227 }
2228
2229 if (n_message_parts == 0)
2230 t = strjoina("MESSAGE=", u->id, ": Completed");
2231 else {
2232 _cleanup_free_ char *joined;
2233
2234 message_parts[n_message_parts] = NULL;
2235
2236 joined = strv_join(message_parts, ", ");
2237 if (!joined) {
2238 r = log_oom();
2239 goto finish;
2240 }
2241
2242 t = strjoina("MESSAGE=", u->id, ": ", joined);
2243 }
2244
2245 /* The following four fields are allocated on the stack or are static strings; hence we don't want to free them,
2246 * and don't increase n_iovec for them */
2247 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2248 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2249
2250 t = strjoina(u->manager->unit_log_field, u->id);
2251 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2252
2253 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2254 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2255
2256 log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
2257 r = 0;
2258
2259 finish:
2260 for (i = 0; i < n_message_parts; i++)
2261 free(message_parts[i]);
2262
2263 for (i = 0; i < n_iovec; i++)
2264 free(iovec[i].iov_base);
2265
2266 return r;
2267
2268 }
2269
2270 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
2271 Manager *m;
2272 bool unexpected;
2273
2274 assert(u);
2275 assert(os < _UNIT_ACTIVE_STATE_MAX);
2276 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2277
2278 /* Note that this is called for all low-level state changes,
2279 * even if they might map to the same high-level
2280 * UnitActiveState! That means that ns == os is an expected
2281 * behavior here. For example: if a mount point is remounted
2282 * this function will be called too! */
2283
2284 m = u->manager;
2285
2286 /* Update timestamps for state changes */
2287 if (!MANAGER_IS_RELOADING(m)) {
2288 dual_timestamp_get(&u->state_change_timestamp);
2289
2290 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2291 u->inactive_exit_timestamp = u->state_change_timestamp;
2292 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2293 u->inactive_enter_timestamp = u->state_change_timestamp;
2294
2295 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2296 u->active_enter_timestamp = u->state_change_timestamp;
2297 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2298 u->active_exit_timestamp = u->state_change_timestamp;
2299 }
2300
2301 /* Keep track of failed units */
2302 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
2303
2304 /* Make sure the cgroup and state files are always removed when we become inactive */
2305 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2306 unit_prune_cgroup(u);
2307 unit_unlink_state_files(u);
2308 }
2309
2310 /* Note that this doesn't apply to RemainAfterExit services exiting
2311 * successfully, since there's no change of state in that case. Which is
2312 * why it is handled in service_set_state() */
2313 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2314 ExecContext *ec;
2315
2316 ec = unit_get_exec_context(u);
2317 if (ec && exec_context_may_touch_console(ec)) {
2318 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2319 m->n_on_console--;
2320
2321 if (m->n_on_console == 0)
2322 /* unset no_console_output flag, since the console is free */
2323 m->no_console_output = false;
2324 } else
2325 m->n_on_console++;
2326 }
2327 }
2328
2329 if (u->job) {
2330 unexpected = false;
2331
2332 if (u->job->state == JOB_WAITING)
2333
2334 /* So we reached a different state for this
2335 * job. Let's see if we can run it now if it
2336 * failed previously due to EAGAIN. */
2337 job_add_to_run_queue(u->job);
2338
2339 /* Let's check whether this state change constitutes a
2340 * finished job, or maybe contradicts a running job and
2341 * hence needs to invalidate jobs. */
2342
2343 switch (u->job->type) {
2344
2345 case JOB_START:
2346 case JOB_VERIFY_ACTIVE:
2347
2348 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2349 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2350 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2351 unexpected = true;
2352
2353 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2354 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2355 }
2356
2357 break;
2358
2359 case JOB_RELOAD:
2360 case JOB_RELOAD_OR_START:
2361 case JOB_TRY_RELOAD:
2362
2363 if (u->job->state == JOB_RUNNING) {
2364 if (ns == UNIT_ACTIVE)
2365 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2366 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2367 unexpected = true;
2368
2369 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2370 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2371 }
2372 }
2373
2374 break;
2375
2376 case JOB_STOP:
2377 case JOB_RESTART:
2378 case JOB_TRY_RESTART:
2379
2380 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2381 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2382 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2383 unexpected = true;
2384 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2385 }
2386
2387 break;
2388
2389 default:
2390 assert_not_reached("Job type unknown");
2391 }
2392
2393 } else
2394 unexpected = true;
2395
2396 if (!MANAGER_IS_RELOADING(m)) {
2397
2398 /* If this state change happened without being
2399 * requested by a job, then let's retroactively start
2400 * or stop dependencies. We skip that step when
2401 * deserializing, since we don't want to create any
2402 * additional jobs just because something is already
2403 * activated. */
2404
2405 if (unexpected) {
2406 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2407 retroactively_start_dependencies(u);
2408 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2409 retroactively_stop_dependencies(u);
2410 }
2411
2412 /* Stop unneeded units regardless of whether going down was expected or not */
2413 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2414 check_unneeded_dependencies(u);
2415
2416 if (ns != os && ns == UNIT_FAILED) {
2417 log_unit_debug(u, "Unit entered failed state.");
2418 unit_start_on_failure(u);
2419 }
2420 }
2421
2422 /* Some names are special */
2423 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2424
2425 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2426 /* The bus might have just become available,
2427 * hence try to connect to it, if we aren't
2428 * yet connected. */
2429 bus_init(m, true);
2430
2431 if (u->type == UNIT_SERVICE &&
2432 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2433 !MANAGER_IS_RELOADING(m)) {
2434 /* Write audit record if we have just finished starting up */
2435 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2436 u->in_audit = true;
2437 }
2438
2439 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2440 manager_send_unit_plymouth(m, u);
2441
2442 } else {
2443 /* We don't care about D-Bus going down here, since we'll get an asynchronous notification for it
2444 * anyway. */
2445
2446 if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2447 !UNIT_IS_INACTIVE_OR_FAILED(os)
2448 && !MANAGER_IS_RELOADING(m)) {
2449
2450 /* This unit just stopped/failed. */
2451 if (u->type == UNIT_SERVICE) {
2452
2453 /* Hmm, if no start record was written yet,
2454 * write it now, so that we always have a nice
2455 * pair */
2456 if (!u->in_audit) {
2457 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2458
2459 if (ns == UNIT_INACTIVE)
2460 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2461 } else
2462 /* Write audit record if we have just finished shutting down */
2463 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2464
2465 u->in_audit = false;
2466 }
2467
2468 /* Write a log message about consumed resources */
2469 unit_log_resources(u);
2470 }
2471 }
2472
2473 manager_recheck_journal(m);
2474 unit_trigger_notify(u);
2475
2476 if (!MANAGER_IS_RELOADING(u->manager)) {
2477 /* Maybe we finished startup and are now ready for
2478 * being stopped because unneeded? */
2479 unit_check_unneeded(u);
2480
2481 /* Maybe we finished startup, but something we needed
2482 * has vanished? Let's die then. (This happens when
2483 * something BindsTo= to a Type=oneshot unit, as these
2484 * units go directly from starting to inactive,
2485 * without ever entering started.) */
2486 unit_check_binds_to(u);
2487 }
2488
2489 unit_add_to_dbus_queue(u);
2490 unit_add_to_gc_queue(u);
2491 }
2492
2493 int unit_watch_pid(Unit *u, pid_t pid) {
2494 int q, r;
2495
2496 assert(u);
2497 assert(pid >= 1);
2498
2499 /* Watch a specific PID. We only support one or two units
2500 * watching each PID for now, not more. */
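/* The manager keeps two hashmaps for this: the first unit watching a PID lands in watch_pids1; if
 * hashmap_put() returns -EEXIST because the PID is already watched, the second unit is stored in
 * watch_pids2 instead. The PID is also recorded in u->pids so it can be unwatched later. */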
2501
2502 r = set_ensure_allocated(&u->pids, NULL);
2503 if (r < 0)
2504 return r;
2505
2506 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2507 if (r < 0)
2508 return r;
2509
2510 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2511 if (r == -EEXIST) {
2512 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2513 if (r < 0)
2514 return r;
2515
2516 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2517 }
2518
2519 q = set_put(u->pids, PID_TO_PTR(pid));
2520 if (q < 0)
2521 return q;
2522
2523 return r;
2524 }
2525
2526 void unit_unwatch_pid(Unit *u, pid_t pid) {
2527 assert(u);
2528 assert(pid >= 1);
2529
2530 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2531 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2532 (void) set_remove(u->pids, PID_TO_PTR(pid));
2533 }
2534
2535 void unit_unwatch_all_pids(Unit *u) {
2536 assert(u);
2537
2538 while (!set_isempty(u->pids))
2539 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2540
2541 u->pids = set_free(u->pids);
2542 }
2543
2544 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2545 Iterator i;
2546 void *e;
2547
2548 assert(u);
2549
2550 /* Cleans dead PIDs from our list */
2551
2552 SET_FOREACH(e, u->pids, i) {
2553 pid_t pid = PTR_TO_PID(e);
2554
2555 if (pid == except1 || pid == except2)
2556 continue;
2557
2558 if (!pid_is_unwaited(pid))
2559 unit_unwatch_pid(u, pid);
2560 }
2561 }
2562
2563 bool unit_job_is_applicable(Unit *u, JobType j) {
2564 assert(u);
2565 assert(j >= 0 && j < _JOB_TYPE_MAX);
2566
2567 switch (j) {
2568
2569 case JOB_VERIFY_ACTIVE:
2570 case JOB_START:
2571 case JOB_NOP:
2572 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2573 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2574 * jobs for them. */
2575 return true;
2576
2577 case JOB_STOP:
2578 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2579 * external events), hence it makes no sense to permit enqueuing such a request either. */
2580 return !u->perpetual;
2581
2582 case JOB_RESTART:
2583 case JOB_TRY_RESTART:
2584 return unit_can_stop(u) && unit_can_start(u);
2585
2586 case JOB_RELOAD:
2587 case JOB_TRY_RELOAD:
2588 return unit_can_reload(u);
2589
2590 case JOB_RELOAD_OR_START:
2591 return unit_can_reload(u) && unit_can_start(u);
2592
2593 default:
2594 assert_not_reached("Invalid job type");
2595 }
2596 }
2597
2598 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2599 assert(u);
2600
2601 /* Only warn about some dependency types */
2602 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2603 return;
2604
2605 if (streq_ptr(u->id, other))
2606 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2607 else
2608 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2609 }
2610
2611 static int unit_add_dependency_hashmap(
2612 Hashmap **h,
2613 Unit *other,
2614 UnitDependencyMask origin_mask,
2615 UnitDependencyMask destination_mask) {
2616
2617 UnitDependencyInfo info;
2618 int r;
2619
2620 assert(h);
2621 assert(other);
2622 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2623 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2624 assert(origin_mask > 0 || destination_mask > 0);
2625
2626 r = hashmap_ensure_allocated(h, NULL);
2627 if (r < 0)
2628 return r;
2629
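/* The origin/destination masks are bit-packed into the pointer-sized value stored in the hashmap,
 * which is what the assert_cc() below verifies. Merging a dependency hence boils down to OR-ing
 * the new masks into the already stored ones. */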
2630 assert_cc(sizeof(void*) == sizeof(info));
2631
2632 info.data = hashmap_get(*h, other);
2633 if (info.data) {
2634 /* Entry already exists. Add in our mask. */
2635
2636 if ((info.origin_mask & origin_mask) == info.origin_mask &&
2637 (info.destination_mask & destination_mask) == info.destination_mask)
2638 return 0; /* NOP */
2639
2640 info.origin_mask |= origin_mask;
2641 info.destination_mask |= destination_mask;
2642
2643 r = hashmap_update(*h, other, info.data);
2644 } else {
2645 info = (UnitDependencyInfo) {
2646 .origin_mask = origin_mask,
2647 .destination_mask = destination_mask,
2648 };
2649
2650 r = hashmap_put(*h, other, info.data);
2651 }
2652 if (r < 0)
2653 return r;
2654
2655 return 1;
2656 }
2657
2658 int unit_add_dependency(
2659 Unit *u,
2660 UnitDependency d,
2661 Unit *other,
2662 bool add_reference,
2663 UnitDependencyMask mask) {
2664
2665 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2666 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2667 [UNIT_WANTS] = UNIT_WANTED_BY,
2668 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2669 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2670 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2671 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2672 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2673 [UNIT_WANTED_BY] = UNIT_WANTS,
2674 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2675 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2676 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2677 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2678 [UNIT_BEFORE] = UNIT_AFTER,
2679 [UNIT_AFTER] = UNIT_BEFORE,
2680 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2681 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2682 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2683 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2684 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2685 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2686 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2687 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2688 };
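/* For every dependency added below, the inverse from this table is registered on the other unit as
 * well. UNIT_ON_FAILURE has no inverse, and UNIT_JOINS_NAMESPACE_OF maps onto itself, which the
 * inverse_table[d] != d check below uses to avoid registering it twice. */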
2689 Unit *original_u = u, *original_other = other;
2690 int r;
2691
2692 assert(u);
2693 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2694 assert(other);
2695
2696 u = unit_follow_merge(u);
2697 other = unit_follow_merge(other);
2698
2699 /* We won't allow dependencies on ourselves. We will not
2700 * consider them an error, however. */
2701 if (u == other) {
2702 maybe_warn_about_dependency(original_u, original_other->id, d);
2703 return 0;
2704 }
2705
2706 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2707 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2708 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2709 return 0;
2710 }
2711
2712 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2713 if (r < 0)
2714 return r;
2715
2716 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2717 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2718 if (r < 0)
2719 return r;
2720 }
2721
2722 if (add_reference) {
2723 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2724 if (r < 0)
2725 return r;
2726
2727 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2728 if (r < 0)
2729 return r;
2730 }
2731
2732 unit_add_to_dbus_queue(u);
2733 return 0;
2734 }
2735
2736 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2737 int r;
2738
2739 assert(u);
2740
2741 r = unit_add_dependency(u, d, other, add_reference, mask);
2742 if (r < 0)
2743 return r;
2744
2745 return unit_add_dependency(u, e, other, add_reference, mask);
2746 }
2747
2748 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2749 int r;
2750
2751 assert(u);
2752 assert(name || path);
2753 assert(buf);
2754 assert(ret);
2755
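/* If the given name (or the basename of the given path) is a template, instantiate it: either with
 * our own instance string, or, if we aren't instantiated, with our unit name prefix. *buf owns any
 * newly allocated name, while *ret always points to the name to use. */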
2756 if (!name)
2757 name = basename(path);
2758
2759 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2760 *buf = NULL;
2761 *ret = name;
2762 return 0;
2763 }
2764
2765 if (u->instance)
2766 r = unit_name_replace_instance(name, u->instance, buf);
2767 else {
2768 _cleanup_free_ char *i = NULL;
2769
2770 r = unit_name_to_prefix(u->id, &i);
2771 if (r < 0)
2772 return r;
2773
2774 r = unit_name_replace_instance(name, i, buf);
2775 }
2776 if (r < 0)
2777 return r;
2778
2779 *ret = *buf;
2780 return 0;
2781 }
2782
2783 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2784 _cleanup_free_ char *buf = NULL;
2785 Unit *other;
2786 int r;
2787
2788 assert(u);
2789 assert(name || path);
2790
2791 r = resolve_template(u, name, path, &buf, &name);
2792 if (r < 0)
2793 return r;
2794
2795 r = manager_load_unit(u->manager, name, path, NULL, &other);
2796 if (r < 0)
2797 return r;
2798
2799 return unit_add_dependency(u, d, other, add_reference, mask);
2800 }
2801
2802 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
2803 _cleanup_free_ char *buf = NULL;
2804 Unit *other;
2805 int r;
2806
2807 assert(u);
2808 assert(name || path);
2809
2810 r = resolve_template(u, name, path, &buf, &name);
2811 if (r < 0)
2812 return r;
2813
2814 r = manager_load_unit(u->manager, name, path, NULL, &other);
2815 if (r < 0)
2816 return r;
2817
2818 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2819 }
2820
2821 int set_unit_path(const char *p) {
2822 /* This is mostly for debug purposes */
2823 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2824 return -errno;
2825
2826 return 0;
2827 }
2828
2829 char *unit_dbus_path(Unit *u) {
2830 assert(u);
2831
2832 if (!u->id)
2833 return NULL;
2834
2835 return unit_dbus_path_from_name(u->id);
2836 }
2837
2838 char *unit_dbus_path_invocation_id(Unit *u) {
2839 assert(u);
2840
2841 if (sd_id128_is_null(u->invocation_id))
2842 return NULL;
2843
2844 return unit_dbus_path_from_name(u->invocation_id_string);
2845 }
2846
2847 int unit_set_slice(Unit *u, Unit *slice) {
2848 assert(u);
2849 assert(slice);
2850
2851 /* Sets the unit slice if it has not been set before. Is extra
2852 * careful to only allow this for units that actually have a
2853 * cgroup context. Also, we don't allow setting this for slices
2854 * (since the parent slice is derived from the name). Make
2855 * sure the unit we set is actually a slice. */
2856
2857 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2858 return -EOPNOTSUPP;
2859
2860 if (u->type == UNIT_SLICE)
2861 return -EINVAL;
2862
2863 if (unit_active_state(u) != UNIT_INACTIVE)
2864 return -EBUSY;
2865
2866 if (slice->type != UNIT_SLICE)
2867 return -EINVAL;
2868
2869 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2870 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2871 return -EPERM;
2872
2873 if (UNIT_DEREF(u->slice) == slice)
2874 return 0;
2875
2876 /* Disallow slice changes if @u is already bound to cgroups */
2877 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2878 return -EBUSY;
2879
2880 unit_ref_unset(&u->slice);
2881 unit_ref_set(&u->slice, slice);
2882 return 1;
2883 }
2884
2885 int unit_set_default_slice(Unit *u) {
2886 _cleanup_free_ char *b = NULL;
2887 const char *slice_name;
2888 Unit *slice;
2889 int r;
2890
2891 assert(u);
2892
2893 if (UNIT_ISSET(u->slice))
2894 return 0;
2895
2896 if (u->instance) {
2897 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2898
2899 /* Implicitly place all instantiated units in their
2900 * own per-template slice */
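/* For example (just to illustrate the naming scheme), an instance like "getty@tty1.service" would
 * end up in "system-getty.slice" on the system manager, or in "getty.slice" on a user manager. */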
2901
2902 r = unit_name_to_prefix(u->id, &prefix);
2903 if (r < 0)
2904 return r;
2905
2906 /* The prefix is already escaped, but it might include
2907 * "-" which has a special meaning for slice units,
2908 * hence escape it once more here. */
2909 escaped = unit_name_escape(prefix);
2910 if (!escaped)
2911 return -ENOMEM;
2912
2913 if (MANAGER_IS_SYSTEM(u->manager))
2914 b = strjoin("system-", escaped, ".slice");
2915 else
2916 b = strappend(escaped, ".slice");
2917 if (!b)
2918 return -ENOMEM;
2919
2920 slice_name = b;
2921 } else
2922 slice_name =
2923 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2924 ? SPECIAL_SYSTEM_SLICE
2925 : SPECIAL_ROOT_SLICE;
2926
2927 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2928 if (r < 0)
2929 return r;
2930
2931 return unit_set_slice(u, slice);
2932 }
2933
2934 const char *unit_slice_name(Unit *u) {
2935 assert(u);
2936
2937 if (!UNIT_ISSET(u->slice))
2938 return NULL;
2939
2940 return UNIT_DEREF(u->slice)->id;
2941 }
2942
2943 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2944 _cleanup_free_ char *t = NULL;
2945 int r;
2946
2947 assert(u);
2948 assert(type);
2949 assert(_found);
2950
2951 r = unit_name_change_suffix(u->id, type, &t);
2952 if (r < 0)
2953 return r;
2954 if (unit_has_name(u, t))
2955 return -EINVAL;
2956
2957 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2958 assert(r < 0 || *_found != u);
2959 return r;
2960 }
2961
2962 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2963 const char *name, *old_owner, *new_owner;
2964 Unit *u = userdata;
2965 int r;
2966
2967 assert(message);
2968 assert(u);
2969
2970 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2971 if (r < 0) {
2972 bus_log_parse_error(r);
2973 return 0;
2974 }
2975
2976 old_owner = isempty(old_owner) ? NULL : old_owner;
2977 new_owner = isempty(new_owner) ? NULL : new_owner;
2978
2979 if (UNIT_VTABLE(u)->bus_name_owner_change)
2980 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2981
2982 return 0;
2983 }
2984
2985 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2986 const char *match;
2987
2988 assert(u);
2989 assert(bus);
2990 assert(name);
2991
2992 if (u->match_bus_slot)
2993 return -EBUSY;
2994
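/* Subscribe to NameOwnerChanged signals for this particular name, so that the unit type's
 * bus_name_owner_change() callback gets invoked whenever the name is acquired or released
 * on the bus. */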
2995 match = strjoina("type='signal',"
2996 "sender='org.freedesktop.DBus',"
2997 "path='/org/freedesktop/DBus',"
2998 "interface='org.freedesktop.DBus',"
2999 "member='NameOwnerChanged',"
3000 "arg0='", name, "'");
3001
3002 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
3003 }
3004
3005 int unit_watch_bus_name(Unit *u, const char *name) {
3006 int r;
3007
3008 assert(u);
3009 assert(name);
3010
3011 /* Watch a specific name on the bus. We only support one unit
3012 * watching each name for now. */
3013
3014 if (u->manager->api_bus) {
3015 /* If the bus is already available, install the match directly.
3016 * Otherwise, just put the name in the list. bus_setup_api() will take care of it later. */
3017 r = unit_install_bus_match(u, u->manager->api_bus, name);
3018 if (r < 0)
3019 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3020 }
3021
3022 r = hashmap_put(u->manager->watch_bus, name, u);
3023 if (r < 0) {
3024 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3025 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3026 }
3027
3028 return 0;
3029 }
3030
3031 void unit_unwatch_bus_name(Unit *u, const char *name) {
3032 assert(u);
3033 assert(name);
3034
3035 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3036 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3037 }
3038
3039 bool unit_can_serialize(Unit *u) {
3040 assert(u);
3041
3042 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3043 }
3044
3045 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3046 _cleanup_free_ char *s = NULL;
3047 int r = 0;
3048
3049 assert(f);
3050 assert(key);
3051
3052 if (mask != 0) {
3053 r = cg_mask_to_string(mask, &s);
3054 if (r >= 0) {
3055 fputs(key, f);
3056 fputc('=', f);
3057 fputs(s, f);
3058 fputc('\n', f);
3059 }
3060 }
3061 return r;
3062 }
3063
3064 static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3065 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3066 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3067 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3068 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3069 };
3070
3071 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3072 CGroupIPAccountingMetric m;
3073 int r;
3074
3075 assert(u);
3076 assert(f);
3077 assert(fds);
3078
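/* Everything below is written as simple "key=value" lines, terminated by a single empty line that
 * unit_deserialize() uses as end marker. A fragment of the output might look roughly like
 * "cgroup-realized=yes" followed by "transient=no" (values here are only illustrative). */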
3079 if (unit_can_serialize(u)) {
3080 ExecRuntime *rt;
3081
3082 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3083 if (r < 0)
3084 return r;
3085
3086 rt = unit_get_exec_runtime(u);
3087 if (rt) {
3088 r = exec_runtime_serialize(u, rt, f, fds);
3089 if (r < 0)
3090 return r;
3091 }
3092 }
3093
3094 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
3095
3096 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3097 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
3098 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
3099 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3100
3101 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
3102 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
3103
3104 if (dual_timestamp_is_set(&u->condition_timestamp))
3105 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
3106
3107 if (dual_timestamp_is_set(&u->assert_timestamp))
3108 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
3109
3110 unit_serialize_item(u, f, "transient", yes_no(u->transient));
3111
3112 unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
3113 unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
3114 unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));
3115
3116 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3117 if (u->cpu_usage_last != NSEC_INFINITY)
3118 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3119
3120 if (u->cgroup_path)
3121 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
3122 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
3123 (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3124 (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3125 unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);
3126
3127 if (uid_is_valid(u->ref_uid))
3128 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
3129 if (gid_is_valid(u->ref_gid))
3130 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
3131
3132 if (!sd_id128_is_null(u->invocation_id))
3133 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3134
3135 bus_track_serialize(u->bus_track, f, "ref");
3136
3137 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3138 uint64_t v;
3139
3140 r = unit_get_ip_accounting(u, m, &v);
3141 if (r >= 0)
3142 unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
3143 }
3144
3145 if (serialize_jobs) {
3146 if (u->job) {
3147 fprintf(f, "job\n");
3148 job_serialize(u->job, f);
3149 }
3150
3151 if (u->nop_job) {
3152 fprintf(f, "job\n");
3153 job_serialize(u->nop_job, f);
3154 }
3155 }
3156
3157 /* End marker */
3158 fputc('\n', f);
3159 return 0;
3160 }
3161
3162 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3163 assert(u);
3164 assert(f);
3165 assert(key);
3166
3167 if (!value)
3168 return 0;
3169
3170 fputs(key, f);
3171 fputc('=', f);
3172 fputs(value, f);
3173 fputc('\n', f);
3174
3175 return 1;
3176 }
3177
3178 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3179 _cleanup_free_ char *c = NULL;
3180
3181 assert(u);
3182 assert(f);
3183 assert(key);
3184
3185 if (!value)
3186 return 0;
3187
3188 c = cescape(value);
3189 if (!c)
3190 return -ENOMEM;
3191
3192 fputs(key, f);
3193 fputc('=', f);
3194 fputs(c, f);
3195 fputc('\n', f);
3196
3197 return 1;
3198 }
3199
3200 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
3201 int copy;
3202
3203 assert(u);
3204 assert(f);
3205 assert(key);
3206
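/* File descriptors can't be dumped into the serialization stream directly; instead the fd is
 * duplicated into the FDSet that is passed along across daemon re-execution, and only the
 * duplicate's number is written out. */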
3207 if (fd < 0)
3208 return 0;
3209
3210 copy = fdset_put_dup(fds, fd);
3211 if (copy < 0)
3212 return copy;
3213
3214 fprintf(f, "%s=%i\n", key, copy);
3215 return 1;
3216 }
3217
3218 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3219 va_list ap;
3220
3221 assert(u);
3222 assert(f);
3223 assert(key);
3224 assert(format);
3225
3226 fputs(key, f);
3227 fputc('=', f);
3228
3229 va_start(ap, format);
3230 vfprintf(f, format, ap);
3231 va_end(ap);
3232
3233 fputc('\n', f);
3234 }
3235
3236 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3237 ExecRuntime **rt = NULL;
3238 size_t offset;
3239 int r;
3240
3241 assert(u);
3242 assert(f);
3243 assert(fds);
3244
3245 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3246 if (offset > 0)
3247 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3248
3249 for (;;) {
3250 char line[LINE_MAX], *l, *v;
3251 CGroupIPAccountingMetric m;
3252 size_t k;
3253
3254 if (!fgets(line, sizeof(line), f)) {
3255 if (feof(f))
3256 return 0;
3257 return -errno;
3258 }
3259
3260 char_array_0(line);
3261 l = strstrip(line);
3262
3263 /* End marker */
3264 if (isempty(l))
3265 break;
3266
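/* Split the line at the first '='. If there is none, v ends up pointing at the terminating NUL,
 * i.e. the value is treated as empty. */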
3267 k = strcspn(l, "=");
3268
3269 if (l[k] == '=') {
3270 l[k] = 0;
3271 v = l+k+1;
3272 } else
3273 v = l+k;
3274
3275 if (streq(l, "job")) {
3276 if (v[0] == '\0') {
3277 /* new-style serialized job */
3278 Job *j;
3279
3280 j = job_new_raw(u);
3281 if (!j)
3282 return log_oom();
3283
3284 r = job_deserialize(j, f);
3285 if (r < 0) {
3286 job_free(j);
3287 return r;
3288 }
3289
3290 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3291 if (r < 0) {
3292 job_free(j);
3293 return r;
3294 }
3295
3296 r = job_install_deserialized(j);
3297 if (r < 0) {
3298 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3299 job_free(j);
3300 return r;
3301 }
3302 } else /* legacy for pre-44 */
3303 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3304 continue;
3305 } else if (streq(l, "state-change-timestamp")) {
3306 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3307 continue;
3308 } else if (streq(l, "inactive-exit-timestamp")) {
3309 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3310 continue;
3311 } else if (streq(l, "active-enter-timestamp")) {
3312 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3313 continue;
3314 } else if (streq(l, "active-exit-timestamp")) {
3315 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3316 continue;
3317 } else if (streq(l, "inactive-enter-timestamp")) {
3318 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3319 continue;
3320 } else if (streq(l, "condition-timestamp")) {
3321 dual_timestamp_deserialize(v, &u->condition_timestamp);
3322 continue;
3323 } else if (streq(l, "assert-timestamp")) {
3324 dual_timestamp_deserialize(v, &u->assert_timestamp);
3325 continue;
3326 } else if (streq(l, "condition-result")) {
3327
3328 r = parse_boolean(v);
3329 if (r < 0)
3330 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3331 else
3332 u->condition_result = r;
3333
3334 continue;
3335
3336 } else if (streq(l, "assert-result")) {
3337
3338 r = parse_boolean(v);
3339 if (r < 0)
3340 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3341 else
3342 u->assert_result = r;
3343
3344 continue;
3345
3346 } else if (streq(l, "transient")) {
3347
3348 r = parse_boolean(v);
3349 if (r < 0)
3350 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3351 else
3352 u->transient = r;
3353
3354 continue;
3355
3356 } else if (streq(l, "exported-invocation-id")) {
3357
3358 r = parse_boolean(v);
3359 if (r < 0)
3360 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3361 else
3362 u->exported_invocation_id = r;
3363
3364 continue;
3365
3366 } else if (streq(l, "exported-log-level-max")) {
3367
3368 r = parse_boolean(v);
3369 if (r < 0)
3370 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3371 else
3372 u->exported_log_level_max = r;
3373
3374 continue;
3375
3376 } else if (streq(l, "exported-log-extra-fields")) {
3377
3378 r = parse_boolean(v);
3379 if (r < 0)
3380 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3381 else
3382 u->exported_log_extra_fields = r;
3383
3384 continue;
3385
3386 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3387
3388 r = safe_atou64(v, &u->cpu_usage_base);
3389 if (r < 0)
3390 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3391
3392 continue;
3393
3394 } else if (streq(l, "cpu-usage-last")) {
3395
3396 r = safe_atou64(v, &u->cpu_usage_last);
3397 if (r < 0)
3398 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3399
3400 continue;
3401
3402 } else if (streq(l, "cgroup")) {
3403
3404 r = unit_set_cgroup_path(u, v);
3405 if (r < 0)
3406 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3407
3408 (void) unit_watch_cgroup(u);
3409
3410 continue;
3411 } else if (streq(l, "cgroup-realized")) {
3412 int b;
3413
3414 b = parse_boolean(v);
3415 if (b < 0)
3416 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3417 else
3418 u->cgroup_realized = b;
3419
3420 continue;
3421
3422 } else if (streq(l, "cgroup-realized-mask")) {
3423
3424 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3425 if (r < 0)
3426 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3427 continue;
3428
3429 } else if (streq(l, "cgroup-enabled-mask")) {
3430
3431 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3432 if (r < 0)
3433 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3434 continue;
3435
3436 } else if (streq(l, "cgroup-bpf-realized")) {
3437 int i;
3438
3439 r = safe_atoi(v, &i);
3440 if (r < 0)
3441 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3442 else
3443 u->cgroup_bpf_state =
3444 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3445 i > 0 ? UNIT_CGROUP_BPF_ON :
3446 UNIT_CGROUP_BPF_OFF;
3447
3448 continue;
3449
3450 } else if (streq(l, "ref-uid")) {
3451 uid_t uid;
3452
3453 r = parse_uid(v, &uid);
3454 if (r < 0)
3455 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3456 else
3457 unit_ref_uid_gid(u, uid, GID_INVALID);
3458
3459 continue;
3460
3461 } else if (streq(l, "ref-gid")) {
3462 gid_t gid;
3463
3464 r = parse_gid(v, &gid);
3465 if (r < 0)
3466 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3467 else
3468 unit_ref_uid_gid(u, UID_INVALID, gid);

continue;

3470 } else if (streq(l, "ref")) {
3471
3472 r = strv_extend(&u->deserialized_refs, v);
3473 if (r < 0)
3474 log_oom();
3475
3476 continue;
3477 } else if (streq(l, "invocation-id")) {
3478 sd_id128_t id;
3479
3480 r = sd_id128_from_string(v, &id);
3481 if (r < 0)
3482 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3483 else {
3484 r = unit_set_invocation_id(u, id);
3485 if (r < 0)
3486 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3487 }
3488
3489 continue;
3490 }
3491
3492 /* Check if this is an IP accounting metric serialization field */
3493 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3494 if (streq(l, ip_accounting_metric_field[m]))
3495 break;
3496 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3497 uint64_t c;
3498
3499 r = safe_atou64(v, &c);
3500 if (r < 0)
3501 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3502 else
3503 u->ip_accounting_extra[m] = c;
3504 continue;
3505 }
3506
3507 if (unit_can_serialize(u)) {
3508 if (rt) {
3509 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3510 if (r < 0) {
3511 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3512 continue;
3513 }
3514
3515 /* Returns positive if key was handled by the call */
3516 if (r > 0)
3517 continue;
3518 }
3519
3520 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3521 if (r < 0)
3522 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3523 }
3524 }
3525
3526 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3527 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3528 * before 228 where the base for timeouts was not persistent across reboots. */
3529
3530 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3531 dual_timestamp_get(&u->state_change_timestamp);
3532
3533 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3534 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3535 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3536 unit_invalidate_cgroup_bpf(u);
3537
3538 return 0;
3539 }
3540
3541 void unit_deserialize_skip(FILE *f) {
3542 assert(f);
3543
3544 /* Skip serialized data for this unit. We don't know what it is. */
3545
3546 for (;;) {
3547 char line[LINE_MAX], *l;
3548
3549 if (!fgets(line, sizeof line, f))
3550 return;
3551
3552 char_array_0(line);
3553 l = strstrip(line);
3554
3555 /* End marker */
3556 if (isempty(l))
3557 return;
3558 }
3559 }
3560
3561
3562 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3563 Unit *device;
3564 _cleanup_free_ char *e = NULL;
3565 int r;
3566
3567 assert(u);
3568
3569 /* Adds in links to the device node that this unit is based on */
3570 if (isempty(what))
3571 return 0;
3572
3573 if (!is_device_path(what))
3574 return 0;
3575
3576 /* When device units aren't supported (such as in a
3577 * container), don't create dependencies on them. */
3578 if (!unit_type_supported(UNIT_DEVICE))
3579 return 0;
3580
3581 r = unit_name_from_path(what, ".device", &e);
3582 if (r < 0)
3583 return r;
3584
3585 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3586 if (r < 0)
3587 return r;
3588
3589 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3590 dep = UNIT_BINDS_TO;
3591
3592 r = unit_add_two_dependencies(u, UNIT_AFTER,
3593 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3594 device, true, mask);
3595 if (r < 0)
3596 return r;
3597
3598 if (wants) {
3599 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3600 if (r < 0)
3601 return r;
3602 }
3603
3604 return 0;
3605 }
3606
3607 int unit_coldplug(Unit *u) {
3608 int r = 0, q;
3609 char **i;
3610
3611 assert(u);
3612
3613 /* Make sure we don't enter a loop when coldplugging
3614 * recursively. */
3615 if (u->coldplugged)
3616 return 0;
3617
3618 u->coldplugged = true;
3619
3620 STRV_FOREACH(i, u->deserialized_refs) {
3621 q = bus_unit_track_add_name(u, *i);
3622 if (q < 0 && r >= 0)
3623 r = q;
3624 }
3625 u->deserialized_refs = strv_free(u->deserialized_refs);
3626
3627 if (UNIT_VTABLE(u)->coldplug) {
3628 q = UNIT_VTABLE(u)->coldplug(u);
3629 if (q < 0 && r >= 0)
3630 r = q;
3631 }
3632
3633 if (u->job) {
3634 q = job_coldplug(u->job);
3635 if (q < 0 && r >= 0)
3636 r = q;
3637 }
3638
3639 return r;
3640 }
3641
3642 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3643 struct stat st;
3644
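/* Returns true if the file behind 'path' looks newer than what we loaded: a file we cannot stat()
 * anymore counts as changed, for masked units we only check whether the file is still empty or
 * /dev/null, and otherwise we compare the mtime against the one recorded at load time. */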
3645 if (!path)
3646 return false;
3647
3648 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3649 * are never out-of-date. */
3650 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3651 return false;
3652
3653 if (stat(path, &st) < 0)
3654 /* What, cannot access this anymore? */
3655 return true;
3656
3657 if (path_masked)
3658 /* For masked files check if they are still so */
3659 return !null_or_empty(&st);
3660 else
3661 /* For non-empty files check the mtime */
3662 return timespec_load(&st.st_mtim) > mtime;
3665 }
3666
3667 bool unit_need_daemon_reload(Unit *u) {
3668 _cleanup_strv_free_ char **t = NULL;
3669 char **path;
3670
3671 assert(u);
3672
3673 /* For unit files, we allow masking… */
3674 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3675 u->load_state == UNIT_MASKED))
3676 return true;
3677
3678 /* Source paths should not be masked… */
3679 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3680 return true;
3681
3682 if (u->load_state == UNIT_LOADED)
3683 (void) unit_find_dropin_paths(u, &t);
3684 if (!strv_equal(u->dropin_paths, t))
3685 return true;
3686
3687 /* … any drop-ins that are masked are simply omitted from the list. */
3688 STRV_FOREACH(path, u->dropin_paths)
3689 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3690 return true;
3691
3692 return false;
3693 }
3694
3695 void unit_reset_failed(Unit *u) {
3696 assert(u);
3697
3698 if (UNIT_VTABLE(u)->reset_failed)
3699 UNIT_VTABLE(u)->reset_failed(u);
3700
3701 RATELIMIT_RESET(u->start_limit);
3702 u->start_limit_hit = false;
3703 }
3704
3705 Unit *unit_following(Unit *u) {
3706 assert(u);
3707
3708 if (UNIT_VTABLE(u)->following)
3709 return UNIT_VTABLE(u)->following(u);
3710
3711 return NULL;
3712 }
3713
3714 bool unit_stop_pending(Unit *u) {
3715 assert(u);
3716
3717 /* This call does not check the current state of the unit. It's
3718 * hence useful to be called from state change calls of the
3719 * unit itself, where the state isn't updated yet. This is
3720 * different from unit_inactive_or_pending() which checks both
3721 * the current state and for a queued job. */
3722
3723 return u->job && u->job->type == JOB_STOP;
3724 }
3725
3726 bool unit_inactive_or_pending(Unit *u) {
3727 assert(u);
3728
3729 /* Returns true if the unit is inactive or going down */
3730
3731 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3732 return true;
3733
3734 if (unit_stop_pending(u))
3735 return true;
3736
3737 return false;
3738 }
3739
3740 bool unit_active_or_pending(Unit *u) {
3741 assert(u);
3742
3743 /* Returns true if the unit is active or going up */
3744
3745 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3746 return true;
3747
3748 if (u->job &&
3749 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3750 return true;
3751
3752 return false;
3753 }
3754
3755 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3756 assert(u);
3757 assert(w >= 0 && w < _KILL_WHO_MAX);
3758 assert(SIGNAL_VALID(signo));
3759
3760 if (!UNIT_VTABLE(u)->kill)
3761 return -EOPNOTSUPP;
3762
3763 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3764 }
3765
3766 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3767 Set *pid_set;
3768 int r;
3769
3770 pid_set = set_new(NULL);
3771 if (!pid_set)
3772 return NULL;
3773
3774 /* Exclude the main/control pids from being killed via the cgroup */
3775 if (main_pid > 0) {
3776 r = set_put(pid_set, PID_TO_PTR(main_pid));
3777 if (r < 0)
3778 goto fail;
3779 }
3780
3781 if (control_pid > 0) {
3782 r = set_put(pid_set, PID_TO_PTR(control_pid));
3783 if (r < 0)
3784 goto fail;
3785 }
3786
3787 return pid_set;
3788
3789 fail:
3790 set_free(pid_set);
3791 return NULL;
3792 }
3793
3794 int unit_kill_common(
3795 Unit *u,
3796 KillWho who,
3797 int signo,
3798 pid_t main_pid,
3799 pid_t control_pid,
3800 sd_bus_error *error) {
3801
3802 int r = 0;
3803 bool killed = false;
3804
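/* Depending on 'who' we signal the main PID, the control PID and/or every other process in the
 * unit's cgroup (with the main/control PIDs excluded from the cgroup sweep). The *_FAIL variants
 * additionally turn "nothing was killed" into -ESRCH for the caller. */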
3805 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3806 if (main_pid < 0)
3807 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3808 else if (main_pid == 0)
3809 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3810 }
3811
3812 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3813 if (control_pid < 0)
3814 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3815 else if (control_pid == 0)
3816 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3817 }
3818
3819 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3820 if (control_pid > 0) {
3821 if (kill(control_pid, signo) < 0)
3822 r = -errno;
3823 else
3824 killed = true;
3825 }
3826
3827 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3828 if (main_pid > 0) {
3829 if (kill(main_pid, signo) < 0)
3830 r = -errno;
3831 else
3832 killed = true;
3833 }
3834
3835 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3836 _cleanup_set_free_ Set *pid_set = NULL;
3837 int q;
3838
3839 /* Exclude the main/control pids from being killed via the cgroup */
3840 pid_set = unit_pid_set(main_pid, control_pid);
3841 if (!pid_set)
3842 return -ENOMEM;
3843
3844 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3845 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3846 r = q;
3847 else
3848 killed = true;
3849 }
3850
3851 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3852 return -ESRCH;
3853
3854 return r;
3855 }
3856
3857 int unit_following_set(Unit *u, Set **s) {
3858 assert(u);
3859 assert(s);
3860
3861 if (UNIT_VTABLE(u)->following_set)
3862 return UNIT_VTABLE(u)->following_set(u, s);
3863
3864 *s = NULL;
3865 return 0;
3866 }
3867
3868 UnitFileState unit_get_unit_file_state(Unit *u) {
3869 int r;
3870
3871 assert(u);
3872
3873 if (u->unit_file_state < 0 && u->fragment_path) {
3874 r = unit_file_get_state(
3875 u->manager->unit_file_scope,
3876 NULL,
3877 basename(u->fragment_path),
3878 &u->unit_file_state);
3879 if (r < 0)
3880 u->unit_file_state = UNIT_FILE_BAD;
3881 }
3882
3883 return u->unit_file_state;
3884 }
3885
3886 int unit_get_unit_file_preset(Unit *u) {
3887 assert(u);
3888
3889 if (u->unit_file_preset < 0 && u->fragment_path)
3890 u->unit_file_preset = unit_file_query_preset(
3891 u->manager->unit_file_scope,
3892 NULL,
3893 basename(u->fragment_path));
3894
3895 return u->unit_file_preset;
3896 }
3897
3898 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3899 assert(ref);
3900 assert(u);
3901
3902 if (ref->unit)
3903 unit_ref_unset(ref);
3904
3905 ref->unit = u;
3906 LIST_PREPEND(refs, u->refs, ref);
3907 return u;
3908 }
3909
3910 void unit_ref_unset(UnitRef *ref) {
3911 assert(ref);
3912
3913 if (!ref->unit)
3914 return;
3915
3916 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3917 * be unreferenced now. */
3918 unit_add_to_gc_queue(ref->unit);
3919
3920 LIST_REMOVE(refs, ref->unit->refs, ref);
3921 ref->unit = NULL;
3922 }
3923
3924 static int user_from_unit_name(Unit *u, char **ret) {
3925
3926 static const uint8_t hash_key[] = {
3927 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3928 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3929 };
3930
3931 _cleanup_free_ char *n = NULL;
3932 int r;
3933
3934 r = unit_name_to_prefix(u->id, &n);
3935 if (r < 0)
3936 return r;
3937
3938 if (valid_user_group_name(n)) {
3939 *ret = n;
3940 n = NULL;
3941 return 0;
3942 }
3943
3944 /* If we can't use the unit name as a user name, then let's hash it and use that */
3945 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3946 return -ENOMEM;
3947
3948 return 0;
3949 }
3950
3951 int unit_patch_contexts(Unit *u) {
3952 CGroupContext *cc;
3953 ExecContext *ec;
3954 unsigned i;
3955 int r;
3956
3957 assert(u);
3958
3959 /* Patch the manager defaults into the exec and cgroup
3960 * contexts, _after_ the rest of the settings have been
3961 * initialized */
3962
3963 ec = unit_get_exec_context(u);
3964 if (ec) {
3965 /* This only copies in the ones that need memory */
3966 for (i = 0; i < _RLIMIT_MAX; i++)
3967 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3968 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3969 if (!ec->rlimit[i])
3970 return -ENOMEM;
3971 }
3972
3973 if (MANAGER_IS_USER(u->manager) &&
3974 !ec->working_directory) {
3975
3976 r = get_home_dir(&ec->working_directory);
3977 if (r < 0)
3978 return r;
3979
3980 /* Allow user services to run, even if the
3981 * home directory is missing */
3982 ec->working_directory_missing_ok = true;
3983 }
3984
3985 if (ec->private_devices)
3986 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
3987
3988 if (ec->protect_kernel_modules)
3989 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
3990
3991 if (ec->dynamic_user) {
3992 if (!ec->user) {
3993 r = user_from_unit_name(u, &ec->user);
3994 if (r < 0)
3995 return r;
3996 }
3997
3998 if (!ec->group) {
3999 ec->group = strdup(ec->user);
4000 if (!ec->group)
4001 return -ENOMEM;
4002 }
4003
4004 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4005 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4006
4007 ec->private_tmp = true;
4008 ec->remove_ipc = true;
4009 ec->protect_system = PROTECT_SYSTEM_STRICT;
4010 if (ec->protect_home == PROTECT_HOME_NO)
4011 ec->protect_home = PROTECT_HOME_READ_ONLY;
4012 }
4013 }
4014
4015 cc = unit_get_cgroup_context(u);
4016 if (cc) {
4017
4018 if (ec &&
4019 ec->private_devices &&
4020 cc->device_policy == CGROUP_AUTO)
4021 cc->device_policy = CGROUP_CLOSED;
4022 }
4023
4024 return 0;
4025 }
4026
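/* The accessors below resolve the type-specific context structures via byte offsets recorded in
 * the unit vtable; an offset of zero means the unit type doesn't carry that context at all. */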
4027 ExecContext *unit_get_exec_context(Unit *u) {
4028 size_t offset;
4029 assert(u);
4030
4031 if (u->type < 0)
4032 return NULL;
4033
4034 offset = UNIT_VTABLE(u)->exec_context_offset;
4035 if (offset <= 0)
4036 return NULL;
4037
4038 return (ExecContext*) ((uint8_t*) u + offset);
4039 }
4040
4041 KillContext *unit_get_kill_context(Unit *u) {
4042 size_t offset;
4043 assert(u);
4044
4045 if (u->type < 0)
4046 return NULL;
4047
4048 offset = UNIT_VTABLE(u)->kill_context_offset;
4049 if (offset <= 0)
4050 return NULL;
4051
4052 return (KillContext*) ((uint8_t*) u + offset);
4053 }
4054
4055 CGroupContext *unit_get_cgroup_context(Unit *u) {
4056 size_t offset;
4057
4058 if (u->type < 0)
4059 return NULL;
4060
4061 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4062 if (offset <= 0)
4063 return NULL;
4064
4065 return (CGroupContext*) ((uint8_t*) u + offset);
4066 }
4067
4068 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4069 size_t offset;
4070
4071 if (u->type < 0)
4072 return NULL;
4073
4074 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4075 if (offset <= 0)
4076 return NULL;
4077
4078 return *(ExecRuntime**) ((uint8_t*) u + offset);
4079 }
4080
4081 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
4082 assert(u);
4083
4084 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
4085 return NULL;
4086
4087 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4088 return u->manager->lookup_paths.transient;
4089
4090 if (mode == UNIT_RUNTIME)
4091 return u->manager->lookup_paths.runtime_control;
4092
4093 if (mode == UNIT_PERSISTENT)
4094 return u->manager->lookup_paths.persistent_control;
4095
4096 return NULL;
4097 }
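/* Illustrative note (not part of the original source): for the system instance these lookup paths
 * typically resolve to /run/systemd/transient (transient units), /run/systemd/system.control
 * (UNIT_RUNTIME) and /etc/systemd/system.control (UNIT_PERSISTENT); the user instance uses the
 * corresponding per-user directories. Treat the concrete paths as an assumption based on the usual
 * lookup-path setup, not something defined in this file. */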
4098
4099 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
4100 _cleanup_free_ char *p = NULL, *q = NULL;
4101 const char *dir, *wrapped;
4102 int r;
4103
4104 assert(u);
4105
4106 if (u->transient_file) {
4107 /* If this is a transient unit file that is still being created, then let's not create a new drop-in
4108 * but instead append to the transient unit file itself. */
4109 fputs(data, u->transient_file);
4110 fputc('\n', u->transient_file);
4111 return 0;
4112 }
4113
4114 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
4115 return 0;
4116
4117 dir = unit_drop_in_dir(u, mode);
4118 if (!dir)
4119 return -EINVAL;
4120
4121 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4122 "# or an equivalent operation. Do not edit.\n",
4123 data,
4124 "\n");
4125
4126 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4127 if (r < 0)
4128 return r;
4129
4130 (void) mkdir_p(p, 0755);
4131 r = write_string_file_atomic_label(q, wrapped);
4132 if (r < 0)
4133 return r;
4134
4135 r = strv_push(&u->dropin_paths, q);
4136 if (r < 0)
4137 return r;
4138 q = NULL;
4139
4140 strv_uniq(u->dropin_paths);
4141
4142 u->dropin_mtime = now(CLOCK_REALTIME);
4143
4144 return 0;
4145 }
4146
4147 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
4148 _cleanup_free_ char *p = NULL;
4149 va_list ap;
4150 int r;
4151
4152 assert(u);
4153 assert(name);
4154 assert(format);
4155
4156 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
4157 return 0;
4158
4159 va_start(ap, format);
4160 r = vasprintf(&p, format, ap);
4161 va_end(ap);
4162
4163 if (r < 0)
4164 return -ENOMEM;
4165
4166 return unit_write_drop_in(u, mode, name, p);
4167 }
4168
4169 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
4170 const char *ndata;
4171
4172 assert(u);
4173 assert(name);
4174 assert(data);
4175
4176 if (!UNIT_VTABLE(u)->private_section)
4177 return -EINVAL;
4178
4179 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
4180 return 0;
4181
4182 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4183
4184 return unit_write_drop_in(u, mode, name, ndata);
4185 }
4186
4187 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
4188 _cleanup_free_ char *p = NULL;
4189 va_list ap;
4190 int r;
4191
4192 assert(u);
4193 assert(name);
4194 assert(format);
4195
4196 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
4197 return 0;
4198
4199 va_start(ap, format);
4200 r = vasprintf(&p, format, ap);
4201 va_end(ap);
4202
4203 if (r < 0)
4204 return -ENOMEM;
4205
4206 return unit_write_drop_in_private(u, mode, name, p);
4207 }
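/* Illustrative note (not part of the original source): with mode=UNIT_PERSISTENT and name="CPUQuota" for a
 * unit "foo.service", drop_in_file() above resolves, assuming its usual naming scheme, to something like
 * <persistent_control>/foo.service.d/50-CPUQuota.conf, containing the "Do not edit" header followed by the
 * data and a trailing newline. A hedged sketch of a caller in the style of the "systemctl set-property"
 * code paths; the property and the percentage formatting are illustrative, not taken from this file.
 * Kept out of the build with #if 0: */
#if 0
static int set_cpu_quota_example(Unit *u, unsigned percent) {
        /* unit_write_drop_in_private_format() prefixes the data with "[<private section>]\n", e.g.
         * "[Service]" for service units, so only the assignment itself needs to be passed here. */
        return unit_write_drop_in_private_format(u, UNIT_RUNTIME, "CPUQuota",
                                                 "CPUQuota=%u%%", percent);
}
#endif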
4208
4209 int unit_make_transient(Unit *u) {
4210 FILE *f;
4211 char *path;
4212
4213 assert(u);
4214
4215 if (!UNIT_VTABLE(u)->can_transient)
4216 return -EOPNOTSUPP;
4217
4218 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
4219 if (!path)
4220 return -ENOMEM;
4221
4222 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4223 * creating the transient unit, and is closed in unit_load(), as soon as we start loading the file. */
4224
4225 RUN_WITH_UMASK(0022) {
4226 f = fopen(path, "we");
4227 if (!f) {
4228 free(path);
4229 return -errno;
4230 }
4231 }
4232
4233 if (u->transient_file)
4234 fclose(u->transient_file);
4235 u->transient_file = f;
4236
4237 free(u->fragment_path);
4238 u->fragment_path = path;
4239
4240 u->source_path = mfree(u->source_path);
4241 u->dropin_paths = strv_free(u->dropin_paths);
4242 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4243
4244 u->load_state = UNIT_STUB;
4245 u->load_error = 0;
4246 u->transient = true;
4247
4248 unit_add_to_dbus_queue(u);
4249 unit_add_to_gc_queue(u);
4250
4251 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4252 u->transient_file);
4253
4254 return 0;
4255 }
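/* Illustrative note (not part of the original source): while u->transient_file is open, property writes
 * that go through unit_write_drop_in() are appended straight into the transient fragment instead of
 * creating a drop-in. A hedged sketch of the overall flow a caller might follow; the real D-Bus plumbing
 * is omitted and the example property is invented for illustration. Kept out of the build with #if 0: */
#if 0
static int make_transient_example(Unit *u) {
        int r;

        r = unit_make_transient(u);     /* opens the transient fragment and writes the header comment */
        if (r < 0)
                return r;

        /* This lands in the transient file itself; mode and name are effectively ignored while
         * u->transient_file is open. The assignment below is purely illustrative. */
        r = unit_write_drop_in(u, UNIT_RUNTIME, "Description",
                               "[Unit]\nDescription=Example transient unit");
        if (r < 0)
                return r;

        /* unit_load() closes u->transient_file and parses what was just written. */
        return unit_load(u);
}
#endif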
4256
4257 static void log_kill(pid_t pid, int sig, void *userdata) {
4258 _cleanup_free_ char *comm = NULL;
4259
4260 (void) get_process_comm(pid, &comm);
4261
4262 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4263 only, such as systemd's own PAM stub process. */
4264 if (comm && comm[0] == '(')
4265 return;
4266
4267 log_unit_notice(userdata,
4268 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4269 pid,
4270 strna(comm),
4271 signal_to_string(sig));
4272 }
4273
4274 static int operation_to_signal(KillContext *c, KillOperation k) {
4275 assert(c);
4276
4277 switch (k) {
4278
4279 case KILL_TERMINATE:
4280 case KILL_TERMINATE_AND_LOG:
4281 return c->kill_signal;
4282
4283 case KILL_KILL:
4284 return SIGKILL;
4285
4286 case KILL_ABORT:
4287 return SIGABRT;
4288
4289 default:
4290 assert_not_reached("KillOperation unknown");
4291 }
4292 }
4293
4294 int unit_kill_context(
4295 Unit *u,
4296 KillContext *c,
4297 KillOperation k,
4298 pid_t main_pid,
4299 pid_t control_pid,
4300 bool main_pid_alien) {
4301
4302 bool wait_for_exit = false, send_sighup;
4303 cg_kill_log_func_t log_func = NULL;
4304 int sig, r;
4305
4306 assert(u);
4307 assert(c);
4308
4309 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4310 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4311
4312 if (c->kill_mode == KILL_NONE)
4313 return 0;
4314
4315 sig = operation_to_signal(c, k);
4316
4317 send_sighup =
4318 c->send_sighup &&
4319 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4320 sig != SIGHUP;
4321
4322 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4323 log_func = log_kill;
4324
4325 if (main_pid > 0) {
4326 if (log_func)
4327 log_func(main_pid, sig, u);
4328
4329 r = kill_and_sigcont(main_pid, sig);
4330 if (r < 0 && r != -ESRCH) {
4331 _cleanup_free_ char *comm = NULL;
4332 (void) get_process_comm(main_pid, &comm);
4333
4334 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4335 } else {
4336 if (!main_pid_alien)
4337 wait_for_exit = true;
4338
4339 if (r != -ESRCH && send_sighup)
4340 (void) kill(main_pid, SIGHUP);
4341 }
4342 }
4343
4344 if (control_pid > 0) {
4345 if (log_func)
4346 log_func(control_pid, sig, u);
4347
4348 r = kill_and_sigcont(control_pid, sig);
4349 if (r < 0 && r != -ESRCH) {
4350 _cleanup_free_ char *comm = NULL;
4351 (void) get_process_comm(control_pid, &comm);
4352
4353 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4354 } else {
4355 wait_for_exit = true;
4356
4357 if (r != -ESRCH && send_sighup)
4358 (void) kill(control_pid, SIGHUP);
4359 }
4360 }
4361
4362 if (u->cgroup_path &&
4363 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4364 _cleanup_set_free_ Set *pid_set = NULL;
4365
4366 /* Exclude the main/control pids from being killed via the cgroup */
4367 pid_set = unit_pid_set(main_pid, control_pid);
4368 if (!pid_set)
4369 return -ENOMEM;
4370
4371 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4372 sig,
4373 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4374 pid_set,
4375 log_func, u);
4376 if (r < 0) {
4377 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4378 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4379
4380 } else if (r > 0) {
4381
4382 /* FIXME: For now, on the legacy hierarchy, we
4383 * will not wait for the cgroup members to die
4384 * if we are running in a container or if this
4385 * is a delegation unit, simply because cgroup
4386 * notification is unreliable in these
4387 * cases. It doesn't work at all in
4388 * containers, and outside of containers it
4389 * can be confused easily by left-over
4390 * directories in the cgroup — which however
4391 * should not exist in non-delegated units. On
4392 * the unified hierarchy that's different,
4393 * there we get proper events. Hence rely on
4394 * them. */
4395
4396 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4397 (detect_container() == 0 && !UNIT_CGROUP_BOOL(u, delegate)))
4398 wait_for_exit = true;
4399
4400 if (send_sighup) {
4401 set_free(pid_set);
4402
4403 pid_set = unit_pid_set(main_pid, control_pid);
4404 if (!pid_set)
4405 return -ENOMEM;
4406
4407 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4408 SIGHUP,
4409 CGROUP_IGNORE_SELF,
4410 pid_set,
4411 NULL, NULL);
4412 }
4413 }
4414 }
4415
4416 return wait_for_exit;
4417 }
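/* Illustrative note (not part of the original source): kill_and_sigcont(), used above, delivers the
 * requested signal and then follows up with SIGCONT, so that a process currently stopped by job control
 * still gets a chance to react to SIGTERM and friends. A hedged standalone approximation; the real helper
 * lives in process-util.c and may special-case some signals differently. Kept out of the build with
 * #if 0: */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int kill_and_sigcont_sketch(pid_t pid, int sig) {
        int r;

        r = kill(pid, sig) < 0 ? -errno : 0;

        /* If delivery worked, also wake the process up in case it is currently stopped. */
        if (r >= 0)
                (void) kill(pid, SIGCONT);

        return r;
}
#endif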
4418
4419 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4420 char prefix[strlen(path) + 1], *p;
4421 UnitDependencyInfo di;
4422 int r;
4423
4424 assert(u);
4425 assert(path);
4426
4427 /* Registers a unit as requiring a certain path and all of its prefixes. We keep a hashtable of these paths in
4428 * the unit (mapping each path to the UnitDependencyInfo structure indicating how the dependency came to
4429 * be). In addition, we maintain a manager-wide prefix table covering all possible prefixes, so that newly
4430 * appearing mount units can easily determine which units to make themselves a dependency of. */
4431
4432 if (!path_is_absolute(path))
4433 return -EINVAL;
4434
4435 r = hashmap_ensure_allocated(&u->requires_mounts_for, &string_hash_ops);
4436 if (r < 0)
4437 return r;
4438
4439 p = strdup(path);
4440 if (!p)
4441 return -ENOMEM;
4442
4443 path_kill_slashes(p);
4444
4445 if (!path_is_safe(p)) {
4446 free(p);
4447 return -EPERM;
4448 }
4449
4450 if (hashmap_contains(u->requires_mounts_for, p)) {
4451 free(p);
4452 return 0;
4453 }
4454
4455 di = (UnitDependencyInfo) {
4456 .origin_mask = mask
4457 };
4458
4459 r = hashmap_put(u->requires_mounts_for, p, di.data);
4460 if (r < 0) {
4461 free(p);
4462 return r;
4463 }
4464
4465 PATH_FOREACH_PREFIX_MORE(prefix, p) {
4466 Set *x;
4467
4468 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4469 if (!x) {
4470 char *q;
4471
4472 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
4473 if (r < 0)
4474 return r;
4475
4476 q = strdup(prefix);
4477 if (!q)
4478 return -ENOMEM;
4479
4480 x = set_new(NULL);
4481 if (!x) {
4482 free(q);
4483 return -ENOMEM;
4484 }
4485
4486 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4487 if (r < 0) {
4488 free(q);
4489 set_free(x);
4490 return r;
4491 }
4492 }
4493
4494 r = set_put(x, u);
4495 if (r < 0)
4496 return r;
4497 }
4498
4499 return 0;
4500 }
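/* Illustrative note (not part of the original source): the prefix table maps every parent directory of the
 * registered path to the set of units requiring it. A plain-C standalone sketch of enumerating those
 * prefixes; the real iteration uses the PATH_FOREACH_PREFIX_MORE macro from path-util.h, whose exact order
 * and handling of the root directory may differ. Kept out of the build with #if 0: */
#if 0
#include <stdio.h>
#include <string.h>

int main(void) {
        char p[] = "/var/lib/foo";
        char *slash;

        printf("%s\n", p);                      /* the path itself */

        while ((slash = strrchr(p, '/'))) {
                if (slash == p) {               /* keep the root directory as the last prefix */
                        printf("/\n");
                        break;
                }
                *slash = 0;
                printf("%s\n", p);              /* /var/lib, then /var */
        }

        return 0;
}
#endif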
4501
4502 int unit_setup_exec_runtime(Unit *u) {
4503 ExecRuntime **rt;
4504 size_t offset;
4505 Unit *other;
4506 Iterator i;
4507 void *v;
4508
4509 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4510 assert(offset > 0);
4511
4512 /* Is there already an ExecRuntime for this unit? */
4513 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4514 if (*rt)
4515 return 0;
4516
4517 /* Try to get it from somebody else */
4518 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4519
4520 *rt = unit_get_exec_runtime(other);
4521 if (*rt) {
4522 exec_runtime_ref(*rt);
4523 return 0;
4524 }
4525 }
4526
4527 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
4528 }
4529
4530 int unit_setup_dynamic_creds(Unit *u) {
4531 ExecContext *ec;
4532 DynamicCreds *dcreds;
4533 size_t offset;
4534
4535 assert(u);
4536
4537 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4538 assert(offset > 0);
4539 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4540
4541 ec = unit_get_exec_context(u);
4542 assert(ec);
4543
4544 if (!ec->dynamic_user)
4545 return 0;
4546
4547 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4548 }
4549
4550 bool unit_type_supported(UnitType t) {
4551 if (_unlikely_(t < 0))
4552 return false;
4553 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4554 return false;
4555
4556 if (!unit_vtable[t]->supported)
4557 return true;
4558
4559 return unit_vtable[t]->supported();
4560 }
4561
4562 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4563 int r;
4564
4565 assert(u);
4566 assert(where);
4567
4568 r = dir_is_empty(where);
4569 if (r > 0)
4570 return;
4571 if (r < 0) {
4572 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4573 return;
4574 }
4575
4576 log_struct(LOG_NOTICE,
4577 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4578 LOG_UNIT_ID(u),
4579 LOG_UNIT_INVOCATION_ID(u),
4580 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4581 "WHERE=%s", where,
4582 NULL);
4583 }
4584
4585 int unit_fail_if_symlink(Unit *u, const char* where) {
4586 int r;
4587
4588 assert(u);
4589 assert(where);
4590
4591 r = is_symlink(where);
4592 if (r < 0) {
4593 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
4594 return 0;
4595 }
4596 if (r == 0)
4597 return 0;
4598
4599 log_struct(LOG_ERR,
4600 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4601 LOG_UNIT_ID(u),
4602 LOG_UNIT_INVOCATION_ID(u),
4603 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
4604 "WHERE=%s", where,
4605 NULL);
4606
4607 return -ELOOP;
4608 }
4609
4610 bool unit_is_pristine(Unit *u) {
4611 assert(u);
4612
4613 /* Check, in a number of different ways, whether the unit
4614 * already carries configuration or state. Note that to cater for unit
4615 * types such as slice, we are generally fine with units that
4616 * are marked UNIT_LOADED even though nothing was
4617 * actually loaded, as those unit types don't require a file
4618 * on disk to validly load. */
4619
4620 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
4621 !u->fragment_path &&
4622 !u->source_path &&
4623 strv_isempty(u->dropin_paths) &&
4624 !u->job &&
4625 !u->merged_into;
4626 }
4627
4628 pid_t unit_control_pid(Unit *u) {
4629 assert(u);
4630
4631 if (UNIT_VTABLE(u)->control_pid)
4632 return UNIT_VTABLE(u)->control_pid(u);
4633
4634 return 0;
4635 }
4636
4637 pid_t unit_main_pid(Unit *u) {
4638 assert(u);
4639
4640 if (UNIT_VTABLE(u)->main_pid)
4641 return UNIT_VTABLE(u)->main_pid(u);
4642
4643 return 0;
4644 }
4645
4646 static void unit_unref_uid_internal(
4647 Unit *u,
4648 uid_t *ref_uid,
4649 bool destroy_now,
4650 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4651
4652 assert(u);
4653 assert(ref_uid);
4654 assert(_manager_unref_uid);
4655
4656 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4657 * gid_t are actually the same type, with the same validity rules.
4658 *
4659 * Drops a reference to UID/GID from a unit. */
4660
4661 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4662 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4663
4664 if (!uid_is_valid(*ref_uid))
4665 return;
4666
4667 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4668 *ref_uid = UID_INVALID;
4669 }
4670
4671 void unit_unref_uid(Unit *u, bool destroy_now) {
4672 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4673 }
4674
4675 void unit_unref_gid(Unit *u, bool destroy_now) {
4676 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4677 }
4678
4679 static int unit_ref_uid_internal(
4680 Unit *u,
4681 uid_t *ref_uid,
4682 uid_t uid,
4683 bool clean_ipc,
4684 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4685
4686 int r;
4687
4688 assert(u);
4689 assert(ref_uid);
4690 assert(uid_is_valid(uid));
4691 assert(_manager_ref_uid);
4692
4693 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4694 * are actually the same type, and have the same validity rules.
4695 *
4696 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4697 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4698 * drops to zero. */
4699
4700 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4701 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4702
4703 if (*ref_uid == uid)
4704 return 0;
4705
4706 if (uid_is_valid(*ref_uid)) /* Already set? */
4707 return -EBUSY;
4708
4709 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4710 if (r < 0)
4711 return r;
4712
4713 *ref_uid = uid;
4714 return 1;
4715 }
4716
4717 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4718 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4719 }
4720
4721 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4722 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4723 }
4724
4725 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4726 int r = 0, q = 0;
4727
4728 assert(u);
4729
4730 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4731
4732 if (uid_is_valid(uid)) {
4733 r = unit_ref_uid(u, uid, clean_ipc);
4734 if (r < 0)
4735 return r;
4736 }
4737
4738 if (gid_is_valid(gid)) {
4739 q = unit_ref_gid(u, gid, clean_ipc);
4740 if (q < 0) {
4741 if (r > 0)
4742 unit_unref_uid(u, false);
4743
4744 return q;
4745 }
4746 }
4747
4748 return r > 0 || q > 0;
4749 }
4750
4751 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4752 ExecContext *c;
4753 int r;
4754
4755 assert(u);
4756
4757 c = unit_get_exec_context(u);
4758
4759 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4760 if (r < 0)
4761 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4762
4763 return r;
4764 }
4765
4766 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4767 assert(u);
4768
4769 unit_unref_uid(u, destroy_now);
4770 unit_unref_gid(u, destroy_now);
4771 }
4772
4773 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4774 int r;
4775
4776 assert(u);
4777
4778 /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group name
4779 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4780 * objects when no service references the UID/GID anymore. */
4781
4782 r = unit_ref_uid_gid(u, uid, gid);
4783 if (r > 0)
4784 bus_unit_send_change_signal(u);
4785 }
4786
4787 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4788 int r;
4789
4790 assert(u);
4791
4792 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4793
4794 if (sd_id128_equal(u->invocation_id, id))
4795 return 0;
4796
4797 if (!sd_id128_is_null(u->invocation_id))
4798 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4799
4800 if (sd_id128_is_null(id)) {
4801 r = 0;
4802 goto reset;
4803 }
4804
4805 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4806 if (r < 0)
4807 goto reset;
4808
4809 u->invocation_id = id;
4810 sd_id128_to_string(id, u->invocation_id_string);
4811
4812 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4813 if (r < 0)
4814 goto reset;
4815
4816 return 0;
4817
4818 reset:
4819 u->invocation_id = SD_ID128_NULL;
4820 u->invocation_id_string[0] = 0;
4821 return r;
4822 }
4823
4824 int unit_acquire_invocation_id(Unit *u) {
4825 sd_id128_t id;
4826 int r;
4827
4828 assert(u);
4829
4830 r = sd_id128_randomize(&id);
4831 if (r < 0)
4832 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4833
4834 r = unit_set_invocation_id(u, id);
4835 if (r < 0)
4836 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4837
4838 return 0;
4839 }
4840
4841 void unit_set_exec_params(Unit *u, ExecParameters *p) {
4842 assert(u);
4843 assert(p);
4844
4845 p->cgroup_path = u->cgroup_path;
4846 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, UNIT_CGROUP_BOOL(u, delegate));
4847 }
4848
4849 int unit_fork_helper_process(Unit *u, pid_t *ret) {
4850 pid_t pid;
4851 int r;
4852
4853 assert(u);
4854 assert(ret);
4855
4856 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
4857 * and > 0 in the parent. The ret parameter is always filled in with the child's PID. */
4858
4859 (void) unit_realize_cgroup(u);
4860
4861 pid = fork();
4862 if (pid < 0)
4863 return -errno;
4864
4865 if (pid == 0) {
4866
4867 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
4868 (void) ignore_signals(SIGPIPE, -1);
4869
4870 log_close();
4871 log_open();
4872
4873 if (u->cgroup_path) {
4874 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
4875 if (r < 0) {
4876 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
4877 _exit(EXIT_CGROUP);
4878 }
4879 }
4880
4881 *ret = getpid_cached();
4882 return 0;
4883 }
4884
4885 *ret = pid;
4886 return 1;
4887 }
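/* Illustrative note (not part of the original source): a hedged sketch of how a caller typically uses the
 * helper above: do the work in the child and never return into PID 1's regular code paths, exit or exec
 * instead. The work performed in the child is invented for illustration. Kept out of the build with
 * #if 0: */
#if 0
static int run_helper_example(Unit *u) {
        pid_t pid;
        int r;

        r = unit_fork_helper_process(u, &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* Child: already attached to the unit's cgroup. Do the (illustrative) work, then exit. */
                execl("/bin/true", "true", (char*) NULL);
                _exit(EXIT_FAILURE);
        }

        /* Parent: pid now contains the child's PID, e.g. for setting up a PID watch. */
        return 0;
}
#endif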
4888
4889 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
4890 assert(u);
4891 assert(d >= 0);
4892 assert(d < _UNIT_DEPENDENCY_MAX);
4893 assert(other);
4894
4895 if (di.origin_mask == 0 && di.destination_mask == 0) {
4896 /* No bit set anymore, let's drop the whole entry */
4897 assert_se(hashmap_remove(u->dependencies[d], other));
4898 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
4899 } else
4900 /* Mask was reduced, let's update the entry */
4901 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
4902 }
4903
4904 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
4905 UnitDependency d;
4906
4907 assert(u);
4908
4909 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
4910
4911 if (mask == 0)
4912 return;
4913
4914 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
4915 bool done;
4916
4917 do {
4918 UnitDependencyInfo di;
4919 Unit *other;
4920 Iterator i;
4921
4922 done = true;
4923
4924 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
4925 UnitDependency q;
4926
4927 if ((di.origin_mask & ~mask) == di.origin_mask)
4928 continue;
4929 di.origin_mask &= ~mask;
4930 unit_update_dependency_mask(u, d, other, di);
4931
4932 /* We updated the dependency from our unit to the other unit now. But most dependencies
4933 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
4934 * all dependency types on the other unit and delete all those which point to us and
4935 * have the right mask set. */
4936
4937 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
4938 UnitDependencyInfo dj;
4939
4940 dj.data = hashmap_get(other->dependencies[q], u);
4941 if ((dj.destination_mask & ~mask) == dj.destination_mask)
4942 continue;
4943 dj.destination_mask &= ~mask;
4944
4945 unit_update_dependency_mask(other, q, u, dj);
4946 }
4947
4948 unit_add_to_gc_queue(other);
4949
4950 done = false;
4951 break;
4952 }
4953
4954 } while (!done);
4955 }
4956 }
4957
4958 static int unit_export_invocation_id(Unit *u) {
4959 const char *p;
4960 int r;
4961
4962 assert(u);
4963
4964 if (u->exported_invocation_id)
4965 return 0;
4966
4967 if (sd_id128_is_null(u->invocation_id))
4968 return 0;
4969
4970 p = strjoina("/run/systemd/units/invocation:", u->id);
4971 r = symlink_atomic(u->invocation_id_string, p);
4972 if (r < 0)
4973 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
4974
4975 u->exported_invocation_id = true;
4976 return 0;
4977 }
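/* Illustrative note (not part of the original source): the invocation ID is exported as the *target* of a
 * symlink, so a consumer such as journald can fetch it with a single readlink(). A hedged standalone
 * reader sketch; the unit name is made up for illustration. Kept out of the build with #if 0: */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void) {
        char buf[64];
        ssize_t n;

        n = readlink("/run/systemd/units/invocation:foo.service", buf, sizeof(buf) - 1);
        if (n < 0) {
                perror("readlink");
                return 1;
        }
        buf[n] = 0;

        printf("invocation ID: %s\n", buf);     /* 32 lowercase hexadecimal characters */
        return 0;
}
#endif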
4978
4979 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
4980 const char *p;
4981 char buf[2];
4982 int r;
4983
4984 assert(u);
4985 assert(c);
4986
4987 if (u->exported_log_level_max)
4988 return 0;
4989
4990 if (c->log_level_max < 0)
4991 return 0;
4992
4993 assert(c->log_level_max <= 7);
4994
4995 buf[0] = '0' + c->log_level_max;
4996 buf[1] = 0;
4997
4998 p = strjoina("/run/systemd/units/log-level-max:", u->id);
4999 r = symlink_atomic(buf, p);
5000 if (r < 0)
5001 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5002
5003 u->exported_log_level_max = true;
5004 return 0;
5005 }
5006
5007 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5008 _cleanup_close_ int fd = -1;
5009 struct iovec *iovec;
5010 const char *p;
5011 char *pattern;
5012 le64_t *sizes;
5013 ssize_t n;
5014 size_t i;
5015 int r;
5016
5017 if (u->exported_log_extra_fields)
5018 return 0;
5019
5020 if (c->n_log_extra_fields <= 0)
5021 return 0;
5022
5023 sizes = newa(le64_t, c->n_log_extra_fields);
5024 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5025
5026 for (i = 0; i < c->n_log_extra_fields; i++) {
5027 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5028
5029 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5030 iovec[i*2+1] = c->log_extra_fields[i];
5031 }
5032
5033 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5034 pattern = strjoina(p, ".XXXXXX");
5035
5036 fd = mkostemp_safe(pattern);
5037 if (fd < 0)
5038 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5039
5040 n = writev(fd, iovec, c->n_log_extra_fields*2);
5041 if (n < 0) {
5042 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5043 goto fail;
5044 }
5045
5046 (void) fchmod(fd, 0644);
5047
5048 if (rename(pattern, p) < 0) {
5049 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5050 goto fail;
5051 }
5052
5053 u->exported_log_extra_fields = true;
5054 return 0;
5055
5056 fail:
5057 (void) unlink(pattern);
5058 return r;
5059 }
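/* Illustrative note (not part of the original source): the extra-fields file written above is a simple
 * sequence of records, each a little-endian 64-bit length followed by that many bytes of "FIELD=value"
 * data. A hedged standalone reader sketch; the unit name is made up for illustration. Kept out of the
 * build with #if 0: */
#if 0
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
        FILE *f;
        uint64_t le;

        f = fopen("/run/systemd/units/log-extra-fields:foo.service", "re");
        if (!f)
                return 1;

        while (fread(&le, sizeof(le), 1, f) == 1) {
                uint64_t sz = le64toh(le);      /* length of the following field, little-endian on disk */
                char *buf = malloc(sz + 1);

                if (!buf || fread(buf, 1, sz, f) != sz) {
                        free(buf);
                        break;
                }
                buf[sz] = 0;

                printf("%s\n", buf);            /* e.g. "FOO=bar" */
                free(buf);
        }

        fclose(f);
        return 0;
}
#endif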
5060
5061 void unit_export_state_files(Unit *u) {
5062 const ExecContext *c;
5063
5064 assert(u);
5065
5066 if (!u->id)
5067 return;
5068
5069 if (!MANAGER_IS_SYSTEM(u->manager))
5070 return;
5071
5072 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5073 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5074 * the IPC system itself and PID 1 also log to the journal.
5075 *
5076 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system
5077 * as an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5078 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5079 * namespace at least.
5080 *
5081 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5082 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5083 * them with one. */
5084
5085 (void) unit_export_invocation_id(u);
5086
5087 c = unit_get_exec_context(u);
5088 if (c) {
5089 (void) unit_export_log_level_max(u, c);
5090 (void) unit_export_log_extra_fields(u, c);
5091 }
5092 }
5093
5094 void unit_unlink_state_files(Unit *u) {
5095 const char *p;
5096
5097 assert(u);
5098
5099 if (!u->id)
5100 return;
5101
5102 if (!MANAGER_IS_SYSTEM(u->manager))
5103 return;
5104
5105 /* Undoes the effect of unit_export_state_files() */
5106
5107 if (u->exported_invocation_id) {
5108 p = strjoina("/run/systemd/units/invocation:", u->id);
5109 (void) unlink(p);
5110
5111 u->exported_invocation_id = false;
5112 }
5113
5114 if (u->exported_log_level_max) {
5115 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5116 (void) unlink(p);
5117
5118 u->exported_log_level_max = false;
5119 }
5120
5121 if (u->exported_log_extra_fields) {
5122 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5123 (void) unlink(p);
5124
5125 u->exported_log_extra_fields = false;
5126 }
5127 }