]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #7198 from poettering/stdin-stdout
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <stdlib.h>
23 #include <string.h>
24 #include <sys/stat.h>
25 #include <unistd.h>
26
27 #include "sd-id128.h"
28 #include "sd-messages.h"
29
30 #include "alloc-util.h"
31 #include "bus-common-errors.h"
32 #include "bus-util.h"
33 #include "cgroup-util.h"
34 #include "dbus-unit.h"
35 #include "dbus.h"
36 #include "dropin.h"
37 #include "escape.h"
38 #include "execute.h"
39 #include "fd-util.h"
40 #include "fileio-label.h"
41 #include "format-util.h"
42 #include "fs-util.h"
43 #include "id128-util.h"
44 #include "io-util.h"
45 #include "load-dropin.h"
46 #include "load-fragment.h"
47 #include "log.h"
48 #include "macro.h"
49 #include "missing.h"
50 #include "mkdir.h"
51 #include "parse-util.h"
52 #include "path-util.h"
53 #include "process-util.h"
54 #include "set.h"
55 #include "signal-util.h"
56 #include "sparse-endian.h"
57 #include "special.h"
58 #include "stat-util.h"
59 #include "stdio-util.h"
60 #include "string-table.h"
61 #include "string-util.h"
62 #include "strv.h"
63 #include "umask-util.h"
64 #include "unit-name.h"
65 #include "unit.h"
66 #include "user-util.h"
67 #include "virt.h"
68
/* Per-type dispatch table, indexed by UnitType. Each concrete unit type (service, socket, ...) registers
 * its implementation here; generic code dispatches through the UNIT_VTABLE() macro. */
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};
82
83 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
84
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        /* Allocates and zero-initializes a new unit object of 'size' bytes ('size' covers the type-specific
         * struct that embeds Unit at its head) and presets every field whose default is not all-zeroes.
         * Returns NULL on OOM; the caller owns the result and releases it with unit_free(). */

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;  /* type is determined later, when the first name is added */
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;  /* -1 = "not known yet" */
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_inotify_wd = -1;  /* no cgroup inotify watch installed yet */
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;

        /* All IP accounting/firewall BPF map fds start out closed. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        /* Start limit comes from the manager-wide defaults; the auto-stop rate limit is fixed at 16 events
         * per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        return u;
}
124
125 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
126 Unit *u;
127 int r;
128
129 u = unit_new(m, size);
130 if (!u)
131 return -ENOMEM;
132
133 r = unit_add_name(u, name);
134 if (r < 0) {
135 unit_free(u);
136 return r;
137 }
138
139 *ret = u;
140 return r;
141 }
142
143 bool unit_has_name(Unit *u, const char *name) {
144 assert(u);
145 assert(name);
146
147 return set_contains(u->names, (char*) name);
148 }
149
150 static void unit_init(Unit *u) {
151 CGroupContext *cc;
152 ExecContext *ec;
153 KillContext *kc;
154
155 assert(u);
156 assert(u->manager);
157 assert(u->type >= 0);
158
159 cc = unit_get_cgroup_context(u);
160 if (cc) {
161 cgroup_context_init(cc);
162
163 /* Copy in the manager defaults into the cgroup
164 * context, _before_ the rest of the settings have
165 * been initialized */
166
167 cc->cpu_accounting = u->manager->default_cpu_accounting;
168 cc->io_accounting = u->manager->default_io_accounting;
169 cc->ip_accounting = u->manager->default_ip_accounting;
170 cc->blockio_accounting = u->manager->default_blockio_accounting;
171 cc->memory_accounting = u->manager->default_memory_accounting;
172 cc->tasks_accounting = u->manager->default_tasks_accounting;
173 cc->ip_accounting = u->manager->default_ip_accounting;
174
175 if (u->type != UNIT_SLICE)
176 cc->tasks_max = u->manager->default_tasks_max;
177 }
178
179 ec = unit_get_exec_context(u);
180 if (ec) {
181 exec_context_init(ec);
182
183 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
184 EXEC_KEYRING_PRIVATE : EXEC_KEYRING_INHERIT;
185 }
186
187 kc = unit_get_kill_context(u);
188 if (kc)
189 kill_context_init(kc);
190
191 if (UNIT_VTABLE(u)->init)
192 UNIT_VTABLE(u)->init(u);
193 }
194
/* Registers an additional name (alias) on the unit. Template names are instantiated against the unit's
 * instance string first. Returns 0 on success (or if the name was already registered), -EEXIST if the name
 * is taken or the type does not permit aliases, -EINVAL on invalid/mismatching names, -E2BIG if the
 * manager's name limit is hit, or another negative errno-style code. Adding the very first name fixes the
 * unit's type and triggers unit_init(). */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                /* A template name only makes sense if we have an instance to fill in. */
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        /* Already one of our names? Nothing to do. */
        if (set_contains(u->names, s))
                return 0;
        /* Name owned by some other unit? */
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        /* A new name may not change the unit's already established type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                /* Roll back the set insertion so both containers stay consistent. */
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                /* First name added: it determines the unit's type, id and instance. */
                u->type = t;
                u->id = s;
                u->instance = i;

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);

                i = NULL;  /* ownership of the instance string moved to u->instance */
        }

        s = NULL;  /* ownership of the name moved into u->names */

        unit_add_to_dbus_queue(u);
        return 0;
}
279
/* Makes one of the unit's already-registered names its primary id (and updates the instance string
 * accordingly). Template names are instantiated first. Returns -ENOENT if the name is not one of the unit's
 * names, -EINVAL for a template without an instance, or < 0 on other errors. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);  /* use the set's own copy; u->id must alias set-owned memory */
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        /* Replace the old instance string with the freshly extracted one. */
        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}
319
320 int unit_set_description(Unit *u, const char *description) {
321 int r;
322
323 assert(u);
324
325 r = free_and_strdup(&u->description, empty_to_null(description));
326 if (r < 0)
327 return r;
328 if (r > 0)
329 unit_add_to_dbus_queue(u);
330
331 return 0;
332 }
333
bool unit_check_gc(Unit *u) {
        UnitActiveState state;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true, when the unit shall
         * stay around, false if there's no reason to keep it loaded. */

        /* Pending jobs always pin the unit. */
        if (u->job)
                return true;

        if (u->nop_job)
                return true;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Perpetual units (e.g. -.slice) are never collected. */
        if (u->perpetual)
                return true;

        /* Other units referencing us pin us too. */
        if (u->refs)
                return true;

        /* As do bus clients tracking us. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return true;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                /* Only plain "inactive" is collectable; "failed" is kept for inspection. */
                if (state != UNIT_INACTIVE)
                        return true;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return true;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        /* Finally give the unit type a veto. */
        if (UNIT_VTABLE(u)->check_gc)
                if (UNIT_VTABLE(u)->check_gc(u))
                        return true;

        return false;
}
389
390 void unit_add_to_load_queue(Unit *u) {
391 assert(u);
392 assert(u->type != _UNIT_TYPE_INVALID);
393
394 if (u->load_state != UNIT_STUB || u->in_load_queue)
395 return;
396
397 LIST_PREPEND(load_queue, u->manager->load_queue, u);
398 u->in_load_queue = true;
399 }
400
401 void unit_add_to_cleanup_queue(Unit *u) {
402 assert(u);
403
404 if (u->in_cleanup_queue)
405 return;
406
407 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
408 u->in_cleanup_queue = true;
409 }
410
411 void unit_add_to_gc_queue(Unit *u) {
412 assert(u);
413
414 if (u->in_gc_queue || u->in_cleanup_queue)
415 return;
416
417 if (unit_check_gc(u))
418 return;
419
420 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
421 u->in_gc_queue = true;
422 }
423
424 void unit_add_to_dbus_queue(Unit *u) {
425 assert(u);
426 assert(u->type != _UNIT_TYPE_INVALID);
427
428 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
429 return;
430
431 /* Shortcut things if nobody cares */
432 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
433 sd_bus_track_count(u->bus_track) <= 0 &&
434 set_isempty(u->manager->private_buses)) {
435 u->sent_dbus_new_signal = true;
436 return;
437 }
438
439 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
440 u->in_dbus_queue = true;
441 }
442
static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                /* Scrub 'u' from every dependency map of the peer unit, whatever the dependency type. */
                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                /* The peer may have been pinned only by us; let GC reconsider it. */
                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}
463
/* Deletes the on-disk configuration of a transient unit: its fragment file and any drop-ins that live below
 * the manager's transient lookup path. No-op for non-transient units; all unlink errors are ignored. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);  /* only succeeds once the drop-in dir is empty, which is fine */
        }
}
494
/* Tears down the unit's RequiresMountsFor bookkeeping: for each registered path, removes the unit from the
 * manager's reverse map (units_requiring_mounts_for) for that path and all of its prefixes, dropping
 * now-empty reverse entries entirely, then frees the unit's own map. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                /* Pop one path at a time; we own the key from here on. */
                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk "/", "/foo", "/foo/bar", ... up to the full path. */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        /* Last unit for this prefix gone: drop the reverse-map entry and
                                         * the key string 'y' the manager map owned. */
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
528
529 static void unit_done(Unit *u) {
530 ExecContext *ec;
531 CGroupContext *cc;
532
533 assert(u);
534
535 if (u->type < 0)
536 return;
537
538 if (UNIT_VTABLE(u)->done)
539 UNIT_VTABLE(u)->done(u);
540
541 ec = unit_get_exec_context(u);
542 if (ec)
543 exec_context_done(ec);
544
545 cc = unit_get_cgroup_context(u);
546 if (cc)
547 cgroup_context_done(cc);
548 }
549
/* Destroys a unit object: deregisters it everywhere (name maps, job slots, dependency maps, all manager
 * queues), releases its cgroup and state files (unless the manager is reloading), closes BPF fds and frees
 * all owned memory. NULL-safe. */
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (u->transient_file)
                fclose(u->transient_file);

        /* During a reload the transient config files must survive so they can be re-read. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        sd_bus_slot_unref(u->match_bus_slot);

        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop every one of our names from the manager's name map. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Uninstall before freeing so the job is removed from the manager's tables too. */
        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Free all dependency maps, fixing up the back-pointers in peer units. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        /* Unlink from every manager work queue we might still sit in. */
        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        unit_release_cgroup(u);

        /* Keep serialized state files across a reload; they carry the unit's runtime state. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        /* u->id aliases one of these strings, hence no separate free for it. */
        set_free_free(u->names);

        unit_unwatch_all_pids(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->reboot_arg);

        unit_ref_unset(&u->slice);

        /* Break every remaining reference others hold on us. */
        while (u->refs)
                unit_ref_unset(u->refs);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_egress);

        free(u);
}
663
664 UnitActiveState unit_active_state(Unit *u) {
665 assert(u);
666
667 if (u->load_state == UNIT_MERGED)
668 return unit_active_state(unit_follow_merge(u));
669
670 /* After a reload it might happen that a unit is not correctly
671 * loaded but still has a process around. That's why we won't
672 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
673
674 return UNIT_VTABLE(u)->active_state(u);
675 }
676
677 const char* unit_sub_state_to_string(Unit *u) {
678 assert(u);
679
680 return UNIT_VTABLE(u)->sub_state_to_string(u);
681 }
682
683 static int set_complete_move(Set **s, Set **other) {
684 assert(s);
685 assert(other);
686
687 if (!other)
688 return 0;
689
690 if (*s)
691 return set_move(*s, *other);
692 else {
693 *s = *other;
694 *other = NULL;
695 }
696
697 return 0;
698 }
699
700 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
701 assert(s);
702 assert(other);
703
704 if (!*other)
705 return 0;
706
707 if (*s)
708 return hashmap_move(*s, *other);
709 else {
710 *s = *other;
711 *other = NULL;
712 }
713
714 return 0;
715 }
716
/* Transfers all names of 'other' into 'u' and repoints the manager's name map entries at 'u'. On success
 * 'other' is left without names or id. Returns < 0 on OOM during the set move. */
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        /* Free whatever (if anything) is left in other's set; its id aliased one of those strings. */
        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        /* Every name now resolves to 'u' in the manager's units map. */
        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}
738
static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        /* Pre-allocate so the later move cannot fail mid-way. */
        return hashmap_reserve(u->dependencies[d], n_reserve);
}
759
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                /* di_u.data may be NULL here, which makes di_u's masks zero — exactly right
                                 * when "back" had no pre-existing dependency on "u". */
                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Cannot fail for lack of memory: the "other" key is freed, and the "u" slot
                                 * either exists already or reuses the freed capacity. */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
823
/* Merges unit 'other' into unit 'u': names, references and dependencies all move to 'u', and 'other' is
 * left as a UNIT_MERGED husk pointing at 'u' and queued for cleanup. Fails with -EEXIST when the types
 * don't allow aliases or 'other' is already loaded/active/has jobs, -EINVAL on type or instance mismatch. */
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        /* Both must be instanced, or both not. */
        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Copy the id onto the stack: the original string dies during merge_names() below, but we still
         * want it for warning messages afterwards. */
        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs)
                unit_ref_set(other->refs, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
901
902 int unit_merge_by_name(Unit *u, const char *name) {
903 _cleanup_free_ char *s = NULL;
904 Unit *other;
905 int r;
906
907 assert(u);
908 assert(name);
909
910 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
911 if (!u->instance)
912 return -EINVAL;
913
914 r = unit_name_replace_instance(name, u->instance, &s);
915 if (r < 0)
916 return r;
917
918 name = s;
919 }
920
921 other = manager_get_unit(u->manager, name);
922 if (other)
923 return unit_merge(u, other);
924
925 return unit_add_name(u, name);
926 }
927
928 Unit* unit_follow_merge(Unit *u) {
929 assert(u);
930
931 while (u->load_state == UNIT_MERGED)
932 assert_se(u = u->merged_into);
933
934 return u;
935 }
936
/* Adds the implicit dependencies every unit with an ExecContext needs: mount dependencies for its working/
 * root directories and configured exec directories, and (for system instances) ordering against
 * tmpfiles-setup and journald when private /tmp or journal/syslog/kmsg logging is used. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* RuntimeDirectory=, StateDirectory=, ... — each resolved against the per-type prefix. */
        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* The remaining dependencies only make sense for the system instance. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1016
1017 const char *unit_description(Unit *u) {
1018 assert(u);
1019
1020 if (u->description)
1021 return u->description;
1022
1023 return strna(u->id);
1024 }
1025
1026 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1027 const struct {
1028 UnitDependencyMask mask;
1029 const char *name;
1030 } table[] = {
1031 { UNIT_DEPENDENCY_FILE, "file" },
1032 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1033 { UNIT_DEPENDENCY_DEFAULT, "default" },
1034 { UNIT_DEPENDENCY_UDEV, "udev" },
1035 { UNIT_DEPENDENCY_PATH, "path" },
1036 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1037 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1038 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1039 };
1040 size_t i;
1041
1042 assert(f);
1043 assert(kind);
1044 assert(space);
1045
1046 for (i = 0; i < ELEMENTSOF(table); i++) {
1047
1048 if (mask == 0)
1049 break;
1050
1051 if ((mask & table[i].mask) == table[i].mask) {
1052 if (*space)
1053 fputc(' ', f);
1054 else
1055 *space = true;
1056
1057 fputs(kind, f);
1058 fputs("-", f);
1059 fputs(table[i].name, f);
1060
1061 mask &= ~table[i].mask;
1062 }
1063 }
1064
1065 assert(mask == 0);
1066 }
1067
/* Dumps a full human-readable description of the unit's state to f, each line prefixed with 'prefix'.
 * Covers identity, timestamps, cgroup state, names, documentation, paths, conditions/asserts, all
 * dependencies (with their origin/destination masks), load-state specific data, bus refs and jobs. */
void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char
                timestamp0[FORMAT_TIMESTAMP_MAX],
                timestamp1[FORMAT_TIMESTAMP_MAX],
                timestamp2[FORMAT_TIMESTAMP_MAX],
                timestamp3[FORMAT_TIMESTAMP_MAX],
                timestamp4[FORMAT_TIMESTAMP_MAX],
                timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");  /* one level deeper, for nested dumps */

        fprintf(f,
                "%s-> Unit %s:\n"
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tGC Check Good: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, u->id,
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_check_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        /* The various cgroup controller masks, printed only when non-empty. */
        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }
        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }
        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }
        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        /* All names (id and aliases). */
        SET_FOREACH(t, u->names, i)
                fprintf(f, "%s\tName: %s\n", prefix, t);

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        /* One line per dependency, annotated with the masks saying where it came from. */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                /* Let the unit type append its specific state. */
                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}
1277
1278 /* Common implementation for multiple backends */
1279 int unit_load_fragment_and_dropin(Unit *u) {
1280 int r;
1281
1282 assert(u);
1283
1284 /* Load a .{service,socket,...} file */
1285 r = unit_load_fragment(u);
1286 if (r < 0)
1287 return r;
1288
1289 if (u->load_state == UNIT_STUB)
1290 return -ENOENT;
1291
1292 /* Load drop-in directory data. If u is an alias, we might be reloading the
1293 * target unit needlessly. But we cannot be sure which drops-ins have already
1294 * been loaded and which not, at least without doing complicated book-keeping,
1295 * so let's always reread all drop-ins. */
1296 return unit_load_dropin(unit_follow_merge(u));
1297 }
1298
1299 /* Common implementation for multiple backends */
1300 int unit_load_fragment_and_dropin_optional(Unit *u) {
1301 int r;
1302
1303 assert(u);
1304
1305 /* Same as unit_load_fragment_and_dropin(), but whether
1306 * something can be loaded or not doesn't matter. */
1307
1308 /* Load a .service file */
1309 r = unit_load_fragment(u);
1310 if (r < 0)
1311 return r;
1312
1313 if (u->load_state == UNIT_STUB)
1314 u->load_state = UNIT_LOADED;
1315
1316 /* Load drop-in directory data */
1317 return unit_load_dropin(unit_follow_merge(u));
1318 }
1319
1320 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1321 assert(u);
1322 assert(target);
1323
1324 if (target->type != UNIT_TARGET)
1325 return 0;
1326
1327 /* Only add the dependency if both units are loaded, so that
1328 * that loop check below is reliable */
1329 if (u->load_state != UNIT_LOADED ||
1330 target->load_state != UNIT_LOADED)
1331 return 0;
1332
1333 /* If either side wants no automatic dependencies, then let's
1334 * skip this */
1335 if (!u->default_dependencies ||
1336 !target->default_dependencies)
1337 return 0;
1338
1339 /* Don't create loops */
1340 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1341 return 0;
1342
1343 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1344 }
1345
1346 static int unit_add_target_dependencies(Unit *u) {
1347
1348 static const UnitDependency deps[] = {
1349 UNIT_REQUIRED_BY,
1350 UNIT_REQUISITE_OF,
1351 UNIT_WANTED_BY,
1352 UNIT_BOUND_BY
1353 };
1354
1355 unsigned k;
1356 int r = 0;
1357
1358 assert(u);
1359
1360 for (k = 0; k < ELEMENTSOF(deps); k++) {
1361 Unit *target;
1362 Iterator i;
1363 void *v;
1364
1365 HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[k]], i) {
1366 r = unit_add_default_target_dependency(u, target);
1367 if (r < 0)
1368 return r;
1369 }
1370 }
1371
1372 return r;
1373 }
1374
1375 static int unit_add_slice_dependencies(Unit *u) {
1376 UnitDependencyMask mask;
1377 assert(u);
1378
1379 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1380 return 0;
1381
1382 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1383 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1384 relationship). */
1385 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1386
1387 if (UNIT_ISSET(u->slice))
1388 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1389
1390 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1391 return 0;
1392
1393 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true, mask);
1394 }
1395
/* For each path listed in RequiresMountsFor=, add After= (and, when backed by a unit file, Requires=)
 * dependencies on the .mount units covering the path and every one of its prefix directories. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA scratch buffer, sized to hold any prefix of 'path' (incl. NUL). */
                char prefix[strlen(path) + 1];

                /* Walk /, /foo, /foo/bar, … up to and including the full path. */
                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        /* Don't depend on ourselves. */
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        /* Ordering is always added; the origin mask is inherited from the
                         * RequiresMountsFor= entry that caused it. */
                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* A hard requirement only if the mount is backed by a unit file (i.e. not a
                         * transient/synthesized mount we shouldn't pull in). */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1445
1446 static int unit_add_startup_units(Unit *u) {
1447 CGroupContext *c;
1448 int r;
1449
1450 c = unit_get_cgroup_context(u);
1451 if (!c)
1452 return 0;
1453
1454 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1455 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1456 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1457 return 0;
1458
1459 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1460 if (r < 0)
1461 return r;
1462
1463 return set_put(u->manager->startup_units, u);
1464 }
1465
/* Drive the full load of a unit: flush transient files, run the type-specific loader, then add all
 * implicit dependencies. On failure the unit ends up in UNIT_NOT_FOUND or UNIT_ERROR state. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are loading now, hence drop ourselves from the load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/failed earlier) — nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        /* Transient units are backed by a unit file we wrote ourselves; flush and close it before the
         * type-specific load logic rereads it. */
        if (u->transient_file) {
                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                fclose(u->transient_file);
                u->transient_file = NULL;

                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        /* Delegate to the unit-type-specific loader. */
        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        /* If the loader didn't advance the state, no configuration was found. */
        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {

                /* Add the implicit dependencies that follow from the parsed configuration. */
                r = unit_add_target_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with a single OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -EINVAL;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                unit_update_cgroup_members_masks(u);
        }

        /* Invariant: merged_into is set iff the load state is UNIT_MERGED. */
        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* A stub that failed to load was simply not found; anything else is a real load error. */
        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
        u->load_error = r;
        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        log_unit_debug_errno(u, r, "Failed to load configuration: %m");

        return r;
}
1551
/* Evaluate a list of Condition (or Assert) objects. Returns true iff all non-trigger conditions hold
 * and, when trigger ("|") conditions exist, at least one of them holds. A condition whose test errors
 * out is treated as failed. */
static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
        Condition *c;
        /* Tri-state: -1 = no trigger conditions seen yet, 0 = seen but none hit, 1 = one hit. */
        int triggered = -1;

        assert(u);
        assert(to_string);

        /* If the condition list is empty, then it is true */
        if (!first)
                return true;

        /* Otherwise, if all of the non-trigger conditions apply and
         * if any of the trigger conditions apply (unless there are
         * none) we return true */
        LIST_FOREACH(conditions, c, first) {
                int r;

                r = condition_test(c);
                if (r < 0)
                        log_unit_warning(u,
                                         "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
                                         to_string(c->type),
                                         c->trigger ? "|" : "",
                                         c->negate ? "!" : "",
                                         c->parameter);
                else
                        log_unit_debug(u,
                                       "%s=%s%s%s %s.",
                                       to_string(c->type),
                                       c->trigger ? "|" : "",
                                       c->negate ? "!" : "",
                                       c->parameter,
                                       condition_result_to_string(c->result));

                /* A failed (or errored, r < 0) non-trigger condition fails the whole list. */
                if (!c->trigger && r <= 0)
                        return false;

                /* Latch the first successful trigger condition; don't overwrite a prior success. */
                if (c->trigger && triggered <= 0)
                        triggered = r > 0;
        }

        /* True unless trigger conditions existed and none of them hit. */
        return triggered != 0;
}
1595
1596 static bool unit_condition_test(Unit *u) {
1597 assert(u);
1598
1599 dual_timestamp_get(&u->condition_timestamp);
1600 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1601
1602 return u->condition_result;
1603 }
1604
1605 static bool unit_assert_test(Unit *u) {
1606 assert(u);
1607
1608 dual_timestamp_get(&u->assert_timestamp);
1609 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1610
1611 return u->assert_result;
1612 }
1613
/* Print a console status line for 'u'. 'unit_status_msg_format' must contain exactly one %s, which is
 * filled in with the unit's description; the warning suppression is needed because the format string
 * is not a literal here (it comes from the per-type vtable or a static table). */
void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
        REENABLE_WARNING;
}
1619
1620 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1621 const char *format;
1622 const UnitStatusMessageFormats *format_table;
1623
1624 assert(u);
1625 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1626
1627 if (t != JOB_RELOAD) {
1628 format_table = &UNIT_VTABLE(u)->status_message_formats;
1629 if (format_table) {
1630 format = format_table->starting_stopping[t == JOB_STOP];
1631 if (format)
1632 return format;
1633 }
1634 }
1635
1636 /* Return generic strings */
1637 if (t == JOB_START)
1638 return "Starting %s.";
1639 else if (t == JOB_STOP)
1640 return "Stopping %s.";
1641 else
1642 return "Reloading %s.";
1643 }
1644
1645 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1646 const char *format;
1647
1648 assert(u);
1649
1650 /* Reload status messages have traditionally not been printed to console. */
1651 if (!IN_SET(t, JOB_START, JOB_STOP))
1652 return;
1653
1654 format = unit_get_status_message_format(u, t);
1655
1656 DISABLE_WARNING_FORMAT_NONLITERAL;
1657 unit_status_printf(u, "", format);
1658 REENABLE_WARNING;
1659 }
1660
/* Log the "Starting/Stopping/Reloading …" message to the journal (unless the log already goes to the
 * console, in which case the status output covers it). */
static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);

        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        /* Avoid duplicating the message when logs are routed to the console anyway. */
        if (log_on_console())
                return;

        /* We log status messages for all units and all operations. */

        format = unit_get_status_message_format(u, t);

        /* 'format' is a trusted vtable/static string containing exactly one %s, hence the targeted
         * suppression of -Wformat-nonliteral. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the journal MESSAGE_ID matching the operation. */
        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP  ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
                               "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed the highest level, friendliest output
         * possible, which means we should avoid the low-level unit
         * name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid,
                   NULL);
}
1698
1699 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1700 assert(u);
1701 assert(t >= 0);
1702 assert(t < _JOB_TYPE_MAX);
1703
1704 unit_status_log_starting_stopping_reloading(u, t);
1705 unit_status_print_starting_stopping(u, t);
1706 }
1707
1708 int unit_start_limit_test(Unit *u) {
1709 assert(u);
1710
1711 if (ratelimit_test(&u->start_limit)) {
1712 u->start_limit_hit = false;
1713 return 0;
1714 }
1715
1716 log_unit_warning(u, "Start request repeated too quickly.");
1717 u->start_limit_hit = true;
1718
1719 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1720 }
1721
1722 bool unit_shall_confirm_spawn(Unit *u) {
1723 assert(u);
1724
1725 if (manager_is_confirm_spawn_disabled(u->manager))
1726 return false;
1727
1728 /* For some reasons units remaining in the same process group
1729 * as PID 1 fail to acquire the console even if it's not used
1730 * by any process. So skip the confirmation question for them. */
1731 return !unit_get_exec_context(u)->same_pgrp;
1732 }
1733
1734 static bool unit_verify_deps(Unit *u) {
1735 Unit *other;
1736 Iterator j;
1737 void *v;
1738
1739 assert(u);
1740
1741 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1742 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1743 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1744 * conjunction with After= as for them any such check would make things entirely racy. */
1745
1746 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1747
1748 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1749 continue;
1750
1751 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1752 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1753 return false;
1754 }
1755 }
1756
1757 return true;
1758 }
1759
/* Errors:
 * -EBADR: This unit type does not support starting.
 * -EALREADY: Unit is already started.
 * -EAGAIN: An operation is already in progress. Retry later.
 * -ECANCELED: Too many requests for now.
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note
         * that this will even succeed if this unit is not startable
         * by the user. This is relied on to detect when we need to
         * wait for units and when waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* If the conditions failed, don't do anything at all. If we
         * already are activating this call might still be useful to
         * speed up activation in case there is some hold-off time,
         * but we don't want to recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_condition_test(u)) {
                log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
                /* NB: a failed condition is reported as -EALREADY, i.e. as success-like, since it is
                 * not considered a real failure. */
                return -EALREADY;
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_assert_test(u)) {
                log_unit_notice(u, "Starting requested but asserts failed.");
                return -EPROTO;
        }

        /* Units of types that aren't supported cannot be
         * started. Note that we do this test only after the condition
         * checks, so that we rather return condition check errors
         * (which are usually not considered a true failure) than "not
         * supported" errors (which are considered a failure).
         */
        if (!unit_supported(u))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
         * taken care of this already, but let's check this here again. After all, our dependencies might not be in
         * effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are
         * already starting, to allow this request to be used as a
         * "hurry up" call, for example when the unit is in some "auto
         * restart" state where it waits for a holdoff timer to elapse
         * before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
1841
1842 bool unit_can_start(Unit *u) {
1843 assert(u);
1844
1845 if (u->load_state != UNIT_LOADED)
1846 return false;
1847
1848 if (!unit_supported(u))
1849 return false;
1850
1851 return !!UNIT_VTABLE(u)->start;
1852 }
1853
1854 bool unit_can_isolate(Unit *u) {
1855 assert(u);
1856
1857 return unit_can_start(u) &&
1858 u->allow_isolate;
1859 }
1860
1861 /* Errors:
1862 * -EBADR: This unit type does not support stopping.
1863 * -EALREADY: Unit is already stopped.
1864 * -EAGAIN: An operation is already in progress. Retry later.
1865 */
1866 int unit_stop(Unit *u) {
1867 UnitActiveState state;
1868 Unit *following;
1869
1870 assert(u);
1871
1872 state = unit_active_state(u);
1873 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1874 return -EALREADY;
1875
1876 following = unit_following(u);
1877 if (following) {
1878 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1879 return unit_stop(following);
1880 }
1881
1882 if (!UNIT_VTABLE(u)->stop)
1883 return -EBADR;
1884
1885 unit_add_to_dbus_queue(u);
1886
1887 return UNIT_VTABLE(u)->stop(u);
1888 }
1889
1890 bool unit_can_stop(Unit *u) {
1891 assert(u);
1892
1893 if (!unit_supported(u))
1894 return false;
1895
1896 if (u->perpetual)
1897 return false;
1898
1899 return !!UNIT_VTABLE(u)->stop;
1900 }
1901
1902 /* Errors:
1903 * -EBADR: This unit type does not support reloading.
1904 * -ENOEXEC: Unit is not started.
1905 * -EAGAIN: An operation is already in progress. Retry later.
1906 */
1907 int unit_reload(Unit *u) {
1908 UnitActiveState state;
1909 Unit *following;
1910
1911 assert(u);
1912
1913 if (u->load_state != UNIT_LOADED)
1914 return -EINVAL;
1915
1916 if (!unit_can_reload(u))
1917 return -EBADR;
1918
1919 state = unit_active_state(u);
1920 if (state == UNIT_RELOADING)
1921 return -EALREADY;
1922
1923 if (state != UNIT_ACTIVE) {
1924 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1925 return -ENOEXEC;
1926 }
1927
1928 following = unit_following(u);
1929 if (following) {
1930 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1931 return unit_reload(following);
1932 }
1933
1934 unit_add_to_dbus_queue(u);
1935
1936 if (!UNIT_VTABLE(u)->reload) {
1937 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1938 unit_notify(u, unit_active_state(u), unit_active_state(u), true);
1939 return 0;
1940 }
1941
1942 return UNIT_VTABLE(u)->reload(u);
1943 }
1944
1945 bool unit_can_reload(Unit *u) {
1946 assert(u);
1947
1948 if (UNIT_VTABLE(u)->can_reload)
1949 return UNIT_VTABLE(u)->can_reload(u);
1950
1951 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1952 return true;
1953
1954 return UNIT_VTABLE(u)->reload;
1955 }
1956
1957 static void unit_check_unneeded(Unit *u) {
1958
1959 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1960
1961 static const UnitDependency needed_dependencies[] = {
1962 UNIT_REQUIRED_BY,
1963 UNIT_REQUISITE_OF,
1964 UNIT_WANTED_BY,
1965 UNIT_BOUND_BY,
1966 };
1967
1968 unsigned j;
1969 int r;
1970
1971 assert(u);
1972
1973 /* If this service shall be shut down when unneeded then do
1974 * so. */
1975
1976 if (!u->stop_when_unneeded)
1977 return;
1978
1979 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1980 return;
1981
1982 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++) {
1983 Unit *other;
1984 Iterator i;
1985 void *v;
1986
1987 HASHMAP_FOREACH_KEY(v, other, u->dependencies[needed_dependencies[j]], i)
1988 if (unit_active_or_pending(other))
1989 return;
1990 }
1991
1992 /* If stopping a unit fails continuously we might enter a stop
1993 * loop here, hence stop acting on the service being
1994 * unnecessary after a while. */
1995 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1996 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1997 return;
1998 }
1999
2000 log_unit_info(u, "Unit not needed anymore. Stopping.");
2001
2002 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
2003 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
2004 if (r < 0)
2005 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2006 }
2007
/* If this unit is active but one of the units it declares BindsTo= on has gone away, enqueue a stop
 * job for it (rate-limited to avoid stop loops). */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        /* A pending job on us will sort this out by itself. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* NB: 'other' deliberately keeps pointing at the offending unit after this break, for
                 * use in the log messages below. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_test(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
2058
/* Called when 'u' retroactively became active (e.g. was found running during coldplug): queue start
 * jobs for its requirement dependencies and stop jobs for its conflicts. Units we are ordered After=
 * are skipped, since for those the job engine would have handled it. Job add failures are ignored
 * here (best effort). */
static void retroactively_start_dependencies(Unit *u) {
        Iterator i;
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        /* Requires= pulls units in hard: failure to start them replaces existing jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);

        /* Wants= is weaker: use JOB_FAIL so an impossible start doesn't displace other jobs. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);

        /* Conflicting units are taken down, in both directions. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
}
2090
2091 static void retroactively_stop_dependencies(Unit *u) {
2092 Unit *other;
2093 Iterator i;
2094 void *v;
2095
2096 assert(u);
2097 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2098
2099 /* Pull down units which are bound to us recursively if enabled */
2100 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2101 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2102 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
2103 }
2104
2105 static void check_unneeded_dependencies(Unit *u) {
2106 Unit *other;
2107 Iterator i;
2108 void *v;
2109
2110 assert(u);
2111 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2112
2113 /* Garbage collect services that might not be needed anymore, if enabled */
2114 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2115 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2116 unit_check_unneeded(other);
2117 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2118 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2119 unit_check_unneeded(other);
2120 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUISITE], i)
2121 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2122 unit_check_unneeded(other);
2123 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2124 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2125 unit_check_unneeded(other);
2126 }
2127
2128 void unit_start_on_failure(Unit *u) {
2129 Unit *other;
2130 Iterator i;
2131 void *v;
2132
2133 assert(u);
2134
2135 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2136 return;
2137
2138 log_unit_info(u, "Triggering OnFailure= dependencies.");
2139
2140 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2141 int r;
2142
2143 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
2144 if (r < 0)
2145 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
2146 }
2147 }
2148
2149 void unit_trigger_notify(Unit *u) {
2150 Unit *other;
2151 Iterator i;
2152 void *v;
2153
2154 assert(u);
2155
2156 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2157 if (UNIT_VTABLE(other)->trigger_notify)
2158 UNIT_VTABLE(other)->trigger_notify(other, u);
2159 }
2160
static int unit_log_resources(Unit *u) {

        /* 1 slot for CPU time + one per IP metric + 4 trailing static fields (MESSAGE, MESSAGE_ID,
         * unit field, invocation field). Only the first n_iovec entries are heap-allocated. */
        struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + 4];
        size_t n_message_parts = 0, n_iovec = 0;
        /* At most 3 human-readable fragments (CPU, ingress, egress) + NULL terminator for strv_join(). */
        char* message_parts[3 + 1], *t;
        nsec_t nsec = NSEC_INFINITY;
        CGroupIPAccountingMetric m;
        size_t i;
        int r;
        const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = "IP_METRIC_INGRESS_BYTES",
                [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
                [CGROUP_IP_EGRESS_BYTES]    = "IP_METRIC_EGRESS_BYTES",
                [CGROUP_IP_EGRESS_PACKETS]  = "IP_METRIC_EGRESS_PACKETS",
        };

        assert(u);

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &nsec);
        if (nsec != NSEC_INFINITY) {
                char buf[FORMAT_TIMESPAN_MAX] = "";

                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the CPU time for inclusion in the human language message string */
                format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
                /* Only the first fragment starts with a capital letter. */
                t = strjoin(n_message_parts > 0 ? "consumed " : "Consumed ", buf, " CPU time");
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                char buf[FORMAT_BYTES_MAX] = "";
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m]);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;   /* accounting not available for this metric */

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
                        r = log_oom();
                        goto finish;
                }
                iovec[n_iovec++] = IOVEC_MAKE_STRING(t);

                /* Format the IP accounting data for inclusion in the human language message string, but only for the
                 * bytes counters (and not for the packets counters) */
                if (m == CGROUP_IP_INGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "received " : "Received ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else if (m == CGROUP_IP_EGRESS_BYTES)
                        t = strjoin(n_message_parts > 0 ? "sent " : "Sent ",
                                    format_bytes(buf, sizeof(buf), value),
                                    " IP traffic");
                else
                        continue;
                if (!t) {
                        r = log_oom();
                        goto finish;
                }

                message_parts[n_message_parts++] = t;
        }

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                r = 0;
                goto finish;
        }

        /* Build the human-readable MESSAGE= from the collected fragments. */
        if (n_message_parts == 0)
                t = strjoina("MESSAGE=", u->id, ": Completed");
        else {
                _cleanup_free_ char *joined;

                message_parts[n_message_parts] = NULL;

                joined = strv_join(message_parts, ", ");
                if (!joined) {
                        r = log_oom();
                        goto finish;
                }

                t = strjoina("MESSAGE=", u->id, ": ", joined);
        }

        /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
         * and hence don't increase n_iovec for them */
        iovec[n_iovec] = IOVEC_MAKE_STRING(t);
        iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);

        t = strjoina(u->manager->unit_log_field, u->id);
        iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);

        t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
        iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);

        log_struct_iovec(LOG_INFO, iovec, n_iovec + 4);
        r = 0;

finish:
        /* Free only the heap-allocated entries (first n_message_parts / n_iovec ones). */
        for (i = 0; i < n_message_parts; i++)
                free(message_parts[i]);

        for (i = 0; i < n_iovec; i++)
                free(iovec[i].iov_base);

        return r;

}
2288
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        Manager *m;
        bool unexpected;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same
         * high-level UnitActiveState! That means that ns == os is an expected behavior here. For example:
         * if a mount point is remounted this function will be called too! */

        m = u->manager;

        /* Update timestamps for state changes — but not while deserializing saved state during a reload,
         * since the timestamps were recorded when the change actually happened. */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        }

        /* Maintain the console-user counter. Note that this doesn't apply to RemainAfterExit services
         * exiting successfully, since there's no change of state in that case. Which is why it is handled
         * in service_set_state(). */
        if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                ExecContext *ec;

                ec = unit_get_exec_context(u);
                if (ec && exec_context_may_touch_console(ec)) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                m->n_on_console--;

                                if (m->n_on_console == 0)
                                        /* unset no_console_output flag, since the console is free */
                                        m->no_console_output = false;
                        } else
                                m->n_on_console++;
                }
        }

        if (u->job) {
                unexpected = false;

                if (u->job->state == JOB_WAITING)

                        /* So we reached a different state for this job. Let's see if we can run it now if
                         * it failed previously due to EAGAIN. */
                        job_add_to_run_queue(u->job);

                /* Let's check whether this state change constitutes a finished job, or maybe contradicts
                 * a running job and hence needs to invalidate jobs. */

                switch (u->job->type) {

                case JOB_START:
                case JOB_VERIFY_ACTIVE:

                        if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }

                        break;

                case JOB_RELOAD:
                case JOB_RELOAD_OR_START:
                case JOB_TRY_RELOAD:

                        if (u->job->state == JOB_RUNNING) {
                                if (ns == UNIT_ACTIVE)
                                        job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                                else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                        unexpected = true;

                                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                                job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                                }
                        }

                        break;

                case JOB_STOP:
                case JOB_RESTART:
                case JOB_TRY_RESTART:

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                job_finish_and_invalidate(u->job, JOB_DONE, true, false);
                        else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                                unexpected = true;
                                job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
                        }

                        break;

                default:
                        assert_not_reached("Job type unknown");
                }

        } else
                unexpected = true;

        if (!MANAGER_IS_RELOADING(m)) {

                /* If this state change happened without being requested by a job, then let's retroactively
                 * start or stop dependencies. We skip that step when deserializing, since we don't want to
                 * create any additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");
                        unit_start_on_failure(u);
                }
        }

        /* Some names are special */
        if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {

                if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
                        /* The bus might have just become available, hence try to connect to it, if we
                         * aren't yet connected. */
                        bus_init(m, true);

                if (u->type == UNIT_SERVICE &&
                    !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
                    !MANAGER_IS_RELOADING(m)) {
                        /* Write audit record if we have just finished starting up */
                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
                        u->in_audit = true;
                }

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
                        manager_send_unit_plymouth(m, u);

        } else {
                /* We don't care about D-Bus going down here, since we'll get an asynchronous notification
                 * for it anyway. */

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) &&
                    !UNIT_IS_INACTIVE_OR_FAILED(os)
                    && !MANAGER_IS_RELOADING(m)) {

                        /* This unit just stopped/failed. */
                        if (u->type == UNIT_SERVICE) {

                                /* Hmm, if there was no start record written, write it now, so that we
                                 * always have a nice pair */
                                if (!u->in_audit) {
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);

                                        if (ns == UNIT_INACTIVE)
                                                manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
                                } else
                                        /* Write audit record if we have just finished shutting down */
                                        manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);

                                u->in_audit = false;
                        }

                        /* Write a log message about consumed resources */
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(u->manager)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_check_unneeded(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This
                 * happens when something BindsTo= to a Type=oneshot unit, as these units go directly from
                 * starting to inactive, without ever entering started.) */
                unit_check_binds_to(u);
        }

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);
}
2511
2512 int unit_watch_pid(Unit *u, pid_t pid) {
2513 int q, r;
2514
2515 assert(u);
2516 assert(pid >= 1);
2517
2518 /* Watch a specific PID. We only support one or two units
2519 * watching each PID for now, not more. */
2520
2521 r = set_ensure_allocated(&u->pids, NULL);
2522 if (r < 0)
2523 return r;
2524
2525 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2526 if (r < 0)
2527 return r;
2528
2529 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2530 if (r == -EEXIST) {
2531 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2532 if (r < 0)
2533 return r;
2534
2535 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2536 }
2537
2538 q = set_put(u->pids, PID_TO_PTR(pid));
2539 if (q < 0)
2540 return q;
2541
2542 return r;
2543 }
2544
2545 void unit_unwatch_pid(Unit *u, pid_t pid) {
2546 assert(u);
2547 assert(pid >= 1);
2548
2549 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2550 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2551 (void) set_remove(u->pids, PID_TO_PTR(pid));
2552 }
2553
2554 void unit_unwatch_all_pids(Unit *u) {
2555 assert(u);
2556
2557 while (!set_isempty(u->pids))
2558 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2559
2560 u->pids = set_free(u->pids);
2561 }
2562
2563 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2564 Iterator i;
2565 void *e;
2566
2567 assert(u);
2568
2569 /* Cleans dead PIDs from our list */
2570
2571 SET_FOREACH(e, u->pids, i) {
2572 pid_t pid = PTR_TO_PID(e);
2573
2574 if (pid == except1 || pid == except2)
2575 continue;
2576
2577 if (!pid_is_unwaited(pid))
2578 unit_unwatch_pid(u, pid);
2579 }
2580 }
2581
2582 bool unit_job_is_applicable(Unit *u, JobType j) {
2583 assert(u);
2584 assert(j >= 0 && j < _JOB_TYPE_MAX);
2585
2586 switch (j) {
2587
2588 case JOB_VERIFY_ACTIVE:
2589 case JOB_START:
2590 case JOB_NOP:
2591 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2592 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2593 * jobs for it. */
2594 return true;
2595
2596 case JOB_STOP:
2597 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2598 * external events), hence it makes no sense to permit enqueing such a request either. */
2599 return !u->perpetual;
2600
2601 case JOB_RESTART:
2602 case JOB_TRY_RESTART:
2603 return unit_can_stop(u) && unit_can_start(u);
2604
2605 case JOB_RELOAD:
2606 case JOB_TRY_RELOAD:
2607 return unit_can_reload(u);
2608
2609 case JOB_RELOAD_OR_START:
2610 return unit_can_reload(u) && unit_can_start(u);
2611
2612 default:
2613 assert_not_reached("Invalid job type");
2614 }
2615 }
2616
2617 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2618 assert(u);
2619
2620 /* Only warn about some unit types */
2621 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2622 return;
2623
2624 if (streq_ptr(u->id, other))
2625 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2626 else
2627 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2628 }
2629
static int unit_add_dependency_hashmap(
                Hashmap **h,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(h);
        assert(other);
        assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
        assert(origin_mask > 0 || destination_mask > 0);

        r = hashmap_ensure_allocated(h, NULL);
        if (r < 0)
                return r;

        /* UnitDependencyInfo packs both masks into a single pointer-sized value (info.data), so it can be
         * stored directly as the hashmap value without a separate allocation. */
        assert_cc(sizeof(void*) == sizeof(info));

        info.data = hashmap_get(*h, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                /* If every requested bit is set already, there is nothing to do. */
                if ((info.origin_mask & origin_mask) == info.origin_mask &&
                    (info.destination_mask & destination_mask) == info.destination_mask)
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(*h, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(*h, other, info.data);
        }
        if (r < 0)
                return r;

        /* Returns 1 when something was actually changed, 0 when the entry already covered the masks. */
        return 1;
}
2676
2677 int unit_add_dependency(
2678 Unit *u,
2679 UnitDependency d,
2680 Unit *other,
2681 bool add_reference,
2682 UnitDependencyMask mask) {
2683
2684 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2685 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2686 [UNIT_WANTS] = UNIT_WANTED_BY,
2687 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2688 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2689 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2690 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2691 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2692 [UNIT_WANTED_BY] = UNIT_WANTS,
2693 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2694 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2695 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2696 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2697 [UNIT_BEFORE] = UNIT_AFTER,
2698 [UNIT_AFTER] = UNIT_BEFORE,
2699 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2700 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2701 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2702 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2703 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2704 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2705 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2706 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2707 };
2708 Unit *original_u = u, *original_other = other;
2709 int r;
2710
2711 assert(u);
2712 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2713 assert(other);
2714
2715 u = unit_follow_merge(u);
2716 other = unit_follow_merge(other);
2717
2718 /* We won't allow dependencies on ourselves. We will not
2719 * consider them an error however. */
2720 if (u == other) {
2721 maybe_warn_about_dependency(original_u, original_other->id, d);
2722 return 0;
2723 }
2724
2725 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2726 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2727 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2728 return 0;
2729 }
2730
2731 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2732 if (r < 0)
2733 return r;
2734
2735 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2736 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2737 if (r < 0)
2738 return r;
2739 }
2740
2741 if (add_reference) {
2742 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2743 if (r < 0)
2744 return r;
2745
2746 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2747 if (r < 0)
2748 return r;
2749 }
2750
2751 unit_add_to_dbus_queue(u);
2752 return 0;
2753 }
2754
/* Convenience wrapper: register dependency type d and then e from u towards the same target unit. */
int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
        int r;

        assert(u);

        r = unit_add_dependency(u, d, other, add_reference, mask);
        if (r < 0)
                return r;

        return unit_add_dependency(u, e, other, add_reference, mask);
}
2766
2767 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2768 int r;
2769
2770 assert(u);
2771 assert(name || path);
2772 assert(buf);
2773 assert(ret);
2774
2775 if (!name)
2776 name = basename(path);
2777
2778 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2779 *buf = NULL;
2780 *ret = name;
2781 return 0;
2782 }
2783
2784 if (u->instance)
2785 r = unit_name_replace_instance(name, u->instance, buf);
2786 else {
2787 _cleanup_free_ char *i = NULL;
2788
2789 r = unit_name_to_prefix(u->id, &i);
2790 if (r < 0)
2791 return r;
2792
2793 r = unit_name_replace_instance(name, i, buf);
2794 }
2795 if (r < 0)
2796 return r;
2797
2798 *ret = *buf;
2799 return 0;
2800 }
2801
/* Like unit_add_dependency(), but resolves the target by name/path first, instantiating templates
 * against u's instance as needed and loading the target unit. */
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name || path);

        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, path, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}
2820
/* Like unit_add_two_dependencies(), but resolves the target by name/path first (template
 * instantiation + unit loading), mirroring unit_add_dependency_by_name(). */
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name || path);

        r = resolve_template(u, name, path, &buf, &name);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, name, path, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}
2839
int set_unit_path(const char *p) {
        /* Points the manager at an alternate unit search path; this is mostly for debug purposes.
         * Returns 0 on success, a negative errno on failure. */
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2847
2848 char *unit_dbus_path(Unit *u) {
2849 assert(u);
2850
2851 if (!u->id)
2852 return NULL;
2853
2854 return unit_dbus_path_from_name(u->id);
2855 }
2856
2857 char *unit_dbus_path_invocation_id(Unit *u) {
2858 assert(u);
2859
2860 if (sd_id128_is_null(u->invocation_id))
2861 return NULL;
2862
2863 return unit_dbus_path_from_name(u->invocation_id_string);
2864 }
2865
int unit_set_slice(Unit *u, Unit *slice) {
        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for
         * units that actually have a cgroup context. Also, we don't allow to set this for slices (since
         * the parent slice is derived from the name). Make sure the unit we set is actually a slice.
         *
         * Each guard below maps to a distinct error code, so their order is significant. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        /* init.scope is pinned directly below the root slice, refuse anything else. */
        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_DEREF(u->slice) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_ISSET(u->slice) && u->cgroup_realized)
                return -EBUSY;

        unit_ref_unset(&u->slice);
        unit_ref_set(&u->slice, slice);
        /* Returns 1 when the slice was (re)assigned, 0 when it was set to this slice already. */
        return 1;
}
2903
2904 int unit_set_default_slice(Unit *u) {
2905 _cleanup_free_ char *b = NULL;
2906 const char *slice_name;
2907 Unit *slice;
2908 int r;
2909
2910 assert(u);
2911
2912 if (UNIT_ISSET(u->slice))
2913 return 0;
2914
2915 if (u->instance) {
2916 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2917
2918 /* Implicitly place all instantiated units in their
2919 * own per-template slice */
2920
2921 r = unit_name_to_prefix(u->id, &prefix);
2922 if (r < 0)
2923 return r;
2924
2925 /* The prefix is already escaped, but it might include
2926 * "-" which has a special meaning for slice units,
2927 * hence escape it here extra. */
2928 escaped = unit_name_escape(prefix);
2929 if (!escaped)
2930 return -ENOMEM;
2931
2932 if (MANAGER_IS_SYSTEM(u->manager))
2933 b = strjoin("system-", escaped, ".slice");
2934 else
2935 b = strappend(escaped, ".slice");
2936 if (!b)
2937 return -ENOMEM;
2938
2939 slice_name = b;
2940 } else
2941 slice_name =
2942 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2943 ? SPECIAL_SYSTEM_SLICE
2944 : SPECIAL_ROOT_SLICE;
2945
2946 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2947 if (r < 0)
2948 return r;
2949
2950 return unit_set_slice(u, slice);
2951 }
2952
2953 const char *unit_slice_name(Unit *u) {
2954 assert(u);
2955
2956 if (!UNIT_ISSET(u->slice))
2957 return NULL;
2958
2959 return UNIT_DEREF(u->slice)->id;
2960 }
2961
/* Loads the unit sharing this unit's name but carrying the given suffix (e.g. the ".socket" belonging
 * to a ".service"). Refuses with -EINVAL if the derived name resolves back to the unit itself. */
int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        assert(r < 0 || *_found != u);
        return r;
}
2980
2981 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2982 const char *name, *old_owner, *new_owner;
2983 Unit *u = userdata;
2984 int r;
2985
2986 assert(message);
2987 assert(u);
2988
2989 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2990 if (r < 0) {
2991 bus_log_parse_error(r);
2992 return 0;
2993 }
2994
2995 old_owner = isempty(old_owner) ? NULL : old_owner;
2996 new_owner = isempty(new_owner) ? NULL : new_owner;
2997
2998 if (UNIT_VTABLE(u)->bus_name_owner_change)
2999 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3000
3001 return 0;
3002 }
3003
/* Installs a NameOwnerChanged signal match on the given bus, watching ownership changes of the
 * specified bus name on behalf of this unit. The resulting slot is stored in u->match_bus_slot;
 * only a single match per unit is supported, hence -EBUSY if one is installed already. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot)
                return -EBUSY;

        /* strjoina() builds the match rule on the stack, hence no cleanup is necessary. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
}
3023
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly. Otherwise, just put the
                 * name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                /* Roll back the bus match installed above, so that no stale subscription lingers. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
3049
3050 void unit_unwatch_bus_name(Unit *u, const char *name) {
3051 assert(u);
3052 assert(name);
3053
3054 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3055 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3056 }
3057
3058 bool unit_can_serialize(Unit *u) {
3059 assert(u);
3060
3061 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3062 }
3063
3064 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3065 _cleanup_free_ char *s = NULL;
3066 int r = 0;
3067
3068 assert(f);
3069 assert(key);
3070
3071 if (mask != 0) {
3072 r = cg_mask_to_string(mask, &s);
3073 if (r >= 0) {
3074 fputs(key, f);
3075 fputc('=', f);
3076 fputs(s, f);
3077 fputc('\n', f);
3078 }
3079 }
3080 return r;
3081 }
3082
/* Serialization field names for the per-unit IP accounting counters, indexed by metric. */
static const char *ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
        [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
        [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
        [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
        [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
};
3089
/* Serializes the unit's runtime state to f (one "key=value" line per item, terminated by an empty
 * line), registering any file descriptors in fds. The order of fields is part of the on-disk format
 * consumed by unit_deserialize(). */
int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
        CGroupIPAccountingMetric m;
        int r;

        assert(u);
        assert(f);
        assert(fds);

        /* Unit-type specific state first, plus the exec runtime if there is one. */
        if (unit_can_serialize(u)) {
                ExecRuntime *rt;

                r = UNIT_VTABLE(u)->serialize(u, f, fds);
                if (r < 0)
                        return r;

                rt = unit_get_exec_runtime(u);
                if (rt) {
                        r = exec_runtime_serialize(u, rt, f, fds);
                        if (r < 0)
                                return r;
                }
        }

        dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);

        dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
        dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
        dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
        dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);

        dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
        dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);

        /* Condition/assert results only make sense when the respective check actually ran. */
        if (dual_timestamp_is_set(&u->condition_timestamp))
                unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));

        unit_serialize_item(u, f, "transient", yes_no(u->transient));

        unit_serialize_item(u, f, "exported-invocation-id", yes_no(u->exported_invocation_id));
        unit_serialize_item(u, f, "exported-log-level-max", yes_no(u->exported_log_level_max));
        unit_serialize_item(u, f, "exported-log-extra-fields", yes_no(u->exported_log_extra_fields));

        unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
        if (u->cpu_usage_last != NSEC_INFINITY)
                unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);

        if (u->cgroup_path)
                unit_serialize_item(u, f, "cgroup", u->cgroup_path);
        unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
        (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
        (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
        unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);

        if (uid_is_valid(u->ref_uid))
                unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
        if (gid_is_valid(u->ref_gid))
                unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);

        if (!sd_id128_is_null(u->invocation_id))
                unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));

        bus_track_serialize(u->bus_track, f, "ref");

        /* One line per IP accounting metric that is currently available. */
        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                r = unit_get_ip_accounting(u, m, &v);
                if (r >= 0)
                        unit_serialize_item_format(u, f, ip_accounting_metric_field[m], "%" PRIu64, v);
        }

        if (serialize_jobs) {
                if (u->job) {
                        fprintf(f, "job\n");
                        job_serialize(u->job, f);
                }

                if (u->nop_job) {
                        fprintf(f, "job\n");
                        job_serialize(u->nop_job, f);
                }
        }

        /* End marker */
        fputc('\n', f);
        return 0;
}
3180
3181 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
3182 assert(u);
3183 assert(f);
3184 assert(key);
3185
3186 if (!value)
3187 return 0;
3188
3189 fputs(key, f);
3190 fputc('=', f);
3191 fputs(value, f);
3192 fputc('\n', f);
3193
3194 return 1;
3195 }
3196
3197 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
3198 _cleanup_free_ char *c = NULL;
3199
3200 assert(u);
3201 assert(f);
3202 assert(key);
3203
3204 if (!value)
3205 return 0;
3206
3207 c = cescape(value);
3208 if (!c)
3209 return -ENOMEM;
3210
3211 fputs(key, f);
3212 fputc('=', f);
3213 fputs(c, f);
3214 fputc('\n', f);
3215
3216 return 1;
3217 }
3218
/* Serializes a file descriptor as "key=<index>\n". A duplicate of the fd is registered in fds, so the
 * caller keeps ownership of the original. Negative fds are silently skipped (returns 0); returns 1 on
 * success, negative on error. */
int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
        int copy;

        assert(u);
        assert(f);
        assert(key);

        if (fd < 0)
                return 0;

        copy = fdset_put_dup(fds, fd);
        if (copy < 0)
                return copy;

        fprintf(f, "%s=%i\n", key, copy);
        return 1;
}
3236
3237 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
3238 va_list ap;
3239
3240 assert(u);
3241 assert(f);
3242 assert(key);
3243 assert(format);
3244
3245 fputs(key, f);
3246 fputc('=', f);
3247
3248 va_start(ap, format);
3249 vfprintf(f, format, ap);
3250 va_end(ap);
3251
3252 fputc('\n', f);
3253 }
3254
3255 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3256 ExecRuntime **rt = NULL;
3257 size_t offset;
3258 int r;
3259
3260 assert(u);
3261 assert(f);
3262 assert(fds);
3263
3264 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3265 if (offset > 0)
3266 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3267
3268 for (;;) {
3269 char line[LINE_MAX], *l, *v;
3270 CGroupIPAccountingMetric m;
3271 size_t k;
3272
3273 if (!fgets(line, sizeof(line), f)) {
3274 if (feof(f))
3275 return 0;
3276 return -errno;
3277 }
3278
3279 char_array_0(line);
3280 l = strstrip(line);
3281
3282 /* End marker */
3283 if (isempty(l))
3284 break;
3285
3286 k = strcspn(l, "=");
3287
3288 if (l[k] == '=') {
3289 l[k] = 0;
3290 v = l+k+1;
3291 } else
3292 v = l+k;
3293
3294 if (streq(l, "job")) {
3295 if (v[0] == '\0') {
3296 /* new-style serialized job */
3297 Job *j;
3298
3299 j = job_new_raw(u);
3300 if (!j)
3301 return log_oom();
3302
3303 r = job_deserialize(j, f);
3304 if (r < 0) {
3305 job_free(j);
3306 return r;
3307 }
3308
3309 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
3310 if (r < 0) {
3311 job_free(j);
3312 return r;
3313 }
3314
3315 r = job_install_deserialized(j);
3316 if (r < 0) {
3317 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
3318 job_free(j);
3319 return r;
3320 }
3321 } else /* legacy for pre-44 */
3322 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3323 continue;
3324 } else if (streq(l, "state-change-timestamp")) {
3325 dual_timestamp_deserialize(v, &u->state_change_timestamp);
3326 continue;
3327 } else if (streq(l, "inactive-exit-timestamp")) {
3328 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
3329 continue;
3330 } else if (streq(l, "active-enter-timestamp")) {
3331 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
3332 continue;
3333 } else if (streq(l, "active-exit-timestamp")) {
3334 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
3335 continue;
3336 } else if (streq(l, "inactive-enter-timestamp")) {
3337 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
3338 continue;
3339 } else if (streq(l, "condition-timestamp")) {
3340 dual_timestamp_deserialize(v, &u->condition_timestamp);
3341 continue;
3342 } else if (streq(l, "assert-timestamp")) {
3343 dual_timestamp_deserialize(v, &u->assert_timestamp);
3344 continue;
3345 } else if (streq(l, "condition-result")) {
3346
3347 r = parse_boolean(v);
3348 if (r < 0)
3349 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3350 else
3351 u->condition_result = r;
3352
3353 continue;
3354
3355 } else if (streq(l, "assert-result")) {
3356
3357 r = parse_boolean(v);
3358 if (r < 0)
3359 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3360 else
3361 u->assert_result = r;
3362
3363 continue;
3364
3365 } else if (streq(l, "transient")) {
3366
3367 r = parse_boolean(v);
3368 if (r < 0)
3369 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3370 else
3371 u->transient = r;
3372
3373 continue;
3374
3375 } else if (streq(l, "exported-invocation-id")) {
3376
3377 r = parse_boolean(v);
3378 if (r < 0)
3379 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3380 else
3381 u->exported_invocation_id = r;
3382
3383 continue;
3384
3385 } else if (streq(l, "exported-log-level-max")) {
3386
3387 r = parse_boolean(v);
3388 if (r < 0)
3389 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3390 else
3391 u->exported_log_level_max = r;
3392
3393 continue;
3394
3395 } else if (streq(l, "exported-log-extra-fields")) {
3396
3397 r = parse_boolean(v);
3398 if (r < 0)
3399 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3400 else
3401 u->exported_log_extra_fields = r;
3402
3403 continue;
3404
3405 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3406
3407 r = safe_atou64(v, &u->cpu_usage_base);
3408 if (r < 0)
3409 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3410
3411 continue;
3412
3413 } else if (streq(l, "cpu-usage-last")) {
3414
3415 r = safe_atou64(v, &u->cpu_usage_last);
3416 if (r < 0)
3417 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3418
3419 continue;
3420
3421 } else if (streq(l, "cgroup")) {
3422
3423 r = unit_set_cgroup_path(u, v);
3424 if (r < 0)
3425 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3426
3427 (void) unit_watch_cgroup(u);
3428
3429 continue;
3430 } else if (streq(l, "cgroup-realized")) {
3431 int b;
3432
3433 b = parse_boolean(v);
3434 if (b < 0)
3435 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3436 else
3437 u->cgroup_realized = b;
3438
3439 continue;
3440
3441 } else if (streq(l, "cgroup-realized-mask")) {
3442
3443 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3444 if (r < 0)
3445 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3446 continue;
3447
3448 } else if (streq(l, "cgroup-enabled-mask")) {
3449
3450 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3451 if (r < 0)
3452 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3453 continue;
3454
3455 } else if (streq(l, "cgroup-bpf-realized")) {
3456 int i;
3457
3458 r = safe_atoi(v, &i);
3459 if (r < 0)
3460 log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
3461 else
3462 u->cgroup_bpf_state =
3463 i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
3464 i > 0 ? UNIT_CGROUP_BPF_ON :
3465 UNIT_CGROUP_BPF_OFF;
3466
3467 continue;
3468
3469 } else if (streq(l, "ref-uid")) {
3470 uid_t uid;
3471
3472 r = parse_uid(v, &uid);
3473 if (r < 0)
3474 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3475 else
3476 unit_ref_uid_gid(u, uid, GID_INVALID);
3477
3478 continue;
3479
3480 } else if (streq(l, "ref-gid")) {
3481 gid_t gid;
3482
3483 r = parse_gid(v, &gid);
3484 if (r < 0)
3485 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3486 else
3487 unit_ref_uid_gid(u, UID_INVALID, gid);
3488
3489 } else if (streq(l, "ref")) {
3490
3491 r = strv_extend(&u->deserialized_refs, v);
3492 if (r < 0)
3493 log_oom();
3494
3495 continue;
3496 } else if (streq(l, "invocation-id")) {
3497 sd_id128_t id;
3498
3499 r = sd_id128_from_string(v, &id);
3500 if (r < 0)
3501 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3502 else {
3503 r = unit_set_invocation_id(u, id);
3504 if (r < 0)
3505 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3506 }
3507
3508 continue;
3509 }
3510
3511 /* Check if this is an IP accounting metric serialization field */
3512 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++)
3513 if (streq(l, ip_accounting_metric_field[m]))
3514 break;
3515 if (m < _CGROUP_IP_ACCOUNTING_METRIC_MAX) {
3516 uint64_t c;
3517
3518 r = safe_atou64(v, &c);
3519 if (r < 0)
3520 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3521 else
3522 u->ip_accounting_extra[m] = c;
3523 continue;
3524 }
3525
3526 if (unit_can_serialize(u)) {
3527 if (rt) {
3528 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3529 if (r < 0) {
3530 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3531 continue;
3532 }
3533
3534 /* Returns positive if key was handled by the call */
3535 if (r > 0)
3536 continue;
3537 }
3538
3539 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3540 if (r < 0)
3541 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3542 }
3543 }
3544
3545 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3546 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3547 * before 228 where the base for timeouts was not persistent across reboots. */
3548
3549 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3550 dual_timestamp_get(&u->state_change_timestamp);
3551
3552 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3553 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3554 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3555 unit_invalidate_cgroup_bpf(u);
3556
3557 return 0;
3558 }
3559
3560 void unit_deserialize_skip(FILE *f) {
3561 assert(f);
3562
3563 /* Skip serialized data for this unit. We don't know what it is. */
3564
3565 for (;;) {
3566 char line[LINE_MAX], *l;
3567
3568 if (!fgets(line, sizeof line, f))
3569 return;
3570
3571 char_array_0(line);
3572 l = strstrip(line);
3573
3574 /* End marker */
3575 if (isempty(l))
3576 return;
3577 }
3578 }
3579
3580
/* Adds dependencies between this unit and the .device unit backing the device node 'what'. The dependency
 * type 'dep' is used for system managers (possibly upgraded to BindsTo=, see below); user managers only get
 * the weaker Wants=. If 'wants' is true the device unit is also made to want this unit, so that plugging the
 * device in pulls the unit up. Returns 0 on success (including the various "nothing to do" cases), a
 * negative errno-style error otherwise. */
int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
        Unit *device;
        _cleanup_free_ char *e = NULL;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a
         * container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        /* If the device wants to bind the unit's lifetime to its own, upgrade the requirement dependency
         * accordingly. */
        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        /* Order us after the device in all cases. */
        r = unit_add_two_dependencies(u, UNIT_AFTER,
                                      MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                      device, true, mask);
        if (r < 0)
                return r;

        if (wants) {
                r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
                if (r < 0)
                        return r;
        }

        return 0;
}
3625
/* Catches the unit up with in-memory state after deserialization: re-establishes bus name tracking, lets the
 * unit type's own coldplug hook run, and coldplugs any pending job. All steps are attempted even on error;
 * the first error encountered is returned. */
int unit_coldplug(Unit *u) {
        int r = 0, q;
        char **i;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging
         * recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        /* Re-subscribe to the bus names we were tracking before the reload/reexec. */
        STRV_FOREACH(i, u->deserialized_refs) {
                q = bus_unit_track_add_name(u, *i);
                if (q < 0 && r >= 0)
                        r = q;
        }
        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug) {
                q = UNIT_VTABLE(u)->coldplug(u);
                if (q < 0 && r >= 0)
                        r = q;
        }

        if (u->job) {
                q = job_coldplug(u->job);
                if (q < 0 && r >= 0)
                        r = q;
        }

        return r;
}
3660
3661 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3662 struct stat st;
3663
3664 if (!path)
3665 return false;
3666
3667 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3668 * are never out-of-date. */
3669 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3670 return false;
3671
3672 if (stat(path, &st) < 0)
3673 /* What, cannot access this anymore? */
3674 return true;
3675
3676 if (path_masked)
3677 /* For masked files check if they are still so */
3678 return !null_or_empty(&st);
3679 else
3680 /* For non-empty files check the mtime */
3681 return timespec_load(&st.st_mtim) > mtime;
3682
3683 return false;
3684 }
3685
/* Returns true if the on-disk configuration backing this unit (fragment, source file, drop-in snippets)
 * changed since the unit was loaded, i.e. if a daemon-reload is needed for the manager to be up-to-date. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;
        char **path;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* A changed *set* of drop-ins (files added or removed) also requires a reload. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3713
3714 void unit_reset_failed(Unit *u) {
3715 assert(u);
3716
3717 if (UNIT_VTABLE(u)->reset_failed)
3718 UNIT_VTABLE(u)->reset_failed(u);
3719
3720 RATELIMIT_RESET(u->start_limit);
3721 u->start_limit_hit = false;
3722 }
3723
3724 Unit *unit_following(Unit *u) {
3725 assert(u);
3726
3727 if (UNIT_VTABLE(u)->following)
3728 return UNIT_VTABLE(u)->following(u);
3729
3730 return NULL;
3731 }
3732
3733 bool unit_stop_pending(Unit *u) {
3734 assert(u);
3735
3736 /* This call does check the current state of the unit. It's
3737 * hence useful to be called from state change calls of the
3738 * unit itself, where the state isn't updated yet. This is
3739 * different from unit_inactive_or_pending() which checks both
3740 * the current state and for a queued job. */
3741
3742 return u->job && u->job->type == JOB_STOP;
3743 }
3744
3745 bool unit_inactive_or_pending(Unit *u) {
3746 assert(u);
3747
3748 /* Returns true if the unit is inactive or going down */
3749
3750 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3751 return true;
3752
3753 if (unit_stop_pending(u))
3754 return true;
3755
3756 return false;
3757 }
3758
3759 bool unit_active_or_pending(Unit *u) {
3760 assert(u);
3761
3762 /* Returns true if the unit is active or going up */
3763
3764 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3765 return true;
3766
3767 if (u->job &&
3768 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3769 return true;
3770
3771 return false;
3772 }
3773
3774 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3775 assert(u);
3776 assert(w >= 0 && w < _KILL_WHO_MAX);
3777 assert(SIGNAL_VALID(signo));
3778
3779 if (!UNIT_VTABLE(u)->kill)
3780 return -EOPNOTSUPP;
3781
3782 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3783 }
3784
3785 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3786 Set *pid_set;
3787 int r;
3788
3789 pid_set = set_new(NULL);
3790 if (!pid_set)
3791 return NULL;
3792
3793 /* Exclude the main/control pids from being killed via the cgroup */
3794 if (main_pid > 0) {
3795 r = set_put(pid_set, PID_TO_PTR(main_pid));
3796 if (r < 0)
3797 goto fail;
3798 }
3799
3800 if (control_pid > 0) {
3801 r = set_put(pid_set, PID_TO_PTR(control_pid));
3802 if (r < 0)
3803 goto fail;
3804 }
3805
3806 return pid_set;
3807
3808 fail:
3809 set_free(pid_set);
3810 return NULL;
3811 }
3812
3813 int unit_kill_common(
3814 Unit *u,
3815 KillWho who,
3816 int signo,
3817 pid_t main_pid,
3818 pid_t control_pid,
3819 sd_bus_error *error) {
3820
3821 int r = 0;
3822 bool killed = false;
3823
3824 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3825 if (main_pid < 0)
3826 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3827 else if (main_pid == 0)
3828 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3829 }
3830
3831 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3832 if (control_pid < 0)
3833 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3834 else if (control_pid == 0)
3835 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3836 }
3837
3838 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3839 if (control_pid > 0) {
3840 if (kill(control_pid, signo) < 0)
3841 r = -errno;
3842 else
3843 killed = true;
3844 }
3845
3846 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3847 if (main_pid > 0) {
3848 if (kill(main_pid, signo) < 0)
3849 r = -errno;
3850 else
3851 killed = true;
3852 }
3853
3854 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3855 _cleanup_set_free_ Set *pid_set = NULL;
3856 int q;
3857
3858 /* Exclude the main/control pids from being killed via the cgroup */
3859 pid_set = unit_pid_set(main_pid, control_pid);
3860 if (!pid_set)
3861 return -ENOMEM;
3862
3863 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3864 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
3865 r = q;
3866 else
3867 killed = true;
3868 }
3869
3870 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3871 return -ESRCH;
3872
3873 return r;
3874 }
3875
3876 int unit_following_set(Unit *u, Set **s) {
3877 assert(u);
3878 assert(s);
3879
3880 if (UNIT_VTABLE(u)->following_set)
3881 return UNIT_VTABLE(u)->following_set(u, s);
3882
3883 *s = NULL;
3884 return 0;
3885 }
3886
3887 UnitFileState unit_get_unit_file_state(Unit *u) {
3888 int r;
3889
3890 assert(u);
3891
3892 if (u->unit_file_state < 0 && u->fragment_path) {
3893 r = unit_file_get_state(
3894 u->manager->unit_file_scope,
3895 NULL,
3896 basename(u->fragment_path),
3897 &u->unit_file_state);
3898 if (r < 0)
3899 u->unit_file_state = UNIT_FILE_BAD;
3900 }
3901
3902 return u->unit_file_state;
3903 }
3904
3905 int unit_get_unit_file_preset(Unit *u) {
3906 assert(u);
3907
3908 if (u->unit_file_preset < 0 && u->fragment_path)
3909 u->unit_file_preset = unit_file_query_preset(
3910 u->manager->unit_file_scope,
3911 NULL,
3912 basename(u->fragment_path));
3913
3914 return u->unit_file_preset;
3915 }
3916
/* Points 'ref' at 'u', dropping any reference it held before, and links it into the unit's list of active
 * references. Returns 'u' for convenience. */
Unit* unit_ref_set(UnitRef *ref, Unit *u) {
        assert(ref);
        assert(u);

        if (ref->unit)
                unit_ref_unset(ref);

        ref->unit = u;
        LIST_PREPEND(refs, u->refs, ref);
        return u;
}
3928
3929 void unit_ref_unset(UnitRef *ref) {
3930 assert(ref);
3931
3932 if (!ref->unit)
3933 return;
3934
3935 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3936 * be unreferenced now. */
3937 unit_add_to_gc_queue(ref->unit);
3938
3939 LIST_REMOVE(refs, ref->unit->refs, ref);
3940 ref->unit = NULL;
3941 }
3942
/* Derives a user name for DynamicUser= from the unit name: the unit's prefix if that is already a valid
 * user/group name, otherwise "_du" plus a 64-bit siphash of the prefix, so we always end up with some valid,
 * stable name. On success returns 0 and stores a malloc'd string in *ret (caller owns it). */
static int user_from_unit_name(Unit *u, char **ret) {

        /* Fixed siphash key, so the fallback name is stable across runs. */
        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n)) {
                *ret = n;
                n = NULL; /* ownership transferred to the caller, disarm cleanup */
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
3969
/* Patches manager-level defaults and option-implied settings into this unit's exec and cgroup contexts.
 * Called once after all configuration has been loaded. Returns 0 on success, a negative errno-style error
 * (typically -ENOMEM) on failure. */
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        unsigned i;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                /* User managers default the working directory to the user's home directory. */
                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* PrivateDevices= implies dropping the capabilities needed for mknod and raw I/O. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->dynamic_user) {
                        /* Derive user/group names from the unit name, if not configured explicitly. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
                         * around in the file system or on IPC objects. Hence enforce a strict sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc) {

                /* PrivateDevices= also tightens the default device policy. */
                if (ec &&
                    ec->private_devices &&
                    cc->device_policy == CGROUP_AUTO)
                        cc->device_policy = CGROUP_CLOSED;
        }

        return 0;
}
4045
4046 ExecContext *unit_get_exec_context(Unit *u) {
4047 size_t offset;
4048 assert(u);
4049
4050 if (u->type < 0)
4051 return NULL;
4052
4053 offset = UNIT_VTABLE(u)->exec_context_offset;
4054 if (offset <= 0)
4055 return NULL;
4056
4057 return (ExecContext*) ((uint8_t*) u + offset);
4058 }
4059
4060 KillContext *unit_get_kill_context(Unit *u) {
4061 size_t offset;
4062 assert(u);
4063
4064 if (u->type < 0)
4065 return NULL;
4066
4067 offset = UNIT_VTABLE(u)->kill_context_offset;
4068 if (offset <= 0)
4069 return NULL;
4070
4071 return (KillContext*) ((uint8_t*) u + offset);
4072 }
4073
4074 CGroupContext *unit_get_cgroup_context(Unit *u) {
4075 size_t offset;
4076
4077 if (u->type < 0)
4078 return NULL;
4079
4080 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4081 if (offset <= 0)
4082 return NULL;
4083
4084 return (CGroupContext*) ((uint8_t*) u + offset);
4085 }
4086
4087 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4088 size_t offset;
4089
4090 if (u->type < 0)
4091 return NULL;
4092
4093 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4094 if (offset <= 0)
4095 return NULL;
4096
4097 return *(ExecRuntime**) ((uint8_t*) u + offset);
4098 }
4099
4100 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
4101 assert(u);
4102
4103 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
4104 return NULL;
4105
4106 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4107 return u->manager->lookup_paths.transient;
4108
4109 if (mode == UNIT_RUNTIME)
4110 return u->manager->lookup_paths.runtime_control;
4111
4112 if (mode == UNIT_PERSISTENT)
4113 return u->manager->lookup_paths.persistent_control;
4114
4115 return NULL;
4116 }
4117
/* Persists the configuration snippet 'data' for this unit as a drop-in file named after 'name' (with
 * priority prefix 50), in the directory appropriate for 'mode'. For a transient unit still being created the
 * data is appended to the transient unit file instead. Returns 0 on success (or when the mode requires no
 * writing), a negative errno-style error otherwise. */
int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);
                fputc('\n', u->transient_file);
                return 0;
        }

        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        dir = unit_drop_in_dir(u, mode);
        if (!dir)
                return -EINVAL;

        /* Prepend a warning header so nobody edits the generated file by hand. */
        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p(p, 0755);
        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Record the new path so unit_need_daemon_reload() and friends see it. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL; /* ownership passed to u->dropin_paths, disarm cleanup */

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4165
/* printf-style convenience wrapper around unit_write_drop_in(): formats the data first, then writes it as a
 * drop-in. Returns 0 on success (or when the mode requires no writing), -ENOMEM on allocation/format
 * failure, otherwise whatever unit_write_drop_in() returns. */
int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        /* Bail out early for modes that write nothing, to skip the formatting work. */
        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_drop_in(u, mode, name, p);
}
4187
4188 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
4189 const char *ndata;
4190
4191 assert(u);
4192 assert(name);
4193 assert(data);
4194
4195 if (!UNIT_VTABLE(u)->private_section)
4196 return -EINVAL;
4197
4198 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
4199 return 0;
4200
4201 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4202
4203 return unit_write_drop_in(u, mode, name, ndata);
4204 }
4205
/* printf-style convenience wrapper around unit_write_drop_in_private(): formats the data first, then writes
 * it into the unit type's private section drop-in. Returns 0 on success (or when the mode requires no
 * writing), -ENOMEM on allocation/format failure, otherwise the result of unit_write_drop_in_private(). */
int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        /* Bail out early for modes that write nothing, to skip the formatting work. */
        if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_drop_in_private(u, mode, name, p);
}
4227
/* Converts this unit into a transient one: opens a fresh transient unit file for writing and resets all
 * on-disk configuration state, so the unit is subsequently loaded from that file. Returns 0 on success,
 * -EOPNOTSUPP if the unit type doesn't support transient units, -ENOMEM/-errno on failure. */
int unit_make_transient(Unit *u) {
        FILE *f;
        char *path;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f) {
                        free(path);
                        return -errno;
                }
        }

        /* Replace any previously opened transient file. */
        if (u->transient_file)
                fclose(u->transient_file);
        u->transient_file = f;

        /* The transient file becomes the unit's fragment. */
        free(u->fragment_path);
        u->fragment_path = path;

        /* Drop all other on-disk configuration state. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4275
4276 static void log_kill(pid_t pid, int sig, void *userdata) {
4277 _cleanup_free_ char *comm = NULL;
4278
4279 (void) get_process_comm(pid, &comm);
4280
4281 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4282 only, like for example systemd's own PAM stub process. */
4283 if (comm && comm[0] == '(')
4284 return;
4285
4286 log_unit_notice(userdata,
4287 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4288 pid,
4289 strna(comm),
4290 signal_to_string(sig));
4291 }
4292
4293 static int operation_to_signal(KillContext *c, KillOperation k) {
4294 assert(c);
4295
4296 switch (k) {
4297
4298 case KILL_TERMINATE:
4299 case KILL_TERMINATE_AND_LOG:
4300 return c->kill_signal;
4301
4302 case KILL_KILL:
4303 return SIGKILL;
4304
4305 case KILL_ABORT:
4306 return SIGABRT;
4307
4308 default:
4309 assert_not_reached("KillOperation unknown");
4310 }
4311 }
4312
/* Kills the processes belonging to this unit according to kill context 'c' and operation 'k', in preparation
 * for shutting the unit down. 'main_pid' and 'control_pid' are signalled directly (pass 0 if absent);
 * 'main_pid_alien' marks a main process not forked off by us, which we therefore don't wait for. Returns
 * > 0 if we killed something worth waiting for, 0 otherwise, negative errno-style error on failure. */
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k);

        /* SIGHUP follow-ups are only sent on termination requests, and only if the primary signal isn't
         * SIGHUP already. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        /* Log each killed process, except in the quiet regular-termination case. */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* Only wait for processes we forked off ourselves. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we
                         * will not wait for the cgroup members to die
                         * if we are running in a container or if this
                         * is a delegation unit, simply because cgroup
                         * notification is unreliable in these
                         * cases. It doesn't work at all in
                         * containers, and outside of containers it
                         * can be confused easily by left-over
                         * directories in the cgroup — which however
                         * should not exist in non-delegated units. On
                         * the unified hierarchy that's different,
                         * there we get proper events. Hence rely on
                         * them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !UNIT_CGROUP_BOOL(u, delegate)))
                                wait_for_exit = true;

                        /* Follow up with SIGHUP to the whole cgroup, again excluding main/control. */
                        if (send_sighup) {
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                  SIGHUP,
                                                  CGROUP_IGNORE_SELF,
                                                  pid_set,
                                                  NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4437
/* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to be), and
 * additionally build a prefix table in the manager so that newly appearing mount units can easily determine
 * which units to make themselves a dependency of. Returns 0 on success (also if the path was registered
 * already), -EINVAL for relative paths, -EPERM for non-normalized paths, -ENOMEM/-errno otherwise. */
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        char prefix[strlen(path) + 1], *p;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &string_hash_ops);
        if (r < 0)
                return r;

        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path_kill_slashes(p);

        if (!path_is_normalized(p)) {
                free(p);
                return -EPERM;
        }

        if (hashmap_contains(u->requires_mounts_for, p)) {
                free(p);
                return 0;
        }

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        /* On success the hashmap takes ownership of p. */
        r = hashmap_put(u->requires_mounts_for, p, di.data);
        if (r < 0) {
                free(p);
                return r;
        }

        /* Now enter the path and all its prefixes into the manager-level prefix table. */
        PATH_FOREACH_PREFIX_MORE(prefix, p) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        char *q;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x) {
                                free(q);
                                return -ENOMEM;
                        }

                        /* On success the hashmap takes ownership of q and x. */
                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                free(q);
                                set_free(x);
                                return r;
                        }
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4520
/* Makes sure the unit has an ExecRuntime set up: reuses (and takes a reference on) the runtime of a unit we
 * share a namespace with (JoinsNamespaceOf=), otherwise allocates a fresh one. Returns 0 on success or if a
 * runtime is already in place, negative errno-style error on failure. */
int unit_setup_exec_runtime(Unit *u) {
        ExecRuntime **rt;
        size_t offset;
        Unit *other;
        Iterator i;
        void *v;

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        /* Try to get it from somebody else */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {

                *rt = unit_get_exec_runtime(other);
                if (*rt) {
                        exec_runtime_ref(*rt);
                        return 0;
                }
        }

        return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
}
4548
4549 int unit_setup_dynamic_creds(Unit *u) {
4550 ExecContext *ec;
4551 DynamicCreds *dcreds;
4552 size_t offset;
4553
4554 assert(u);
4555
4556 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4557 assert(offset > 0);
4558 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4559
4560 ec = unit_get_exec_context(u);
4561 assert(ec);
4562
4563 if (!ec->dynamic_user)
4564 return 0;
4565
4566 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4567 }
4568
4569 bool unit_type_supported(UnitType t) {
4570 if (_unlikely_(t < 0))
4571 return false;
4572 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4573 return false;
4574
4575 if (!unit_vtable[t]->supported)
4576 return true;
4577
4578 return unit_vtable[t]->supported();
4579 }
4580
/* Logs a structured notice if the directory we are about to mount over is not empty, since mounting would
 * hide its current contents. Best-effort: check failures are logged and otherwise ignored. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = dir_is_empty(where);
        if (r > 0)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                   "WHERE=%s", where,
                   NULL);
}
4603
/* Refuses to mount over a symlink: returns -ELOOP (after logging a structured error) if 'where' is a
 * symlink, 0 otherwise. A failed symlink check is treated as "not a symlink" and merely logged. */
int unit_fail_if_symlink(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        r = is_symlink(where);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
                return 0;
        }
        if (r == 0)
                return 0;

        log_struct(LOG_ERR,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
                   "WHERE=%s", where,
                   NULL);

        return -ELOOP;
}
4628
4629 bool unit_is_pristine(Unit *u) {
4630 assert(u);
4631
4632 /* Check if the unit already exists or is already around,
4633 * in a number of different ways. Note that to cater for unit
4634 * types such as slice, we are generally fine with units that
4635 * are marked UNIT_LOADED even though nothing was
4636 * actually loaded, as those unit types don't require a file
4637 * on disk to validly load. */
4638
4639 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4640 u->fragment_path ||
4641 u->source_path ||
4642 !strv_isempty(u->dropin_paths) ||
4643 u->job ||
4644 u->merged_into);
4645 }
4646
4647 pid_t unit_control_pid(Unit *u) {
4648 assert(u);
4649
4650 if (UNIT_VTABLE(u)->control_pid)
4651 return UNIT_VTABLE(u)->control_pid(u);
4652
4653 return 0;
4654 }
4655
4656 pid_t unit_main_pid(Unit *u) {
4657 assert(u);
4658
4659 if (UNIT_VTABLE(u)->main_pid)
4660 return UNIT_VTABLE(u)->main_pid(u);
4661
4662 return 0;
4663 }
4664
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time verification of the uid_t/gid_t interchangeability assumption above */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* No reference currently held, nothing to drop */
        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
4689
/* Drops the unit's UID reference; see unit_unref_uid_internal() for the destroy_now semantics. */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}
4693
/* Drops the unit's GID reference. The uid_t* cast is safe: unit_unref_uid_internal() verifies via
 * assert_cc that uid_t and gid_t have identical size and invalid value. */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}
4697
4698 static int unit_ref_uid_internal(
4699 Unit *u,
4700 uid_t *ref_uid,
4701 uid_t uid,
4702 bool clean_ipc,
4703 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4704
4705 int r;
4706
4707 assert(u);
4708 assert(ref_uid);
4709 assert(uid_is_valid(uid));
4710 assert(_manager_ref_uid);
4711
4712 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4713 * are actually the same type, and have the same validity rules.
4714 *
4715 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4716 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4717 * drops to zero. */
4718
4719 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4720 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4721
4722 if (*ref_uid == uid)
4723 return 0;
4724
4725 if (uid_is_valid(*ref_uid)) /* Already set? */
4726 return -EBUSY;
4727
4728 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4729 if (r < 0)
4730 return r;
4731
4732 *ref_uid = uid;
4733 return 1;
4734 }
4735
/* Takes a reference on the given UID for this unit; see unit_ref_uid_internal() for return values. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}
4739
/* Takes a reference on the given GID for this unit. The casts are safe: unit_ref_uid_internal()
 * verifies via assert_cc that uid_t and gid_t have identical size and invalid value. */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4743
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither.
         * Returns > 0 if at least one new reference was taken, 0 if nothing changed, < 0 on error. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* Roll back the UID reference taken above, to keep the operation atomic */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
4769
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        /* References the UID/GID pair for this unit, passing along the exec context's remove_ipc
         * setting (false if the unit has no exec context) to control later IPC cleanup. Errors are
         * logged and reported, but intended to be non-fatal for the caller ("proceeding without"). */

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        return r;
}
4784
/* Drops both the unit's UID and GID references in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4791
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0) /* Only announce on the bus if a new reference was actually taken */
                bus_unit_send_change_signal(u);
}
4805
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the previous registration from the manager's lookup table, if any */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "clear the invocation ID", which the reset path below implements */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or deliberate clearing) make sure no stale ID sticks around */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
4842
/* Generates a fresh random invocation ID for this unit and registers it. */
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        return 0;
}
4859
/* Copies the unit's cgroup path and delegation setting into the execution parameters that will be
 * handed to forked-off processes. */
void unit_set_exec_params(Unit *u, ExecParameters *p) {
        assert(u);
        assert(p);

        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, UNIT_CGROUP_BOOL(u, delegate));
}
4867
int unit_fork_helper_process(Unit *u, pid_t *ret) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        /* Make sure the unit's cgroup hierarchy exists before we try to join it below */
        (void) unit_realize_cgroup(u);

        pid = fork();
        if (pid < 0)
                return -errno;

        if (pid == 0) {

                /* Reset signal dispositions inherited from the manager */
                (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
                (void) ignore_signals(SIGPIPE, -1);

                /* Reopen logging, as fd state differs in the child */
                log_close();
                log_open();

                if (u->cgroup_path) {
                        r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                        if (r < 0) {
                                log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                                _exit(EXIT_CGROUP);
                        }
                }

                *ret = getpid_cached();
                return 0;
        }

        *ret = pid;
        return 1;
}
4907
/* Writes back the (reduced) dependency mask 'di' for the dependency of type 'd' that 'u' has on
 * 'other'. If both mask halves dropped to zero the dependency is gone and the entry is removed. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
4922
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        UnitDependency d;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;
                        Iterator i;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                                UnitDependency q;

                                /* Skip entries whose origin bits are not covered by 'mask' at all */
                                if ((di.origin_mask & ~mask) == di.origin_mask)
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if ((dj.destination_mask & ~mask) == dj.destination_mask)
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* unit_update_dependency_mask() may have removed the hashmap entry we were
                                 * iterating over, hence leave the loop and restart the iteration from scratch */
                                done = false;
                                break;
                        }

                } while (!done);
        }
}
4976
4977 static int unit_export_invocation_id(Unit *u) {
4978 const char *p;
4979 int r;
4980
4981 assert(u);
4982
4983 if (u->exported_invocation_id)
4984 return 0;
4985
4986 if (sd_id128_is_null(u->invocation_id))
4987 return 0;
4988
4989 p = strjoina("/run/systemd/units/invocation:", u->id);
4990 r = symlink_atomic(u->invocation_id_string, p);
4991 if (r < 0)
4992 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
4993
4994 u->exported_invocation_id = true;
4995 return 0;
4996 }
4997
4998 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
4999 const char *p;
5000 char buf[2];
5001 int r;
5002
5003 assert(u);
5004 assert(c);
5005
5006 if (u->exported_log_level_max)
5007 return 0;
5008
5009 if (c->log_level_max < 0)
5010 return 0;
5011
5012 assert(c->log_level_max <= 7);
5013
5014 buf[0] = '0' + c->log_level_max;
5015 buf[1] = 0;
5016
5017 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5018 r = symlink_atomic(buf, p);
5019 if (r < 0)
5020 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5021
5022 u->exported_log_level_max = true;
5023 return 0;
5024 }
5025
5026 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5027 _cleanup_close_ int fd = -1;
5028 struct iovec *iovec;
5029 const char *p;
5030 char *pattern;
5031 le64_t *sizes;
5032 ssize_t n;
5033 size_t i;
5034 int r;
5035
5036 if (u->exported_log_extra_fields)
5037 return 0;
5038
5039 if (c->n_log_extra_fields <= 0)
5040 return 0;
5041
5042 sizes = newa(le64_t, c->n_log_extra_fields);
5043 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5044
5045 for (i = 0; i < c->n_log_extra_fields; i++) {
5046 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5047
5048 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5049 iovec[i*2+1] = c->log_extra_fields[i];
5050 }
5051
5052 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5053 pattern = strjoina(p, ".XXXXXX");
5054
5055 fd = mkostemp_safe(pattern);
5056 if (fd < 0)
5057 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5058
5059 n = writev(fd, iovec, c->n_log_extra_fields*2);
5060 if (n < 0) {
5061 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5062 goto fail;
5063 }
5064
5065 (void) fchmod(fd, 0644);
5066
5067 if (rename(pattern, p) < 0) {
5068 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5069 goto fail;
5070 }
5071
5072 u->exported_log_extra_fields = true;
5073 return 0;
5074
5075 fail:
5076 (void) unlink(pattern);
5077 return r;
5078 }
5079
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        /* Only named units of the system instance are exported */
        if (!u->id)
                return;

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The remaining exports only apply to units with an exec context */
        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
        }
}
5112
5113 void unit_unlink_state_files(Unit *u) {
5114 const char *p;
5115
5116 assert(u);
5117
5118 if (!u->id)
5119 return;
5120
5121 if (!MANAGER_IS_SYSTEM(u->manager))
5122 return;
5123
5124 /* Undoes the effect of unit_export_state() */
5125
5126 if (u->exported_invocation_id) {
5127 p = strjoina("/run/systemd/units/invocation:", u->id);
5128 (void) unlink(p);
5129
5130 u->exported_invocation_id = false;
5131 }
5132
5133 if (u->exported_log_level_max) {
5134 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5135 (void) unlink(p);
5136
5137 u->exported_log_level_max = false;
5138 }
5139
5140 if (u->exported_log_extra_fields) {
5141 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5142 (void) unlink(p);
5143
5144 u->exported_log_extra_fields = false;
5145 }
5146 }
5147
/* String <-> enum mapping for the CollectMode= unit setting; the macro below generates the usual
 * collect_mode_to_string()/collect_mode_from_string() lookup helpers from this table. */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);