core: when deserializing a unit, fully restore its cgroup state
thirdparty/systemd.git: src/core/unit.c
1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fileio-label.h"
39 #include "format-util.h"
40 #include "id128-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
43 #include "log.h"
44 #include "macro.h"
45 #include "missing.h"
46 #include "mkdir.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
50 #include "set.h"
51 #include "signal-util.h"
52 #include "special.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-util.h"
56 #include "strv.h"
57 #include "umask-util.h"
58 #include "unit-name.h"
59 #include "unit.h"
60 #include "user-util.h"
61 #include "virt.h"
62
63 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
64 [UNIT_SERVICE] = &service_vtable,
65 [UNIT_SOCKET] = &socket_vtable,
66 [UNIT_BUSNAME] = &busname_vtable,
67 [UNIT_TARGET] = &target_vtable,
68 [UNIT_DEVICE] = &device_vtable,
69 [UNIT_MOUNT] = &mount_vtable,
70 [UNIT_AUTOMOUNT] = &automount_vtable,
71 [UNIT_SWAP] = &swap_vtable,
72 [UNIT_TIMER] = &timer_vtable,
73 [UNIT_PATH] = &path_vtable,
74 [UNIT_SLICE] = &slice_vtable,
75 [UNIT_SCOPE] = &scope_vtable
76 };
77
78 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
79
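/* Allocates a zero-initialized Unit object of the given size (the size of the
 * type-specific structure embedding Unit) and sets up the fields that need
 * non-zero defaults, such as the start rate limit and the invalid cgroup
 * inotify watch descriptor. Returns NULL on allocation failure. */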
80 Unit *unit_new(Manager *m, size_t size) {
81 Unit *u;
82
83 assert(m);
84 assert(size >= sizeof(Unit));
85
86 u = malloc0(size);
87 if (!u)
88 return NULL;
89
90 u->names = set_new(&string_hash_ops);
91 if (!u->names)
92 return mfree(u);
93
94 u->manager = m;
95 u->type = _UNIT_TYPE_INVALID;
96 u->default_dependencies = true;
97 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
98 u->unit_file_preset = -1;
99 u->on_failure_job_mode = JOB_REPLACE;
100 u->cgroup_inotify_wd = -1;
101 u->job_timeout = USEC_INFINITY;
102 u->job_running_timeout = USEC_INFINITY;
103 u->ref_uid = UID_INVALID;
104 u->ref_gid = GID_INVALID;
105 u->cpu_usage_last = NSEC_INFINITY;
106
107 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
108 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
109
110 return u;
111 }
112
113 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
114 Unit *u;
115 int r;
116
117 u = unit_new(m, size);
118 if (!u)
119 return -ENOMEM;
120
121 r = unit_add_name(u, name);
122 if (r < 0) {
123 unit_free(u);
124 return r;
125 }
126
127 *ret = u;
128 return r;
129 }
130
131 bool unit_has_name(Unit *u, const char *name) {
132 assert(u);
133 assert(name);
134
135 return set_contains(u->names, (char*) name);
136 }
137
138 static void unit_init(Unit *u) {
139 CGroupContext *cc;
140 ExecContext *ec;
141 KillContext *kc;
142
143 assert(u);
144 assert(u->manager);
145 assert(u->type >= 0);
146
147 cc = unit_get_cgroup_context(u);
148 if (cc) {
149 cgroup_context_init(cc);
150
151                 /* Copy the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
154
155 cc->cpu_accounting = u->manager->default_cpu_accounting;
156 cc->io_accounting = u->manager->default_io_accounting;
157 cc->blockio_accounting = u->manager->default_blockio_accounting;
158 cc->memory_accounting = u->manager->default_memory_accounting;
159 cc->tasks_accounting = u->manager->default_tasks_accounting;
160
161 if (u->type != UNIT_SLICE)
162 cc->tasks_max = u->manager->default_tasks_max;
163 }
164
165 ec = unit_get_exec_context(u);
166 if (ec)
167 exec_context_init(ec);
168
169 kc = unit_get_kill_context(u);
170 if (kc)
171 kill_context_init(kc);
172
173 if (UNIT_VTABLE(u)->init)
174 UNIT_VTABLE(u)->init(u);
175 }
176
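/* Registers an additional name for this unit. Template names are first
 * resolved against the unit's instance. The name is added to the unit's name
 * set and to the manager's unit hashmap; the first name added also determines
 * the unit's type, id and instance and triggers type-specific initialization. */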
177 int unit_add_name(Unit *u, const char *text) {
178 _cleanup_free_ char *s = NULL, *i = NULL;
179 UnitType t;
180 int r;
181
182 assert(u);
183 assert(text);
184
185 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
186
187 if (!u->instance)
188 return -EINVAL;
189
190 r = unit_name_replace_instance(text, u->instance, &s);
191 if (r < 0)
192 return r;
193 } else {
194 s = strdup(text);
195 if (!s)
196 return -ENOMEM;
197 }
198
199 if (set_contains(u->names, s))
200 return 0;
201 if (hashmap_contains(u->manager->units, s))
202 return -EEXIST;
203
204 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
205 return -EINVAL;
206
207 t = unit_name_to_type(s);
208 if (t < 0)
209 return -EINVAL;
210
211 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
212 return -EINVAL;
213
214 r = unit_name_to_instance(s, &i);
215 if (r < 0)
216 return r;
217
218 if (i && !unit_type_may_template(t))
219 return -EINVAL;
220
221 /* Ensure that this unit is either instanced or not instanced,
222 * but not both. Note that we do allow names with different
223 * instance names however! */
224 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
225 return -EINVAL;
226
227 if (!unit_type_may_alias(t) && !set_isempty(u->names))
228 return -EEXIST;
229
230 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
231 return -E2BIG;
232
233 r = set_put(u->names, s);
234 if (r < 0)
235 return r;
236 assert(r > 0);
237
238 r = hashmap_put(u->manager->units, s, u);
239 if (r < 0) {
240 (void) set_remove(u->names, s);
241 return r;
242 }
243
244 if (u->type == _UNIT_TYPE_INVALID) {
245 u->type = t;
246 u->id = s;
247 u->instance = i;
248
249 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
250
251 unit_init(u);
252
253 i = NULL;
254 }
255
256 s = NULL;
257
258 unit_add_to_dbus_queue(u);
259 return 0;
260 }
261
262 int unit_choose_id(Unit *u, const char *name) {
263 _cleanup_free_ char *t = NULL;
264 char *s, *i;
265 int r;
266
267 assert(u);
268 assert(name);
269
270 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
271
272 if (!u->instance)
273 return -EINVAL;
274
275 r = unit_name_replace_instance(name, u->instance, &t);
276 if (r < 0)
277 return r;
278
279 name = t;
280 }
281
282 /* Selects one of the names of this unit as the id */
283 s = set_get(u->names, (char*) name);
284 if (!s)
285 return -ENOENT;
286
287 /* Determine the new instance from the new id */
288 r = unit_name_to_instance(s, &i);
289 if (r < 0)
290 return r;
291
292 u->id = s;
293
294 free(u->instance);
295 u->instance = i;
296
297 unit_add_to_dbus_queue(u);
298
299 return 0;
300 }
301
302 int unit_set_description(Unit *u, const char *description) {
303 char *s;
304
305 assert(u);
306
307 if (isempty(description))
308 s = NULL;
309 else {
310 s = strdup(description);
311 if (!s)
312 return -ENOMEM;
313 }
314
315 free(u->description);
316 u->description = s;
317
318 unit_add_to_dbus_queue(u);
319 return 0;
320 }
321
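/* Returns true if the unit shall be kept around, false if it may be garbage
 * collected. As a side effect this asks the unit type to release runtime
 * resources once the unit is inactive or failed and has no job queued. */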
322 bool unit_check_gc(Unit *u) {
323 UnitActiveState state;
324 bool inactive;
325 assert(u);
326
327 if (u->job)
328 return true;
329
330 if (u->nop_job)
331 return true;
332
333 state = unit_active_state(u);
334 inactive = state == UNIT_INACTIVE;
335
336                 /* If the unit is inactive or failed and no job is queued for
337 * it, then release its runtime resources */
338 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
339 UNIT_VTABLE(u)->release_resources)
340 UNIT_VTABLE(u)->release_resources(u, inactive);
341
342 /* But we keep the unit object around for longer when it is
343 * referenced or configured to not be gc'ed */
344 if (!inactive)
345 return true;
346
347 if (u->perpetual)
348 return true;
349
350 if (u->refs)
351 return true;
352
353 if (sd_bus_track_count(u->bus_track) > 0)
354 return true;
355
356 if (UNIT_VTABLE(u)->check_gc)
357 if (UNIT_VTABLE(u)->check_gc(u))
358 return true;
359
360 return false;
361 }
362
363 void unit_add_to_load_queue(Unit *u) {
364 assert(u);
365 assert(u->type != _UNIT_TYPE_INVALID);
366
367 if (u->load_state != UNIT_STUB || u->in_load_queue)
368 return;
369
370 LIST_PREPEND(load_queue, u->manager->load_queue, u);
371 u->in_load_queue = true;
372 }
373
374 void unit_add_to_cleanup_queue(Unit *u) {
375 assert(u);
376
377 if (u->in_cleanup_queue)
378 return;
379
380 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
381 u->in_cleanup_queue = true;
382 }
383
384 void unit_add_to_gc_queue(Unit *u) {
385 assert(u);
386
387 if (u->in_gc_queue || u->in_cleanup_queue)
388 return;
389
390 if (unit_check_gc(u))
391 return;
392
393 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
394 u->in_gc_queue = true;
395 }
396
397 void unit_add_to_dbus_queue(Unit *u) {
398 assert(u);
399 assert(u->type != _UNIT_TYPE_INVALID);
400
401 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
402 return;
403
404 /* Shortcut things if nobody cares */
405 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
406 sd_bus_track_count(u->bus_track) <= 0 &&
407 set_isempty(u->manager->private_buses)) {
408 u->sent_dbus_new_signal = true;
409 return;
410 }
411
412 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
413 u->in_dbus_queue = true;
414 }
415
416 static void bidi_set_free(Unit *u, Set *s) {
417 Iterator i;
418 Unit *other;
419
420 assert(u);
421
422 /* Frees the set and makes sure we are dropped from the
423 * inverse pointers */
424
425 SET_FOREACH(other, s, i) {
426 UnitDependency d;
427
428 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
429 set_remove(other->dependencies[d], u);
430
431 unit_add_to_gc_queue(other);
432 }
433
434 set_free(s);
435 }
436
437 static void unit_remove_transient(Unit *u) {
438 char **i;
439
440 assert(u);
441
442 if (!u->transient)
443 return;
444
445 if (u->fragment_path)
446 (void) unlink(u->fragment_path);
447
448 STRV_FOREACH(i, u->dropin_paths) {
449 _cleanup_free_ char *p = NULL, *pp = NULL;
450
451 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
452 if (!p)
453 continue;
454
455 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
456 if (!pp)
457 continue;
458
459 /* Only drop transient drop-ins */
460 if (!path_equal(u->manager->lookup_paths.transient, pp))
461 continue;
462
463 (void) unlink(*i);
464 (void) rmdir(p);
465 }
466 }
467
468 static void unit_free_requires_mounts_for(Unit *u) {
469 char **j;
470
471 STRV_FOREACH(j, u->requires_mounts_for) {
472 char s[strlen(*j) + 1];
473
474 PATH_FOREACH_PREFIX_MORE(s, *j) {
475 char *y;
476 Set *x;
477
478 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
479 if (!x)
480 continue;
481
482 set_remove(x, u);
483
484 if (set_isempty(x)) {
485 hashmap_remove(u->manager->units_requiring_mounts_for, y);
486 free(y);
487 set_free(x);
488 }
489 }
490 }
491
492 u->requires_mounts_for = strv_free(u->requires_mounts_for);
493 }
494
495 static void unit_done(Unit *u) {
496 ExecContext *ec;
497 CGroupContext *cc;
498
499 assert(u);
500
501 if (u->type < 0)
502 return;
503
504 if (UNIT_VTABLE(u)->done)
505 UNIT_VTABLE(u)->done(u);
506
507 ec = unit_get_exec_context(u);
508 if (ec)
509 exec_context_done(ec);
510
511 cc = unit_get_cgroup_context(u);
512 if (cc)
513 cgroup_context_done(cc);
514 }
515
516 void unit_free(Unit *u) {
517 UnitDependency d;
518 Iterator i;
519 char *t;
520
521 if (!u)
522 return;
523
524 if (u->transient_file)
525 fclose(u->transient_file);
526
527 if (!MANAGER_IS_RELOADING(u->manager))
528 unit_remove_transient(u);
529
530 bus_unit_send_removed_signal(u);
531
532 unit_done(u);
533
534 sd_bus_slot_unref(u->match_bus_slot);
535
536 sd_bus_track_unref(u->bus_track);
537 u->deserialized_refs = strv_free(u->deserialized_refs);
538
539 unit_free_requires_mounts_for(u);
540
541 SET_FOREACH(t, u->names, i)
542 hashmap_remove_value(u->manager->units, t, u);
543
544 if (!sd_id128_is_null(u->invocation_id))
545 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
546
547 if (u->job) {
548 Job *j = u->job;
549 job_uninstall(j);
550 job_free(j);
551 }
552
553 if (u->nop_job) {
554 Job *j = u->nop_job;
555 job_uninstall(j);
556 job_free(j);
557 }
558
559 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
560 bidi_set_free(u, u->dependencies[d]);
561
562 if (u->type != _UNIT_TYPE_INVALID)
563 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
564
565 if (u->in_load_queue)
566 LIST_REMOVE(load_queue, u->manager->load_queue, u);
567
568 if (u->in_dbus_queue)
569 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
570
571 if (u->in_cleanup_queue)
572 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
573
574 if (u->in_gc_queue)
575 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
576
577 if (u->in_cgroup_queue)
578 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
579
580 unit_release_cgroup(u);
581
582 unit_unref_uid_gid(u, false);
583
584 (void) manager_update_failed_units(u->manager, u, false);
585 set_remove(u->manager->startup_units, u);
586
587 free(u->description);
588 strv_free(u->documentation);
589 free(u->fragment_path);
590 free(u->source_path);
591 strv_free(u->dropin_paths);
592 free(u->instance);
593
594 free(u->job_timeout_reboot_arg);
595
596 set_free_free(u->names);
597
598 unit_unwatch_all_pids(u);
599
600 condition_free_list(u->conditions);
601 condition_free_list(u->asserts);
602
603 free(u->reboot_arg);
604
605 unit_ref_unset(&u->slice);
606
607 while (u->refs)
608 unit_ref_unset(u->refs);
609
610 free(u);
611 }
612
613 UnitActiveState unit_active_state(Unit *u) {
614 assert(u);
615
616 if (u->load_state == UNIT_MERGED)
617 return unit_active_state(unit_follow_merge(u));
618
619 /* After a reload it might happen that a unit is not correctly
620 * loaded but still has a process around. That's why we won't
621 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
622
623 return UNIT_VTABLE(u)->active_state(u);
624 }
625
626 const char* unit_sub_state_to_string(Unit *u) {
627 assert(u);
628
629 return UNIT_VTABLE(u)->sub_state_to_string(u);
630 }
631
632 static int complete_move(Set **s, Set **other) {
633 int r;
634
635 assert(s);
636 assert(other);
637
638 if (!*other)
639 return 0;
640
641 if (*s) {
642 r = set_move(*s, *other);
643 if (r < 0)
644 return r;
645 } else {
646 *s = *other;
647 *other = NULL;
648 }
649
650 return 0;
651 }
652
653 static int merge_names(Unit *u, Unit *other) {
654 char *t;
655 Iterator i;
656 int r;
657
658 assert(u);
659 assert(other);
660
661 r = complete_move(&u->names, &other->names);
662 if (r < 0)
663 return r;
664
665 set_free_free(other->names);
666 other->names = NULL;
667 other->id = NULL;
668
669 SET_FOREACH(t, u->names, i)
670 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
671
672 return 0;
673 }
674
675 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
676 unsigned n_reserve;
677
678 assert(u);
679 assert(other);
680 assert(d < _UNIT_DEPENDENCY_MAX);
681
682 /*
683 * If u does not have this dependency set allocated, there is no need
684 * to reserve anything. In that case other's set will be transferred
685 * as a whole to u by complete_move().
686 */
687 if (!u->dependencies[d])
688 return 0;
689
690 /* merge_dependencies() will skip a u-on-u dependency */
691 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
692
693 return set_reserve(u->dependencies[d], n_reserve);
694 }
695
696 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
697 Iterator i;
698 Unit *back;
699 int r;
700
701 assert(u);
702 assert(other);
703 assert(d < _UNIT_DEPENDENCY_MAX);
704
705 /* Fix backwards pointers */
706 SET_FOREACH(back, other->dependencies[d], i) {
707 UnitDependency k;
708
709 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
710 /* Do not add dependencies between u and itself */
711 if (back == u) {
712 if (set_remove(back->dependencies[k], other))
713 maybe_warn_about_dependency(u, other_id, k);
714 } else {
715 r = set_remove_and_put(back->dependencies[k], other, u);
716 if (r == -EEXIST)
717 set_remove(back->dependencies[k], other);
718 else
719 assert(r >= 0 || r == -ENOENT);
720 }
721 }
722 }
723
724 /* Also do not move dependencies on u to itself */
725 back = set_remove(other->dependencies[d], u);
726 if (back)
727 maybe_warn_about_dependency(u, other_id, d);
728
729 /* The move cannot fail. The caller must have performed a reservation. */
730 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
731
732 other->dependencies[d] = set_free(other->dependencies[d]);
733 }
734
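/* Merges the unit 'other' into 'u', turning 'other' into a mere alias: its
 * names, references and dependencies are transferred to 'u' and its load
 * state is set to UNIT_MERGED. This refuses to merge units of different
 * types, or an 'other' that already carries state (a job, an active state or
 * loaded configuration). */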
735 int unit_merge(Unit *u, Unit *other) {
736 UnitDependency d;
737 const char *other_id = NULL;
738 int r;
739
740 assert(u);
741 assert(other);
742 assert(u->manager == other->manager);
743 assert(u->type != _UNIT_TYPE_INVALID);
744
745 other = unit_follow_merge(other);
746
747 if (other == u)
748 return 0;
749
750 if (u->type != other->type)
751 return -EINVAL;
752
753 if (!u->instance != !other->instance)
754 return -EINVAL;
755
756 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
757 return -EEXIST;
758
759 if (other->load_state != UNIT_STUB &&
760 other->load_state != UNIT_NOT_FOUND)
761 return -EEXIST;
762
763 if (other->job)
764 return -EEXIST;
765
766 if (other->nop_job)
767 return -EEXIST;
768
769 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
770 return -EEXIST;
771
772 if (other->id)
773 other_id = strdupa(other->id);
774
775 /* Make reservations to ensure merge_dependencies() won't fail */
776 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
777 r = reserve_dependencies(u, other, d);
778 /*
779                  * We don't roll back reservations if we fail. We don't have
780 * a way to undo reservations. A reservation is not a leak.
781 */
782 if (r < 0)
783 return r;
784 }
785
786 /* Merge names */
787 r = merge_names(u, other);
788 if (r < 0)
789 return r;
790
791 /* Redirect all references */
792 while (other->refs)
793 unit_ref_set(other->refs, u);
794
795 /* Merge dependencies */
796 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
797 merge_dependencies(u, other, other_id, d);
798
799 other->load_state = UNIT_MERGED;
800 other->merged_into = u;
801
802 /* If there is still some data attached to the other node, we
803 * don't need it anymore, and can free it. */
804 if (other->load_state != UNIT_STUB)
805 if (UNIT_VTABLE(other)->done)
806 UNIT_VTABLE(other)->done(other);
807
808 unit_add_to_dbus_queue(u);
809 unit_add_to_cleanup_queue(other);
810
811 return 0;
812 }
813
814 int unit_merge_by_name(Unit *u, const char *name) {
815 _cleanup_free_ char *s = NULL;
816 Unit *other;
817 int r;
818
819 assert(u);
820 assert(name);
821
822 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
823 if (!u->instance)
824 return -EINVAL;
825
826 r = unit_name_replace_instance(name, u->instance, &s);
827 if (r < 0)
828 return r;
829
830 name = s;
831 }
832
833 other = manager_get_unit(u->manager, name);
834 if (other)
835 return unit_merge(u, other);
836
837 return unit_add_name(u, name);
838 }
839
840 Unit* unit_follow_merge(Unit *u) {
841 assert(u);
842
843 while (u->load_state == UNIT_MERGED)
844 assert_se(u = u->merged_into);
845
846 return u;
847 }
848
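/* Adds the implicit dependencies that follow from an ExecContext: mount
 * dependencies for WorkingDirectory=, RootDirectory= and RootImage=, the
 * tmpfiles setup service for PrivateTmp=, and ordering after the journald
 * socket when stdout/stderr are connected to the journal, syslog or kmsg
 * (the latter two only for the system manager). */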
849 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
850 int r;
851
852 assert(u);
853 assert(c);
854
855 if (c->working_directory) {
856 r = unit_require_mounts_for(u, c->working_directory);
857 if (r < 0)
858 return r;
859 }
860
861 if (c->root_directory) {
862 r = unit_require_mounts_for(u, c->root_directory);
863 if (r < 0)
864 return r;
865 }
866
867 if (c->root_image) {
868 r = unit_require_mounts_for(u, c->root_image);
869 if (r < 0)
870 return r;
871 }
872
873 if (!MANAGER_IS_SYSTEM(u->manager))
874 return 0;
875
876 if (c->private_tmp) {
877 const char *p;
878
879 FOREACH_STRING(p, "/tmp", "/var/tmp") {
880 r = unit_require_mounts_for(u, p);
881 if (r < 0)
882 return r;
883 }
884
885 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true);
886 if (r < 0)
887 return r;
888 }
889
890 if (!IN_SET(c->std_output,
891 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
892 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
893 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
894 !IN_SET(c->std_error,
895 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
896 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
897 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
898 return 0;
899
900 /* If syslog or kernel logging is requested, make sure our own
901 * logging daemon is run first. */
902
903 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
904 if (r < 0)
905 return r;
906
907 return 0;
908 }
909
910 const char *unit_description(Unit *u) {
911 assert(u);
912
913 if (u->description)
914 return u->description;
915
916 return strna(u->id);
917 }
918
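/* Dumps a multi-line, human-readable description of the unit's current state
 * to the given file, prefixing every line with 'prefix'. */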
919 void unit_dump(Unit *u, FILE *f, const char *prefix) {
920 char *t, **j;
921 UnitDependency d;
922 Iterator i;
923 const char *prefix2;
924 char
925 timestamp0[FORMAT_TIMESTAMP_MAX],
926 timestamp1[FORMAT_TIMESTAMP_MAX],
927 timestamp2[FORMAT_TIMESTAMP_MAX],
928 timestamp3[FORMAT_TIMESTAMP_MAX],
929 timestamp4[FORMAT_TIMESTAMP_MAX],
930 timespan[FORMAT_TIMESPAN_MAX];
931 Unit *following;
932 _cleanup_set_free_ Set *following_set = NULL;
933 int r;
934 const char *n;
935
936 assert(u);
937 assert(u->type >= 0);
938
939 prefix = strempty(prefix);
940 prefix2 = strjoina(prefix, "\t");
941
942 fprintf(f,
943 "%s-> Unit %s:\n"
944 "%s\tDescription: %s\n"
945 "%s\tInstance: %s\n"
946 "%s\tUnit Load State: %s\n"
947 "%s\tUnit Active State: %s\n"
948 "%s\tState Change Timestamp: %s\n"
949 "%s\tInactive Exit Timestamp: %s\n"
950 "%s\tActive Enter Timestamp: %s\n"
951 "%s\tActive Exit Timestamp: %s\n"
952 "%s\tInactive Enter Timestamp: %s\n"
953 "%s\tGC Check Good: %s\n"
954 "%s\tNeed Daemon Reload: %s\n"
955 "%s\tTransient: %s\n"
956 "%s\tPerpetual: %s\n"
957 "%s\tSlice: %s\n"
958 "%s\tCGroup: %s\n"
959 "%s\tCGroup realized: %s\n",
960 prefix, u->id,
961 prefix, unit_description(u),
962 prefix, strna(u->instance),
963 prefix, unit_load_state_to_string(u->load_state),
964 prefix, unit_active_state_to_string(unit_active_state(u)),
965 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
966 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
967 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
968 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
969 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
970 prefix, yes_no(unit_check_gc(u)),
971 prefix, yes_no(unit_need_daemon_reload(u)),
972 prefix, yes_no(u->transient),
973 prefix, yes_no(u->perpetual),
974 prefix, strna(unit_slice_name(u)),
975 prefix, strna(u->cgroup_path),
976 prefix, yes_no(u->cgroup_realized));
977
978 if (u->cgroup_realized_mask != 0) {
979 _cleanup_free_ char *s = NULL;
980 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
981 fprintf(f, "%s\tCGroup mask: %s\n", prefix, strnull(s));
982 }
983 if (u->cgroup_members_mask != 0) {
984 _cleanup_free_ char *s = NULL;
985 (void) cg_mask_to_string(u->cgroup_members_mask, &s);
986 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
987 }
988
989 SET_FOREACH(t, u->names, i)
990 fprintf(f, "%s\tName: %s\n", prefix, t);
991
992 if (!sd_id128_is_null(u->invocation_id))
993 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
994 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
995
996 STRV_FOREACH(j, u->documentation)
997 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
998
999 following = unit_following(u);
1000 if (following)
1001 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1002
1003 r = unit_following_set(u, &following_set);
1004 if (r >= 0) {
1005 Unit *other;
1006
1007 SET_FOREACH(other, following_set, i)
1008 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1009 }
1010
1011 if (u->fragment_path)
1012 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1013
1014 if (u->source_path)
1015 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1016
1017 STRV_FOREACH(j, u->dropin_paths)
1018 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1019
1020 if (u->job_timeout != USEC_INFINITY)
1021 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1022
1023 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1024 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1025
1026 if (u->job_timeout_reboot_arg)
1027 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1028
1029 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1030 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1031
1032 if (dual_timestamp_is_set(&u->condition_timestamp))
1033 fprintf(f,
1034 "%s\tCondition Timestamp: %s\n"
1035 "%s\tCondition Result: %s\n",
1036 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1037 prefix, yes_no(u->condition_result));
1038
1039 if (dual_timestamp_is_set(&u->assert_timestamp))
1040 fprintf(f,
1041 "%s\tAssert Timestamp: %s\n"
1042 "%s\tAssert Result: %s\n",
1043 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1044 prefix, yes_no(u->assert_result));
1045
1046 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1047 Unit *other;
1048
1049 SET_FOREACH(other, u->dependencies[d], i)
1050 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1051 }
1052
1053 if (!strv_isempty(u->requires_mounts_for)) {
1054 fprintf(f,
1055 "%s\tRequiresMountsFor:", prefix);
1056
1057 STRV_FOREACH(j, u->requires_mounts_for)
1058 fprintf(f, " %s", *j);
1059
1060 fputs("\n", f);
1061 }
1062
1063 if (u->load_state == UNIT_LOADED) {
1064
1065 fprintf(f,
1066 "%s\tStopWhenUnneeded: %s\n"
1067 "%s\tRefuseManualStart: %s\n"
1068 "%s\tRefuseManualStop: %s\n"
1069 "%s\tDefaultDependencies: %s\n"
1070 "%s\tOnFailureJobMode: %s\n"
1071 "%s\tIgnoreOnIsolate: %s\n",
1072 prefix, yes_no(u->stop_when_unneeded),
1073 prefix, yes_no(u->refuse_manual_start),
1074 prefix, yes_no(u->refuse_manual_stop),
1075 prefix, yes_no(u->default_dependencies),
1076 prefix, job_mode_to_string(u->on_failure_job_mode),
1077 prefix, yes_no(u->ignore_on_isolate));
1078
1079 if (UNIT_VTABLE(u)->dump)
1080 UNIT_VTABLE(u)->dump(u, f, prefix2);
1081
1082 } else if (u->load_state == UNIT_MERGED)
1083 fprintf(f,
1084 "%s\tMerged into: %s\n",
1085 prefix, u->merged_into->id);
1086 else if (u->load_state == UNIT_ERROR)
1087 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1088
1089 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1090 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1091
1092 if (u->job)
1093 job_dump(u->job, f, prefix2);
1094
1095 if (u->nop_job)
1096 job_dump(u->nop_job, f, prefix2);
1097 }
1098
1099 /* Common implementation for multiple backends */
1100 int unit_load_fragment_and_dropin(Unit *u) {
1101 Unit *t;
1102 int r;
1103
1104 assert(u);
1105
1106 /* Load a .{service,socket,...} file */
1107 r = unit_load_fragment(u);
1108 if (r < 0)
1109 return r;
1110
1111 if (u->load_state == UNIT_STUB)
1112 return -ENOENT;
1113
1114 /* If the unit is an alias and the final unit has already been
1115 * loaded, there's no point in reloading the dropins one more time. */
1116 t = unit_follow_merge(u);
1117 if (t != u && t->load_state != UNIT_STUB)
1118 return 0;
1119
1120 return unit_load_dropin(t);
1121 }
1122
1123 /* Common implementation for multiple backends */
1124 int unit_load_fragment_and_dropin_optional(Unit *u) {
1125 Unit *t;
1126 int r;
1127
1128 assert(u);
1129
1130 /* Same as unit_load_fragment_and_dropin(), but whether
1131 * something can be loaded or not doesn't matter. */
1132
1133 /* Load a .service file */
1134 r = unit_load_fragment(u);
1135 if (r < 0)
1136 return r;
1137
1138 if (u->load_state == UNIT_STUB)
1139 u->load_state = UNIT_LOADED;
1140
1141 /* If the unit is an alias and the final unit has already been
1142 * loaded, there's no point in reloading the dropins one more time. */
1143 t = unit_follow_merge(u);
1144 if (t != u && t->load_state != UNIT_STUB)
1145 return 0;
1146
1147 return unit_load_dropin(t);
1148 }
1149
1150 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1151 assert(u);
1152 assert(target);
1153
1154 if (target->type != UNIT_TARGET)
1155 return 0;
1156
1157         /* Only add the dependency if both units are loaded, so that
1158          * the loop check below is reliable */
1159 if (u->load_state != UNIT_LOADED ||
1160 target->load_state != UNIT_LOADED)
1161 return 0;
1162
1163 /* If either side wants no automatic dependencies, then let's
1164 * skip this */
1165 if (!u->default_dependencies ||
1166 !target->default_dependencies)
1167 return 0;
1168
1169 /* Don't create loops */
1170 if (set_get(target->dependencies[UNIT_BEFORE], u))
1171 return 0;
1172
1173 return unit_add_dependency(target, UNIT_AFTER, u, true);
1174 }
1175
1176 static int unit_add_target_dependencies(Unit *u) {
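/* For every target unit that pulls this unit in (via RequiredBy=, RequisiteOf=,
 * WantedBy= or BoundBy=), adds an implicit After= on the target so that the
 * target is ordered after this unit, unless default dependencies are disabled
 * on either side. */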
1177
1178 static const UnitDependency deps[] = {
1179 UNIT_REQUIRED_BY,
1180 UNIT_REQUISITE_OF,
1181 UNIT_WANTED_BY,
1182 UNIT_BOUND_BY
1183 };
1184
1185 Unit *target;
1186 Iterator i;
1187 unsigned k;
1188 int r = 0;
1189
1190 assert(u);
1191
1192 for (k = 0; k < ELEMENTSOF(deps); k++)
1193 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1194 r = unit_add_default_target_dependency(u, target);
1195 if (r < 0)
1196 return r;
1197 }
1198
1199 return r;
1200 }
1201
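/* Units with a cgroup context are ordered after, and require, the slice they
 * are placed in; if no slice is configured, the root slice (-.slice) is used
 * instead. */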
1202 static int unit_add_slice_dependencies(Unit *u) {
1203 assert(u);
1204
1205 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1206 return 0;
1207
1208 if (UNIT_ISSET(u->slice))
1209 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1210
1211 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1212 return 0;
1213
1214 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1215 }
1216
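/* For every path listed in RequiresMountsFor=, adds After= dependencies (and
 * Requires=, where the mount unit has a fragment file) on the .mount units
 * covering the path and each of its parent directories. */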
1217 static int unit_add_mount_dependencies(Unit *u) {
1218 char **i;
1219 int r;
1220
1221 assert(u);
1222
1223 STRV_FOREACH(i, u->requires_mounts_for) {
1224 char prefix[strlen(*i) + 1];
1225
1226 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1227 _cleanup_free_ char *p = NULL;
1228 Unit *m;
1229
1230 r = unit_name_from_path(prefix, ".mount", &p);
1231 if (r < 0)
1232 return r;
1233
1234 m = manager_get_unit(u->manager, p);
1235 if (!m) {
1236 /* Make sure to load the mount unit if
1237 * it exists. If so the dependencies
1238 * on this unit will be added later
1239 * during the loading of the mount
1240 * unit. */
1241 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1242 continue;
1243 }
1244 if (m == u)
1245 continue;
1246
1247 if (m->load_state != UNIT_LOADED)
1248 continue;
1249
1250 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1251 if (r < 0)
1252 return r;
1253
1254 if (m->fragment_path) {
1255 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1256 if (r < 0)
1257 return r;
1258 }
1259 }
1260 }
1261
1262 return 0;
1263 }
1264
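/* If the unit carries any Startup*= cgroup settings (StartupCPUShares=,
 * StartupIOWeight=, StartupBlockIOWeight=), registers it in the manager's
 * startup_units set so that these values can be applied while booting up. */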
1265 static int unit_add_startup_units(Unit *u) {
1266 CGroupContext *c;
1267 int r;
1268
1269 c = unit_get_cgroup_context(u);
1270 if (!c)
1271 return 0;
1272
1273 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1274 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1275 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1276 return 0;
1277
1278 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1279 if (r < 0)
1280 return r;
1281
1282 return set_put(u->manager->startup_units, u);
1283 }
1284
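/* Loads the unit's configuration via the type-specific load routine and, on
 * success, adds the implicit target, slice, mount and startup-unit
 * dependencies. On failure the load state is set to UNIT_ERROR (or
 * UNIT_NOT_FOUND) and the error is recorded in u->load_error. */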
1285 int unit_load(Unit *u) {
1286 int r;
1287
1288 assert(u);
1289
1290 if (u->in_load_queue) {
1291 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1292 u->in_load_queue = false;
1293 }
1294
1295 if (u->type == _UNIT_TYPE_INVALID)
1296 return -EINVAL;
1297
1298 if (u->load_state != UNIT_STUB)
1299 return 0;
1300
1301 if (u->transient_file) {
1302 r = fflush_and_check(u->transient_file);
1303 if (r < 0)
1304 goto fail;
1305
1306 fclose(u->transient_file);
1307 u->transient_file = NULL;
1308
1309 u->fragment_mtime = now(CLOCK_REALTIME);
1310 }
1311
1312 if (UNIT_VTABLE(u)->load) {
1313 r = UNIT_VTABLE(u)->load(u);
1314 if (r < 0)
1315 goto fail;
1316 }
1317
1318 if (u->load_state == UNIT_STUB) {
1319 r = -ENOENT;
1320 goto fail;
1321 }
1322
1323 if (u->load_state == UNIT_LOADED) {
1324
1325 r = unit_add_target_dependencies(u);
1326 if (r < 0)
1327 goto fail;
1328
1329 r = unit_add_slice_dependencies(u);
1330 if (r < 0)
1331 goto fail;
1332
1333 r = unit_add_mount_dependencies(u);
1334 if (r < 0)
1335 goto fail;
1336
1337 r = unit_add_startup_units(u);
1338 if (r < 0)
1339 goto fail;
1340
1341 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1342                         log_unit_error(u, "More than one OnFailure= dependency specified but OnFailureJobMode=isolate set. Refusing.");
1343 r = -EINVAL;
1344 goto fail;
1345 }
1346
1347 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1348 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1349
1350 unit_update_cgroup_members_masks(u);
1351 }
1352
1353 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1354
1355 unit_add_to_dbus_queue(unit_follow_merge(u));
1356 unit_add_to_gc_queue(u);
1357
1358 return 0;
1359
1360 fail:
1361 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1362 u->load_error = r;
1363 unit_add_to_dbus_queue(u);
1364 unit_add_to_gc_queue(u);
1365
1366 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1367
1368 return r;
1369 }
1370
1371 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1372 Condition *c;
1373 int triggered = -1;
1374
1375 assert(u);
1376 assert(to_string);
1377
1378 /* If the condition list is empty, then it is true */
1379 if (!first)
1380 return true;
1381
1382 /* Otherwise, if all of the non-trigger conditions apply and
1383 * if any of the trigger conditions apply (unless there are
1384 * none) we return true */
1385 LIST_FOREACH(conditions, c, first) {
1386 int r;
1387
1388 r = condition_test(c);
1389 if (r < 0)
1390 log_unit_warning(u,
1391 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1392 to_string(c->type),
1393 c->trigger ? "|" : "",
1394 c->negate ? "!" : "",
1395 c->parameter);
1396 else
1397 log_unit_debug(u,
1398 "%s=%s%s%s %s.",
1399 to_string(c->type),
1400 c->trigger ? "|" : "",
1401 c->negate ? "!" : "",
1402 c->parameter,
1403 condition_result_to_string(c->result));
1404
1405 if (!c->trigger && r <= 0)
1406 return false;
1407
1408 if (c->trigger && triggered <= 0)
1409 triggered = r > 0;
1410 }
1411
1412 return triggered != 0;
1413 }
1414
1415 static bool unit_condition_test(Unit *u) {
1416 assert(u);
1417
1418 dual_timestamp_get(&u->condition_timestamp);
1419 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1420
1421 return u->condition_result;
1422 }
1423
1424 static bool unit_assert_test(Unit *u) {
1425 assert(u);
1426
1427 dual_timestamp_get(&u->assert_timestamp);
1428 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1429
1430 return u->assert_result;
1431 }
1432
1433 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1434 DISABLE_WARNING_FORMAT_NONLITERAL;
1435 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1436 REENABLE_WARNING;
1437 }
1438
1439 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1440 const char *format;
1441 const UnitStatusMessageFormats *format_table;
1442
1443 assert(u);
1444 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1445
1446 if (t != JOB_RELOAD) {
1447 format_table = &UNIT_VTABLE(u)->status_message_formats;
1448 if (format_table) {
1449 format = format_table->starting_stopping[t == JOB_STOP];
1450 if (format)
1451 return format;
1452 }
1453 }
1454
1455 /* Return generic strings */
1456 if (t == JOB_START)
1457 return "Starting %s.";
1458 else if (t == JOB_STOP)
1459 return "Stopping %s.";
1460 else
1461 return "Reloading %s.";
1462 }
1463
1464 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1465 const char *format;
1466
1467 assert(u);
1468
1469 /* Reload status messages have traditionally not been printed to console. */
1470 if (!IN_SET(t, JOB_START, JOB_STOP))
1471 return;
1472
1473 format = unit_get_status_message_format(u, t);
1474
1475 DISABLE_WARNING_FORMAT_NONLITERAL;
1476 unit_status_printf(u, "", format);
1477 REENABLE_WARNING;
1478 }
1479
1480 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1481 const char *format, *mid;
1482 char buf[LINE_MAX];
1483
1484 assert(u);
1485
1486 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1487 return;
1488
1489 if (log_on_console())
1490 return;
1491
1492 /* We log status messages for all units and all operations. */
1493
1494 format = unit_get_status_message_format(u, t);
1495
1496 DISABLE_WARNING_FORMAT_NONLITERAL;
1497 snprintf(buf, sizeof buf, format, unit_description(u));
1498 REENABLE_WARNING;
1499
1500 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1501 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1502 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1503
1504 /* Note that we deliberately use LOG_MESSAGE() instead of
1505 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1506 * closely what is written to screen using the status output,
1507                  * which is supposed to be the highest level, friendliest output
1508 * possible, which means we should avoid the low-level unit
1509 * name. */
1510 log_struct(LOG_INFO,
1511 LOG_MESSAGE("%s", buf),
1512 LOG_UNIT_ID(u),
1513 mid,
1514 NULL);
1515 }
1516
1517 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1518 assert(u);
1519 assert(t >= 0);
1520 assert(t < _JOB_TYPE_MAX);
1521
1522 unit_status_log_starting_stopping_reloading(u, t);
1523 unit_status_print_starting_stopping(u, t);
1524 }
1525
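/* Checks the unit's start rate limit. If the limit has been exceeded, marks
 * the unit accordingly and triggers the configured StartLimitAction=
 * emergency action. Returns 0 if starting is allowed. */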
1526 int unit_start_limit_test(Unit *u) {
1527 assert(u);
1528
1529 if (ratelimit_test(&u->start_limit)) {
1530 u->start_limit_hit = false;
1531 return 0;
1532 }
1533
1534 log_unit_warning(u, "Start request repeated too quickly.");
1535 u->start_limit_hit = true;
1536
1537 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1538 }
1539
1540 bool unit_shall_confirm_spawn(Unit *u) {
1541 assert(u);
1542
1543 if (manager_is_confirm_spawn_disabled(u->manager))
1544 return false;
1545
1546         /* For some reason, units remaining in the same process group
1547 * as PID 1 fail to acquire the console even if it's not used
1548 * by any process. So skip the confirmation question for them. */
1549 return !unit_get_exec_context(u)->same_pgrp;
1550 }
1551
1552 static bool unit_verify_deps(Unit *u) {
1553 Unit *other;
1554 Iterator j;
1555
1556 assert(u);
1557
1558 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1559 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1560 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1561 * conjunction with After= as for them any such check would make things entirely racy. */
1562
1563 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], j) {
1564
1565 if (!set_contains(u->dependencies[UNIT_AFTER], other))
1566 continue;
1567
1568 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1569 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1570 return false;
1571 }
1572 }
1573
1574 return true;
1575 }
1576
1577 /* Errors:
1578 * -EBADR: This unit type does not support starting.
1579 * -EALREADY: Unit is already started.
1580 * -EAGAIN: An operation is already in progress. Retry later.
1581 * -ECANCELED: Too many requests for now.
1582 * -EPROTO: Assert failed
1583 * -EINVAL: Unit not loaded
1584 * -EOPNOTSUPP: Unit type not supported
1585 * -ENOLINK: The necessary dependencies are not fulfilled.
1586 */
1587 int unit_start(Unit *u) {
1588 UnitActiveState state;
1589 Unit *following;
1590
1591 assert(u);
1592
1593 /* If this is already started, then this will succeed. Note
1594 * that this will even succeed if this unit is not startable
1595 * by the user. This is relied on to detect when we need to
1596 * wait for units and when waiting is finished. */
1597 state = unit_active_state(u);
1598 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1599 return -EALREADY;
1600
1601 /* Units that aren't loaded cannot be started */
1602 if (u->load_state != UNIT_LOADED)
1603 return -EINVAL;
1604
1605 /* If the conditions failed, don't do anything at all. If we
1606 * already are activating this call might still be useful to
1607 * speed up activation in case there is some hold-off time,
1608 * but we don't want to recheck the condition in that case. */
1609 if (state != UNIT_ACTIVATING &&
1610 !unit_condition_test(u)) {
1611 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1612 return -EALREADY;
1613 }
1614
1615 /* If the asserts failed, fail the entire job */
1616 if (state != UNIT_ACTIVATING &&
1617 !unit_assert_test(u)) {
1618 log_unit_notice(u, "Starting requested but asserts failed.");
1619 return -EPROTO;
1620 }
1621
1622 /* Units of types that aren't supported cannot be
1623 * started. Note that we do this test only after the condition
1624 * checks, so that we rather return condition check errors
1625 * (which are usually not considered a true failure) than "not
1626 * supported" errors (which are considered a failure).
1627 */
1628 if (!unit_supported(u))
1629 return -EOPNOTSUPP;
1630
1631 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1632 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1633 * effect anymore, due to a reload or due to a failed condition. */
1634 if (!unit_verify_deps(u))
1635 return -ENOLINK;
1636
1637 /* Forward to the main object, if we aren't it. */
1638 following = unit_following(u);
1639 if (following) {
1640 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1641 return unit_start(following);
1642 }
1643
1644 /* If it is stopped, but we cannot start it, then fail */
1645 if (!UNIT_VTABLE(u)->start)
1646 return -EBADR;
1647
1648 /* We don't suppress calls to ->start() here when we are
1649 * already starting, to allow this request to be used as a
1650 * "hurry up" call, for example when the unit is in some "auto
1651 * restart" state where it waits for a holdoff timer to elapse
1652 * before it will start again. */
1653
1654 unit_add_to_dbus_queue(u);
1655
1656 return UNIT_VTABLE(u)->start(u);
1657 }
1658
1659 bool unit_can_start(Unit *u) {
1660 assert(u);
1661
1662 if (u->load_state != UNIT_LOADED)
1663 return false;
1664
1665 if (!unit_supported(u))
1666 return false;
1667
1668 return !!UNIT_VTABLE(u)->start;
1669 }
1670
1671 bool unit_can_isolate(Unit *u) {
1672 assert(u);
1673
1674 return unit_can_start(u) &&
1675 u->allow_isolate;
1676 }
1677
1678 /* Errors:
1679 * -EBADR: This unit type does not support stopping.
1680 * -EALREADY: Unit is already stopped.
1681 * -EAGAIN: An operation is already in progress. Retry later.
1682 */
1683 int unit_stop(Unit *u) {
1684 UnitActiveState state;
1685 Unit *following;
1686
1687 assert(u);
1688
1689 state = unit_active_state(u);
1690 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1691 return -EALREADY;
1692
1693 following = unit_following(u);
1694 if (following) {
1695 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1696 return unit_stop(following);
1697 }
1698
1699 if (!UNIT_VTABLE(u)->stop)
1700 return -EBADR;
1701
1702 unit_add_to_dbus_queue(u);
1703
1704 return UNIT_VTABLE(u)->stop(u);
1705 }
1706
1707 bool unit_can_stop(Unit *u) {
1708 assert(u);
1709
1710 if (!unit_supported(u))
1711 return false;
1712
1713 if (u->perpetual)
1714 return false;
1715
1716 return !!UNIT_VTABLE(u)->stop;
1717 }
1718
1719 /* Errors:
1720 * -EBADR: This unit type does not support reloading.
1721 * -ENOEXEC: Unit is not started.
1722 * -EAGAIN: An operation is already in progress. Retry later.
1723 */
1724 int unit_reload(Unit *u) {
1725 UnitActiveState state;
1726 Unit *following;
1727
1728 assert(u);
1729
1730 if (u->load_state != UNIT_LOADED)
1731 return -EINVAL;
1732
1733 if (!unit_can_reload(u))
1734 return -EBADR;
1735
1736 state = unit_active_state(u);
1737 if (state == UNIT_RELOADING)
1738 return -EALREADY;
1739
1740 if (state != UNIT_ACTIVE) {
1741 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1742 return -ENOEXEC;
1743 }
1744
1745 following = unit_following(u);
1746 if (following) {
1747 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1748 return unit_reload(following);
1749 }
1750
1751 unit_add_to_dbus_queue(u);
1752
1753 return UNIT_VTABLE(u)->reload(u);
1754 }
1755
1756 bool unit_can_reload(Unit *u) {
1757 assert(u);
1758
1759 if (!UNIT_VTABLE(u)->reload)
1760 return false;
1761
1762 if (!UNIT_VTABLE(u)->can_reload)
1763 return true;
1764
1765 return UNIT_VTABLE(u)->can_reload(u);
1766 }
1767
1768 static void unit_check_unneeded(Unit *u) {
1769
1770 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1771
1772 static const UnitDependency needed_dependencies[] = {
1773 UNIT_REQUIRED_BY,
1774 UNIT_REQUISITE_OF,
1775 UNIT_WANTED_BY,
1776 UNIT_BOUND_BY,
1777 };
1778
1779 Unit *other;
1780 Iterator i;
1781 unsigned j;
1782 int r;
1783
1784 assert(u);
1785
1786 /* If this service shall be shut down when unneeded then do
1787 * so. */
1788
1789 if (!u->stop_when_unneeded)
1790 return;
1791
1792 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1793 return;
1794
1795 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1796 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1797 if (unit_active_or_pending(other))
1798 return;
1799
1800         /* If stopping a unit fails continuously we might enter a stop
1801          * loop here, hence rate-limit how often we act on the unit
1802          * being unneeded. */
1803 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1804 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1805 return;
1806 }
1807
1808 log_unit_info(u, "Unit not needed anymore. Stopping.");
1809
1810 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1811 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1812 if (r < 0)
1813 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1814 }
1815
1816 static void unit_check_binds_to(Unit *u) {
1817 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1818 bool stop = false;
1819 Unit *other;
1820 Iterator i;
1821 int r;
1822
1823 assert(u);
1824
1825 if (u->job)
1826 return;
1827
1828 if (unit_active_state(u) != UNIT_ACTIVE)
1829 return;
1830
1831 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1832 if (other->job)
1833 continue;
1834
1835 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1836 continue;
1837
1838 stop = true;
1839 break;
1840 }
1841
1842 if (!stop)
1843 return;
1844
1845         /* If stopping a unit fails continuously we might enter a stop
1846          * loop here, hence rate-limit how often we react to the bound
1847          * unit being gone. */
1848 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1849 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1850 return;
1851 }
1852
1853 assert(other);
1854 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1855
1856 /* A unit we need to run is gone. Sniff. Let's stop this. */
1857 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1858 if (r < 0)
1859 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1860 }
1861
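/* Invoked when the unit became active without a job requesting it: queues
 * start jobs for Requires=, BindsTo= and Wants= dependencies that are not
 * ordered after us, and stop jobs for Conflicts= and ConflictedBy= units. */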
1862 static void retroactively_start_dependencies(Unit *u) {
1863 Iterator i;
1864 Unit *other;
1865
1866 assert(u);
1867 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1868
1869 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1870 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1871 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1872 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1873
1874 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1875 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1876 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1877 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1878
1879 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1880 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1881 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1882 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1883
1884 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1885 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1886 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1887
1888 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1889 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1890 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1891 }
1892
1893 static void retroactively_stop_dependencies(Unit *u) {
1894 Iterator i;
1895 Unit *other;
1896
1897 assert(u);
1898 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1899
1900 /* Pull down units which are bound to us recursively if enabled */
1901 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1902 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1903 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1904 }
1905
1906 static void check_unneeded_dependencies(Unit *u) {
1907 Iterator i;
1908 Unit *other;
1909
1910 assert(u);
1911 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1912
1913 /* Garbage collect services that might not be needed anymore, if enabled */
1914 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1915 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1916 unit_check_unneeded(other);
1917 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1918 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1919 unit_check_unneeded(other);
1920 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1921 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1922 unit_check_unneeded(other);
1923 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1924 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1925 unit_check_unneeded(other);
1926 }
1927
1928 void unit_start_on_failure(Unit *u) {
1929 Unit *other;
1930 Iterator i;
1931
1932 assert(u);
1933
1934 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1935 return;
1936
1937 log_unit_info(u, "Triggering OnFailure= dependencies.");
1938
1939 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1940 int r;
1941
1942 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1943 if (r < 0)
1944 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1945 }
1946 }
1947
1948 void unit_trigger_notify(Unit *u) {
1949 Unit *other;
1950 Iterator i;
1951
1952 assert(u);
1953
1954 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1955 if (UNIT_VTABLE(other)->trigger_notify)
1956 UNIT_VTABLE(other)->trigger_notify(other, u);
1957 }
1958
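/* Central state change handler, called by the unit type implementations
 * whenever the low-level unit state changes. Updates timestamps, prunes the
 * cgroup on deactivation, completes or invalidates pending jobs, retroactively
 * starts/stops dependencies, triggers OnFailure= units and audit/plymouth
 * notifications, and re-checks StopWhenUnneeded= and BindsTo= constraints. */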
1959 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1960 Manager *m;
1961 bool unexpected;
1962
1963 assert(u);
1964 assert(os < _UNIT_ACTIVE_STATE_MAX);
1965 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1966
1967 /* Note that this is called for all low-level state changes,
1968 * even if they might map to the same high-level
1969 * UnitActiveState! That means that ns == os is an expected
1970 * behavior here. For example: if a mount point is remounted
1971 * this function will be called too! */
1972
1973 m = u->manager;
1974
1975 /* Update timestamps for state changes */
1976 if (!MANAGER_IS_RELOADING(m)) {
1977 dual_timestamp_get(&u->state_change_timestamp);
1978
1979 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1980 u->inactive_exit_timestamp = u->state_change_timestamp;
1981 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1982 u->inactive_enter_timestamp = u->state_change_timestamp;
1983
1984 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1985 u->active_enter_timestamp = u->state_change_timestamp;
1986 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1987 u->active_exit_timestamp = u->state_change_timestamp;
1988 }
1989
1990 /* Keep track of failed units */
1991 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1992
1993 /* Make sure the cgroup is always removed when we become inactive */
1994 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1995 unit_prune_cgroup(u);
1996
1997         /* Note that this doesn't apply to RemainAfterExit services exiting
1998          * successfully, since there's no change of state in that case,
1999          * which is why it is handled in service_set_state(). */
2000 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2001 ExecContext *ec;
2002
2003 ec = unit_get_exec_context(u);
2004 if (ec && exec_context_may_touch_console(ec)) {
2005 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2006 m->n_on_console--;
2007
2008 if (m->n_on_console == 0)
2009 /* unset no_console_output flag, since the console is free */
2010 m->no_console_output = false;
2011 } else
2012 m->n_on_console++;
2013 }
2014 }
2015
2016 if (u->job) {
2017 unexpected = false;
2018
2019 if (u->job->state == JOB_WAITING)
2020
2021 /* So we reached a different state for this
2022 * job. Let's see if we can run it now if it
2023 * failed previously due to EAGAIN. */
2024 job_add_to_run_queue(u->job);
2025
2026 /* Let's check whether this state change constitutes a
2027 * finished job, or maybe contradicts a running job and
2028 * hence needs to invalidate jobs. */
2029
2030 switch (u->job->type) {
2031
2032 case JOB_START:
2033 case JOB_VERIFY_ACTIVE:
2034
2035 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2036 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2037 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2038 unexpected = true;
2039
2040 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2041 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2042 }
2043
2044 break;
2045
2046 case JOB_RELOAD:
2047 case JOB_RELOAD_OR_START:
2048 case JOB_TRY_RELOAD:
2049
2050 if (u->job->state == JOB_RUNNING) {
2051 if (ns == UNIT_ACTIVE)
2052 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2053 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
2054 unexpected = true;
2055
2056 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2057 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2058 }
2059 }
2060
2061 break;
2062
2063 case JOB_STOP:
2064 case JOB_RESTART:
2065 case JOB_TRY_RESTART:
2066
2067 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2068 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2069 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2070 unexpected = true;
2071 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2072 }
2073
2074 break;
2075
2076 default:
2077 assert_not_reached("Job type unknown");
2078 }
2079
2080 } else
2081 unexpected = true;
2082
2083 if (!MANAGER_IS_RELOADING(m)) {
2084
2085 /* If this state change happened without being
2086 * requested by a job, then let's retroactively start
2087 * or stop dependencies. We skip that step when
2088 * deserializing, since we don't want to create any
2089 * additional jobs just because something is already
2090 * activated. */
2091
2092 if (unexpected) {
2093 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2094 retroactively_start_dependencies(u);
2095 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2096 retroactively_stop_dependencies(u);
2097 }
2098
2099 /* stop unneeded units regardless of whether going down was expected or not */
2100 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2101 check_unneeded_dependencies(u);
2102
2103 if (ns != os && ns == UNIT_FAILED) {
2104 log_unit_notice(u, "Unit entered failed state.");
2105 unit_start_on_failure(u);
2106 }
2107 }
2108
2109 /* Some names are special */
2110 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2111
2112 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2113 /* The bus might have just become available,
2114 * hence try to connect to it, if we aren't
2115 * yet connected. */
2116 bus_init(m, true);
2117
2118 if (u->type == UNIT_SERVICE &&
2119 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2120 !MANAGER_IS_RELOADING(m)) {
2121 /* Write audit record if we have just finished starting up */
2122 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2123 u->in_audit = true;
2124 }
2125
2126 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2127 manager_send_unit_plymouth(m, u);
2128
2129 } else {
2130
2131 /* We don't care about D-Bus here, since we'll get an
2132 * asynchronous notification for it anyway. */
2133
2134 if (u->type == UNIT_SERVICE &&
2135 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2136 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2137 !MANAGER_IS_RELOADING(m)) {
2138
2139 /* Hmm, if there was no start record written,
2140 * write it now, so that we always have a nice
2141 * pair. */
2142 if (!u->in_audit) {
2143 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2144
2145 if (ns == UNIT_INACTIVE)
2146 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2147 } else
2148 /* Write audit record if we have just finished shutting down */
2149 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2150
2151 u->in_audit = false;
2152 }
2153 }
2154
2155 manager_recheck_journal(m);
2156 unit_trigger_notify(u);
2157
2158 if (!MANAGER_IS_RELOADING(u->manager)) {
2159 /* Maybe we finished startup and are now ready to be
2160 * stopped because we are no longer needed? */
2161 unit_check_unneeded(u);
2162
2163 /* Maybe we finished startup, but something we needed
2164 * has vanished? Let's die then. (This happens when
2165 * something BindsTo= a Type=oneshot unit, as these
2166 * units go directly from starting to inactive,
2167 * without ever entering started.) */
2168 unit_check_binds_to(u);
2169 }
2170
2171 unit_add_to_dbus_queue(u);
2172 unit_add_to_gc_queue(u);
2173 }
2174
2175 int unit_watch_pid(Unit *u, pid_t pid) {
2176 int q, r;
2177
2178 assert(u);
2179 assert(pid >= 1);
2180
2181 /* Watch a specific PID. We only support one or two units
2182 * watching each PID for now, not more. */
2183
2184 r = set_ensure_allocated(&u->pids, NULL);
2185 if (r < 0)
2186 return r;
2187
2188 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2189 if (r < 0)
2190 return r;
2191
2192 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2193 if (r == -EEXIST) {
2194 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2195 if (r < 0)
2196 return r;
2197
2198 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2199 }
2200
2201 q = set_put(u->pids, PID_TO_PTR(pid));
2202 if (q < 0)
2203 return q;
2204
2205 return r;
2206 }
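
/* Illustrative note on the two maps used above: watch_pids1 maps each watched PID to a
 * single unit. If a second unit starts watching the same PID, hashmap_put() returns
 * -EEXIST and the entry goes into watch_pids2 instead, which is presumably why PID
 * lookups elsewhere in the manager have to consult both maps. */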
2207
2208 void unit_unwatch_pid(Unit *u, pid_t pid) {
2209 assert(u);
2210 assert(pid >= 1);
2211
2212 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2213 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2214 (void) set_remove(u->pids, PID_TO_PTR(pid));
2215 }
2216
2217 void unit_unwatch_all_pids(Unit *u) {
2218 assert(u);
2219
2220 while (!set_isempty(u->pids))
2221 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2222
2223 u->pids = set_free(u->pids);
2224 }
2225
2226 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2227 Iterator i;
2228 void *e;
2229
2230 assert(u);
2231
2232 /* Cleans dead PIDs from our list */
2233
2234 SET_FOREACH(e, u->pids, i) {
2235 pid_t pid = PTR_TO_PID(e);
2236
2237 if (pid == except1 || pid == except2)
2238 continue;
2239
2240 if (!pid_is_unwaited(pid))
2241 unit_unwatch_pid(u, pid);
2242 }
2243 }
2244
2245 bool unit_job_is_applicable(Unit *u, JobType j) {
2246 assert(u);
2247 assert(j >= 0 && j < _JOB_TYPE_MAX);
2248
2249 switch (j) {
2250
2251 case JOB_VERIFY_ACTIVE:
2252 case JOB_START:
2253 case JOB_NOP:
2254 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2255 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2256 * jobs for them. */
2257 return true;
2258
2259 case JOB_STOP:
2260 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2261 * external events), hence it makes no sense to permit enqueuing such a request either. */
2262 return !u->perpetual;
2263
2264 case JOB_RESTART:
2265 case JOB_TRY_RESTART:
2266 return unit_can_stop(u) && unit_can_start(u);
2267
2268 case JOB_RELOAD:
2269 case JOB_TRY_RELOAD:
2270 return unit_can_reload(u);
2271
2272 case JOB_RELOAD_OR_START:
2273 return unit_can_reload(u) && unit_can_start(u);
2274
2275 default:
2276 assert_not_reached("Invalid job type");
2277 }
2278 }
2279
2280 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2281 assert(u);
2282
2283 /* Only warn about some unit types */
2284 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2285 return;
2286
2287 if (streq_ptr(u->id, other))
2288 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2289 else
2290 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2291 }
2292
2293 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2294
2295 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2296 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2297 [UNIT_WANTS] = UNIT_WANTED_BY,
2298 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2299 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2300 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2301 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2302 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2303 [UNIT_WANTED_BY] = UNIT_WANTS,
2304 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2305 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2306 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2307 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2308 [UNIT_BEFORE] = UNIT_AFTER,
2309 [UNIT_AFTER] = UNIT_BEFORE,
2310 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2311 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2312 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2313 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2314 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2315 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2316 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2317 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2318 };
2319 int r, q = 0, v = 0, w = 0;
2320 Unit *orig_u = u, *orig_other = other;
2321
2322 assert(u);
2323 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2324 assert(other);
2325
2326 u = unit_follow_merge(u);
2327 other = unit_follow_merge(other);
2328
2329 /* We won't allow dependencies on ourselves. We will not
2330 * consider them an error, however. */
2331 if (u == other) {
2332 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2333 return 0;
2334 }
2335
2336 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2337 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2338 return 0;
2339 }
2340
2341 r = set_ensure_allocated(&u->dependencies[d], NULL);
2342 if (r < 0)
2343 return r;
2344
2345 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2346 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2347 if (r < 0)
2348 return r;
2349 }
2350
2351 if (add_reference) {
2352 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2353 if (r < 0)
2354 return r;
2355
2356 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2357 if (r < 0)
2358 return r;
2359 }
2360
2361 q = set_put(u->dependencies[d], other);
2362 if (q < 0)
2363 return q;
2364
2365 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2366 v = set_put(other->dependencies[inverse_table[d]], u);
2367 if (v < 0) {
2368 r = v;
2369 goto fail;
2370 }
2371 }
2372
2373 if (add_reference) {
2374 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2375 if (w < 0) {
2376 r = w;
2377 goto fail;
2378 }
2379
2380 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2381 if (r < 0)
2382 goto fail;
2383 }
2384
2385 unit_add_to_dbus_queue(u);
2386 return 0;
2387
2388 fail:
2389 if (q > 0)
2390 set_remove(u->dependencies[d], other);
2391
2392 if (v > 0)
2393 set_remove(other->dependencies[inverse_table[d]], u);
2394
2395 if (w > 0)
2396 set_remove(u->dependencies[UNIT_REFERENCES], other);
2397
2398 return r;
2399 }
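
/* Illustrative sketch (hypothetical units a and b): a call like
 *
 *     unit_add_dependency(a, UNIT_WANTS, b, true);
 *
 * records b in a->dependencies[UNIT_WANTS], a in b->dependencies[UNIT_WANTED_BY] (per
 * inverse_table above), and, because add_reference is true, additionally links the two
 * units via UNIT_REFERENCES/UNIT_REFERENCED_BY as well. */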
2400
2401 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2402 int r;
2403
2404 assert(u);
2405
2406 r = unit_add_dependency(u, d, other, add_reference);
2407 if (r < 0)
2408 return r;
2409
2410 return unit_add_dependency(u, e, other, add_reference);
2411 }
2412
2413 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2414 int r;
2415
2416 assert(u);
2417 assert(name || path);
2418 assert(buf);
2419 assert(ret);
2420
2421 if (!name)
2422 name = basename(path);
2423
2424 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2425 *buf = NULL;
2426 *ret = name;
2427 return 0;
2428 }
2429
2430 if (u->instance)
2431 r = unit_name_replace_instance(name, u->instance, buf);
2432 else {
2433 _cleanup_free_ char *i = NULL;
2434
2435 r = unit_name_to_prefix(u->id, &i);
2436 if (r < 0)
2437 return r;
2438
2439 r = unit_name_replace_instance(name, i, buf);
2440 }
2441 if (r < 0)
2442 return r;
2443
2444 *ret = *buf;
2445 return 0;
2446 }
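
/* A brief example of the resolution above (hypothetical names): for a unit with instance
 * "tty1", a template dependency name like "getty@.service" resolves to "getty@tty1.service"
 * (allocated into *buf); a non-template name such as "dbus.service" is passed through
 * unchanged and *buf stays NULL. */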
2447
2448 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2449 _cleanup_free_ char *buf = NULL;
2450 Unit *other;
2451 int r;
2452
2453 assert(u);
2454 assert(name || path);
2455
2456 r = resolve_template(u, name, path, &buf, &name);
2457 if (r < 0)
2458 return r;
2459
2460 r = manager_load_unit(u->manager, name, path, NULL, &other);
2461 if (r < 0)
2462 return r;
2463
2464 return unit_add_dependency(u, d, other, add_reference);
2465 }
2466
2467 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2468 _cleanup_free_ char *buf = NULL;
2469 Unit *other;
2470 int r;
2471
2472 assert(u);
2473 assert(name || path);
2474
2475 r = resolve_template(u, name, path, &buf, &name);
2476 if (r < 0)
2477 return r;
2478
2479 r = manager_load_unit(u->manager, name, path, NULL, &other);
2480 if (r < 0)
2481 return r;
2482
2483 return unit_add_two_dependencies(u, d, e, other, add_reference);
2484 }
2485
2486 int set_unit_path(const char *p) {
2487 /* This is mostly for debug purposes */
2488 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2489 return -errno;
2490
2491 return 0;
2492 }
2493
2494 char *unit_dbus_path(Unit *u) {
2495 assert(u);
2496
2497 if (!u->id)
2498 return NULL;
2499
2500 return unit_dbus_path_from_name(u->id);
2501 }
2502
2503 char *unit_dbus_path_invocation_id(Unit *u) {
2504 assert(u);
2505
2506 if (sd_id128_is_null(u->invocation_id))
2507 return NULL;
2508
2509 return unit_dbus_path_from_name(u->invocation_id_string);
2510 }
2511
2512 int unit_set_slice(Unit *u, Unit *slice) {
2513 assert(u);
2514 assert(slice);
2515
2516 /* Sets the unit slice if it has not been set before. Is extra
2517 * careful to only allow this for units that actually have a
2518 * cgroup context. Also, we don't allow setting this for slices
2519 * (since the parent slice is derived from the name). Make
2520 * sure the unit we set is actually a slice. */
2521
2522 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2523 return -EOPNOTSUPP;
2524
2525 if (u->type == UNIT_SLICE)
2526 return -EINVAL;
2527
2528 if (unit_active_state(u) != UNIT_INACTIVE)
2529 return -EBUSY;
2530
2531 if (slice->type != UNIT_SLICE)
2532 return -EINVAL;
2533
2534 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2535 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2536 return -EPERM;
2537
2538 if (UNIT_DEREF(u->slice) == slice)
2539 return 0;
2540
2541 /* Disallow slice changes if @u is already bound to cgroups */
2542 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2543 return -EBUSY;
2544
2545 unit_ref_unset(&u->slice);
2546 unit_ref_set(&u->slice, slice);
2547 return 1;
2548 }
2549
2550 int unit_set_default_slice(Unit *u) {
2551 _cleanup_free_ char *b = NULL;
2552 const char *slice_name;
2553 Unit *slice;
2554 int r;
2555
2556 assert(u);
2557
2558 if (UNIT_ISSET(u->slice))
2559 return 0;
2560
2561 if (u->instance) {
2562 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2563
2564 /* Implicitly place all instantiated units in their
2565 * own per-template slice */
2566
2567 r = unit_name_to_prefix(u->id, &prefix);
2568 if (r < 0)
2569 return r;
2570
2571 /* The prefix is already escaped, but it might include
2572 * "-" which has a special meaning for slice units,
2573 * hence escape it once more here. */
2574 escaped = unit_name_escape(prefix);
2575 if (!escaped)
2576 return -ENOMEM;
2577
2578 if (MANAGER_IS_SYSTEM(u->manager))
2579 b = strjoin("system-", escaped, ".slice");
2580 else
2581 b = strappend(escaped, ".slice");
2582 if (!b)
2583 return -ENOMEM;
2584
2585 slice_name = b;
2586 } else
2587 slice_name =
2588 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2589 ? SPECIAL_SYSTEM_SLICE
2590 : SPECIAL_ROOT_SLICE;
2591
2592 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2593 if (r < 0)
2594 return r;
2595
2596 return unit_set_slice(u, slice);
2597 }
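
/* Illustrative examples of the default slice chosen above (hypothetical unit names): an
 * instantiated unit "foo@bar.service" lands in "system-foo.slice" on the system manager
 * and in "foo.slice" on a user manager; non-instantiated system units fall back to
 * system.slice, while init.scope and units of user managers fall back to -.slice. */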
2598
2599 const char *unit_slice_name(Unit *u) {
2600 assert(u);
2601
2602 if (!UNIT_ISSET(u->slice))
2603 return NULL;
2604
2605 return UNIT_DEREF(u->slice)->id;
2606 }
2607
2608 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2609 _cleanup_free_ char *t = NULL;
2610 int r;
2611
2612 assert(u);
2613 assert(type);
2614 assert(_found);
2615
2616 r = unit_name_change_suffix(u->id, type, &t);
2617 if (r < 0)
2618 return r;
2619 if (unit_has_name(u, t))
2620 return -EINVAL;
2621
2622 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2623 assert(r < 0 || *_found != u);
2624 return r;
2625 }
2626
2627 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2628 const char *name, *old_owner, *new_owner;
2629 Unit *u = userdata;
2630 int r;
2631
2632 assert(message);
2633 assert(u);
2634
2635 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2636 if (r < 0) {
2637 bus_log_parse_error(r);
2638 return 0;
2639 }
2640
2641 if (UNIT_VTABLE(u)->bus_name_owner_change)
2642 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2643
2644 return 0;
2645 }
2646
2647 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2648 const char *match;
2649
2650 assert(u);
2651 assert(bus);
2652 assert(name);
2653
2654 if (u->match_bus_slot)
2655 return -EBUSY;
2656
2657 match = strjoina("type='signal',"
2658 "sender='org.freedesktop.DBus',"
2659 "path='/org/freedesktop/DBus',"
2660 "interface='org.freedesktop.DBus',"
2661 "member='NameOwnerChanged',"
2662 "arg0='", name, "'");
2663
2664 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2665 }
2666
2667 int unit_watch_bus_name(Unit *u, const char *name) {
2668 int r;
2669
2670 assert(u);
2671 assert(name);
2672
2673 /* Watch a specific name on the bus. We only support one unit
2674 * watching each name for now. */
2675
2676 if (u->manager->api_bus) {
2677 /* If the bus is already available, install the match directly.
2678 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
2679 r = unit_install_bus_match(u, u->manager->api_bus, name);
2680 if (r < 0)
2681 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2682 }
2683
2684 r = hashmap_put(u->manager->watch_bus, name, u);
2685 if (r < 0) {
2686 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2687 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
2688 }
2689
2690 return 0;
2691 }
2692
2693 void unit_unwatch_bus_name(Unit *u, const char *name) {
2694 assert(u);
2695 assert(name);
2696
2697 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
2698 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2699 }
2700
2701 bool unit_can_serialize(Unit *u) {
2702 assert(u);
2703
2704 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2705 }
2706
2707 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
2708 _cleanup_free_ char *s = NULL;
2709 int r = 0;
2710
2711 assert(f);
2712 assert(key);
2713
2714 if (mask != 0) {
2715 r = cg_mask_to_string(mask, &s);
2716 if (r >= 0) {
2717 fputs(key, f);
2718 fputc('=', f);
2719 fputs(s, f);
2720 fputc('\n', f);
2721 }
2722 }
2723 return r;
2724 }
2725
2726 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2727 int r;
2728
2729 assert(u);
2730 assert(f);
2731 assert(fds);
2732
2733 if (unit_can_serialize(u)) {
2734 ExecRuntime *rt;
2735
2736 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2737 if (r < 0)
2738 return r;
2739
2740 rt = unit_get_exec_runtime(u);
2741 if (rt) {
2742 r = exec_runtime_serialize(u, rt, f, fds);
2743 if (r < 0)
2744 return r;
2745 }
2746 }
2747
2748 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2749
2750 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2751 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2752 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2753 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2754
2755 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2756 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2757
2758 if (dual_timestamp_is_set(&u->condition_timestamp))
2759 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2760
2761 if (dual_timestamp_is_set(&u->assert_timestamp))
2762 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2763
2764 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2765
2766 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2767 if (u->cpu_usage_last != NSEC_INFINITY)
2768 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2769
2770 if (u->cgroup_path)
2771 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2772 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2773 (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
2774 (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
2775
2776 if (uid_is_valid(u->ref_uid))
2777 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2778 if (gid_is_valid(u->ref_gid))
2779 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2780
2781 if (!sd_id128_is_null(u->invocation_id))
2782 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
2783
2784 bus_track_serialize(u->bus_track, f, "ref");
2785
2786 if (serialize_jobs) {
2787 if (u->job) {
2788 fprintf(f, "job\n");
2789 job_serialize(u->job, f);
2790 }
2791
2792 if (u->nop_job) {
2793 fprintf(f, "job\n");
2794 job_serialize(u->nop_job, f);
2795 }
2796 }
2797
2798 /* End marker */
2799 fputc('\n', f);
2800 return 0;
2801 }
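
/* For illustration, the stream written above is a flat sequence of key=value lines
 * followed by an empty line as end marker, roughly like this (values are hypothetical):
 *
 *     condition-result=yes
 *     transient=no
 *     cpu-usage-base=0
 *     cgroup=/system.slice/foo.service
 *     cgroup-realized=yes
 *
 * unit_deserialize() below reads this back by splitting each line at the first '='. */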
2802
2803 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2804 assert(u);
2805 assert(f);
2806 assert(key);
2807
2808 if (!value)
2809 return 0;
2810
2811 fputs(key, f);
2812 fputc('=', f);
2813 fputs(value, f);
2814 fputc('\n', f);
2815
2816 return 1;
2817 }
2818
2819 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2820 _cleanup_free_ char *c = NULL;
2821
2822 assert(u);
2823 assert(f);
2824 assert(key);
2825
2826 if (!value)
2827 return 0;
2828
2829 c = cescape(value);
2830 if (!c)
2831 return -ENOMEM;
2832
2833 fputs(key, f);
2834 fputc('=', f);
2835 fputs(c, f);
2836 fputc('\n', f);
2837
2838 return 1;
2839 }
2840
2841 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2842 int copy;
2843
2844 assert(u);
2845 assert(f);
2846 assert(key);
2847
2848 if (fd < 0)
2849 return 0;
2850
2851 copy = fdset_put_dup(fds, fd);
2852 if (copy < 0)
2853 return copy;
2854
2855 fprintf(f, "%s=%i\n", key, copy);
2856 return 1;
2857 }
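
/* Note, for illustration: the value serialized here is the number of the duplicated fd as
 * stored in the FDSet that is passed across reexecution, so a line might look like
 * "socket-fd=27" (key and number hypothetical). Type-specific deserializers are then
 * expected to look that number up in the FDSet handed to unit_deserialize(). */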
2858
2859 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2860 va_list ap;
2861
2862 assert(u);
2863 assert(f);
2864 assert(key);
2865 assert(format);
2866
2867 fputs(key, f);
2868 fputc('=', f);
2869
2870 va_start(ap, format);
2871 vfprintf(f, format, ap);
2872 va_end(ap);
2873
2874 fputc('\n', f);
2875 }
2876
2877 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2878 ExecRuntime **rt = NULL;
2879 size_t offset;
2880 int r;
2881
2882 assert(u);
2883 assert(f);
2884 assert(fds);
2885
2886 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2887 if (offset > 0)
2888 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2889
2890 for (;;) {
2891 char line[LINE_MAX], *l, *v;
2892 size_t k;
2893
2894 if (!fgets(line, sizeof(line), f)) {
2895 if (feof(f))
2896 return 0;
2897 return -errno;
2898 }
2899
2900 char_array_0(line);
2901 l = strstrip(line);
2902
2903 /* End marker */
2904 if (isempty(l))
2905 break;
2906
2907 k = strcspn(l, "=");
2908
2909 if (l[k] == '=') {
2910 l[k] = 0;
2911 v = l+k+1;
2912 } else
2913 v = l+k;
2914
2915 if (streq(l, "job")) {
2916 if (v[0] == '\0') {
2917 /* new-style serialized job */
2918 Job *j;
2919
2920 j = job_new_raw(u);
2921 if (!j)
2922 return log_oom();
2923
2924 r = job_deserialize(j, f);
2925 if (r < 0) {
2926 job_free(j);
2927 return r;
2928 }
2929
2930 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2931 if (r < 0) {
2932 job_free(j);
2933 return r;
2934 }
2935
2936 r = job_install_deserialized(j);
2937 if (r < 0) {
2938 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2939 job_free(j);
2940 return r;
2941 }
2942 } else /* legacy for pre-44 */
2943 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2944 continue;
2945 } else if (streq(l, "state-change-timestamp")) {
2946 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2947 continue;
2948 } else if (streq(l, "inactive-exit-timestamp")) {
2949 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2950 continue;
2951 } else if (streq(l, "active-enter-timestamp")) {
2952 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2953 continue;
2954 } else if (streq(l, "active-exit-timestamp")) {
2955 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2956 continue;
2957 } else if (streq(l, "inactive-enter-timestamp")) {
2958 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2959 continue;
2960 } else if (streq(l, "condition-timestamp")) {
2961 dual_timestamp_deserialize(v, &u->condition_timestamp);
2962 continue;
2963 } else if (streq(l, "assert-timestamp")) {
2964 dual_timestamp_deserialize(v, &u->assert_timestamp);
2965 continue;
2966 } else if (streq(l, "condition-result")) {
2967
2968 r = parse_boolean(v);
2969 if (r < 0)
2970 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2971 else
2972 u->condition_result = r;
2973
2974 continue;
2975
2976 } else if (streq(l, "assert-result")) {
2977
2978 r = parse_boolean(v);
2979 if (r < 0)
2980 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2981 else
2982 u->assert_result = r;
2983
2984 continue;
2985
2986 } else if (streq(l, "transient")) {
2987
2988 r = parse_boolean(v);
2989 if (r < 0)
2990 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2991 else
2992 u->transient = r;
2993
2994 continue;
2995
2996 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2997
2998 r = safe_atou64(v, &u->cpu_usage_base);
2999 if (r < 0)
3000 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3001
3002 continue;
3003
3004 } else if (streq(l, "cpu-usage-last")) {
3005
3006 r = safe_atou64(v, &u->cpu_usage_last);
3007 if (r < 0)
3008 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3009
3010 continue;
3011
3012 } else if (streq(l, "cgroup")) {
3013
3014 r = unit_set_cgroup_path(u, v);
3015 if (r < 0)
3016 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3017
3018 (void) unit_watch_cgroup(u);
3019
3020 continue;
3021 } else if (streq(l, "cgroup-realized")) {
3022 int b;
3023
3024 b = parse_boolean(v);
3025 if (b < 0)
3026 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3027 else
3028 u->cgroup_realized = b;
3029
3030 continue;
3031
3032 } else if (streq(l, "cgroup-realized-mask")) {
3033
3034 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3035 if (r < 0)
3036 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3037 continue;
3038
3039 } else if (streq(l, "cgroup-enabled-mask")) {
3040
3041 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3042 if (r < 0)
3043 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3044 continue;
3045
3046 } else if (streq(l, "ref-uid")) {
3047 uid_t uid;
3048
3049 r = parse_uid(v, &uid);
3050 if (r < 0)
3051 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3052 else
3053 unit_ref_uid_gid(u, uid, GID_INVALID);
3054
3055 continue;
3056
3057 } else if (streq(l, "ref-gid")) {
3058 gid_t gid;
3059
3060 r = parse_gid(v, &gid);
3061 if (r < 0)
3062 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3063 else
3064 unit_ref_uid_gid(u, UID_INVALID, gid);
3065 continue;
3066 } else if (streq(l, "ref")) {
3067
3068 r = strv_extend(&u->deserialized_refs, v);
3069 if (r < 0)
3070 log_oom();
3071
3072 continue;
3073 } else if (streq(l, "invocation-id")) {
3074 sd_id128_t id;
3075
3076 r = sd_id128_from_string(v, &id);
3077 if (r < 0)
3078 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3079 else {
3080 r = unit_set_invocation_id(u, id);
3081 if (r < 0)
3082 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3083 }
3084
3085 continue;
3086 }
3087
3088 if (unit_can_serialize(u)) {
3089 if (rt) {
3090 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3091 if (r < 0) {
3092 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3093 continue;
3094 }
3095
3096 /* Returns positive if key was handled by the call */
3097 if (r > 0)
3098 continue;
3099 }
3100
3101 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3102 if (r < 0)
3103 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3104 }
3105 }
3106
3107 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3108 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3109 * before 228 where the base for timeouts was not persistent across reboots. */
3110
3111 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3112 dual_timestamp_get(&u->state_change_timestamp);
3113
3114 return 0;
3115 }
3116
3117 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
3118 Unit *device;
3119 _cleanup_free_ char *e = NULL;
3120 int r;
3121
3122 assert(u);
3123
3124 /* Adds in links to the device node that this unit is based on */
3125 if (isempty(what))
3126 return 0;
3127
3128 if (!is_device_path(what))
3129 return 0;
3130
3131 /* When device units aren't supported (such as in a
3132 * container), don't create dependencies on them. */
3133 if (!unit_type_supported(UNIT_DEVICE))
3134 return 0;
3135
3136 r = unit_name_from_path(what, ".device", &e);
3137 if (r < 0)
3138 return r;
3139
3140 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3141 if (r < 0)
3142 return r;
3143
3144 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3145 dep = UNIT_BINDS_TO;
3146
3147 r = unit_add_two_dependencies(u, UNIT_AFTER,
3148 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3149 device, true);
3150 if (r < 0)
3151 return r;
3152
3153 if (wants) {
3154 r = unit_add_dependency(device, UNIT_WANTS, u, false);
3155 if (r < 0)
3156 return r;
3157 }
3158
3159 return 0;
3160 }
3161
3162 int unit_coldplug(Unit *u) {
3163 int r = 0, q;
3164 char **i;
3165
3166 assert(u);
3167
3168 /* Make sure we don't enter a loop when coldplugging
3169 * recursively. */
3170 if (u->coldplugged)
3171 return 0;
3172
3173 u->coldplugged = true;
3174
3175 STRV_FOREACH(i, u->deserialized_refs) {
3176 q = bus_unit_track_add_name(u, *i);
3177 if (q < 0 && r >= 0)
3178 r = q;
3179 }
3180 u->deserialized_refs = strv_free(u->deserialized_refs);
3181
3182 if (UNIT_VTABLE(u)->coldplug) {
3183 q = UNIT_VTABLE(u)->coldplug(u);
3184 if (q < 0 && r >= 0)
3185 r = q;
3186 }
3187
3188 if (u->job) {
3189 q = job_coldplug(u->job);
3190 if (q < 0 && r >= 0)
3191 r = q;
3192 }
3193
3194 return r;
3195 }
3196
3197 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3198 struct stat st;
3199
3200 if (!path)
3201 return false;
3202
3203 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3204 * are never out-of-date. */
3205 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3206 return false;
3207
3208 if (stat(path, &st) < 0)
3209 /* What, cannot access this anymore? */
3210 return true;
3211
3212 if (path_masked)
3213 /* For masked files check if they are still so */
3214 return !null_or_empty(&st);
3215 else
3216 /* For non-empty files check the mtime */
3217 return timespec_load(&st.st_mtim) > mtime;
3218
3219 return false;
3220 }
3221
3222 bool unit_need_daemon_reload(Unit *u) {
3223 _cleanup_strv_free_ char **t = NULL;
3224 char **path;
3225
3226 assert(u);
3227
3228 /* For unit files, we allow masking… */
3229 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3230 u->load_state == UNIT_MASKED))
3231 return true;
3232
3233 /* Source paths should not be masked… */
3234 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3235 return true;
3236
3237 (void) unit_find_dropin_paths(u, &t);
3238 if (!strv_equal(u->dropin_paths, t))
3239 return true;
3240
3241 /* … any drop-ins that are masked are simply omitted from the list. */
3242 STRV_FOREACH(path, u->dropin_paths)
3243 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3244 return true;
3245
3246 return false;
3247 }
3248
3249 void unit_reset_failed(Unit *u) {
3250 assert(u);
3251
3252 if (UNIT_VTABLE(u)->reset_failed)
3253 UNIT_VTABLE(u)->reset_failed(u);
3254
3255 RATELIMIT_RESET(u->start_limit);
3256 u->start_limit_hit = false;
3257 }
3258
3259 Unit *unit_following(Unit *u) {
3260 assert(u);
3261
3262 if (UNIT_VTABLE(u)->following)
3263 return UNIT_VTABLE(u)->following(u);
3264
3265 return NULL;
3266 }
3267
3268 bool unit_stop_pending(Unit *u) {
3269 assert(u);
3270
3271 /* This call does not check the current state of the unit. It's
3272 * hence useful to be called from state change calls of the
3273 * unit itself, where the state isn't updated yet. This is
3274 * different from unit_inactive_or_pending() which checks both
3275 * the current state and for a queued job. */
3276
3277 return u->job && u->job->type == JOB_STOP;
3278 }
3279
3280 bool unit_inactive_or_pending(Unit *u) {
3281 assert(u);
3282
3283 /* Returns true if the unit is inactive or going down */
3284
3285 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3286 return true;
3287
3288 if (unit_stop_pending(u))
3289 return true;
3290
3291 return false;
3292 }
3293
3294 bool unit_active_or_pending(Unit *u) {
3295 assert(u);
3296
3297 /* Returns true if the unit is active or going up */
3298
3299 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3300 return true;
3301
3302 if (u->job &&
3303 (u->job->type == JOB_START ||
3304 u->job->type == JOB_RELOAD_OR_START ||
3305 u->job->type == JOB_RESTART))
3306 return true;
3307
3308 return false;
3309 }
3310
3311 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3312 assert(u);
3313 assert(w >= 0 && w < _KILL_WHO_MAX);
3314 assert(SIGNAL_VALID(signo));
3315
3316 if (!UNIT_VTABLE(u)->kill)
3317 return -EOPNOTSUPP;
3318
3319 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3320 }
3321
3322 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3323 Set *pid_set;
3324 int r;
3325
3326 pid_set = set_new(NULL);
3327 if (!pid_set)
3328 return NULL;
3329
3330 /* Exclude the main/control pids from being killed via the cgroup */
3331 if (main_pid > 0) {
3332 r = set_put(pid_set, PID_TO_PTR(main_pid));
3333 if (r < 0)
3334 goto fail;
3335 }
3336
3337 if (control_pid > 0) {
3338 r = set_put(pid_set, PID_TO_PTR(control_pid));
3339 if (r < 0)
3340 goto fail;
3341 }
3342
3343 return pid_set;
3344
3345 fail:
3346 set_free(pid_set);
3347 return NULL;
3348 }
3349
3350 int unit_kill_common(
3351 Unit *u,
3352 KillWho who,
3353 int signo,
3354 pid_t main_pid,
3355 pid_t control_pid,
3356 sd_bus_error *error) {
3357
3358 int r = 0;
3359 bool killed = false;
3360
3361 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3362 if (main_pid < 0)
3363 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3364 else if (main_pid == 0)
3365 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3366 }
3367
3368 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3369 if (control_pid < 0)
3370 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3371 else if (control_pid == 0)
3372 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3373 }
3374
3375 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3376 if (control_pid > 0) {
3377 if (kill(control_pid, signo) < 0)
3378 r = -errno;
3379 else
3380 killed = true;
3381 }
3382
3383 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3384 if (main_pid > 0) {
3385 if (kill(main_pid, signo) < 0)
3386 r = -errno;
3387 else
3388 killed = true;
3389 }
3390
3391 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3392 _cleanup_set_free_ Set *pid_set = NULL;
3393 int q;
3394
3395 /* Exclude the main/control pids from being killed via the cgroup */
3396 pid_set = unit_pid_set(main_pid, control_pid);
3397 if (!pid_set)
3398 return -ENOMEM;
3399
3400 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3401 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3402 r = q;
3403 else
3404 killed = true;
3405 }
3406
3407 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3408 return -ESRCH;
3409
3410 return r;
3411 }
3412
3413 int unit_following_set(Unit *u, Set **s) {
3414 assert(u);
3415 assert(s);
3416
3417 if (UNIT_VTABLE(u)->following_set)
3418 return UNIT_VTABLE(u)->following_set(u, s);
3419
3420 *s = NULL;
3421 return 0;
3422 }
3423
3424 UnitFileState unit_get_unit_file_state(Unit *u) {
3425 int r;
3426
3427 assert(u);
3428
3429 if (u->unit_file_state < 0 && u->fragment_path) {
3430 r = unit_file_get_state(
3431 u->manager->unit_file_scope,
3432 NULL,
3433 basename(u->fragment_path),
3434 &u->unit_file_state);
3435 if (r < 0)
3436 u->unit_file_state = UNIT_FILE_BAD;
3437 }
3438
3439 return u->unit_file_state;
3440 }
3441
3442 int unit_get_unit_file_preset(Unit *u) {
3443 assert(u);
3444
3445 if (u->unit_file_preset < 0 && u->fragment_path)
3446 u->unit_file_preset = unit_file_query_preset(
3447 u->manager->unit_file_scope,
3448 NULL,
3449 basename(u->fragment_path));
3450
3451 return u->unit_file_preset;
3452 }
3453
3454 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3455 assert(ref);
3456 assert(u);
3457
3458 if (ref->unit)
3459 unit_ref_unset(ref);
3460
3461 ref->unit = u;
3462 LIST_PREPEND(refs, u->refs, ref);
3463 return u;
3464 }
3465
3466 void unit_ref_unset(UnitRef *ref) {
3467 assert(ref);
3468
3469 if (!ref->unit)
3470 return;
3471
3472 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3473 * be unreferenced now. */
3474 unit_add_to_gc_queue(ref->unit);
3475
3476 LIST_REMOVE(refs, ref->unit->refs, ref);
3477 ref->unit = NULL;
3478 }
3479
3480 static int user_from_unit_name(Unit *u, char **ret) {
3481
3482 static const uint8_t hash_key[] = {
3483 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3484 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3485 };
3486
3487 _cleanup_free_ char *n = NULL;
3488 int r;
3489
3490 r = unit_name_to_prefix(u->id, &n);
3491 if (r < 0)
3492 return r;
3493
3494 if (valid_user_group_name(n)) {
3495 *ret = n;
3496 n = NULL;
3497 return 0;
3498 }
3499
3500 /* If we can't use the unit name as a user name, then let's hash it and use that */
3501 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3502 return -ENOMEM;
3503
3504 return 0;
3505 }
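
/* Illustrative examples (hypothetical unit names): for "foo-daemon.service" the prefix
 * "foo-daemon" is a valid user/group name and is returned as-is; a prefix that does not
 * qualify as a user name is hashed instead, yielding a synthetic name along the lines of
 * "_du89c211be5ea0b915". */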
3506
3507 int unit_patch_contexts(Unit *u) {
3508 CGroupContext *cc;
3509 ExecContext *ec;
3510 unsigned i;
3511 int r;
3512
3513 assert(u);
3514
3515 /* Patch in the manager defaults into the exec and cgroup
3516 * contexts, _after_ the rest of the settings have been
3517 * initialized */
3518
3519 ec = unit_get_exec_context(u);
3520 if (ec) {
3521 /* This only copies in the ones that need memory */
3522 for (i = 0; i < _RLIMIT_MAX; i++)
3523 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3524 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3525 if (!ec->rlimit[i])
3526 return -ENOMEM;
3527 }
3528
3529 if (MANAGER_IS_USER(u->manager) &&
3530 !ec->working_directory) {
3531
3532 r = get_home_dir(&ec->working_directory);
3533 if (r < 0)
3534 return r;
3535
3536 /* Allow user services to run, even if the
3537 * home directory is missing */
3538 ec->working_directory_missing_ok = true;
3539 }
3540
3541 if (ec->private_devices)
3542 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
3543
3544 if (ec->protect_kernel_modules)
3545 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
3546
3547 if (ec->dynamic_user) {
3548 if (!ec->user) {
3549 r = user_from_unit_name(u, &ec->user);
3550 if (r < 0)
3551 return r;
3552 }
3553
3554 if (!ec->group) {
3555 ec->group = strdup(ec->user);
3556 if (!ec->group)
3557 return -ENOMEM;
3558 }
3559
3560 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3561 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3562
3563 ec->private_tmp = true;
3564 ec->remove_ipc = true;
3565 ec->protect_system = PROTECT_SYSTEM_STRICT;
3566 if (ec->protect_home == PROTECT_HOME_NO)
3567 ec->protect_home = PROTECT_HOME_READ_ONLY;
3568 }
3569 }
3570
3571 cc = unit_get_cgroup_context(u);
3572 if (cc) {
3573
3574 if (ec &&
3575 ec->private_devices &&
3576 cc->device_policy == CGROUP_AUTO)
3577 cc->device_policy = CGROUP_CLOSED;
3578 }
3579
3580 return 0;
3581 }
3582
3583 ExecContext *unit_get_exec_context(Unit *u) {
3584 size_t offset;
3585 assert(u);
3586
3587 if (u->type < 0)
3588 return NULL;
3589
3590 offset = UNIT_VTABLE(u)->exec_context_offset;
3591 if (offset <= 0)
3592 return NULL;
3593
3594 return (ExecContext*) ((uint8_t*) u + offset);
3595 }
3596
3597 KillContext *unit_get_kill_context(Unit *u) {
3598 size_t offset;
3599 assert(u);
3600
3601 if (u->type < 0)
3602 return NULL;
3603
3604 offset = UNIT_VTABLE(u)->kill_context_offset;
3605 if (offset <= 0)
3606 return NULL;
3607
3608 return (KillContext*) ((uint8_t*) u + offset);
3609 }
3610
3611 CGroupContext *unit_get_cgroup_context(Unit *u) {
3612 size_t offset;
3613
3614 if (u->type < 0)
3615 return NULL;
3616
3617 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3618 if (offset <= 0)
3619 return NULL;
3620
3621 return (CGroupContext*) ((uint8_t*) u + offset);
3622 }
3623
3624 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3625 size_t offset;
3626
3627 if (u->type < 0)
3628 return NULL;
3629
3630 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3631 if (offset <= 0)
3632 return NULL;
3633
3634 return *(ExecRuntime**) ((uint8_t*) u + offset);
3635 }
3636
3637 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3638 assert(u);
3639
3640 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3641 return NULL;
3642
3643 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3644 return u->manager->lookup_paths.transient;
3645
3646 if (mode == UNIT_RUNTIME)
3647 return u->manager->lookup_paths.runtime_control;
3648
3649 if (mode == UNIT_PERSISTENT)
3650 return u->manager->lookup_paths.persistent_control;
3651
3652 return NULL;
3653 }
3654
3655 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3656 _cleanup_free_ char *p = NULL, *q = NULL;
3657 const char *dir, *wrapped;
3658 int r;
3659
3660 assert(u);
3661
3662 if (u->transient_file) {
3663 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3664 * write to the transient unit file. */
3665 fputs(data, u->transient_file);
3666 fputc('\n', u->transient_file);
3667 return 0;
3668 }
3669
3670 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3671 return 0;
3672
3673 dir = unit_drop_in_dir(u, mode);
3674 if (!dir)
3675 return -EINVAL;
3676
3677 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3678 "# or an equivalent operation. Do not edit.\n",
3679 data,
3680 "\n");
3681
3682 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3683 if (r < 0)
3684 return r;
3685
3686 (void) mkdir_p(p, 0755);
3687 r = write_string_file_atomic_label(q, wrapped);
3688 if (r < 0)
3689 return r;
3690
3691 r = strv_push(&u->dropin_paths, q);
3692 if (r < 0)
3693 return r;
3694 q = NULL;
3695
3696 strv_uniq(u->dropin_paths);
3697
3698 u->dropin_mtime = now(CLOCK_REALTIME);
3699
3700 return 0;
3701 }
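
/* Usage sketch (hypothetical call; the exact directory depends on the lookup paths set up
 * for this manager): for an already loaded unit, something like
 *
 *     (void) unit_write_drop_in(u, UNIT_RUNTIME, "CPUShares", "[Service]\nCPUShares=512");
 *
 * creates a drop-in such as <runtime control dir>/<unit>.d/50-CPUShares.conf containing
 * the warning header plus the given data, and appends the new path to u->dropin_paths. */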
3702
3703 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3704 _cleanup_free_ char *p = NULL;
3705 va_list ap;
3706 int r;
3707
3708 assert(u);
3709 assert(name);
3710 assert(format);
3711
3712 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3713 return 0;
3714
3715 va_start(ap, format);
3716 r = vasprintf(&p, format, ap);
3717 va_end(ap);
3718
3719 if (r < 0)
3720 return -ENOMEM;
3721
3722 return unit_write_drop_in(u, mode, name, p);
3723 }
3724
3725 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3726 const char *ndata;
3727
3728 assert(u);
3729 assert(name);
3730 assert(data);
3731
3732 if (!UNIT_VTABLE(u)->private_section)
3733 return -EINVAL;
3734
3735 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3736 return 0;
3737
3738 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3739
3740 return unit_write_drop_in(u, mode, name, ndata);
3741 }
3742
3743 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3744 _cleanup_free_ char *p = NULL;
3745 va_list ap;
3746 int r;
3747
3748 assert(u);
3749 assert(name);
3750 assert(format);
3751
3752 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3753 return 0;
3754
3755 va_start(ap, format);
3756 r = vasprintf(&p, format, ap);
3757 va_end(ap);
3758
3759 if (r < 0)
3760 return -ENOMEM;
3761
3762 return unit_write_drop_in_private(u, mode, name, p);
3763 }
3764
3765 int unit_make_transient(Unit *u) {
3766 FILE *f;
3767 char *path;
3768
3769 assert(u);
3770
3771 if (!UNIT_VTABLE(u)->can_transient)
3772 return -EOPNOTSUPP;
3773
3774 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
3775 if (!path)
3776 return -ENOMEM;
3777
3778 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3779 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3780
3781 RUN_WITH_UMASK(0022) {
3782 f = fopen(path, "we");
3783 if (!f) {
3784 free(path);
3785 return -errno;
3786 }
3787 }
3788
3789 if (u->transient_file)
3790 fclose(u->transient_file);
3791 u->transient_file = f;
3792
3793 free(u->fragment_path);
3794 u->fragment_path = path;
3795
3796 u->source_path = mfree(u->source_path);
3797 u->dropin_paths = strv_free(u->dropin_paths);
3798 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3799
3800 u->load_state = UNIT_STUB;
3801 u->load_error = 0;
3802 u->transient = true;
3803
3804 unit_add_to_dbus_queue(u);
3805 unit_add_to_gc_queue(u);
3806
3807 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3808 u->transient_file);
3809
3810 return 0;
3811 }
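
/* Sketch of the transient-unit flow (a summary of the mechanism visible in this file):
 * unit_make_transient() opens <transient dir>/<unit id> as the new fragment and keeps it
 * in u->transient_file; while that file is open, unit_write_drop_in() above appends
 * property data directly to it instead of creating drop-ins; the file is closed again in
 * unit_load() once the unit is actually loaded. Tools like systemd-run rely on this when
 * creating transient units. */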
3812
3813 static void log_kill(pid_t pid, int sig, void *userdata) {
3814 _cleanup_free_ char *comm = NULL;
3815
3816 (void) get_process_comm(pid, &comm);
3817
3818 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3819 only, like for example systemd's own PAM stub process. */
3820 if (comm && comm[0] == '(')
3821 return;
3822
3823 log_unit_notice(userdata,
3824 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3825 pid,
3826 strna(comm),
3827 signal_to_string(sig));
3828 }
3829
3830 static int operation_to_signal(KillContext *c, KillOperation k) {
3831 assert(c);
3832
3833 switch (k) {
3834
3835 case KILL_TERMINATE:
3836 case KILL_TERMINATE_AND_LOG:
3837 return c->kill_signal;
3838
3839 case KILL_KILL:
3840 return SIGKILL;
3841
3842 case KILL_ABORT:
3843 return SIGABRT;
3844
3845 default:
3846 assert_not_reached("KillOperation unknown");
3847 }
3848 }
3849
3850 int unit_kill_context(
3851 Unit *u,
3852 KillContext *c,
3853 KillOperation k,
3854 pid_t main_pid,
3855 pid_t control_pid,
3856 bool main_pid_alien) {
3857
3858 bool wait_for_exit = false, send_sighup;
3859 cg_kill_log_func_t log_func = NULL;
3860 int sig, r;
3861
3862 assert(u);
3863 assert(c);
3864
3865 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
3866 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
3867
3868 if (c->kill_mode == KILL_NONE)
3869 return 0;
3870
3871 sig = operation_to_signal(c, k);
3872
3873 send_sighup =
3874 c->send_sighup &&
3875 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3876 sig != SIGHUP;
3877
3878 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
3879 log_func = log_kill;
3880
3881 if (main_pid > 0) {
3882 if (log_func)
3883 log_func(main_pid, sig, u);
3884
3885 r = kill_and_sigcont(main_pid, sig);
3886 if (r < 0 && r != -ESRCH) {
3887 _cleanup_free_ char *comm = NULL;
3888 (void) get_process_comm(main_pid, &comm);
3889
3890 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3891 } else {
3892 if (!main_pid_alien)
3893 wait_for_exit = true;
3894
3895 if (r != -ESRCH && send_sighup)
3896 (void) kill(main_pid, SIGHUP);
3897 }
3898 }
3899
3900 if (control_pid > 0) {
3901 if (log_func)
3902 log_func(control_pid, sig, u);
3903
3904 r = kill_and_sigcont(control_pid, sig);
3905 if (r < 0 && r != -ESRCH) {
3906 _cleanup_free_ char *comm = NULL;
3907 (void) get_process_comm(control_pid, &comm);
3908
3909 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3910 } else {
3911 wait_for_exit = true;
3912
3913 if (r != -ESRCH && send_sighup)
3914 (void) kill(control_pid, SIGHUP);
3915 }
3916 }
3917
3918 if (u->cgroup_path &&
3919 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3920 _cleanup_set_free_ Set *pid_set = NULL;
3921
3922 /* Exclude the main/control pids from being killed via the cgroup */
3923 pid_set = unit_pid_set(main_pid, control_pid);
3924 if (!pid_set)
3925 return -ENOMEM;
3926
3927 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3928 sig,
3929 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3930 pid_set,
3931 log_func, u);
3932 if (r < 0) {
3933 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3934 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3935
3936 } else if (r > 0) {
3937
3938 /* FIXME: For now, on the legacy hierarchy, we
3939 * will not wait for the cgroup members to die
3940 * if we are running in a container or if this
3941 * is a delegation unit, simply because cgroup
3942 * notification is unreliable in these
3943 * cases. It doesn't work at all in
3944 * containers, and outside of containers it
3945 * can be confused easily by left-over
3946 * directories in the cgroup — which however
3947 * should not exist in non-delegated units. On
3948 * the unified hierarchy that's different,
3949 * there we get proper events. Hence rely on
3950 * them. */
3951
3952 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3953 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3954 wait_for_exit = true;
3955
3956 if (send_sighup) {
3957 set_free(pid_set);
3958
3959 pid_set = unit_pid_set(main_pid, control_pid);
3960 if (!pid_set)
3961 return -ENOMEM;
3962
3963 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3964 SIGHUP,
3965 CGROUP_IGNORE_SELF,
3966 pid_set,
3967 NULL, NULL);
3968 }
3969 }
3970 }
3971
3972 return wait_for_exit;
3973 }
3974
3975 int unit_require_mounts_for(Unit *u, const char *path) {
3976 char prefix[strlen(path) + 1], *p;
3977 int r;
3978
3979 assert(u);
3980 assert(path);
3981
3982 /* Registers a unit for requiring a certain path and all its
3983 * prefixes. We keep a simple array of these paths in the
3984 * unit, since it's usually short. However, we build a prefix
3985 * table for all possible prefixes so that newly appearing mount
3986 * units can easily determine which units to make themselves a
3987 * dependency of. */
3988
3989 if (!path_is_absolute(path))
3990 return -EINVAL;
3991
3992 p = strdup(path);
3993 if (!p)
3994 return -ENOMEM;
3995
3996 path_kill_slashes(p);
3997
3998 if (!path_is_safe(p)) {
3999 free(p);
4000 return -EPERM;
4001 }
4002
4003 if (strv_contains(u->requires_mounts_for, p)) {
4004 free(p);
4005 return 0;
4006 }
4007
4008 r = strv_consume(&u->requires_mounts_for, p);
4009 if (r < 0)
4010 return r;
4011
4012 PATH_FOREACH_PREFIX_MORE(prefix, p) {
4013 Set *x;
4014
4015 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4016 if (!x) {
4017 char *q;
4018
4019 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
4020 if (r < 0)
4021 return r;
4022
4023 q = strdup(prefix);
4024 if (!q)
4025 return -ENOMEM;
4026
4027 x = set_new(NULL);
4028 if (!x) {
4029 free(q);
4030 return -ENOMEM;
4031 }
4032
4033 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4034 if (r < 0) {
4035 free(q);
4036 set_free(x);
4037 return r;
4038 }
4039 }
4040
4041 r = set_put(x, u);
4042 if (r < 0)
4043 return r;
4044 }
4045
4046 return 0;
4047 }
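
/* Illustrative sketch (hypothetical path): unit_require_mounts_for(u, "/var/lib/foo")
 * appends "/var/lib/foo" to u->requires_mounts_for and registers u in the manager's
 * units_requiring_mounts_for hashmap under each prefix of the path (roughly "/var/lib/foo",
 * "/var/lib", "/var" and the root), so that a mount unit appearing for any of those paths
 * can look up which units need to be ordered after it. */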
4048
4049 int unit_setup_exec_runtime(Unit *u) {
4050 ExecRuntime **rt;
4051 size_t offset;
4052 Iterator i;
4053 Unit *other;
4054
4055 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4056 assert(offset > 0);
4057
4058 /* Is there already an ExecRuntime for this unit? */
4059 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4060 if (*rt)
4061 return 0;
4062
4063 /* Try to get it from somebody else */
4064 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4065
4066 *rt = unit_get_exec_runtime(other);
4067 if (*rt) {
4068 exec_runtime_ref(*rt);
4069 return 0;
4070 }
4071 }
4072
4073 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
4074 }
4075
4076 int unit_setup_dynamic_creds(Unit *u) {
4077 ExecContext *ec;
4078 DynamicCreds *dcreds;
4079 size_t offset;
4080
4081 assert(u);
4082
4083 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4084 assert(offset > 0);
4085 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4086
4087 ec = unit_get_exec_context(u);
4088 assert(ec);
4089
4090 if (!ec->dynamic_user)
4091 return 0;
4092
4093 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4094 }
4095
4096 bool unit_type_supported(UnitType t) {
4097 if (_unlikely_(t < 0))
4098 return false;
4099 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4100 return false;
4101
4102 if (!unit_vtable[t]->supported)
4103 return true;
4104
4105 return unit_vtable[t]->supported();
4106 }
4107
4108 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4109 int r;
4110
4111 assert(u);
4112 assert(where);
4113
4114 r = dir_is_empty(where);
4115 if (r > 0)
4116 return;
4117 if (r < 0) {
4118 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4119 return;
4120 }
4121
4122 log_struct(LOG_NOTICE,
4123 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4124 LOG_UNIT_ID(u),
4125 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4126 "WHERE=%s", where,
4127 NULL);
4128 }
4129
4130 int unit_fail_if_symlink(Unit *u, const char* where) {
4131 int r;
4132
4133 assert(u);
4134 assert(where);
4135
4136 r = is_symlink(where);
4137 if (r < 0) {
4138 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
4139 return 0;
4140 }
4141 if (r == 0)
4142 return 0;
4143
4144 log_struct(LOG_ERR,
4145 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4146 LOG_UNIT_ID(u),
4147 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
4148 "WHERE=%s", where,
4149 NULL);
4150
4151 return -ELOOP;
4152 }
4153
4154 bool unit_is_pristine(Unit *u) {
4155 assert(u);
4156
4157 /* Check whether the unit already exists or is otherwise already in use,
4158 * in a number of different ways. Note that to cater for unit
4159 * types such as slice, we are generally fine with units that
4160 * are marked UNIT_LOADED even though nothing was
4161 * actually loaded, as those unit types don't require a file
4162 * on disk to validly load. */
4163
4164 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4165 u->fragment_path ||
4166 u->source_path ||
4167 !strv_isempty(u->dropin_paths) ||
4168 u->job ||
4169 u->merged_into);
4170 }
4171
4172 pid_t unit_control_pid(Unit *u) {
4173 assert(u);
4174
4175 if (UNIT_VTABLE(u)->control_pid)
4176 return UNIT_VTABLE(u)->control_pid(u);
4177
4178 return 0;
4179 }
4180
4181 pid_t unit_main_pid(Unit *u) {
4182 assert(u);
4183
4184 if (UNIT_VTABLE(u)->main_pid)
4185 return UNIT_VTABLE(u)->main_pid(u);
4186
4187 return 0;
4188 }
4189
4190 static void unit_unref_uid_internal(
4191 Unit *u,
4192 uid_t *ref_uid,
4193 bool destroy_now,
4194 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4195
4196 assert(u);
4197 assert(ref_uid);
4198 assert(_manager_unref_uid);
4199
4200 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4201 * gid_t are actually the same type, with the same validity rules.
4202 *
4203 * Drops a reference to UID/GID from a unit. */
4204
4205 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4206 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4207
4208 if (!uid_is_valid(*ref_uid))
4209 return;
4210
4211 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4212 *ref_uid = UID_INVALID;
4213 }
4214
4215 void unit_unref_uid(Unit *u, bool destroy_now) {
4216 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4217 }
4218
4219 void unit_unref_gid(Unit *u, bool destroy_now) {
4220 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4221 }
4222
4223 static int unit_ref_uid_internal(
4224 Unit *u,
4225 uid_t *ref_uid,
4226 uid_t uid,
4227 bool clean_ipc,
4228 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4229
4230 int r;
4231
4232 assert(u);
4233 assert(ref_uid);
4234 assert(uid_is_valid(uid));
4235 assert(_manager_ref_uid);
4236
4237 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4238 * are actually the same type, and have the same validity rules.
4239 *
4240 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4241 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4242 * drops to zero. */
4243
4244 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4245 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4246
4247 if (*ref_uid == uid)
4248 return 0;
4249
4250 if (uid_is_valid(*ref_uid)) /* Already set? */
4251 return -EBUSY;
4252
4253 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4254 if (r < 0)
4255 return r;
4256
4257 *ref_uid = uid;
4258 return 1;
4259 }
4260
4261 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4262 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4263 }
4264
4265 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4266 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4267 }
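/* Editorial note, not part of the original source: the return value contract that falls
 * out of unit_ref_uid_internal() above is, roughly:
 *
 *     r = unit_ref_uid(u, uid, true);
 *     // r == 1      -> new reference taken; IPC may be cleaned up once all refs drop
 *     // r == 0      -> this unit already referenced exactly this UID
 *     // r == -EBUSY -> this unit already references a *different* UID
 *     // r < 0       -> other error propagated from the manager-side ref counting
 */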
4268
4269 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4270 int r = 0, q = 0;
4271
4272 assert(u);
4273
4274 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4275
4276 if (uid_is_valid(uid)) {
4277 r = unit_ref_uid(u, uid, clean_ipc);
4278 if (r < 0)
4279 return r;
4280 }
4281
4282 if (gid_is_valid(gid)) {
4283 q = unit_ref_gid(u, gid, clean_ipc);
4284 if (q < 0) {
4285 if (r > 0)
4286 unit_unref_uid(u, false);
4287
4288 return q;
4289 }
4290 }
4291
4292 return r > 0 || q > 0;
4293 }
4294
4295 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4296 ExecContext *c;
4297 int r;
4298
4299 assert(u);
4300
4301 c = unit_get_exec_context(u);
4302
4303 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4304 if (r < 0)
4305 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4306
4307 return r;
4308 }
4309
4310 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4311 assert(u);
4312
4313 unit_unref_uid(u, destroy_now);
4314 unit_unref_gid(u, destroy_now);
4315 }
4316
4317 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4318 int r;
4319
4320 assert(u);
4321
4322 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group name
4323 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4324 * objects when no service references the UID/GID anymore. */
4325
4326 r = unit_ref_uid_gid(u, uid, gid);
4327 if (r > 0)
4328 bus_unit_send_change_signal(u);
4329 }
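/* Editorial sketch, not part of the original source: conceptually the reference taken
 * here is paired with a later unit_unref_uid_gid() call elsewhere in the unit's
 * lifecycle; destroy_now requests immediate cleanup of the UID/GID's IPC objects:
 *
 *     // on user/group name resolution reported by a forked child:
 *     unit_notify_user_lookup(u, uid, gid);  // takes the UID/GID references
 *
 *     // once the unit no longer needs the user/group:
 *     unit_unref_uid_gid(u, true);           // drop both refs, clean up IPC right away
 */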
4330
4331 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4332 int r;
4333
4334 assert(u);
4335
4336 /* Set the invocation ID for this unit. If this fails, we do not roll back to the previous ID, but reset it altogether. */
4337
4338 if (sd_id128_equal(u->invocation_id, id))
4339 return 0;
4340
4341 if (!sd_id128_is_null(u->invocation_id))
4342 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4343
4344 if (sd_id128_is_null(id)) {
4345 r = 0;
4346 goto reset;
4347 }
4348
4349 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4350 if (r < 0)
4351 goto reset;
4352
4353 u->invocation_id = id;
4354 sd_id128_to_string(id, u->invocation_id_string);
4355
4356 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4357 if (r < 0)
4358 goto reset;
4359
4360 return 0;
4361
4362 reset:
4363 u->invocation_id = SD_ID128_NULL;
4364 u->invocation_id_string[0] = 0;
4365 return r;
4366 }
4367
4368 int unit_acquire_invocation_id(Unit *u) {
4369 sd_id128_t id;
4370 int r;
4371
4372 assert(u);
4373
4374 r = sd_id128_randomize(&id);
4375 if (r < 0)
4376 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4377
4378 r = unit_set_invocation_id(u, id);
4379 if (r < 0)
4380 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4381
4382 return 0;
4383 }
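/* Editorial sketch, not part of the original source: the ID acquired above is what a
 * service process can later read back, e.g. via sd_id128_get_invocation() from
 * sd-id128.h (assuming a libsystemd new enough to provide it), which parses the
 * $INVOCATION_ID environment variable the manager exports to the service:
 *
 *     sd_id128_t iid;
 *     int r = sd_id128_get_invocation(&iid);
 *     if (r >= 0)
 *             log_info("Running as invocation " SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(iid));
 */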