]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #6461 from keszybz/meson-options-fix
[thirdparty/systemd.git] / src / core / unit.c
1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fileio-label.h"
39 #include "format-util.h"
40 #include "id128-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
43 #include "log.h"
44 #include "macro.h"
45 #include "missing.h"
46 #include "mkdir.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
50 #include "set.h"
51 #include "signal-util.h"
52 #include "special.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-util.h"
56 #include "strv.h"
57 #include "umask-util.h"
58 #include "unit-name.h"
59 #include "unit.h"
60 #include "user-util.h"
61 #include "virt.h"
62
63 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
64 [UNIT_SERVICE] = &service_vtable,
65 [UNIT_SOCKET] = &socket_vtable,
66 [UNIT_TARGET] = &target_vtable,
67 [UNIT_DEVICE] = &device_vtable,
68 [UNIT_MOUNT] = &mount_vtable,
69 [UNIT_AUTOMOUNT] = &automount_vtable,
70 [UNIT_SWAP] = &swap_vtable,
71 [UNIT_TIMER] = &timer_vtable,
72 [UNIT_PATH] = &path_vtable,
73 [UNIT_SLICE] = &slice_vtable,
74 [UNIT_SCOPE] = &scope_vtable
75 };
76
77 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
78
79 Unit *unit_new(Manager *m, size_t size) {
80 Unit *u;
81
82 assert(m);
83 assert(size >= sizeof(Unit));
84
85 u = malloc0(size);
86 if (!u)
87 return NULL;
88
89 u->names = set_new(&string_hash_ops);
90 if (!u->names)
91 return mfree(u);
92
93 u->manager = m;
94 u->type = _UNIT_TYPE_INVALID;
95 u->default_dependencies = true;
96 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
97 u->unit_file_preset = -1;
98 u->on_failure_job_mode = JOB_REPLACE;
99 u->cgroup_inotify_wd = -1;
100 u->job_timeout = USEC_INFINITY;
101 u->job_running_timeout = USEC_INFINITY;
102 u->ref_uid = UID_INVALID;
103 u->ref_gid = GID_INVALID;
104 u->cpu_usage_last = NSEC_INFINITY;
105
106 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
107 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
108
109 return u;
110 }
111
112 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
113 Unit *u;
114 int r;
115
116 u = unit_new(m, size);
117 if (!u)
118 return -ENOMEM;
119
120 r = unit_add_name(u, name);
121 if (r < 0) {
122 unit_free(u);
123 return r;
124 }
125
126 *ret = u;
127 return r;
128 }
129
130 bool unit_has_name(Unit *u, const char *name) {
131 assert(u);
132 assert(name);
133
134 return set_contains(u->names, (char*) name);
135 }
136
137 static void unit_init(Unit *u) {
138 CGroupContext *cc;
139 ExecContext *ec;
140 KillContext *kc;
141
142 assert(u);
143 assert(u->manager);
144 assert(u->type >= 0);
145
146 cc = unit_get_cgroup_context(u);
147 if (cc) {
148 cgroup_context_init(cc);
149
150 /* Copy in the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
153
154 cc->cpu_accounting = u->manager->default_cpu_accounting;
155 cc->io_accounting = u->manager->default_io_accounting;
156 cc->blockio_accounting = u->manager->default_blockio_accounting;
157 cc->memory_accounting = u->manager->default_memory_accounting;
158 cc->tasks_accounting = u->manager->default_tasks_accounting;
159
160 if (u->type != UNIT_SLICE)
161 cc->tasks_max = u->manager->default_tasks_max;
162 }
163
164 ec = unit_get_exec_context(u);
165 if (ec)
166 exec_context_init(ec);
167
168 kc = unit_get_kill_context(u);
169 if (kc)
170 kill_context_init(kc);
171
172 if (UNIT_VTABLE(u)->init)
173 UNIT_VTABLE(u)->init(u);
174 }
175
176 int unit_add_name(Unit *u, const char *text) {
177 _cleanup_free_ char *s = NULL, *i = NULL;
178 UnitType t;
179 int r;
180
181 assert(u);
182 assert(text);
183
184 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
185
186 if (!u->instance)
187 return -EINVAL;
188
189 r = unit_name_replace_instance(text, u->instance, &s);
190 if (r < 0)
191 return r;
192 } else {
193 s = strdup(text);
194 if (!s)
195 return -ENOMEM;
196 }
197
198 if (set_contains(u->names, s))
199 return 0;
200 if (hashmap_contains(u->manager->units, s))
201 return -EEXIST;
202
203 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
204 return -EINVAL;
205
206 t = unit_name_to_type(s);
207 if (t < 0)
208 return -EINVAL;
209
210 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
211 return -EINVAL;
212
213 r = unit_name_to_instance(s, &i);
214 if (r < 0)
215 return r;
216
217 if (i && !unit_type_may_template(t))
218 return -EINVAL;
219
220 /* Ensure that this unit is either instanced or not instanced,
221 * but not both. Note that we do allow names with different
222 * instance names however! */
223 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
224 return -EINVAL;
225
226 if (!unit_type_may_alias(t) && !set_isempty(u->names))
227 return -EEXIST;
228
229 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
230 return -E2BIG;
231
232 r = set_put(u->names, s);
233 if (r < 0)
234 return r;
235 assert(r > 0);
236
237 r = hashmap_put(u->manager->units, s, u);
238 if (r < 0) {
239 (void) set_remove(u->names, s);
240 return r;
241 }
242
243 if (u->type == _UNIT_TYPE_INVALID) {
244 u->type = t;
245 u->id = s;
246 u->instance = i;
247
248 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
249
250 unit_init(u);
251
252 i = NULL;
253 }
254
255 s = NULL;
256
257 unit_add_to_dbus_queue(u);
258 return 0;
259 }
260
261 int unit_choose_id(Unit *u, const char *name) {
262 _cleanup_free_ char *t = NULL;
263 char *s, *i;
264 int r;
265
266 assert(u);
267 assert(name);
268
269 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
270
271 if (!u->instance)
272 return -EINVAL;
273
274 r = unit_name_replace_instance(name, u->instance, &t);
275 if (r < 0)
276 return r;
277
278 name = t;
279 }
280
281 /* Selects one of the names of this unit as the id */
282 s = set_get(u->names, (char*) name);
283 if (!s)
284 return -ENOENT;
285
286 /* Determine the new instance from the new id */
287 r = unit_name_to_instance(s, &i);
288 if (r < 0)
289 return r;
290
291 u->id = s;
292
293 free(u->instance);
294 u->instance = i;
295
296 unit_add_to_dbus_queue(u);
297
298 return 0;
299 }
300
301 int unit_set_description(Unit *u, const char *description) {
302 char *s;
303
304 assert(u);
305
306 if (isempty(description))
307 s = NULL;
308 else {
309 s = strdup(description);
310 if (!s)
311 return -ENOMEM;
312 }
313
314 free(u->description);
315 u->description = s;
316
317 unit_add_to_dbus_queue(u);
318 return 0;
319 }
320
321 bool unit_check_gc(Unit *u) {
322 UnitActiveState state;
323 bool inactive;
324 assert(u);
325
326 if (u->job)
327 return true;
328
329 if (u->nop_job)
330 return true;
331
332 state = unit_active_state(u);
333 inactive = state == UNIT_INACTIVE;
334
335 /* If the unit is inactive and failed and no job is queued for
336 * it, then release its runtime resources */
337 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
338 UNIT_VTABLE(u)->release_resources)
339 UNIT_VTABLE(u)->release_resources(u, inactive);
340
341 /* But we keep the unit object around for longer when it is
342 * referenced or configured to not be gc'ed */
343 if (!inactive)
344 return true;
345
346 if (u->perpetual)
347 return true;
348
349 if (u->refs)
350 return true;
351
352 if (sd_bus_track_count(u->bus_track) > 0)
353 return true;
354
355 if (UNIT_VTABLE(u)->check_gc)
356 if (UNIT_VTABLE(u)->check_gc(u))
357 return true;
358
359 return false;
360 }
361
362 void unit_add_to_load_queue(Unit *u) {
363 assert(u);
364 assert(u->type != _UNIT_TYPE_INVALID);
365
366 if (u->load_state != UNIT_STUB || u->in_load_queue)
367 return;
368
369 LIST_PREPEND(load_queue, u->manager->load_queue, u);
370 u->in_load_queue = true;
371 }
372
373 void unit_add_to_cleanup_queue(Unit *u) {
374 assert(u);
375
376 if (u->in_cleanup_queue)
377 return;
378
379 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
380 u->in_cleanup_queue = true;
381 }
382
383 void unit_add_to_gc_queue(Unit *u) {
384 assert(u);
385
386 if (u->in_gc_queue || u->in_cleanup_queue)
387 return;
388
389 if (unit_check_gc(u))
390 return;
391
392 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
393 u->in_gc_queue = true;
394 }
395
396 void unit_add_to_dbus_queue(Unit *u) {
397 assert(u);
398 assert(u->type != _UNIT_TYPE_INVALID);
399
400 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
401 return;
402
403 /* Shortcut things if nobody cares */
404 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
405 sd_bus_track_count(u->bus_track) <= 0 &&
406 set_isempty(u->manager->private_buses)) {
407 u->sent_dbus_new_signal = true;
408 return;
409 }
410
411 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
412 u->in_dbus_queue = true;
413 }
414
415 static void bidi_set_free(Unit *u, Set *s) {
416 Iterator i;
417 Unit *other;
418
419 assert(u);
420
421 /* Frees the set and makes sure we are dropped from the
422 * inverse pointers */
423
424 SET_FOREACH(other, s, i) {
425 UnitDependency d;
426
427 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
428 set_remove(other->dependencies[d], u);
429
430 unit_add_to_gc_queue(other);
431 }
432
433 set_free(s);
434 }
435
436 static void unit_remove_transient(Unit *u) {
437 char **i;
438
439 assert(u);
440
441 if (!u->transient)
442 return;
443
444 if (u->fragment_path)
445 (void) unlink(u->fragment_path);
446
447 STRV_FOREACH(i, u->dropin_paths) {
448 _cleanup_free_ char *p = NULL, *pp = NULL;
449
450 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
451 if (!p)
452 continue;
453
454 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
455 if (!pp)
456 continue;
457
458 /* Only drop transient drop-ins */
459 if (!path_equal(u->manager->lookup_paths.transient, pp))
460 continue;
461
462 (void) unlink(*i);
463 (void) rmdir(p);
464 }
465 }
466
467 static void unit_free_requires_mounts_for(Unit *u) {
468 char **j;
469
470 STRV_FOREACH(j, u->requires_mounts_for) {
471 char s[strlen(*j) + 1];
472
473 PATH_FOREACH_PREFIX_MORE(s, *j) {
474 char *y;
475 Set *x;
476
477 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
478 if (!x)
479 continue;
480
481 set_remove(x, u);
482
483 if (set_isempty(x)) {
484 hashmap_remove(u->manager->units_requiring_mounts_for, y);
485 free(y);
486 set_free(x);
487 }
488 }
489 }
490
491 u->requires_mounts_for = strv_free(u->requires_mounts_for);
492 }
493
494 static void unit_done(Unit *u) {
495 ExecContext *ec;
496 CGroupContext *cc;
497
498 assert(u);
499
500 if (u->type < 0)
501 return;
502
503 if (UNIT_VTABLE(u)->done)
504 UNIT_VTABLE(u)->done(u);
505
506 ec = unit_get_exec_context(u);
507 if (ec)
508 exec_context_done(ec);
509
510 cc = unit_get_cgroup_context(u);
511 if (cc)
512 cgroup_context_done(cc);
513 }
514
515 void unit_free(Unit *u) {
516 UnitDependency d;
517 Iterator i;
518 char *t;
519
520 if (!u)
521 return;
522
523 if (u->transient_file)
524 fclose(u->transient_file);
525
526 if (!MANAGER_IS_RELOADING(u->manager))
527 unit_remove_transient(u);
528
529 bus_unit_send_removed_signal(u);
530
531 unit_done(u);
532
533 sd_bus_slot_unref(u->match_bus_slot);
534
535 sd_bus_track_unref(u->bus_track);
536 u->deserialized_refs = strv_free(u->deserialized_refs);
537
538 unit_free_requires_mounts_for(u);
539
540 SET_FOREACH(t, u->names, i)
541 hashmap_remove_value(u->manager->units, t, u);
542
543 if (!sd_id128_is_null(u->invocation_id))
544 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
545
546 if (u->job) {
547 Job *j = u->job;
548 job_uninstall(j);
549 job_free(j);
550 }
551
552 if (u->nop_job) {
553 Job *j = u->nop_job;
554 job_uninstall(j);
555 job_free(j);
556 }
557
558 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
559 bidi_set_free(u, u->dependencies[d]);
560
561 if (u->type != _UNIT_TYPE_INVALID)
562 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
563
564 if (u->in_load_queue)
565 LIST_REMOVE(load_queue, u->manager->load_queue, u);
566
567 if (u->in_dbus_queue)
568 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
569
570 if (u->in_cleanup_queue)
571 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
572
573 if (u->in_gc_queue)
574 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
575
576 if (u->in_cgroup_queue)
577 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
578
579 unit_release_cgroup(u);
580
581 unit_unref_uid_gid(u, false);
582
583 (void) manager_update_failed_units(u->manager, u, false);
584 set_remove(u->manager->startup_units, u);
585
586 free(u->description);
587 strv_free(u->documentation);
588 free(u->fragment_path);
589 free(u->source_path);
590 strv_free(u->dropin_paths);
591 free(u->instance);
592
593 free(u->job_timeout_reboot_arg);
594
595 set_free_free(u->names);
596
597 unit_unwatch_all_pids(u);
598
599 condition_free_list(u->conditions);
600 condition_free_list(u->asserts);
601
602 free(u->reboot_arg);
603
604 unit_ref_unset(&u->slice);
605
606 while (u->refs)
607 unit_ref_unset(u->refs);
608
609 free(u);
610 }
611
612 UnitActiveState unit_active_state(Unit *u) {
613 assert(u);
614
615 if (u->load_state == UNIT_MERGED)
616 return unit_active_state(unit_follow_merge(u));
617
618 /* After a reload it might happen that a unit is not correctly
619 * loaded but still has a process around. That's why we won't
620 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
621
622 return UNIT_VTABLE(u)->active_state(u);
623 }
624
625 const char* unit_sub_state_to_string(Unit *u) {
626 assert(u);
627
628 return UNIT_VTABLE(u)->sub_state_to_string(u);
629 }
630
631 static int complete_move(Set **s, Set **other) {
632 int r;
633
634 assert(s);
635 assert(other);
636
637 if (!*other)
638 return 0;
639
640 if (*s) {
641 r = set_move(*s, *other);
642 if (r < 0)
643 return r;
644 } else {
645 *s = *other;
646 *other = NULL;
647 }
648
649 return 0;
650 }
651
652 static int merge_names(Unit *u, Unit *other) {
653 char *t;
654 Iterator i;
655 int r;
656
657 assert(u);
658 assert(other);
659
660 r = complete_move(&u->names, &other->names);
661 if (r < 0)
662 return r;
663
664 set_free_free(other->names);
665 other->names = NULL;
666 other->id = NULL;
667
668 SET_FOREACH(t, u->names, i)
669 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
670
671 return 0;
672 }
673
674 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
675 unsigned n_reserve;
676
677 assert(u);
678 assert(other);
679 assert(d < _UNIT_DEPENDENCY_MAX);
680
681 /*
682 * If u does not have this dependency set allocated, there is no need
683 * to reserve anything. In that case other's set will be transferred
684 * as a whole to u by complete_move().
685 */
686 if (!u->dependencies[d])
687 return 0;
688
689 /* merge_dependencies() will skip a u-on-u dependency */
690 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
691
692 return set_reserve(u->dependencies[d], n_reserve);
693 }
694
695 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
696 Iterator i;
697 Unit *back;
698 int r;
699
700 assert(u);
701 assert(other);
702 assert(d < _UNIT_DEPENDENCY_MAX);
703
704 /* Fix backwards pointers */
705 SET_FOREACH(back, other->dependencies[d], i) {
706 UnitDependency k;
707
708 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
709 /* Do not add dependencies between u and itself */
710 if (back == u) {
711 if (set_remove(back->dependencies[k], other))
712 maybe_warn_about_dependency(u, other_id, k);
713 } else {
714 r = set_remove_and_put(back->dependencies[k], other, u);
715 if (r == -EEXIST)
716 set_remove(back->dependencies[k], other);
717 else
718 assert(r >= 0 || r == -ENOENT);
719 }
720 }
721 }
722
723 /* Also do not move dependencies on u to itself */
724 back = set_remove(other->dependencies[d], u);
725 if (back)
726 maybe_warn_about_dependency(u, other_id, d);
727
728 /* The move cannot fail. The caller must have performed a reservation. */
729 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
730
731 other->dependencies[d] = set_free(other->dependencies[d]);
732 }
733
734 int unit_merge(Unit *u, Unit *other) {
735 UnitDependency d;
736 const char *other_id = NULL;
737 int r;
738
739 assert(u);
740 assert(other);
741 assert(u->manager == other->manager);
742 assert(u->type != _UNIT_TYPE_INVALID);
743
744 other = unit_follow_merge(other);
745
746 if (other == u)
747 return 0;
748
749 if (u->type != other->type)
750 return -EINVAL;
751
752 if (!u->instance != !other->instance)
753 return -EINVAL;
754
755 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
756 return -EEXIST;
757
758 if (other->load_state != UNIT_STUB &&
759 other->load_state != UNIT_NOT_FOUND)
760 return -EEXIST;
761
762 if (other->job)
763 return -EEXIST;
764
765 if (other->nop_job)
766 return -EEXIST;
767
768 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
769 return -EEXIST;
770
771 if (other->id)
772 other_id = strdupa(other->id);
773
774 /* Make reservations to ensure merge_dependencies() won't fail */
775 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
776 r = reserve_dependencies(u, other, d);
777 /*
778 * We don't rollback reservations if we fail. We don't have
779 * a way to undo reservations. A reservation is not a leak.
780 */
781 if (r < 0)
782 return r;
783 }
784
785 /* Merge names */
786 r = merge_names(u, other);
787 if (r < 0)
788 return r;
789
790 /* Redirect all references */
791 while (other->refs)
792 unit_ref_set(other->refs, u);
793
794 /* Merge dependencies */
795 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
796 merge_dependencies(u, other, other_id, d);
797
798 other->load_state = UNIT_MERGED;
799 other->merged_into = u;
800
801 /* If there is still some data attached to the other node, we
802 * don't need it anymore, and can free it. */
803 if (other->load_state != UNIT_STUB)
804 if (UNIT_VTABLE(other)->done)
805 UNIT_VTABLE(other)->done(other);
806
807 unit_add_to_dbus_queue(u);
808 unit_add_to_cleanup_queue(other);
809
810 return 0;
811 }
812
813 int unit_merge_by_name(Unit *u, const char *name) {
814 _cleanup_free_ char *s = NULL;
815 Unit *other;
816 int r;
817
818 assert(u);
819 assert(name);
820
821 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
822 if (!u->instance)
823 return -EINVAL;
824
825 r = unit_name_replace_instance(name, u->instance, &s);
826 if (r < 0)
827 return r;
828
829 name = s;
830 }
831
832 other = manager_get_unit(u->manager, name);
833 if (other)
834 return unit_merge(u, other);
835
836 return unit_add_name(u, name);
837 }
838
839 Unit* unit_follow_merge(Unit *u) {
840 assert(u);
841
842 while (u->load_state == UNIT_MERGED)
843 assert_se(u = u->merged_into);
844
845 return u;
846 }
847
848 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
849 int r;
850
851 assert(u);
852 assert(c);
853
854 if (c->working_directory) {
855 r = unit_require_mounts_for(u, c->working_directory);
856 if (r < 0)
857 return r;
858 }
859
860 if (c->root_directory) {
861 r = unit_require_mounts_for(u, c->root_directory);
862 if (r < 0)
863 return r;
864 }
865
866 if (c->root_image) {
867 r = unit_require_mounts_for(u, c->root_image);
868 if (r < 0)
869 return r;
870 }
871
872 if (!MANAGER_IS_SYSTEM(u->manager))
873 return 0;
874
875 if (c->private_tmp) {
876 const char *p;
877
878 FOREACH_STRING(p, "/tmp", "/var/tmp") {
879 r = unit_require_mounts_for(u, p);
880 if (r < 0)
881 return r;
882 }
883
884 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true);
885 if (r < 0)
886 return r;
887 }
888
889 if (!IN_SET(c->std_output,
890 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
891 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
892 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
893 !IN_SET(c->std_error,
894 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
895 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
896 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
897 return 0;
898
899 /* If syslog or kernel logging is requested, make sure our own
900 * logging daemon is run first. */
901
902 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
903 if (r < 0)
904 return r;
905
906 return 0;
907 }
908
909 const char *unit_description(Unit *u) {
910 assert(u);
911
912 if (u->description)
913 return u->description;
914
915 return strna(u->id);
916 }
917
918 void unit_dump(Unit *u, FILE *f, const char *prefix) {
919 char *t, **j;
920 UnitDependency d;
921 Iterator i;
922 const char *prefix2;
923 char
924 timestamp0[FORMAT_TIMESTAMP_MAX],
925 timestamp1[FORMAT_TIMESTAMP_MAX],
926 timestamp2[FORMAT_TIMESTAMP_MAX],
927 timestamp3[FORMAT_TIMESTAMP_MAX],
928 timestamp4[FORMAT_TIMESTAMP_MAX],
929 timespan[FORMAT_TIMESPAN_MAX];
930 Unit *following;
931 _cleanup_set_free_ Set *following_set = NULL;
932 int r;
933 const char *n;
934
935 assert(u);
936 assert(u->type >= 0);
937
938 prefix = strempty(prefix);
939 prefix2 = strjoina(prefix, "\t");
940
941 fprintf(f,
942 "%s-> Unit %s:\n"
943 "%s\tDescription: %s\n"
944 "%s\tInstance: %s\n"
945 "%s\tUnit Load State: %s\n"
946 "%s\tUnit Active State: %s\n"
947 "%s\tState Change Timestamp: %s\n"
948 "%s\tInactive Exit Timestamp: %s\n"
949 "%s\tActive Enter Timestamp: %s\n"
950 "%s\tActive Exit Timestamp: %s\n"
951 "%s\tInactive Enter Timestamp: %s\n"
952 "%s\tGC Check Good: %s\n"
953 "%s\tNeed Daemon Reload: %s\n"
954 "%s\tTransient: %s\n"
955 "%s\tPerpetual: %s\n"
956 "%s\tSlice: %s\n"
957 "%s\tCGroup: %s\n"
958 "%s\tCGroup realized: %s\n",
959 prefix, u->id,
960 prefix, unit_description(u),
961 prefix, strna(u->instance),
962 prefix, unit_load_state_to_string(u->load_state),
963 prefix, unit_active_state_to_string(unit_active_state(u)),
964 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
965 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
966 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
967 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
968 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
969 prefix, yes_no(unit_check_gc(u)),
970 prefix, yes_no(unit_need_daemon_reload(u)),
971 prefix, yes_no(u->transient),
972 prefix, yes_no(u->perpetual),
973 prefix, strna(unit_slice_name(u)),
974 prefix, strna(u->cgroup_path),
975 prefix, yes_no(u->cgroup_realized));
976
977 if (u->cgroup_realized_mask != 0) {
978 _cleanup_free_ char *s = NULL;
979 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
980 fprintf(f, "%s\tCGroup mask: %s\n", prefix, strnull(s));
981 }
982 if (u->cgroup_members_mask != 0) {
983 _cleanup_free_ char *s = NULL;
984 (void) cg_mask_to_string(u->cgroup_members_mask, &s);
985 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
986 }
987
988 SET_FOREACH(t, u->names, i)
989 fprintf(f, "%s\tName: %s\n", prefix, t);
990
991 if (!sd_id128_is_null(u->invocation_id))
992 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
993 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
994
995 STRV_FOREACH(j, u->documentation)
996 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
997
998 following = unit_following(u);
999 if (following)
1000 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1001
1002 r = unit_following_set(u, &following_set);
1003 if (r >= 0) {
1004 Unit *other;
1005
1006 SET_FOREACH(other, following_set, i)
1007 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1008 }
1009
1010 if (u->fragment_path)
1011 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1012
1013 if (u->source_path)
1014 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1015
1016 STRV_FOREACH(j, u->dropin_paths)
1017 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1018
1019 if (u->job_timeout != USEC_INFINITY)
1020 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1021
1022 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1023 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1024
1025 if (u->job_timeout_reboot_arg)
1026 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1027
1028 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1029 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1030
1031 if (dual_timestamp_is_set(&u->condition_timestamp))
1032 fprintf(f,
1033 "%s\tCondition Timestamp: %s\n"
1034 "%s\tCondition Result: %s\n",
1035 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1036 prefix, yes_no(u->condition_result));
1037
1038 if (dual_timestamp_is_set(&u->assert_timestamp))
1039 fprintf(f,
1040 "%s\tAssert Timestamp: %s\n"
1041 "%s\tAssert Result: %s\n",
1042 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1043 prefix, yes_no(u->assert_result));
1044
1045 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1046 Unit *other;
1047
1048 SET_FOREACH(other, u->dependencies[d], i)
1049 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1050 }
1051
1052 if (!strv_isempty(u->requires_mounts_for)) {
1053 fprintf(f,
1054 "%s\tRequiresMountsFor:", prefix);
1055
1056 STRV_FOREACH(j, u->requires_mounts_for)
1057 fprintf(f, " %s", *j);
1058
1059 fputs("\n", f);
1060 }
1061
1062 if (u->load_state == UNIT_LOADED) {
1063
1064 fprintf(f,
1065 "%s\tStopWhenUnneeded: %s\n"
1066 "%s\tRefuseManualStart: %s\n"
1067 "%s\tRefuseManualStop: %s\n"
1068 "%s\tDefaultDependencies: %s\n"
1069 "%s\tOnFailureJobMode: %s\n"
1070 "%s\tIgnoreOnIsolate: %s\n",
1071 prefix, yes_no(u->stop_when_unneeded),
1072 prefix, yes_no(u->refuse_manual_start),
1073 prefix, yes_no(u->refuse_manual_stop),
1074 prefix, yes_no(u->default_dependencies),
1075 prefix, job_mode_to_string(u->on_failure_job_mode),
1076 prefix, yes_no(u->ignore_on_isolate));
1077
1078 if (UNIT_VTABLE(u)->dump)
1079 UNIT_VTABLE(u)->dump(u, f, prefix2);
1080
1081 } else if (u->load_state == UNIT_MERGED)
1082 fprintf(f,
1083 "%s\tMerged into: %s\n",
1084 prefix, u->merged_into->id);
1085 else if (u->load_state == UNIT_ERROR)
1086 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1087
1088 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1089 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1090
1091 if (u->job)
1092 job_dump(u->job, f, prefix2);
1093
1094 if (u->nop_job)
1095 job_dump(u->nop_job, f, prefix2);
1096 }
1097
1098 /* Common implementation for multiple backends */
1099 int unit_load_fragment_and_dropin(Unit *u) {
1100 int r;
1101
1102 assert(u);
1103
1104 /* Load a .{service,socket,...} file */
1105 r = unit_load_fragment(u);
1106 if (r < 0)
1107 return r;
1108
1109 if (u->load_state == UNIT_STUB)
1110 return -ENOENT;
1111
1112 /* Load drop-in directory data. If u is an alias, we might be reloading the
1113 * target unit needlessly. But we cannot be sure which drops-ins have already
1114 * been loaded and which not, at least without doing complicated book-keeping,
1115 * so let's always reread all drop-ins. */
1116 return unit_load_dropin(unit_follow_merge(u));
1117 }
1118
1119 /* Common implementation for multiple backends */
1120 int unit_load_fragment_and_dropin_optional(Unit *u) {
1121 int r;
1122
1123 assert(u);
1124
1125 /* Same as unit_load_fragment_and_dropin(), but whether
1126 * something can be loaded or not doesn't matter. */
1127
1128 /* Load a .service file */
1129 r = unit_load_fragment(u);
1130 if (r < 0)
1131 return r;
1132
1133 if (u->load_state == UNIT_STUB)
1134 u->load_state = UNIT_LOADED;
1135
1136 /* Load drop-in directory data */
1137 return unit_load_dropin(unit_follow_merge(u));
1138 }
1139
1140 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1141 assert(u);
1142 assert(target);
1143
1144 if (target->type != UNIT_TARGET)
1145 return 0;
1146
1147 /* Only add the dependency if both units are loaded, so that
1148 * that loop check below is reliable */
1149 if (u->load_state != UNIT_LOADED ||
1150 target->load_state != UNIT_LOADED)
1151 return 0;
1152
1153 /* If either side wants no automatic dependencies, then let's
1154 * skip this */
1155 if (!u->default_dependencies ||
1156 !target->default_dependencies)
1157 return 0;
1158
1159 /* Don't create loops */
1160 if (set_get(target->dependencies[UNIT_BEFORE], u))
1161 return 0;
1162
1163 return unit_add_dependency(target, UNIT_AFTER, u, true);
1164 }
1165
1166 static int unit_add_target_dependencies(Unit *u) {
1167
1168 static const UnitDependency deps[] = {
1169 UNIT_REQUIRED_BY,
1170 UNIT_REQUISITE_OF,
1171 UNIT_WANTED_BY,
1172 UNIT_BOUND_BY
1173 };
1174
1175 Unit *target;
1176 Iterator i;
1177 unsigned k;
1178 int r = 0;
1179
1180 assert(u);
1181
1182 for (k = 0; k < ELEMENTSOF(deps); k++)
1183 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1184 r = unit_add_default_target_dependency(u, target);
1185 if (r < 0)
1186 return r;
1187 }
1188
1189 return r;
1190 }
1191
1192 static int unit_add_slice_dependencies(Unit *u) {
1193 assert(u);
1194
1195 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1196 return 0;
1197
1198 if (UNIT_ISSET(u->slice))
1199 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1200
1201 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1202 return 0;
1203
1204 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1205 }
1206
1207 static int unit_add_mount_dependencies(Unit *u) {
1208 char **i;
1209 int r;
1210
1211 assert(u);
1212
1213 STRV_FOREACH(i, u->requires_mounts_for) {
1214 char prefix[strlen(*i) + 1];
1215
1216 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1217 _cleanup_free_ char *p = NULL;
1218 Unit *m;
1219
1220 r = unit_name_from_path(prefix, ".mount", &p);
1221 if (r < 0)
1222 return r;
1223
1224 m = manager_get_unit(u->manager, p);
1225 if (!m) {
1226 /* Make sure to load the mount unit if
1227 * it exists. If so the dependencies
1228 * on this unit will be added later
1229 * during the loading of the mount
1230 * unit. */
1231 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1232 continue;
1233 }
1234 if (m == u)
1235 continue;
1236
1237 if (m->load_state != UNIT_LOADED)
1238 continue;
1239
1240 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1241 if (r < 0)
1242 return r;
1243
1244 if (m->fragment_path) {
1245 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1246 if (r < 0)
1247 return r;
1248 }
1249 }
1250 }
1251
1252 return 0;
1253 }
1254
1255 static int unit_add_startup_units(Unit *u) {
1256 CGroupContext *c;
1257 int r;
1258
1259 c = unit_get_cgroup_context(u);
1260 if (!c)
1261 return 0;
1262
1263 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1264 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1265 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1266 return 0;
1267
1268 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1269 if (r < 0)
1270 return r;
1271
1272 return set_put(u->manager->startup_units, u);
1273 }
1274
1275 int unit_load(Unit *u) {
1276 int r;
1277
1278 assert(u);
1279
1280 if (u->in_load_queue) {
1281 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1282 u->in_load_queue = false;
1283 }
1284
1285 if (u->type == _UNIT_TYPE_INVALID)
1286 return -EINVAL;
1287
1288 if (u->load_state != UNIT_STUB)
1289 return 0;
1290
1291 if (u->transient_file) {
1292 r = fflush_and_check(u->transient_file);
1293 if (r < 0)
1294 goto fail;
1295
1296 fclose(u->transient_file);
1297 u->transient_file = NULL;
1298
1299 u->fragment_mtime = now(CLOCK_REALTIME);
1300 }
1301
1302 if (UNIT_VTABLE(u)->load) {
1303 r = UNIT_VTABLE(u)->load(u);
1304 if (r < 0)
1305 goto fail;
1306 }
1307
1308 if (u->load_state == UNIT_STUB) {
1309 r = -ENOENT;
1310 goto fail;
1311 }
1312
1313 if (u->load_state == UNIT_LOADED) {
1314
1315 r = unit_add_target_dependencies(u);
1316 if (r < 0)
1317 goto fail;
1318
1319 r = unit_add_slice_dependencies(u);
1320 if (r < 0)
1321 goto fail;
1322
1323 r = unit_add_mount_dependencies(u);
1324 if (r < 0)
1325 goto fail;
1326
1327 r = unit_add_startup_units(u);
1328 if (r < 0)
1329 goto fail;
1330
1331 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1332 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1333 r = -EINVAL;
1334 goto fail;
1335 }
1336
1337 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1338 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1339
1340 unit_update_cgroup_members_masks(u);
1341 }
1342
1343 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1344
1345 unit_add_to_dbus_queue(unit_follow_merge(u));
1346 unit_add_to_gc_queue(u);
1347
1348 return 0;
1349
1350 fail:
1351 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1352 u->load_error = r;
1353 unit_add_to_dbus_queue(u);
1354 unit_add_to_gc_queue(u);
1355
1356 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1357
1358 return r;
1359 }
1360
1361 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1362 Condition *c;
1363 int triggered = -1;
1364
1365 assert(u);
1366 assert(to_string);
1367
1368 /* If the condition list is empty, then it is true */
1369 if (!first)
1370 return true;
1371
1372 /* Otherwise, if all of the non-trigger conditions apply and
1373 * if any of the trigger conditions apply (unless there are
1374 * none) we return true */
1375 LIST_FOREACH(conditions, c, first) {
1376 int r;
1377
1378 r = condition_test(c);
1379 if (r < 0)
1380 log_unit_warning(u,
1381 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1382 to_string(c->type),
1383 c->trigger ? "|" : "",
1384 c->negate ? "!" : "",
1385 c->parameter);
1386 else
1387 log_unit_debug(u,
1388 "%s=%s%s%s %s.",
1389 to_string(c->type),
1390 c->trigger ? "|" : "",
1391 c->negate ? "!" : "",
1392 c->parameter,
1393 condition_result_to_string(c->result));
1394
1395 if (!c->trigger && r <= 0)
1396 return false;
1397
1398 if (c->trigger && triggered <= 0)
1399 triggered = r > 0;
1400 }
1401
1402 return triggered != 0;
1403 }
1404
1405 static bool unit_condition_test(Unit *u) {
1406 assert(u);
1407
1408 dual_timestamp_get(&u->condition_timestamp);
1409 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1410
1411 return u->condition_result;
1412 }
1413
1414 static bool unit_assert_test(Unit *u) {
1415 assert(u);
1416
1417 dual_timestamp_get(&u->assert_timestamp);
1418 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1419
1420 return u->assert_result;
1421 }
1422
1423 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1424 DISABLE_WARNING_FORMAT_NONLITERAL;
1425 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1426 REENABLE_WARNING;
1427 }
1428
1429 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1430 const char *format;
1431 const UnitStatusMessageFormats *format_table;
1432
1433 assert(u);
1434 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1435
1436 if (t != JOB_RELOAD) {
1437 format_table = &UNIT_VTABLE(u)->status_message_formats;
1438 if (format_table) {
1439 format = format_table->starting_stopping[t == JOB_STOP];
1440 if (format)
1441 return format;
1442 }
1443 }
1444
1445 /* Return generic strings */
1446 if (t == JOB_START)
1447 return "Starting %s.";
1448 else if (t == JOB_STOP)
1449 return "Stopping %s.";
1450 else
1451 return "Reloading %s.";
1452 }
1453
1454 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1455 const char *format;
1456
1457 assert(u);
1458
1459 /* Reload status messages have traditionally not been printed to console. */
1460 if (!IN_SET(t, JOB_START, JOB_STOP))
1461 return;
1462
1463 format = unit_get_status_message_format(u, t);
1464
1465 DISABLE_WARNING_FORMAT_NONLITERAL;
1466 unit_status_printf(u, "", format);
1467 REENABLE_WARNING;
1468 }
1469
1470 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1471 const char *format, *mid;
1472 char buf[LINE_MAX];
1473
1474 assert(u);
1475
1476 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1477 return;
1478
1479 if (log_on_console())
1480 return;
1481
1482 /* We log status messages for all units and all operations. */
1483
1484 format = unit_get_status_message_format(u, t);
1485
1486 DISABLE_WARNING_FORMAT_NONLITERAL;
1487 snprintf(buf, sizeof buf, format, unit_description(u));
1488 REENABLE_WARNING;
1489
1490 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1491 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1492 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1493
1494 /* Note that we deliberately use LOG_MESSAGE() instead of
1495 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1496 * closely what is written to screen using the status output,
1497 * which is supposed the highest level, friendliest output
1498 * possible, which means we should avoid the low-level unit
1499 * name. */
1500 log_struct(LOG_INFO,
1501 LOG_MESSAGE("%s", buf),
1502 LOG_UNIT_ID(u),
1503 mid,
1504 NULL);
1505 }
1506
1507 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1508 assert(u);
1509 assert(t >= 0);
1510 assert(t < _JOB_TYPE_MAX);
1511
1512 unit_status_log_starting_stopping_reloading(u, t);
1513 unit_status_print_starting_stopping(u, t);
1514 }
1515
1516 int unit_start_limit_test(Unit *u) {
1517 assert(u);
1518
1519 if (ratelimit_test(&u->start_limit)) {
1520 u->start_limit_hit = false;
1521 return 0;
1522 }
1523
1524 log_unit_warning(u, "Start request repeated too quickly.");
1525 u->start_limit_hit = true;
1526
1527 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1528 }
1529
1530 bool unit_shall_confirm_spawn(Unit *u) {
1531 assert(u);
1532
1533 if (manager_is_confirm_spawn_disabled(u->manager))
1534 return false;
1535
1536 /* For some reasons units remaining in the same process group
1537 * as PID 1 fail to acquire the console even if it's not used
1538 * by any process. So skip the confirmation question for them. */
1539 return !unit_get_exec_context(u)->same_pgrp;
1540 }
1541
1542 static bool unit_verify_deps(Unit *u) {
1543 Unit *other;
1544 Iterator j;
1545
1546 assert(u);
1547
1548 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1549 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1550 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1551 * conjunction with After= as for them any such check would make things entirely racy. */
1552
1553 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], j) {
1554
1555 if (!set_contains(u->dependencies[UNIT_AFTER], other))
1556 continue;
1557
1558 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1559 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1560 return false;
1561 }
1562 }
1563
1564 return true;
1565 }
1566
1567 /* Errors:
1568 * -EBADR: This unit type does not support starting.
1569 * -EALREADY: Unit is already started.
1570 * -EAGAIN: An operation is already in progress. Retry later.
1571 * -ECANCELED: Too many requests for now.
1572 * -EPROTO: Assert failed
1573 * -EINVAL: Unit not loaded
1574 * -EOPNOTSUPP: Unit type not supported
1575 * -ENOLINK: The necessary dependencies are not fulfilled.
1576 */
1577 int unit_start(Unit *u) {
1578 UnitActiveState state;
1579 Unit *following;
1580
1581 assert(u);
1582
1583 /* If this is already started, then this will succeed. Note
1584 * that this will even succeed if this unit is not startable
1585 * by the user. This is relied on to detect when we need to
1586 * wait for units and when waiting is finished. */
1587 state = unit_active_state(u);
1588 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1589 return -EALREADY;
1590
1591 /* Units that aren't loaded cannot be started */
1592 if (u->load_state != UNIT_LOADED)
1593 return -EINVAL;
1594
1595 /* If the conditions failed, don't do anything at all. If we
1596 * already are activating this call might still be useful to
1597 * speed up activation in case there is some hold-off time,
1598 * but we don't want to recheck the condition in that case. */
1599 if (state != UNIT_ACTIVATING &&
1600 !unit_condition_test(u)) {
1601 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1602 return -EALREADY;
1603 }
1604
1605 /* If the asserts failed, fail the entire job */
1606 if (state != UNIT_ACTIVATING &&
1607 !unit_assert_test(u)) {
1608 log_unit_notice(u, "Starting requested but asserts failed.");
1609 return -EPROTO;
1610 }
1611
1612 /* Units of types that aren't supported cannot be
1613 * started. Note that we do this test only after the condition
1614 * checks, so that we rather return condition check errors
1615 * (which are usually not considered a true failure) than "not
1616 * supported" errors (which are considered a failure).
1617 */
1618 if (!unit_supported(u))
1619 return -EOPNOTSUPP;
1620
1621 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1622 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1623 * effect anymore, due to a reload or due to a failed condition. */
1624 if (!unit_verify_deps(u))
1625 return -ENOLINK;
1626
1627 /* Forward to the main object, if we aren't it. */
1628 following = unit_following(u);
1629 if (following) {
1630 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1631 return unit_start(following);
1632 }
1633
1634 /* If it is stopped, but we cannot start it, then fail */
1635 if (!UNIT_VTABLE(u)->start)
1636 return -EBADR;
1637
1638 /* We don't suppress calls to ->start() here when we are
1639 * already starting, to allow this request to be used as a
1640 * "hurry up" call, for example when the unit is in some "auto
1641 * restart" state where it waits for a holdoff timer to elapse
1642 * before it will start again. */
1643
1644 unit_add_to_dbus_queue(u);
1645
1646 return UNIT_VTABLE(u)->start(u);
1647 }
1648
1649 bool unit_can_start(Unit *u) {
1650 assert(u);
1651
1652 if (u->load_state != UNIT_LOADED)
1653 return false;
1654
1655 if (!unit_supported(u))
1656 return false;
1657
1658 return !!UNIT_VTABLE(u)->start;
1659 }
1660
1661 bool unit_can_isolate(Unit *u) {
1662 assert(u);
1663
1664 return unit_can_start(u) &&
1665 u->allow_isolate;
1666 }
1667
1668 /* Errors:
1669 * -EBADR: This unit type does not support stopping.
1670 * -EALREADY: Unit is already stopped.
1671 * -EAGAIN: An operation is already in progress. Retry later.
1672 */
1673 int unit_stop(Unit *u) {
1674 UnitActiveState state;
1675 Unit *following;
1676
1677 assert(u);
1678
1679 state = unit_active_state(u);
1680 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1681 return -EALREADY;
1682
1683 following = unit_following(u);
1684 if (following) {
1685 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1686 return unit_stop(following);
1687 }
1688
1689 if (!UNIT_VTABLE(u)->stop)
1690 return -EBADR;
1691
1692 unit_add_to_dbus_queue(u);
1693
1694 return UNIT_VTABLE(u)->stop(u);
1695 }
1696
1697 bool unit_can_stop(Unit *u) {
1698 assert(u);
1699
1700 if (!unit_supported(u))
1701 return false;
1702
1703 if (u->perpetual)
1704 return false;
1705
1706 return !!UNIT_VTABLE(u)->stop;
1707 }
1708
1709 /* Errors:
1710 * -EBADR: This unit type does not support reloading.
1711 * -ENOEXEC: Unit is not started.
1712 * -EAGAIN: An operation is already in progress. Retry later.
1713 */
1714 int unit_reload(Unit *u) {
1715 UnitActiveState state;
1716 Unit *following;
1717
1718 assert(u);
1719
1720 if (u->load_state != UNIT_LOADED)
1721 return -EINVAL;
1722
1723 if (!unit_can_reload(u))
1724 return -EBADR;
1725
1726 state = unit_active_state(u);
1727 if (state == UNIT_RELOADING)
1728 return -EALREADY;
1729
1730 if (state != UNIT_ACTIVE) {
1731 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1732 return -ENOEXEC;
1733 }
1734
1735 following = unit_following(u);
1736 if (following) {
1737 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1738 return unit_reload(following);
1739 }
1740
1741 unit_add_to_dbus_queue(u);
1742
1743 return UNIT_VTABLE(u)->reload(u);
1744 }
1745
1746 bool unit_can_reload(Unit *u) {
1747 assert(u);
1748
1749 if (!UNIT_VTABLE(u)->reload)
1750 return false;
1751
1752 if (!UNIT_VTABLE(u)->can_reload)
1753 return true;
1754
1755 return UNIT_VTABLE(u)->can_reload(u);
1756 }
1757
1758 static void unit_check_unneeded(Unit *u) {
1759
1760 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1761
1762 static const UnitDependency needed_dependencies[] = {
1763 UNIT_REQUIRED_BY,
1764 UNIT_REQUISITE_OF,
1765 UNIT_WANTED_BY,
1766 UNIT_BOUND_BY,
1767 };
1768
1769 Unit *other;
1770 Iterator i;
1771 unsigned j;
1772 int r;
1773
1774 assert(u);
1775
1776 /* If this service shall be shut down when unneeded then do
1777 * so. */
1778
1779 if (!u->stop_when_unneeded)
1780 return;
1781
1782 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1783 return;
1784
1785 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1786 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1787 if (unit_active_or_pending(other))
1788 return;
1789
1790 /* If stopping a unit fails continuously we might enter a stop
1791 * loop here, hence stop acting on the service being
1792 * unnecessary after a while. */
1793 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1794 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1795 return;
1796 }
1797
1798 log_unit_info(u, "Unit not needed anymore. Stopping.");
1799
1800 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1801 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1802 if (r < 0)
1803 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1804 }
1805
1806 static void unit_check_binds_to(Unit *u) {
1807 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1808 bool stop = false;
1809 Unit *other;
1810 Iterator i;
1811 int r;
1812
1813 assert(u);
1814
1815 if (u->job)
1816 return;
1817
1818 if (unit_active_state(u) != UNIT_ACTIVE)
1819 return;
1820
1821 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1822 if (other->job)
1823 continue;
1824
1825 if (!other->coldplugged)
1826 /* We might yet create a job for the other unit… */
1827 continue;
1828
1829 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1830 continue;
1831
1832 stop = true;
1833 break;
1834 }
1835
1836 if (!stop)
1837 return;
1838
1839 /* If stopping a unit fails continuously we might enter a stop
1840 * loop here, hence stop acting on the service being
1841 * unnecessary after a while. */
1842 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1843 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1844 return;
1845 }
1846
1847 assert(other);
1848 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1849
1850 /* A unit we need to run is gone. Sniff. Let's stop this. */
1851 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1852 if (r < 0)
1853 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1854 }
1855
1856 static void retroactively_start_dependencies(Unit *u) {
1857 Iterator i;
1858 Unit *other;
1859
1860 assert(u);
1861 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1862
1863 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1864 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1865 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1866 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1867
1868 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1869 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1870 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1871 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1872
1873 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1874 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1875 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1876 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1877
1878 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1879 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1880 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1881
1882 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1883 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1884 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1885 }
1886
1887 static void retroactively_stop_dependencies(Unit *u) {
1888 Iterator i;
1889 Unit *other;
1890
1891 assert(u);
1892 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1893
1894 /* Pull down units which are bound to us recursively if enabled */
1895 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1896 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1897 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1898 }
1899
1900 static void check_unneeded_dependencies(Unit *u) {
1901 Iterator i;
1902 Unit *other;
1903
1904 assert(u);
1905 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1906
1907 /* Garbage collect services that might not be needed anymore, if enabled */
1908 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1909 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1910 unit_check_unneeded(other);
1911 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1912 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1913 unit_check_unneeded(other);
1914 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1915 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1916 unit_check_unneeded(other);
1917 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1918 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1919 unit_check_unneeded(other);
1920 }
1921
1922 void unit_start_on_failure(Unit *u) {
1923 Unit *other;
1924 Iterator i;
1925
1926 assert(u);
1927
1928 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1929 return;
1930
1931 log_unit_info(u, "Triggering OnFailure= dependencies.");
1932
1933 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1934 int r;
1935
1936 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1937 if (r < 0)
1938 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1939 }
1940 }
1941
1942 void unit_trigger_notify(Unit *u) {
1943 Unit *other;
1944 Iterator i;
1945
1946 assert(u);
1947
1948 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1949 if (UNIT_VTABLE(other)->trigger_notify)
1950 UNIT_VTABLE(other)->trigger_notify(other, u);
1951 }
1952
1953 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1954 Manager *m;
1955 bool unexpected;
1956
1957 assert(u);
1958 assert(os < _UNIT_ACTIVE_STATE_MAX);
1959 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1960
1961 /* Note that this is called for all low-level state changes,
1962 * even if they might map to the same high-level
1963 * UnitActiveState! That means that ns == os is an expected
1964 * behavior here. For example: if a mount point is remounted
1965 * this function will be called too! */
1966
1967 m = u->manager;
1968
1969 /* Update timestamps for state changes */
1970 if (!MANAGER_IS_RELOADING(m)) {
1971 dual_timestamp_get(&u->state_change_timestamp);
1972
1973 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1974 u->inactive_exit_timestamp = u->state_change_timestamp;
1975 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1976 u->inactive_enter_timestamp = u->state_change_timestamp;
1977
1978 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1979 u->active_enter_timestamp = u->state_change_timestamp;
1980 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1981 u->active_exit_timestamp = u->state_change_timestamp;
1982 }
1983
1984 /* Keep track of failed units */
1985 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1986
1987 /* Make sure the cgroup is always removed when we become inactive */
1988 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1989 unit_prune_cgroup(u);
1990
1991 /* Note that this doesn't apply to RemainAfterExit services exiting
1992 * successfully, since there's no change of state in that case. Which is
1993 * why it is handled in service_set_state() */
1994 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1995 ExecContext *ec;
1996
1997 ec = unit_get_exec_context(u);
1998 if (ec && exec_context_may_touch_console(ec)) {
1999 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2000 m->n_on_console--;
2001
2002 if (m->n_on_console == 0)
2003 /* unset no_console_output flag, since the console is free */
2004 m->no_console_output = false;
2005 } else
2006 m->n_on_console++;
2007 }
2008 }
2009
2010 if (u->job) {
2011 unexpected = false;
2012
2013 if (u->job->state == JOB_WAITING)
2014
2015 /* So we reached a different state for this
2016 * job. Let's see if we can run it now if it
2017 * failed previously due to EAGAIN. */
2018 job_add_to_run_queue(u->job);
2019
2020 /* Let's check whether this state change constitutes a
2021 * finished job, or maybe contradicts a running job and
2022 * hence needs to invalidate jobs. */
2023
2024 switch (u->job->type) {
2025
2026 case JOB_START:
2027 case JOB_VERIFY_ACTIVE:
2028
2029 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2030 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2031 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2032 unexpected = true;
2033
2034 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2035 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2036 }
2037
2038 break;
2039
2040 case JOB_RELOAD:
2041 case JOB_RELOAD_OR_START:
2042 case JOB_TRY_RELOAD:
2043
2044 if (u->job->state == JOB_RUNNING) {
2045 if (ns == UNIT_ACTIVE)
2046 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2047 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
2048 unexpected = true;
2049
2050 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2051 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2052 }
2053 }
2054
2055 break;
2056
2057 case JOB_STOP:
2058 case JOB_RESTART:
2059 case JOB_TRY_RESTART:
2060
2061 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2062 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2063 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2064 unexpected = true;
2065 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2066 }
2067
2068 break;
2069
2070 default:
2071 assert_not_reached("Job type unknown");
2072 }
2073
2074 } else
2075 unexpected = true;
2076
2077 if (!MANAGER_IS_RELOADING(m)) {
2078
2079 /* If this state change happened without being
2080 * requested by a job, then let's retroactively start
2081 * or stop dependencies. We skip that step when
2082 * deserializing, since we don't want to create any
2083 * additional jobs just because something is already
2084 * activated. */
2085
2086 if (unexpected) {
2087 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2088 retroactively_start_dependencies(u);
2089 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2090 retroactively_stop_dependencies(u);
2091 }
2092
2093 /* stop unneeded units regardless if going down was expected or not */
2094 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2095 check_unneeded_dependencies(u);
2096
2097 if (ns != os && ns == UNIT_FAILED) {
2098 log_unit_notice(u, "Unit entered failed state.");
2099 unit_start_on_failure(u);
2100 }
2101 }
2102
2103 /* Some names are special */
2104 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2105
2106 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2107 /* The bus might have just become available,
2108 * hence try to connect to it, if we aren't
2109 * yet connected. */
2110 bus_init(m, true);
2111
2112 if (u->type == UNIT_SERVICE &&
2113 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2114 !MANAGER_IS_RELOADING(m)) {
2115 /* Write audit record if we have just finished starting up */
2116 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2117 u->in_audit = true;
2118 }
2119
2120 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2121 manager_send_unit_plymouth(m, u);
2122
2123 } else {
2124
2125 /* We don't care about D-Bus here, since we'll get an
2126 * asynchronous notification for it anyway. */
2127
2128 if (u->type == UNIT_SERVICE &&
2129 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2130 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2131 !MANAGER_IS_RELOADING(m)) {
2132
2133 /* Hmm, if there was no start record written
2134 * write it now, so that we always have a nice
2135 * pair */
2136 if (!u->in_audit) {
2137 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2138
2139 if (ns == UNIT_INACTIVE)
2140 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2141 } else
2142 /* Write audit record if we have just finished shutting down */
2143 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2144
2145 u->in_audit = false;
2146 }
2147 }
2148
2149 manager_recheck_journal(m);
2150 unit_trigger_notify(u);
2151
2152 if (!MANAGER_IS_RELOADING(u->manager)) {
2153 /* Maybe we finished startup and are now ready for
2154 * being stopped because unneeded? */
2155 unit_check_unneeded(u);
2156
2157 /* Maybe we finished startup, but something we needed
2158 * has vanished? Let's die then. (This happens when
2159 * something BindsTo= to a Type=oneshot unit, as these
2160 * units go directly from starting to inactive,
2161 * without ever entering started.) */
2162 unit_check_binds_to(u);
2163 }
2164
2165 unit_add_to_dbus_queue(u);
2166 unit_add_to_gc_queue(u);
2167 }
2168
2169 int unit_watch_pid(Unit *u, pid_t pid) {
2170 int q, r;
2171
2172 assert(u);
2173 assert(pid >= 1);
2174
2175 /* Watch a specific PID. We only support one or two units
2176 * watching each PID for now, not more. */
2177
2178 r = set_ensure_allocated(&u->pids, NULL);
2179 if (r < 0)
2180 return r;
2181
2182 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2183 if (r < 0)
2184 return r;
2185
2186 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2187 if (r == -EEXIST) {
2188 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2189 if (r < 0)
2190 return r;
2191
2192 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2193 }
2194
2195 q = set_put(u->pids, PID_TO_PTR(pid));
2196 if (q < 0)
2197 return q;
2198
2199 return r;
2200 }
2201
2202 void unit_unwatch_pid(Unit *u, pid_t pid) {
2203 assert(u);
2204 assert(pid >= 1);
2205
2206 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2207 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2208 (void) set_remove(u->pids, PID_TO_PTR(pid));
2209 }
2210
2211 void unit_unwatch_all_pids(Unit *u) {
2212 assert(u);
2213
2214 while (!set_isempty(u->pids))
2215 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2216
2217 u->pids = set_free(u->pids);
2218 }
2219
2220 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2221 Iterator i;
2222 void *e;
2223
2224 assert(u);
2225
2226 /* Cleans dead PIDs from our list */
2227
2228 SET_FOREACH(e, u->pids, i) {
2229 pid_t pid = PTR_TO_PID(e);
2230
2231 if (pid == except1 || pid == except2)
2232 continue;
2233
2234 if (!pid_is_unwaited(pid))
2235 unit_unwatch_pid(u, pid);
2236 }
2237 }
2238
2239 bool unit_job_is_applicable(Unit *u, JobType j) {
2240 assert(u);
2241 assert(j >= 0 && j < _JOB_TYPE_MAX);
2242
2243 switch (j) {
2244
2245 case JOB_VERIFY_ACTIVE:
2246 case JOB_START:
2247 case JOB_NOP:
2248 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2249 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2250 * jobs for it. */
2251 return true;
2252
2253 case JOB_STOP:
2254 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2255 * external events), hence it makes no sense to permit enqueing such a request either. */
2256 return !u->perpetual;
2257
2258 case JOB_RESTART:
2259 case JOB_TRY_RESTART:
2260 return unit_can_stop(u) && unit_can_start(u);
2261
2262 case JOB_RELOAD:
2263 case JOB_TRY_RELOAD:
2264 return unit_can_reload(u);
2265
2266 case JOB_RELOAD_OR_START:
2267 return unit_can_reload(u) && unit_can_start(u);
2268
2269 default:
2270 assert_not_reached("Invalid job type");
2271 }
2272 }
2273
2274 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2275 assert(u);
2276
2277 /* Only warn about some unit types */
2278 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2279 return;
2280
2281 if (streq_ptr(u->id, other))
2282 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2283 else
2284 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2285 }
2286
2287 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2288
2289 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2290 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2291 [UNIT_WANTS] = UNIT_WANTED_BY,
2292 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2293 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2294 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2295 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2296 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2297 [UNIT_WANTED_BY] = UNIT_WANTS,
2298 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2299 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2300 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2301 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2302 [UNIT_BEFORE] = UNIT_AFTER,
2303 [UNIT_AFTER] = UNIT_BEFORE,
2304 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2305 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2306 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2307 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2308 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2309 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2310 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2311 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2312 };
2313 int r, q = 0, v = 0, w = 0;
2314 Unit *orig_u = u, *orig_other = other;
2315
2316 assert(u);
2317 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2318 assert(other);
2319
2320 u = unit_follow_merge(u);
2321 other = unit_follow_merge(other);
2322
2323 /* We won't allow dependencies on ourselves. We will not
2324 * consider them an error however. */
2325 if (u == other) {
2326 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2327 return 0;
2328 }
2329
2330 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2331 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2332 return 0;
2333 }
2334
2335 r = set_ensure_allocated(&u->dependencies[d], NULL);
2336 if (r < 0)
2337 return r;
2338
2339 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2340 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2341 if (r < 0)
2342 return r;
2343 }
2344
2345 if (add_reference) {
2346 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2347 if (r < 0)
2348 return r;
2349
2350 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2351 if (r < 0)
2352 return r;
2353 }
2354
2355 q = set_put(u->dependencies[d], other);
2356 if (q < 0)
2357 return q;
2358
2359 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2360 v = set_put(other->dependencies[inverse_table[d]], u);
2361 if (v < 0) {
2362 r = v;
2363 goto fail;
2364 }
2365 }
2366
2367 if (add_reference) {
2368 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2369 if (w < 0) {
2370 r = w;
2371 goto fail;
2372 }
2373
2374 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2375 if (r < 0)
2376 goto fail;
2377 }
2378
2379 unit_add_to_dbus_queue(u);
2380 return 0;
2381
2382 fail:
2383 if (q > 0)
2384 set_remove(u->dependencies[d], other);
2385
2386 if (v > 0)
2387 set_remove(other->dependencies[inverse_table[d]], u);
2388
2389 if (w > 0)
2390 set_remove(u->dependencies[UNIT_REFERENCES], other);
2391
2392 return r;
2393 }
2394
2395 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2396 int r;
2397
2398 assert(u);
2399
2400 r = unit_add_dependency(u, d, other, add_reference);
2401 if (r < 0)
2402 return r;
2403
2404 return unit_add_dependency(u, e, other, add_reference);
2405 }
2406
2407 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2408 int r;
2409
2410 assert(u);
2411 assert(name || path);
2412 assert(buf);
2413 assert(ret);
2414
2415 if (!name)
2416 name = basename(path);
2417
2418 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2419 *buf = NULL;
2420 *ret = name;
2421 return 0;
2422 }
2423
2424 if (u->instance)
2425 r = unit_name_replace_instance(name, u->instance, buf);
2426 else {
2427 _cleanup_free_ char *i = NULL;
2428
2429 r = unit_name_to_prefix(u->id, &i);
2430 if (r < 0)
2431 return r;
2432
2433 r = unit_name_replace_instance(name, i, buf);
2434 }
2435 if (r < 0)
2436 return r;
2437
2438 *ret = *buf;
2439 return 0;
2440 }
2441
2442 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2443 _cleanup_free_ char *buf = NULL;
2444 Unit *other;
2445 int r;
2446
2447 assert(u);
2448 assert(name || path);
2449
2450 r = resolve_template(u, name, path, &buf, &name);
2451 if (r < 0)
2452 return r;
2453
2454 r = manager_load_unit(u->manager, name, path, NULL, &other);
2455 if (r < 0)
2456 return r;
2457
2458 return unit_add_dependency(u, d, other, add_reference);
2459 }
2460
2461 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2462 _cleanup_free_ char *buf = NULL;
2463 Unit *other;
2464 int r;
2465
2466 assert(u);
2467 assert(name || path);
2468
2469 r = resolve_template(u, name, path, &buf, &name);
2470 if (r < 0)
2471 return r;
2472
2473 r = manager_load_unit(u->manager, name, path, NULL, &other);
2474 if (r < 0)
2475 return r;
2476
2477 return unit_add_two_dependencies(u, d, e, other, add_reference);
2478 }
2479
2480 int set_unit_path(const char *p) {
2481 /* This is mostly for debug purposes */
2482 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2483 return -errno;
2484
2485 return 0;
2486 }
2487
2488 char *unit_dbus_path(Unit *u) {
2489 assert(u);
2490
2491 if (!u->id)
2492 return NULL;
2493
2494 return unit_dbus_path_from_name(u->id);
2495 }
2496
2497 char *unit_dbus_path_invocation_id(Unit *u) {
2498 assert(u);
2499
2500 if (sd_id128_is_null(u->invocation_id))
2501 return NULL;
2502
2503 return unit_dbus_path_from_name(u->invocation_id_string);
2504 }
2505
2506 int unit_set_slice(Unit *u, Unit *slice) {
2507 assert(u);
2508 assert(slice);
2509
2510 /* Sets the unit slice if it has not been set before. Is extra
2511 * careful, to only allow this for units that actually have a
2512 * cgroup context. Also, we don't allow to set this for slices
2513 * (since the parent slice is derived from the name). Make
2514 * sure the unit we set is actually a slice. */
2515
2516 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2517 return -EOPNOTSUPP;
2518
2519 if (u->type == UNIT_SLICE)
2520 return -EINVAL;
2521
2522 if (unit_active_state(u) != UNIT_INACTIVE)
2523 return -EBUSY;
2524
2525 if (slice->type != UNIT_SLICE)
2526 return -EINVAL;
2527
2528 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2529 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2530 return -EPERM;
2531
2532 if (UNIT_DEREF(u->slice) == slice)
2533 return 0;
2534
2535 /* Disallow slice changes if @u is already bound to cgroups */
2536 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2537 return -EBUSY;
2538
2539 unit_ref_unset(&u->slice);
2540 unit_ref_set(&u->slice, slice);
2541 return 1;
2542 }
2543
2544 int unit_set_default_slice(Unit *u) {
2545 _cleanup_free_ char *b = NULL;
2546 const char *slice_name;
2547 Unit *slice;
2548 int r;
2549
2550 assert(u);
2551
2552 if (UNIT_ISSET(u->slice))
2553 return 0;
2554
2555 if (u->instance) {
2556 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2557
2558 /* Implicitly place all instantiated units in their
2559 * own per-template slice */
2560
2561 r = unit_name_to_prefix(u->id, &prefix);
2562 if (r < 0)
2563 return r;
2564
2565 /* The prefix is already escaped, but it might include
2566 * "-" which has a special meaning for slice units,
2567 * hence escape it here extra. */
2568 escaped = unit_name_escape(prefix);
2569 if (!escaped)
2570 return -ENOMEM;
2571
2572 if (MANAGER_IS_SYSTEM(u->manager))
2573 b = strjoin("system-", escaped, ".slice");
2574 else
2575 b = strappend(escaped, ".slice");
2576 if (!b)
2577 return -ENOMEM;
2578
2579 slice_name = b;
2580 } else
2581 slice_name =
2582 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2583 ? SPECIAL_SYSTEM_SLICE
2584 : SPECIAL_ROOT_SLICE;
2585
2586 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2587 if (r < 0)
2588 return r;
2589
2590 return unit_set_slice(u, slice);
2591 }
2592
2593 const char *unit_slice_name(Unit *u) {
2594 assert(u);
2595
2596 if (!UNIT_ISSET(u->slice))
2597 return NULL;
2598
2599 return UNIT_DEREF(u->slice)->id;
2600 }
2601
2602 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2603 _cleanup_free_ char *t = NULL;
2604 int r;
2605
2606 assert(u);
2607 assert(type);
2608 assert(_found);
2609
2610 r = unit_name_change_suffix(u->id, type, &t);
2611 if (r < 0)
2612 return r;
2613 if (unit_has_name(u, t))
2614 return -EINVAL;
2615
2616 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2617 assert(r < 0 || *_found != u);
2618 return r;
2619 }
2620
2621 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2622 const char *name, *old_owner, *new_owner;
2623 Unit *u = userdata;
2624 int r;
2625
2626 assert(message);
2627 assert(u);
2628
2629 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2630 if (r < 0) {
2631 bus_log_parse_error(r);
2632 return 0;
2633 }
2634
2635 old_owner = isempty(old_owner) ? NULL : old_owner;
2636 new_owner = isempty(new_owner) ? NULL : new_owner;
2637
2638 if (UNIT_VTABLE(u)->bus_name_owner_change)
2639 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2640
2641 return 0;
2642 }
2643
2644 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2645 const char *match;
2646
2647 assert(u);
2648 assert(bus);
2649 assert(name);
2650
2651 if (u->match_bus_slot)
2652 return -EBUSY;
2653
2654 match = strjoina("type='signal',"
2655 "sender='org.freedesktop.DBus',"
2656 "path='/org/freedesktop/DBus',"
2657 "interface='org.freedesktop.DBus',"
2658 "member='NameOwnerChanged',"
2659 "arg0='", name, "'");
2660
2661 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2662 }
2663
2664 int unit_watch_bus_name(Unit *u, const char *name) {
2665 int r;
2666
2667 assert(u);
2668 assert(name);
2669
2670 /* Watch a specific name on the bus. We only support one unit
2671 * watching each name for now. */
2672
2673 if (u->manager->api_bus) {
2674 /* If the bus is already available, install the match directly.
2675 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
2676 r = unit_install_bus_match(u, u->manager->api_bus, name);
2677 if (r < 0)
2678 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2679 }
2680
2681 r = hashmap_put(u->manager->watch_bus, name, u);
2682 if (r < 0) {
2683 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2684 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
2685 }
2686
2687 return 0;
2688 }
2689
2690 void unit_unwatch_bus_name(Unit *u, const char *name) {
2691 assert(u);
2692 assert(name);
2693
2694 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
2695 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2696 }
2697
2698 bool unit_can_serialize(Unit *u) {
2699 assert(u);
2700
2701 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2702 }
2703
2704 static int unit_serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
2705 _cleanup_free_ char *s = NULL;
2706 int r = 0;
2707
2708 assert(f);
2709 assert(key);
2710
2711 if (mask != 0) {
2712 r = cg_mask_to_string(mask, &s);
2713 if (r >= 0) {
2714 fputs(key, f);
2715 fputc('=', f);
2716 fputs(s, f);
2717 fputc('\n', f);
2718 }
2719 }
2720 return r;
2721 }
2722
2723 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2724 int r;
2725
2726 assert(u);
2727 assert(f);
2728 assert(fds);
2729
2730 if (unit_can_serialize(u)) {
2731 ExecRuntime *rt;
2732
2733 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2734 if (r < 0)
2735 return r;
2736
2737 rt = unit_get_exec_runtime(u);
2738 if (rt) {
2739 r = exec_runtime_serialize(u, rt, f, fds);
2740 if (r < 0)
2741 return r;
2742 }
2743 }
2744
2745 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2746
2747 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2748 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2749 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2750 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2751
2752 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2753 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2754
2755 if (dual_timestamp_is_set(&u->condition_timestamp))
2756 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2757
2758 if (dual_timestamp_is_set(&u->assert_timestamp))
2759 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2760
2761 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2762
2763 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2764 if (u->cpu_usage_last != NSEC_INFINITY)
2765 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2766
2767 if (u->cgroup_path)
2768 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2769 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2770 (void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
2771 (void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
2772
2773 if (uid_is_valid(u->ref_uid))
2774 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2775 if (gid_is_valid(u->ref_gid))
2776 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2777
2778 if (!sd_id128_is_null(u->invocation_id))
2779 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
2780
2781 bus_track_serialize(u->bus_track, f, "ref");
2782
2783 if (serialize_jobs) {
2784 if (u->job) {
2785 fprintf(f, "job\n");
2786 job_serialize(u->job, f);
2787 }
2788
2789 if (u->nop_job) {
2790 fprintf(f, "job\n");
2791 job_serialize(u->nop_job, f);
2792 }
2793 }
2794
2795 /* End marker */
2796 fputc('\n', f);
2797 return 0;
2798 }
2799
2800 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2801 assert(u);
2802 assert(f);
2803 assert(key);
2804
2805 if (!value)
2806 return 0;
2807
2808 fputs(key, f);
2809 fputc('=', f);
2810 fputs(value, f);
2811 fputc('\n', f);
2812
2813 return 1;
2814 }
2815
2816 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2817 _cleanup_free_ char *c = NULL;
2818
2819 assert(u);
2820 assert(f);
2821 assert(key);
2822
2823 if (!value)
2824 return 0;
2825
2826 c = cescape(value);
2827 if (!c)
2828 return -ENOMEM;
2829
2830 fputs(key, f);
2831 fputc('=', f);
2832 fputs(c, f);
2833 fputc('\n', f);
2834
2835 return 1;
2836 }
2837
2838 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2839 int copy;
2840
2841 assert(u);
2842 assert(f);
2843 assert(key);
2844
2845 if (fd < 0)
2846 return 0;
2847
2848 copy = fdset_put_dup(fds, fd);
2849 if (copy < 0)
2850 return copy;
2851
2852 fprintf(f, "%s=%i\n", key, copy);
2853 return 1;
2854 }
2855
2856 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2857 va_list ap;
2858
2859 assert(u);
2860 assert(f);
2861 assert(key);
2862 assert(format);
2863
2864 fputs(key, f);
2865 fputc('=', f);
2866
2867 va_start(ap, format);
2868 vfprintf(f, format, ap);
2869 va_end(ap);
2870
2871 fputc('\n', f);
2872 }
2873
2874 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2875 ExecRuntime **rt = NULL;
2876 size_t offset;
2877 int r;
2878
2879 assert(u);
2880 assert(f);
2881 assert(fds);
2882
2883 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2884 if (offset > 0)
2885 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2886
2887 for (;;) {
2888 char line[LINE_MAX], *l, *v;
2889 size_t k;
2890
2891 if (!fgets(line, sizeof(line), f)) {
2892 if (feof(f))
2893 return 0;
2894 return -errno;
2895 }
2896
2897 char_array_0(line);
2898 l = strstrip(line);
2899
2900 /* End marker */
2901 if (isempty(l))
2902 break;
2903
2904 k = strcspn(l, "=");
2905
2906 if (l[k] == '=') {
2907 l[k] = 0;
2908 v = l+k+1;
2909 } else
2910 v = l+k;
2911
2912 if (streq(l, "job")) {
2913 if (v[0] == '\0') {
2914 /* new-style serialized job */
2915 Job *j;
2916
2917 j = job_new_raw(u);
2918 if (!j)
2919 return log_oom();
2920
2921 r = job_deserialize(j, f);
2922 if (r < 0) {
2923 job_free(j);
2924 return r;
2925 }
2926
2927 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2928 if (r < 0) {
2929 job_free(j);
2930 return r;
2931 }
2932
2933 r = job_install_deserialized(j);
2934 if (r < 0) {
2935 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2936 job_free(j);
2937 return r;
2938 }
2939 } else /* legacy for pre-44 */
2940 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2941 continue;
2942 } else if (streq(l, "state-change-timestamp")) {
2943 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2944 continue;
2945 } else if (streq(l, "inactive-exit-timestamp")) {
2946 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2947 continue;
2948 } else if (streq(l, "active-enter-timestamp")) {
2949 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2950 continue;
2951 } else if (streq(l, "active-exit-timestamp")) {
2952 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2953 continue;
2954 } else if (streq(l, "inactive-enter-timestamp")) {
2955 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2956 continue;
2957 } else if (streq(l, "condition-timestamp")) {
2958 dual_timestamp_deserialize(v, &u->condition_timestamp);
2959 continue;
2960 } else if (streq(l, "assert-timestamp")) {
2961 dual_timestamp_deserialize(v, &u->assert_timestamp);
2962 continue;
2963 } else if (streq(l, "condition-result")) {
2964
2965 r = parse_boolean(v);
2966 if (r < 0)
2967 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2968 else
2969 u->condition_result = r;
2970
2971 continue;
2972
2973 } else if (streq(l, "assert-result")) {
2974
2975 r = parse_boolean(v);
2976 if (r < 0)
2977 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2978 else
2979 u->assert_result = r;
2980
2981 continue;
2982
2983 } else if (streq(l, "transient")) {
2984
2985 r = parse_boolean(v);
2986 if (r < 0)
2987 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2988 else
2989 u->transient = r;
2990
2991 continue;
2992
2993 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2994
2995 r = safe_atou64(v, &u->cpu_usage_base);
2996 if (r < 0)
2997 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
2998
2999 continue;
3000
3001 } else if (streq(l, "cpu-usage-last")) {
3002
3003 r = safe_atou64(v, &u->cpu_usage_last);
3004 if (r < 0)
3005 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3006
3007 continue;
3008
3009 } else if (streq(l, "cgroup")) {
3010
3011 r = unit_set_cgroup_path(u, v);
3012 if (r < 0)
3013 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3014
3015 (void) unit_watch_cgroup(u);
3016
3017 continue;
3018 } else if (streq(l, "cgroup-realized")) {
3019 int b;
3020
3021 b = parse_boolean(v);
3022 if (b < 0)
3023 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3024 else
3025 u->cgroup_realized = b;
3026
3027 continue;
3028
3029 } else if (streq(l, "cgroup-realized-mask")) {
3030
3031 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3032 if (r < 0)
3033 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3034 continue;
3035
3036 } else if (streq(l, "cgroup-enabled-mask")) {
3037
3038 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3039 if (r < 0)
3040 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3041 continue;
3042
3043 } else if (streq(l, "ref-uid")) {
3044 uid_t uid;
3045
3046 r = parse_uid(v, &uid);
3047 if (r < 0)
3048 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3049 else
3050 unit_ref_uid_gid(u, uid, GID_INVALID);
3051
3052 continue;
3053
3054 } else if (streq(l, "ref-gid")) {
3055 gid_t gid;
3056
3057 r = parse_gid(v, &gid);
3058 if (r < 0)
3059 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3060 else
3061 unit_ref_uid_gid(u, UID_INVALID, gid);
3062
3063 } else if (streq(l, "ref")) {
3064
3065 r = strv_extend(&u->deserialized_refs, v);
3066 if (r < 0)
3067 log_oom();
3068
3069 continue;
3070 } else if (streq(l, "invocation-id")) {
3071 sd_id128_t id;
3072
3073 r = sd_id128_from_string(v, &id);
3074 if (r < 0)
3075 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3076 else {
3077 r = unit_set_invocation_id(u, id);
3078 if (r < 0)
3079 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3080 }
3081
3082 continue;
3083 }
3084
3085 if (unit_can_serialize(u)) {
3086 if (rt) {
3087 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3088 if (r < 0) {
3089 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3090 continue;
3091 }
3092
3093 /* Returns positive if key was handled by the call */
3094 if (r > 0)
3095 continue;
3096 }
3097
3098 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3099 if (r < 0)
3100 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3101 }
3102 }
3103
3104 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3105 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3106 * before 228 where the base for timeouts was not persistent across reboots. */
3107
3108 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3109 dual_timestamp_get(&u->state_change_timestamp);
3110
3111 return 0;
3112 }
3113
3114 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
3115 Unit *device;
3116 _cleanup_free_ char *e = NULL;
3117 int r;
3118
3119 assert(u);
3120
3121 /* Adds in links to the device node that this unit is based on */
3122 if (isempty(what))
3123 return 0;
3124
3125 if (!is_device_path(what))
3126 return 0;
3127
3128 /* When device units aren't supported (such as in a
3129 * container), don't create dependencies on them. */
3130 if (!unit_type_supported(UNIT_DEVICE))
3131 return 0;
3132
3133 r = unit_name_from_path(what, ".device", &e);
3134 if (r < 0)
3135 return r;
3136
3137 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3138 if (r < 0)
3139 return r;
3140
3141 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3142 dep = UNIT_BINDS_TO;
3143
3144 r = unit_add_two_dependencies(u, UNIT_AFTER,
3145 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3146 device, true);
3147 if (r < 0)
3148 return r;
3149
3150 if (wants) {
3151 r = unit_add_dependency(device, UNIT_WANTS, u, false);
3152 if (r < 0)
3153 return r;
3154 }
3155
3156 return 0;
3157 }
3158
3159 int unit_coldplug(Unit *u) {
3160 int r = 0, q;
3161 char **i;
3162
3163 assert(u);
3164
3165 /* Make sure we don't enter a loop, when coldplugging
3166 * recursively. */
3167 if (u->coldplugged)
3168 return 0;
3169
3170 u->coldplugged = true;
3171
3172 STRV_FOREACH(i, u->deserialized_refs) {
3173 q = bus_unit_track_add_name(u, *i);
3174 if (q < 0 && r >= 0)
3175 r = q;
3176 }
3177 u->deserialized_refs = strv_free(u->deserialized_refs);
3178
3179 if (UNIT_VTABLE(u)->coldplug) {
3180 q = UNIT_VTABLE(u)->coldplug(u);
3181 if (q < 0 && r >= 0)
3182 r = q;
3183 }
3184
3185 if (u->job) {
3186 q = job_coldplug(u->job);
3187 if (q < 0 && r >= 0)
3188 r = q;
3189 }
3190
3191 return r;
3192 }
3193
3194 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3195 struct stat st;
3196
3197 if (!path)
3198 return false;
3199
3200 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3201 * are never out-of-date. */
3202 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3203 return false;
3204
3205 if (stat(path, &st) < 0)
3206 /* What, cannot access this anymore? */
3207 return true;
3208
3209 if (path_masked)
3210 /* For masked files check if they are still so */
3211 return !null_or_empty(&st);
3212 else
3213 /* For non-empty files check the mtime */
3214 return timespec_load(&st.st_mtim) > mtime;
3215
3216 return false;
3217 }
3218
3219 bool unit_need_daemon_reload(Unit *u) {
3220 _cleanup_strv_free_ char **t = NULL;
3221 char **path;
3222
3223 assert(u);
3224
3225 /* For unit files, we allow masking… */
3226 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3227 u->load_state == UNIT_MASKED))
3228 return true;
3229
3230 /* Source paths should not be masked… */
3231 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3232 return true;
3233
3234 (void) unit_find_dropin_paths(u, &t);
3235 if (!strv_equal(u->dropin_paths, t))
3236 return true;
3237
3238 /* … any drop-ins that are masked are simply omitted from the list. */
3239 STRV_FOREACH(path, u->dropin_paths)
3240 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3241 return true;
3242
3243 return false;
3244 }
3245
3246 void unit_reset_failed(Unit *u) {
3247 assert(u);
3248
3249 if (UNIT_VTABLE(u)->reset_failed)
3250 UNIT_VTABLE(u)->reset_failed(u);
3251
3252 RATELIMIT_RESET(u->start_limit);
3253 u->start_limit_hit = false;
3254 }
3255
3256 Unit *unit_following(Unit *u) {
3257 assert(u);
3258
3259 if (UNIT_VTABLE(u)->following)
3260 return UNIT_VTABLE(u)->following(u);
3261
3262 return NULL;
3263 }
3264
3265 bool unit_stop_pending(Unit *u) {
3266 assert(u);
3267
3268 /* This call does check the current state of the unit. It's
3269 * hence useful to be called from state change calls of the
3270 * unit itself, where the state isn't updated yet. This is
3271 * different from unit_inactive_or_pending() which checks both
3272 * the current state and for a queued job. */
3273
3274 return u->job && u->job->type == JOB_STOP;
3275 }
3276
3277 bool unit_inactive_or_pending(Unit *u) {
3278 assert(u);
3279
3280 /* Returns true if the unit is inactive or going down */
3281
3282 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3283 return true;
3284
3285 if (unit_stop_pending(u))
3286 return true;
3287
3288 return false;
3289 }
3290
3291 bool unit_active_or_pending(Unit *u) {
3292 assert(u);
3293
3294 /* Returns true if the unit is active or going up */
3295
3296 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3297 return true;
3298
3299 if (u->job &&
3300 (u->job->type == JOB_START ||
3301 u->job->type == JOB_RELOAD_OR_START ||
3302 u->job->type == JOB_RESTART))
3303 return true;
3304
3305 return false;
3306 }
3307
3308 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3309 assert(u);
3310 assert(w >= 0 && w < _KILL_WHO_MAX);
3311 assert(SIGNAL_VALID(signo));
3312
3313 if (!UNIT_VTABLE(u)->kill)
3314 return -EOPNOTSUPP;
3315
3316 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3317 }
3318
3319 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3320 Set *pid_set;
3321 int r;
3322
3323 pid_set = set_new(NULL);
3324 if (!pid_set)
3325 return NULL;
3326
3327 /* Exclude the main/control pids from being killed via the cgroup */
3328 if (main_pid > 0) {
3329 r = set_put(pid_set, PID_TO_PTR(main_pid));
3330 if (r < 0)
3331 goto fail;
3332 }
3333
3334 if (control_pid > 0) {
3335 r = set_put(pid_set, PID_TO_PTR(control_pid));
3336 if (r < 0)
3337 goto fail;
3338 }
3339
3340 return pid_set;
3341
3342 fail:
3343 set_free(pid_set);
3344 return NULL;
3345 }
3346
3347 int unit_kill_common(
3348 Unit *u,
3349 KillWho who,
3350 int signo,
3351 pid_t main_pid,
3352 pid_t control_pid,
3353 sd_bus_error *error) {
3354
3355 int r = 0;
3356 bool killed = false;
3357
3358 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3359 if (main_pid < 0)
3360 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3361 else if (main_pid == 0)
3362 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3363 }
3364
3365 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3366 if (control_pid < 0)
3367 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3368 else if (control_pid == 0)
3369 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3370 }
3371
3372 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3373 if (control_pid > 0) {
3374 if (kill(control_pid, signo) < 0)
3375 r = -errno;
3376 else
3377 killed = true;
3378 }
3379
3380 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3381 if (main_pid > 0) {
3382 if (kill(main_pid, signo) < 0)
3383 r = -errno;
3384 else
3385 killed = true;
3386 }
3387
3388 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3389 _cleanup_set_free_ Set *pid_set = NULL;
3390 int q;
3391
3392 /* Exclude the main/control pids from being killed via the cgroup */
3393 pid_set = unit_pid_set(main_pid, control_pid);
3394 if (!pid_set)
3395 return -ENOMEM;
3396
3397 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3398 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3399 r = q;
3400 else
3401 killed = true;
3402 }
3403
3404 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3405 return -ESRCH;
3406
3407 return r;
3408 }
3409
3410 int unit_following_set(Unit *u, Set **s) {
3411 assert(u);
3412 assert(s);
3413
3414 if (UNIT_VTABLE(u)->following_set)
3415 return UNIT_VTABLE(u)->following_set(u, s);
3416
3417 *s = NULL;
3418 return 0;
3419 }
3420
3421 UnitFileState unit_get_unit_file_state(Unit *u) {
3422 int r;
3423
3424 assert(u);
3425
3426 if (u->unit_file_state < 0 && u->fragment_path) {
3427 r = unit_file_get_state(
3428 u->manager->unit_file_scope,
3429 NULL,
3430 basename(u->fragment_path),
3431 &u->unit_file_state);
3432 if (r < 0)
3433 u->unit_file_state = UNIT_FILE_BAD;
3434 }
3435
3436 return u->unit_file_state;
3437 }
3438
3439 int unit_get_unit_file_preset(Unit *u) {
3440 assert(u);
3441
3442 if (u->unit_file_preset < 0 && u->fragment_path)
3443 u->unit_file_preset = unit_file_query_preset(
3444 u->manager->unit_file_scope,
3445 NULL,
3446 basename(u->fragment_path));
3447
3448 return u->unit_file_preset;
3449 }
3450
3451 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3452 assert(ref);
3453 assert(u);
3454
3455 if (ref->unit)
3456 unit_ref_unset(ref);
3457
3458 ref->unit = u;
3459 LIST_PREPEND(refs, u->refs, ref);
3460 return u;
3461 }
3462
3463 void unit_ref_unset(UnitRef *ref) {
3464 assert(ref);
3465
3466 if (!ref->unit)
3467 return;
3468
3469 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3470 * be unreferenced now. */
3471 unit_add_to_gc_queue(ref->unit);
3472
3473 LIST_REMOVE(refs, ref->unit->refs, ref);
3474 ref->unit = NULL;
3475 }
3476
3477 static int user_from_unit_name(Unit *u, char **ret) {
3478
3479 static const uint8_t hash_key[] = {
3480 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3481 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3482 };
3483
3484 _cleanup_free_ char *n = NULL;
3485 int r;
3486
3487 r = unit_name_to_prefix(u->id, &n);
3488 if (r < 0)
3489 return r;
3490
3491 if (valid_user_group_name(n)) {
3492 *ret = n;
3493 n = NULL;
3494 return 0;
3495 }
3496
3497 /* If we can't use the unit name as a user name, then let's hash it and use that */
3498 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3499 return -ENOMEM;
3500
3501 return 0;
3502 }
3503
3504 int unit_patch_contexts(Unit *u) {
3505 CGroupContext *cc;
3506 ExecContext *ec;
3507 unsigned i;
3508 int r;
3509
3510 assert(u);
3511
3512 /* Patch in the manager defaults into the exec and cgroup
3513 * contexts, _after_ the rest of the settings have been
3514 * initialized */
3515
3516 ec = unit_get_exec_context(u);
3517 if (ec) {
3518 /* This only copies in the ones that need memory */
3519 for (i = 0; i < _RLIMIT_MAX; i++)
3520 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3521 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3522 if (!ec->rlimit[i])
3523 return -ENOMEM;
3524 }
3525
3526 if (MANAGER_IS_USER(u->manager) &&
3527 !ec->working_directory) {
3528
3529 r = get_home_dir(&ec->working_directory);
3530 if (r < 0)
3531 return r;
3532
3533 /* Allow user services to run, even if the
3534 * home directory is missing */
3535 ec->working_directory_missing_ok = true;
3536 }
3537
3538 if (ec->private_devices)
3539 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
3540
3541 if (ec->protect_kernel_modules)
3542 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
3543
3544 if (ec->dynamic_user) {
3545 if (!ec->user) {
3546 r = user_from_unit_name(u, &ec->user);
3547 if (r < 0)
3548 return r;
3549 }
3550
3551 if (!ec->group) {
3552 ec->group = strdup(ec->user);
3553 if (!ec->group)
3554 return -ENOMEM;
3555 }
3556
3557 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3558 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3559
3560 ec->private_tmp = true;
3561 ec->remove_ipc = true;
3562 ec->protect_system = PROTECT_SYSTEM_STRICT;
3563 if (ec->protect_home == PROTECT_HOME_NO)
3564 ec->protect_home = PROTECT_HOME_READ_ONLY;
3565 }
3566 }
3567
3568 cc = unit_get_cgroup_context(u);
3569 if (cc) {
3570
3571 if (ec &&
3572 ec->private_devices &&
3573 cc->device_policy == CGROUP_AUTO)
3574 cc->device_policy = CGROUP_CLOSED;
3575 }
3576
3577 return 0;
3578 }
3579
3580 ExecContext *unit_get_exec_context(Unit *u) {
3581 size_t offset;
3582 assert(u);
3583
3584 if (u->type < 0)
3585 return NULL;
3586
3587 offset = UNIT_VTABLE(u)->exec_context_offset;
3588 if (offset <= 0)
3589 return NULL;
3590
3591 return (ExecContext*) ((uint8_t*) u + offset);
3592 }
3593
3594 KillContext *unit_get_kill_context(Unit *u) {
3595 size_t offset;
3596 assert(u);
3597
3598 if (u->type < 0)
3599 return NULL;
3600
3601 offset = UNIT_VTABLE(u)->kill_context_offset;
3602 if (offset <= 0)
3603 return NULL;
3604
3605 return (KillContext*) ((uint8_t*) u + offset);
3606 }
3607
3608 CGroupContext *unit_get_cgroup_context(Unit *u) {
3609 size_t offset;
3610
3611 if (u->type < 0)
3612 return NULL;
3613
3614 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3615 if (offset <= 0)
3616 return NULL;
3617
3618 return (CGroupContext*) ((uint8_t*) u + offset);
3619 }
3620
3621 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3622 size_t offset;
3623
3624 if (u->type < 0)
3625 return NULL;
3626
3627 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3628 if (offset <= 0)
3629 return NULL;
3630
3631 return *(ExecRuntime**) ((uint8_t*) u + offset);
3632 }
3633
3634 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3635 assert(u);
3636
3637 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3638 return NULL;
3639
3640 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3641 return u->manager->lookup_paths.transient;
3642
3643 if (mode == UNIT_RUNTIME)
3644 return u->manager->lookup_paths.runtime_control;
3645
3646 if (mode == UNIT_PERSISTENT)
3647 return u->manager->lookup_paths.persistent_control;
3648
3649 return NULL;
3650 }
3651
3652 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3653 _cleanup_free_ char *p = NULL, *q = NULL;
3654 const char *dir, *wrapped;
3655 int r;
3656
3657 assert(u);
3658
3659 if (u->transient_file) {
3660 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3661 * write to the transient unit file. */
3662 fputs(data, u->transient_file);
3663 fputc('\n', u->transient_file);
3664 return 0;
3665 }
3666
3667 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3668 return 0;
3669
3670 dir = unit_drop_in_dir(u, mode);
3671 if (!dir)
3672 return -EINVAL;
3673
3674 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3675 "# or an equivalent operation. Do not edit.\n",
3676 data,
3677 "\n");
3678
3679 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3680 if (r < 0)
3681 return r;
3682
3683 (void) mkdir_p(p, 0755);
3684 r = write_string_file_atomic_label(q, wrapped);
3685 if (r < 0)
3686 return r;
3687
3688 r = strv_push(&u->dropin_paths, q);
3689 if (r < 0)
3690 return r;
3691 q = NULL;
3692
3693 strv_uniq(u->dropin_paths);
3694
3695 u->dropin_mtime = now(CLOCK_REALTIME);
3696
3697 return 0;
3698 }
3699
3700 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3701 _cleanup_free_ char *p = NULL;
3702 va_list ap;
3703 int r;
3704
3705 assert(u);
3706 assert(name);
3707 assert(format);
3708
3709 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3710 return 0;
3711
3712 va_start(ap, format);
3713 r = vasprintf(&p, format, ap);
3714 va_end(ap);
3715
3716 if (r < 0)
3717 return -ENOMEM;
3718
3719 return unit_write_drop_in(u, mode, name, p);
3720 }
3721
3722 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3723 const char *ndata;
3724
3725 assert(u);
3726 assert(name);
3727 assert(data);
3728
3729 if (!UNIT_VTABLE(u)->private_section)
3730 return -EINVAL;
3731
3732 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3733 return 0;
3734
3735 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3736
3737 return unit_write_drop_in(u, mode, name, ndata);
3738 }
3739
3740 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3741 _cleanup_free_ char *p = NULL;
3742 va_list ap;
3743 int r;
3744
3745 assert(u);
3746 assert(name);
3747 assert(format);
3748
3749 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3750 return 0;
3751
3752 va_start(ap, format);
3753 r = vasprintf(&p, format, ap);
3754 va_end(ap);
3755
3756 if (r < 0)
3757 return -ENOMEM;
3758
3759 return unit_write_drop_in_private(u, mode, name, p);
3760 }
3761
3762 int unit_make_transient(Unit *u) {
3763 FILE *f;
3764 char *path;
3765
3766 assert(u);
3767
3768 if (!UNIT_VTABLE(u)->can_transient)
3769 return -EOPNOTSUPP;
3770
3771 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
3772 if (!path)
3773 return -ENOMEM;
3774
3775 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3776 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3777
3778 RUN_WITH_UMASK(0022) {
3779 f = fopen(path, "we");
3780 if (!f) {
3781 free(path);
3782 return -errno;
3783 }
3784 }
3785
3786 if (u->transient_file)
3787 fclose(u->transient_file);
3788 u->transient_file = f;
3789
3790 free(u->fragment_path);
3791 u->fragment_path = path;
3792
3793 u->source_path = mfree(u->source_path);
3794 u->dropin_paths = strv_free(u->dropin_paths);
3795 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3796
3797 u->load_state = UNIT_STUB;
3798 u->load_error = 0;
3799 u->transient = true;
3800
3801 unit_add_to_dbus_queue(u);
3802 unit_add_to_gc_queue(u);
3803
3804 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3805 u->transient_file);
3806
3807 return 0;
3808 }
3809
3810 static void log_kill(pid_t pid, int sig, void *userdata) {
3811 _cleanup_free_ char *comm = NULL;
3812
3813 (void) get_process_comm(pid, &comm);
3814
3815 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3816 only, like for example systemd's own PAM stub process. */
3817 if (comm && comm[0] == '(')
3818 return;
3819
3820 log_unit_notice(userdata,
3821 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3822 pid,
3823 strna(comm),
3824 signal_to_string(sig));
3825 }
3826
3827 static int operation_to_signal(KillContext *c, KillOperation k) {
3828 assert(c);
3829
3830 switch (k) {
3831
3832 case KILL_TERMINATE:
3833 case KILL_TERMINATE_AND_LOG:
3834 return c->kill_signal;
3835
3836 case KILL_KILL:
3837 return SIGKILL;
3838
3839 case KILL_ABORT:
3840 return SIGABRT;
3841
3842 default:
3843 assert_not_reached("KillOperation unknown");
3844 }
3845 }
3846
3847 int unit_kill_context(
3848 Unit *u,
3849 KillContext *c,
3850 KillOperation k,
3851 pid_t main_pid,
3852 pid_t control_pid,
3853 bool main_pid_alien) {
3854
3855 bool wait_for_exit = false, send_sighup;
3856 cg_kill_log_func_t log_func = NULL;
3857 int sig, r;
3858
3859 assert(u);
3860 assert(c);
3861
3862 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
3863 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
3864
3865 if (c->kill_mode == KILL_NONE)
3866 return 0;
3867
3868 sig = operation_to_signal(c, k);
3869
3870 send_sighup =
3871 c->send_sighup &&
3872 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3873 sig != SIGHUP;
3874
3875 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
3876 log_func = log_kill;
3877
3878 if (main_pid > 0) {
3879 if (log_func)
3880 log_func(main_pid, sig, u);
3881
3882 r = kill_and_sigcont(main_pid, sig);
3883 if (r < 0 && r != -ESRCH) {
3884 _cleanup_free_ char *comm = NULL;
3885 (void) get_process_comm(main_pid, &comm);
3886
3887 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3888 } else {
3889 if (!main_pid_alien)
3890 wait_for_exit = true;
3891
3892 if (r != -ESRCH && send_sighup)
3893 (void) kill(main_pid, SIGHUP);
3894 }
3895 }
3896
3897 if (control_pid > 0) {
3898 if (log_func)
3899 log_func(control_pid, sig, u);
3900
3901 r = kill_and_sigcont(control_pid, sig);
3902 if (r < 0 && r != -ESRCH) {
3903 _cleanup_free_ char *comm = NULL;
3904 (void) get_process_comm(control_pid, &comm);
3905
3906 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3907 } else {
3908 wait_for_exit = true;
3909
3910 if (r != -ESRCH && send_sighup)
3911 (void) kill(control_pid, SIGHUP);
3912 }
3913 }
3914
3915 if (u->cgroup_path &&
3916 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3917 _cleanup_set_free_ Set *pid_set = NULL;
3918
3919 /* Exclude the main/control pids from being killed via the cgroup */
3920 pid_set = unit_pid_set(main_pid, control_pid);
3921 if (!pid_set)
3922 return -ENOMEM;
3923
3924 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3925 sig,
3926 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3927 pid_set,
3928 log_func, u);
3929 if (r < 0) {
3930 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3931 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3932
3933 } else if (r > 0) {
3934
3935 /* FIXME: For now, on the legacy hierarchy, we
3936 * will not wait for the cgroup members to die
3937 * if we are running in a container or if this
3938 * is a delegation unit, simply because cgroup
3939 * notification is unreliable in these
3940 * cases. It doesn't work at all in
3941 * containers, and outside of containers it
3942 * can be confused easily by left-over
3943 * directories in the cgroup — which however
3944 * should not exist in non-delegated units. On
3945 * the unified hierarchy that's different,
3946 * there we get proper events. Hence rely on
3947 * them. */
3948
3949 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3950 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3951 wait_for_exit = true;
3952
3953 if (send_sighup) {
3954 set_free(pid_set);
3955
3956 pid_set = unit_pid_set(main_pid, control_pid);
3957 if (!pid_set)
3958 return -ENOMEM;
3959
3960 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3961 SIGHUP,
3962 CGROUP_IGNORE_SELF,
3963 pid_set,
3964 NULL, NULL);
3965 }
3966 }
3967 }
3968
3969 return wait_for_exit;
3970 }
3971
3972 int unit_require_mounts_for(Unit *u, const char *path) {
3973 char prefix[strlen(path) + 1], *p;
3974 int r;
3975
3976 assert(u);
3977 assert(path);
3978
3979 /* Registers a unit for requiring a certain path and all its
3980 * prefixes. We keep a simple array of these paths in the
3981 * unit, since its usually short. However, we build a prefix
3982 * table for all possible prefixes so that new appearing mount
3983 * units can easily determine which units to make themselves a
3984 * dependency of. */
3985
3986 if (!path_is_absolute(path))
3987 return -EINVAL;
3988
3989 p = strdup(path);
3990 if (!p)
3991 return -ENOMEM;
3992
3993 path_kill_slashes(p);
3994
3995 if (!path_is_safe(p)) {
3996 free(p);
3997 return -EPERM;
3998 }
3999
4000 if (strv_contains(u->requires_mounts_for, p)) {
4001 free(p);
4002 return 0;
4003 }
4004
4005 r = strv_consume(&u->requires_mounts_for, p);
4006 if (r < 0)
4007 return r;
4008
4009 PATH_FOREACH_PREFIX_MORE(prefix, p) {
4010 Set *x;
4011
4012 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4013 if (!x) {
4014 char *q;
4015
4016 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
4017 if (r < 0)
4018 return r;
4019
4020 q = strdup(prefix);
4021 if (!q)
4022 return -ENOMEM;
4023
4024 x = set_new(NULL);
4025 if (!x) {
4026 free(q);
4027 return -ENOMEM;
4028 }
4029
4030 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4031 if (r < 0) {
4032 free(q);
4033 set_free(x);
4034 return r;
4035 }
4036 }
4037
4038 r = set_put(x, u);
4039 if (r < 0)
4040 return r;
4041 }
4042
4043 return 0;
4044 }
4045
4046 int unit_setup_exec_runtime(Unit *u) {
4047 ExecRuntime **rt;
4048 size_t offset;
4049 Iterator i;
4050 Unit *other;
4051
4052 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4053 assert(offset > 0);
4054
4055 /* Check if there already is an ExecRuntime for this unit? */
4056 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4057 if (*rt)
4058 return 0;
4059
4060 /* Try to get it from somebody else */
4061 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4062
4063 *rt = unit_get_exec_runtime(other);
4064 if (*rt) {
4065 exec_runtime_ref(*rt);
4066 return 0;
4067 }
4068 }
4069
4070 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
4071 }
4072
4073 int unit_setup_dynamic_creds(Unit *u) {
4074 ExecContext *ec;
4075 DynamicCreds *dcreds;
4076 size_t offset;
4077
4078 assert(u);
4079
4080 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4081 assert(offset > 0);
4082 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4083
4084 ec = unit_get_exec_context(u);
4085 assert(ec);
4086
4087 if (!ec->dynamic_user)
4088 return 0;
4089
4090 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4091 }
4092
4093 bool unit_type_supported(UnitType t) {
4094 if (_unlikely_(t < 0))
4095 return false;
4096 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4097 return false;
4098
4099 if (!unit_vtable[t]->supported)
4100 return true;
4101
4102 return unit_vtable[t]->supported();
4103 }
4104
4105 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4106 int r;
4107
4108 assert(u);
4109 assert(where);
4110
4111 r = dir_is_empty(where);
4112 if (r > 0)
4113 return;
4114 if (r < 0) {
4115 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4116 return;
4117 }
4118
4119 log_struct(LOG_NOTICE,
4120 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4121 LOG_UNIT_ID(u),
4122 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4123 "WHERE=%s", where,
4124 NULL);
4125 }
4126
4127 int unit_fail_if_symlink(Unit *u, const char* where) {
4128 int r;
4129
4130 assert(u);
4131 assert(where);
4132
4133 r = is_symlink(where);
4134 if (r < 0) {
4135 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
4136 return 0;
4137 }
4138 if (r == 0)
4139 return 0;
4140
4141 log_struct(LOG_ERR,
4142 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4143 LOG_UNIT_ID(u),
4144 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
4145 "WHERE=%s", where,
4146 NULL);
4147
4148 return -ELOOP;
4149 }
4150
4151 bool unit_is_pristine(Unit *u) {
4152 assert(u);
4153
4154 /* Check if the unit already exists or is already around,
4155 * in a number of different ways. Note that to cater for unit
4156 * types such as slice, we are generally fine with units that
4157 * are marked UNIT_LOADED even though nothing was
4158 * actually loaded, as those unit types don't require a file
4159 * on disk to validly load. */
4160
4161 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4162 u->fragment_path ||
4163 u->source_path ||
4164 !strv_isempty(u->dropin_paths) ||
4165 u->job ||
4166 u->merged_into);
4167 }
4168
4169 pid_t unit_control_pid(Unit *u) {
4170 assert(u);
4171
4172 if (UNIT_VTABLE(u)->control_pid)
4173 return UNIT_VTABLE(u)->control_pid(u);
4174
4175 return 0;
4176 }
4177
4178 pid_t unit_main_pid(Unit *u) {
4179 assert(u);
4180
4181 if (UNIT_VTABLE(u)->main_pid)
4182 return UNIT_VTABLE(u)->main_pid(u);
4183
4184 return 0;
4185 }
4186
4187 static void unit_unref_uid_internal(
4188 Unit *u,
4189 uid_t *ref_uid,
4190 bool destroy_now,
4191 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4192
4193 assert(u);
4194 assert(ref_uid);
4195 assert(_manager_unref_uid);
4196
4197 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4198 * gid_t are actually the same time, with the same validity rules.
4199 *
4200 * Drops a reference to UID/GID from a unit. */
4201
4202 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4203 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4204
4205 if (!uid_is_valid(*ref_uid))
4206 return;
4207
4208 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4209 *ref_uid = UID_INVALID;
4210 }
4211
4212 void unit_unref_uid(Unit *u, bool destroy_now) {
4213 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4214 }
4215
4216 void unit_unref_gid(Unit *u, bool destroy_now) {
4217 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4218 }
4219
4220 static int unit_ref_uid_internal(
4221 Unit *u,
4222 uid_t *ref_uid,
4223 uid_t uid,
4224 bool clean_ipc,
4225 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4226
4227 int r;
4228
4229 assert(u);
4230 assert(ref_uid);
4231 assert(uid_is_valid(uid));
4232 assert(_manager_ref_uid);
4233
4234 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4235 * are actually the same type, and have the same validity rules.
4236 *
4237 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4238 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4239 * drops to zero. */
4240
4241 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4242 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4243
4244 if (*ref_uid == uid)
4245 return 0;
4246
4247 if (uid_is_valid(*ref_uid)) /* Already set? */
4248 return -EBUSY;
4249
4250 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4251 if (r < 0)
4252 return r;
4253
4254 *ref_uid = uid;
4255 return 1;
4256 }
4257
4258 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4259 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4260 }
4261
4262 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4263 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4264 }
4265
4266 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4267 int r = 0, q = 0;
4268
4269 assert(u);
4270
4271 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4272
4273 if (uid_is_valid(uid)) {
4274 r = unit_ref_uid(u, uid, clean_ipc);
4275 if (r < 0)
4276 return r;
4277 }
4278
4279 if (gid_is_valid(gid)) {
4280 q = unit_ref_gid(u, gid, clean_ipc);
4281 if (q < 0) {
4282 if (r > 0)
4283 unit_unref_uid(u, false);
4284
4285 return q;
4286 }
4287 }
4288
4289 return r > 0 || q > 0;
4290 }
4291
4292 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4293 ExecContext *c;
4294 int r;
4295
4296 assert(u);
4297
4298 c = unit_get_exec_context(u);
4299
4300 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4301 if (r < 0)
4302 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4303
4304 return r;
4305 }
4306
4307 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4308 assert(u);
4309
4310 unit_unref_uid(u, destroy_now);
4311 unit_unref_gid(u, destroy_now);
4312 }
4313
4314 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4315 int r;
4316
4317 assert(u);
4318
4319 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4320 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4321 * objects when no service references the UID/GID anymore. */
4322
4323 r = unit_ref_uid_gid(u, uid, gid);
4324 if (r > 0)
4325 bus_unit_send_change_signal(u);
4326 }
4327
4328 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4329 int r;
4330
4331 assert(u);
4332
4333 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4334
4335 if (sd_id128_equal(u->invocation_id, id))
4336 return 0;
4337
4338 if (!sd_id128_is_null(u->invocation_id))
4339 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4340
4341 if (sd_id128_is_null(id)) {
4342 r = 0;
4343 goto reset;
4344 }
4345
4346 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4347 if (r < 0)
4348 goto reset;
4349
4350 u->invocation_id = id;
4351 sd_id128_to_string(id, u->invocation_id_string);
4352
4353 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4354 if (r < 0)
4355 goto reset;
4356
4357 return 0;
4358
4359 reset:
4360 u->invocation_id = SD_ID128_NULL;
4361 u->invocation_id_string[0] = 0;
4362 return r;
4363 }
4364
4365 int unit_acquire_invocation_id(Unit *u) {
4366 sd_id128_t id;
4367 int r;
4368
4369 assert(u);
4370
4371 r = sd_id128_randomize(&id);
4372 if (r < 0)
4373 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4374
4375 r = unit_set_invocation_id(u, id);
4376 if (r < 0)
4377 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4378
4379 return 0;
4380 }