src/core/unit.c
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <errno.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "bus-common-errors.h"
32 #include "bus-util.h"
33 #include "cgroup-util.h"
34 #include "dbus-unit.h"
35 #include "dbus.h"
36 #include "dropin.h"
37 #include "escape.h"
38 #include "execute.h"
39 #include "fileio-label.h"
40 #include "formats-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
43 #include "log.h"
44 #include "macro.h"
45 #include "missing.h"
46 #include "mkdir.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
50 #include "set.h"
51 #include "special.h"
52 #include "string-util.h"
53 #include "strv.h"
54 #include "unit-name.h"
55 #include "unit.h"
56 #include "user-util.h"
57 #include "virt.h"
58
59 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
60 [UNIT_SERVICE] = &service_vtable,
61 [UNIT_SOCKET] = &socket_vtable,
62 [UNIT_BUSNAME] = &busname_vtable,
63 [UNIT_TARGET] = &target_vtable,
64 [UNIT_SNAPSHOT] = &snapshot_vtable,
65 [UNIT_DEVICE] = &device_vtable,
66 [UNIT_MOUNT] = &mount_vtable,
67 [UNIT_AUTOMOUNT] = &automount_vtable,
68 [UNIT_SWAP] = &swap_vtable,
69 [UNIT_TIMER] = &timer_vtable,
70 [UNIT_PATH] = &path_vtable,
71 [UNIT_SLICE] = &slice_vtable,
72 [UNIT_SCOPE] = &scope_vtable
73 };
74
75 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
76
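/* Allocates a new, zero-initialized Unit object of at least sizeof(Unit)
 * bytes, creates its name set and fills in generic defaults; type-specific
 * setup happens later in unit_init(). */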
77 Unit *unit_new(Manager *m, size_t size) {
78 Unit *u;
79
80 assert(m);
81 assert(size >= sizeof(Unit));
82
83 u = malloc0(size);
84 if (!u)
85 return NULL;
86
87 u->names = set_new(&string_hash_ops);
88 if (!u->names) {
89 free(u);
90 return NULL;
91 }
92
93 u->manager = m;
94 u->type = _UNIT_TYPE_INVALID;
95 u->default_dependencies = true;
96 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
97 u->unit_file_preset = -1;
98 u->on_failure_job_mode = JOB_REPLACE;
99 u->cgroup_inotify_wd = -1;
100
101 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
102
103 return u;
104 }
105
106 bool unit_has_name(Unit *u, const char *name) {
107 assert(u);
108 assert(name);
109
110 return !!set_get(u->names, (char*) name);
111 }
112
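/* Called once the unit type is known: copies the manager defaults into the
 * cgroup context, initializes the exec and kill contexts, then runs the
 * type-specific init hook. */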
113 static void unit_init(Unit *u) {
114 CGroupContext *cc;
115 ExecContext *ec;
116 KillContext *kc;
117
118 assert(u);
119 assert(u->manager);
120 assert(u->type >= 0);
121
122 cc = unit_get_cgroup_context(u);
123 if (cc) {
124 cgroup_context_init(cc);
125
126                 /* Copy the manager defaults into the cgroup
127 * context, _before_ the rest of the settings have
128 * been initialized */
129
130 cc->cpu_accounting = u->manager->default_cpu_accounting;
131 cc->blockio_accounting = u->manager->default_blockio_accounting;
132 cc->memory_accounting = u->manager->default_memory_accounting;
133 cc->tasks_accounting = u->manager->default_tasks_accounting;
134 }
135
136 ec = unit_get_exec_context(u);
137 if (ec)
138 exec_context_init(ec);
139
140 kc = unit_get_kill_context(u);
141 if (kc)
142 kill_context_init(kc);
143
144 if (UNIT_VTABLE(u)->init)
145 UNIT_VTABLE(u)->init(u);
146 }
147
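/* Registers an additional name for the unit. Template names are first
 * instantiated with the unit's instance; the first name added also
 * determines the unit's type, id and instance. */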
148 int unit_add_name(Unit *u, const char *text) {
149 _cleanup_free_ char *s = NULL, *i = NULL;
150 UnitType t;
151 int r;
152
153 assert(u);
154 assert(text);
155
156 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
157
158 if (!u->instance)
159 return -EINVAL;
160
161 r = unit_name_replace_instance(text, u->instance, &s);
162 if (r < 0)
163 return r;
164 } else {
165 s = strdup(text);
166 if (!s)
167 return -ENOMEM;
168 }
169
170 if (set_contains(u->names, s))
171 return 0;
172 if (hashmap_contains(u->manager->units, s))
173 return -EEXIST;
174
175 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
176 return -EINVAL;
177
178 t = unit_name_to_type(s);
179 if (t < 0)
180 return -EINVAL;
181
182 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
183 return -EINVAL;
184
185 r = unit_name_to_instance(s, &i);
186 if (r < 0)
187 return r;
188
189 if (i && unit_vtable[t]->no_instances)
190 return -EINVAL;
191
192 /* Ensure that this unit is either instanced or not instanced,
193 * but not both. Note that we do allow names with different
194 * instance names however! */
195 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
196 return -EINVAL;
197
198 if (unit_vtable[t]->no_alias && !set_isempty(u->names))
199 return -EEXIST;
200
201 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
202 return -E2BIG;
203
204 r = set_put(u->names, s);
205 if (r < 0)
206 return r;
207 assert(r > 0);
208
209 r = hashmap_put(u->manager->units, s, u);
210 if (r < 0) {
211 (void) set_remove(u->names, s);
212 return r;
213 }
214
215 if (u->type == _UNIT_TYPE_INVALID) {
216 u->type = t;
217 u->id = s;
218 u->instance = i;
219
220 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
221
222 unit_init(u);
223
224 i = NULL;
225 }
226
227 s = NULL;
228
229 unit_add_to_dbus_queue(u);
230 return 0;
231 }
232
233 int unit_choose_id(Unit *u, const char *name) {
234 _cleanup_free_ char *t = NULL;
235 char *s, *i;
236 int r;
237
238 assert(u);
239 assert(name);
240
241 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
242
243 if (!u->instance)
244 return -EINVAL;
245
246 r = unit_name_replace_instance(name, u->instance, &t);
247 if (r < 0)
248 return r;
249
250 name = t;
251 }
252
253 /* Selects one of the names of this unit as the id */
254 s = set_get(u->names, (char*) name);
255 if (!s)
256 return -ENOENT;
257
258 /* Determine the new instance from the new id */
259 r = unit_name_to_instance(s, &i);
260 if (r < 0)
261 return r;
262
263 u->id = s;
264
265 free(u->instance);
266 u->instance = i;
267
268 unit_add_to_dbus_queue(u);
269
270 return 0;
271 }
272
273 int unit_set_description(Unit *u, const char *description) {
274 char *s;
275
276 assert(u);
277
278 if (isempty(description))
279 s = NULL;
280 else {
281 s = strdup(description);
282 if (!s)
283 return -ENOMEM;
284 }
285
286 free(u->description);
287 u->description = s;
288
289 unit_add_to_dbus_queue(u);
290 return 0;
291 }
292
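/* Returns true if the unit needs to stay around, false if it may be
 * garbage-collected. As a side effect, releases the runtime resources of
 * units that are inactive or failed and have no job queued. */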
293 bool unit_check_gc(Unit *u) {
294 UnitActiveState state;
295 assert(u);
296
297 if (u->job)
298 return true;
299
300 if (u->nop_job)
301 return true;
302
303 state = unit_active_state(u);
304
305         /* If the unit is inactive or failed and no job is queued for
306 * it, then release its runtime resources */
307 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
308 UNIT_VTABLE(u)->release_resources)
309 UNIT_VTABLE(u)->release_resources(u);
310
311 /* But we keep the unit object around for longer when it is
312 * referenced or configured to not be gc'ed */
313 if (state != UNIT_INACTIVE)
314 return true;
315
316 if (UNIT_VTABLE(u)->no_gc)
317 return true;
318
319 if (u->no_gc)
320 return true;
321
322 if (u->refs)
323 return true;
324
325 if (UNIT_VTABLE(u)->check_gc)
326 if (UNIT_VTABLE(u)->check_gc(u))
327 return true;
328
329 return false;
330 }
331
332 void unit_add_to_load_queue(Unit *u) {
333 assert(u);
334 assert(u->type != _UNIT_TYPE_INVALID);
335
336 if (u->load_state != UNIT_STUB || u->in_load_queue)
337 return;
338
339 LIST_PREPEND(load_queue, u->manager->load_queue, u);
340 u->in_load_queue = true;
341 }
342
343 void unit_add_to_cleanup_queue(Unit *u) {
344 assert(u);
345
346 if (u->in_cleanup_queue)
347 return;
348
349 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
350 u->in_cleanup_queue = true;
351 }
352
353 void unit_add_to_gc_queue(Unit *u) {
354 assert(u);
355
356 if (u->in_gc_queue || u->in_cleanup_queue)
357 return;
358
359 if (unit_check_gc(u))
360 return;
361
362 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
363 u->in_gc_queue = true;
364
365 u->manager->n_in_gc_queue ++;
366 }
367
368 void unit_add_to_dbus_queue(Unit *u) {
369 assert(u);
370 assert(u->type != _UNIT_TYPE_INVALID);
371
372 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
373 return;
374
375 /* Shortcut things if nobody cares */
376 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
377 set_isempty(u->manager->private_buses)) {
378 u->sent_dbus_new_signal = true;
379 return;
380 }
381
382 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
383 u->in_dbus_queue = true;
384 }
385
386 static void bidi_set_free(Unit *u, Set *s) {
387 Iterator i;
388 Unit *other;
389
390 assert(u);
391
392 /* Frees the set and makes sure we are dropped from the
393 * inverse pointers */
394
395 SET_FOREACH(other, s, i) {
396 UnitDependency d;
397
398 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
399 set_remove(other->dependencies[d], u);
400
401 unit_add_to_gc_queue(other);
402 }
403
404 set_free(s);
405 }
406
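/* If the unit is transient, removes its fragment and drop-in files from
 * disk (and the drop-in directories, if they are now empty). */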
407 static void unit_remove_transient(Unit *u) {
408 char **i;
409
410 assert(u);
411
412 if (!u->transient)
413 return;
414
415 if (u->fragment_path)
416 (void) unlink(u->fragment_path);
417
418 STRV_FOREACH(i, u->dropin_paths) {
419 _cleanup_free_ char *p = NULL;
420 int r;
421
422 (void) unlink(*i);
423
424 r = path_get_parent(*i, &p);
425 if (r >= 0)
426 (void) rmdir(p);
427 }
428 }
429
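/* Drops the unit from the manager's units_requiring_mounts_for map for
 * every prefix of every RequiresMountsFor= path, then frees the list. */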
430 static void unit_free_requires_mounts_for(Unit *u) {
431 char **j;
432
433 STRV_FOREACH(j, u->requires_mounts_for) {
434 char s[strlen(*j) + 1];
435
436 PATH_FOREACH_PREFIX_MORE(s, *j) {
437 char *y;
438 Set *x;
439
440 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
441 if (!x)
442 continue;
443
444 set_remove(x, u);
445
446 if (set_isempty(x)) {
447 hashmap_remove(u->manager->units_requiring_mounts_for, y);
448 free(y);
449 set_free(x);
450 }
451 }
452 }
453
454 u->requires_mounts_for = strv_free(u->requires_mounts_for);
455 }
456
457 static void unit_done(Unit *u) {
458 ExecContext *ec;
459 CGroupContext *cc;
460 int r;
461
462 assert(u);
463
464 if (u->type < 0)
465 return;
466
467 if (UNIT_VTABLE(u)->done)
468 UNIT_VTABLE(u)->done(u);
469
470 ec = unit_get_exec_context(u);
471 if (ec)
472 exec_context_done(ec);
473
474 cc = unit_get_cgroup_context(u);
475 if (cc)
476 cgroup_context_done(cc);
477
478 r = unit_remove_from_netclass_cgroup(u);
479 if (r < 0)
480 log_warning_errno(r, "Unable to remove unit from netclass group: %m");
481 }
482
483 void unit_free(Unit *u) {
484 UnitDependency d;
485 Iterator i;
486 char *t;
487
488 assert(u);
489
490 if (u->manager->n_reloading <= 0)
491 unit_remove_transient(u);
492
493 bus_unit_send_removed_signal(u);
494
495 unit_done(u);
496
497 sd_bus_slot_unref(u->match_bus_slot);
498
499 unit_free_requires_mounts_for(u);
500
501 SET_FOREACH(t, u->names, i)
502 hashmap_remove_value(u->manager->units, t, u);
503
504 if (u->job) {
505 Job *j = u->job;
506 job_uninstall(j);
507 job_free(j);
508 }
509
510 if (u->nop_job) {
511 Job *j = u->nop_job;
512 job_uninstall(j);
513 job_free(j);
514 }
515
516 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
517 bidi_set_free(u, u->dependencies[d]);
518
519 if (u->type != _UNIT_TYPE_INVALID)
520 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
521
522 if (u->in_load_queue)
523 LIST_REMOVE(load_queue, u->manager->load_queue, u);
524
525 if (u->in_dbus_queue)
526 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
527
528 if (u->in_cleanup_queue)
529 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
530
531 if (u->in_gc_queue) {
532 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
533 u->manager->n_in_gc_queue--;
534 }
535
536 if (u->in_cgroup_queue)
537 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
538
539 unit_release_cgroup(u);
540
541 (void) manager_update_failed_units(u->manager, u, false);
542 set_remove(u->manager->startup_units, u);
543
544 free(u->description);
545 strv_free(u->documentation);
546 free(u->fragment_path);
547 free(u->source_path);
548 strv_free(u->dropin_paths);
549 free(u->instance);
550
551 free(u->job_timeout_reboot_arg);
552
553 set_free_free(u->names);
554
555 unit_unwatch_all_pids(u);
556
557 condition_free_list(u->conditions);
558 condition_free_list(u->asserts);
559
560 unit_ref_unset(&u->slice);
561
562 while (u->refs)
563 unit_ref_unset(u->refs);
564
565 free(u);
566 }
567
568 UnitActiveState unit_active_state(Unit *u) {
569 assert(u);
570
571 if (u->load_state == UNIT_MERGED)
572 return unit_active_state(unit_follow_merge(u));
573
574 /* After a reload it might happen that a unit is not correctly
575 * loaded but still has a process around. That's why we won't
576 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
577
578 return UNIT_VTABLE(u)->active_state(u);
579 }
580
581 const char* unit_sub_state_to_string(Unit *u) {
582 assert(u);
583
584 return UNIT_VTABLE(u)->sub_state_to_string(u);
585 }
586
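/* Moves all entries of *other into *s; if *s is not allocated yet, simply
 * takes over *other's set wholesale. */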
587 static int complete_move(Set **s, Set **other) {
588 int r;
589
590 assert(s);
591 assert(other);
592
593 if (!*other)
594 return 0;
595
596 if (*s) {
597 r = set_move(*s, *other);
598 if (r < 0)
599 return r;
600 } else {
601 *s = *other;
602 *other = NULL;
603 }
604
605 return 0;
606 }
607
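/* Transfers all names from 'other' to 'u' and repoints the manager's unit
 * hashmap entries at 'u'. */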
608 static int merge_names(Unit *u, Unit *other) {
609 char *t;
610 Iterator i;
611 int r;
612
613 assert(u);
614 assert(other);
615
616 r = complete_move(&u->names, &other->names);
617 if (r < 0)
618 return r;
619
620 set_free_free(other->names);
621 other->names = NULL;
622 other->id = NULL;
623
624 SET_FOREACH(t, u->names, i)
625 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
626
627 return 0;
628 }
629
630 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
631 unsigned n_reserve;
632
633 assert(u);
634 assert(other);
635 assert(d < _UNIT_DEPENDENCY_MAX);
636
637 /*
638 * If u does not have this dependency set allocated, there is no need
639 * to reserve anything. In that case other's set will be transferred
640 * as a whole to u by complete_move().
641 */
642 if (!u->dependencies[d])
643 return 0;
644
645 /* merge_dependencies() will skip a u-on-u dependency */
646 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
647
648 return set_reserve(u->dependencies[d], n_reserve);
649 }
650
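/* Moves the dependency set of type 'd' from 'other' into 'u', fixing up the
 * reverse pointers in the affected units and dropping any dependencies a
 * unit would then have on itself. */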
651 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
652 Iterator i;
653 Unit *back;
654 int r;
655
656 assert(u);
657 assert(other);
658 assert(d < _UNIT_DEPENDENCY_MAX);
659
660 /* Fix backwards pointers */
661 SET_FOREACH(back, other->dependencies[d], i) {
662 UnitDependency k;
663
664 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
665 /* Do not add dependencies between u and itself */
666 if (back == u) {
667 if (set_remove(back->dependencies[k], other))
668 maybe_warn_about_dependency(u, other_id, k);
669 } else {
670 r = set_remove_and_put(back->dependencies[k], other, u);
671 if (r == -EEXIST)
672 set_remove(back->dependencies[k], other);
673 else
674 assert(r >= 0 || r == -ENOENT);
675 }
676 }
677 }
678
679 /* Also do not move dependencies on u to itself */
680 back = set_remove(other->dependencies[d], u);
681 if (back)
682 maybe_warn_about_dependency(u, other_id, d);
683
684 /* The move cannot fail. The caller must have performed a reservation. */
685 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
686
687 other->dependencies[d] = set_free(other->dependencies[d]);
688 }
689
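/* Merges unit 'other' (which must still be a stub or not found, with no
 * jobs pending) into 'u': names, references and dependencies are
 * transferred, and 'other' is marked UNIT_MERGED and queued for cleanup. */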
690 int unit_merge(Unit *u, Unit *other) {
691 UnitDependency d;
692 const char *other_id = NULL;
693 int r;
694
695 assert(u);
696 assert(other);
697 assert(u->manager == other->manager);
698 assert(u->type != _UNIT_TYPE_INVALID);
699
700 other = unit_follow_merge(other);
701
702 if (other == u)
703 return 0;
704
705 if (u->type != other->type)
706 return -EINVAL;
707
708 if (!u->instance != !other->instance)
709 return -EINVAL;
710
711 if (other->load_state != UNIT_STUB &&
712 other->load_state != UNIT_NOT_FOUND)
713 return -EEXIST;
714
715 if (other->job)
716 return -EEXIST;
717
718 if (other->nop_job)
719 return -EEXIST;
720
721 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
722 return -EEXIST;
723
724 if (other->id)
725 other_id = strdupa(other->id);
726
727 /* Make reservations to ensure merge_dependencies() won't fail */
728 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
729 r = reserve_dependencies(u, other, d);
730 /*
731                  * We don't roll back reservations if we fail. We don't have
732 * a way to undo reservations. A reservation is not a leak.
733 */
734 if (r < 0)
735 return r;
736 }
737
738 /* Merge names */
739 r = merge_names(u, other);
740 if (r < 0)
741 return r;
742
743 /* Redirect all references */
744 while (other->refs)
745 unit_ref_set(other->refs, u);
746
747 /* Merge dependencies */
748 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
749 merge_dependencies(u, other, other_id, d);
750
751 other->load_state = UNIT_MERGED;
752 other->merged_into = u;
753
754 /* If there is still some data attached to the other node, we
755 * don't need it anymore, and can free it. */
756 if (other->load_state != UNIT_STUB)
757 if (UNIT_VTABLE(other)->done)
758 UNIT_VTABLE(other)->done(other);
759
760 unit_add_to_dbus_queue(u);
761 unit_add_to_cleanup_queue(other);
762
763 return 0;
764 }
765
766 int unit_merge_by_name(Unit *u, const char *name) {
767 Unit *other;
768 int r;
769 _cleanup_free_ char *s = NULL;
770
771 assert(u);
772 assert(name);
773
774 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
775 if (!u->instance)
776 return -EINVAL;
777
778 r = unit_name_replace_instance(name, u->instance, &s);
779 if (r < 0)
780 return r;
781
782 name = s;
783 }
784
785 other = manager_get_unit(u->manager, name);
786 if (other)
787 return unit_merge(u, other);
788
789 return unit_add_name(u, name);
790 }
791
792 Unit* unit_follow_merge(Unit *u) {
793 assert(u);
794
795 while (u->load_state == UNIT_MERGED)
796 assert_se(u = u->merged_into);
797
798 return u;
799 }
800
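/* Adds implicit dependencies derived from an ExecContext: mount
 * dependencies for the working and root directories, and (when running as
 * the system manager) for /tmp and /var/tmp with PrivateTmp=, plus an
 * ordering dependency on journald's socket if output goes to syslog, kmsg
 * or the journal. */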
801 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
802 int r;
803
804 assert(u);
805 assert(c);
806
807 if (c->working_directory) {
808 r = unit_require_mounts_for(u, c->working_directory);
809 if (r < 0)
810 return r;
811 }
812
813 if (c->root_directory) {
814 r = unit_require_mounts_for(u, c->root_directory);
815 if (r < 0)
816 return r;
817 }
818
819 if (u->manager->running_as != MANAGER_SYSTEM)
820 return 0;
821
822 if (c->private_tmp) {
823 r = unit_require_mounts_for(u, "/tmp");
824 if (r < 0)
825 return r;
826
827 r = unit_require_mounts_for(u, "/var/tmp");
828 if (r < 0)
829 return r;
830 }
831
832 if (c->std_output != EXEC_OUTPUT_KMSG &&
833 c->std_output != EXEC_OUTPUT_SYSLOG &&
834 c->std_output != EXEC_OUTPUT_JOURNAL &&
835 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
836 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
837 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
838 c->std_error != EXEC_OUTPUT_KMSG &&
839 c->std_error != EXEC_OUTPUT_SYSLOG &&
840 c->std_error != EXEC_OUTPUT_JOURNAL &&
841 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
842 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
843 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
844 return 0;
845
846 /* If syslog or kernel logging is requested, make sure our own
847 * logging daemon is run first. */
848
849 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
850 if (r < 0)
851 return r;
852
853 return 0;
854 }
855
856 const char *unit_description(Unit *u) {
857 assert(u);
858
859 if (u->description)
860 return u->description;
861
862 return strna(u->id);
863 }
864
865 void unit_dump(Unit *u, FILE *f, const char *prefix) {
866 char *t, **j;
867 UnitDependency d;
868 Iterator i;
869 const char *prefix2;
870 char
871 timestamp1[FORMAT_TIMESTAMP_MAX],
872 timestamp2[FORMAT_TIMESTAMP_MAX],
873 timestamp3[FORMAT_TIMESTAMP_MAX],
874 timestamp4[FORMAT_TIMESTAMP_MAX],
875 timespan[FORMAT_TIMESPAN_MAX];
876 Unit *following;
877 _cleanup_set_free_ Set *following_set = NULL;
878 int r;
879
880 assert(u);
881 assert(u->type >= 0);
882
883 prefix = strempty(prefix);
884 prefix2 = strjoina(prefix, "\t");
885
886 fprintf(f,
887 "%s-> Unit %s:\n"
888 "%s\tDescription: %s\n"
889 "%s\tInstance: %s\n"
890 "%s\tUnit Load State: %s\n"
891 "%s\tUnit Active State: %s\n"
892 "%s\tInactive Exit Timestamp: %s\n"
893 "%s\tActive Enter Timestamp: %s\n"
894 "%s\tActive Exit Timestamp: %s\n"
895 "%s\tInactive Enter Timestamp: %s\n"
896 "%s\tGC Check Good: %s\n"
897 "%s\tNeed Daemon Reload: %s\n"
898 "%s\tTransient: %s\n"
899 "%s\tSlice: %s\n"
900 "%s\tCGroup: %s\n"
901 "%s\tCGroup realized: %s\n"
902 "%s\tCGroup mask: 0x%x\n"
903 "%s\tCGroup members mask: 0x%x\n",
904 prefix, u->id,
905 prefix, unit_description(u),
906 prefix, strna(u->instance),
907 prefix, unit_load_state_to_string(u->load_state),
908 prefix, unit_active_state_to_string(unit_active_state(u)),
909 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
910 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
911 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
912 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
913 prefix, yes_no(unit_check_gc(u)),
914 prefix, yes_no(unit_need_daemon_reload(u)),
915 prefix, yes_no(u->transient),
916 prefix, strna(unit_slice_name(u)),
917 prefix, strna(u->cgroup_path),
918 prefix, yes_no(u->cgroup_realized),
919 prefix, u->cgroup_realized_mask,
920 prefix, u->cgroup_members_mask);
921
922 SET_FOREACH(t, u->names, i)
923 fprintf(f, "%s\tName: %s\n", prefix, t);
924
925 STRV_FOREACH(j, u->documentation)
926 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
927
928 following = unit_following(u);
929 if (following)
930 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
931
932 r = unit_following_set(u, &following_set);
933 if (r >= 0) {
934 Unit *other;
935
936 SET_FOREACH(other, following_set, i)
937 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
938 }
939
940 if (u->fragment_path)
941 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
942
943 if (u->source_path)
944 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
945
946 STRV_FOREACH(j, u->dropin_paths)
947 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
948
949 if (u->job_timeout > 0)
950 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
951
952 if (u->job_timeout_action != FAILURE_ACTION_NONE)
953 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
954
955 if (u->job_timeout_reboot_arg)
956 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
957
958 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
959 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
960
961 if (dual_timestamp_is_set(&u->condition_timestamp))
962 fprintf(f,
963 "%s\tCondition Timestamp: %s\n"
964 "%s\tCondition Result: %s\n",
965 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
966 prefix, yes_no(u->condition_result));
967
968 if (dual_timestamp_is_set(&u->assert_timestamp))
969 fprintf(f,
970 "%s\tAssert Timestamp: %s\n"
971 "%s\tAssert Result: %s\n",
972 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
973 prefix, yes_no(u->assert_result));
974
975 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
976 Unit *other;
977
978 SET_FOREACH(other, u->dependencies[d], i)
979 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
980 }
981
982 if (!strv_isempty(u->requires_mounts_for)) {
983 fprintf(f,
984 "%s\tRequiresMountsFor:", prefix);
985
986 STRV_FOREACH(j, u->requires_mounts_for)
987 fprintf(f, " %s", *j);
988
989 fputs("\n", f);
990 }
991
992 if (u->load_state == UNIT_LOADED) {
993
994 fprintf(f,
995 "%s\tStopWhenUnneeded: %s\n"
996 "%s\tRefuseManualStart: %s\n"
997 "%s\tRefuseManualStop: %s\n"
998 "%s\tDefaultDependencies: %s\n"
999 "%s\tOnFailureJobMode: %s\n"
1000 "%s\tIgnoreOnIsolate: %s\n"
1001 "%s\tIgnoreOnSnapshot: %s\n",
1002 prefix, yes_no(u->stop_when_unneeded),
1003 prefix, yes_no(u->refuse_manual_start),
1004 prefix, yes_no(u->refuse_manual_stop),
1005 prefix, yes_no(u->default_dependencies),
1006 prefix, job_mode_to_string(u->on_failure_job_mode),
1007 prefix, yes_no(u->ignore_on_isolate),
1008 prefix, yes_no(u->ignore_on_snapshot));
1009
1010 if (UNIT_VTABLE(u)->dump)
1011 UNIT_VTABLE(u)->dump(u, f, prefix2);
1012
1013 } else if (u->load_state == UNIT_MERGED)
1014 fprintf(f,
1015 "%s\tMerged into: %s\n",
1016 prefix, u->merged_into->id);
1017 else if (u->load_state == UNIT_ERROR)
1018 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1019
1020
1021 if (u->job)
1022 job_dump(u->job, f, prefix2);
1023
1024 if (u->nop_job)
1025 job_dump(u->nop_job, f, prefix2);
1026
1027 }
1028
1029 /* Common implementation for multiple backends */
1030 int unit_load_fragment_and_dropin(Unit *u) {
1031 int r;
1032
1033 assert(u);
1034
1035 /* Load a .{service,socket,...} file */
1036 r = unit_load_fragment(u);
1037 if (r < 0)
1038 return r;
1039
1040 if (u->load_state == UNIT_STUB)
1041 return -ENOENT;
1042
1043 /* Load drop-in directory data */
1044 r = unit_load_dropin(unit_follow_merge(u));
1045 if (r < 0)
1046 return r;
1047
1048 return 0;
1049 }
1050
1051 /* Common implementation for multiple backends */
1052 int unit_load_fragment_and_dropin_optional(Unit *u) {
1053 int r;
1054
1055 assert(u);
1056
1057 /* Same as unit_load_fragment_and_dropin(), but whether
1058 * something can be loaded or not doesn't matter. */
1059
1060 /* Load a .service file */
1061 r = unit_load_fragment(u);
1062 if (r < 0)
1063 return r;
1064
1065 if (u->load_state == UNIT_STUB)
1066 u->load_state = UNIT_LOADED;
1067
1068 /* Load drop-in directory data */
1069 r = unit_load_dropin(unit_follow_merge(u));
1070 if (r < 0)
1071 return r;
1072
1073 return 0;
1074 }
1075
1076 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1077 assert(u);
1078 assert(target);
1079
1080 if (target->type != UNIT_TARGET)
1081 return 0;
1082
1083 /* Only add the dependency if both units are loaded, so that
1084          * the loop check below is reliable */
1085 if (u->load_state != UNIT_LOADED ||
1086 target->load_state != UNIT_LOADED)
1087 return 0;
1088
1089 /* If either side wants no automatic dependencies, then let's
1090 * skip this */
1091 if (!u->default_dependencies ||
1092 !target->default_dependencies)
1093 return 0;
1094
1095 /* Don't create loops */
1096 if (set_get(target->dependencies[UNIT_BEFORE], u))
1097 return 0;
1098
1099 return unit_add_dependency(target, UNIT_AFTER, u, true);
1100 }
1101
1102 static int unit_add_target_dependencies(Unit *u) {
1103
1104 static const UnitDependency deps[] = {
1105 UNIT_REQUIRED_BY,
1106 UNIT_REQUIRED_BY_OVERRIDABLE,
1107 UNIT_REQUISITE_OF,
1108 UNIT_REQUISITE_OF_OVERRIDABLE,
1109 UNIT_WANTED_BY,
1110 UNIT_BOUND_BY
1111 };
1112
1113 Unit *target;
1114 Iterator i;
1115 unsigned k;
1116 int r = 0;
1117
1118 assert(u);
1119
1120 for (k = 0; k < ELEMENTSOF(deps); k++)
1121 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1122 r = unit_add_default_target_dependency(u, target);
1123 if (r < 0)
1124 return r;
1125 }
1126
1127 return r;
1128 }
1129
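/* Units with a cgroup context get After= and Requires= dependencies on
 * their configured slice, falling back to -.slice if none is set (unless
 * the unit is -.slice itself). */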
1130 static int unit_add_slice_dependencies(Unit *u) {
1131 assert(u);
1132
1133 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1134 return 0;
1135
1136 if (UNIT_ISSET(u->slice))
1137 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1138
1139 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1140 return 0;
1141
1142 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1143 }
1144
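/* For each path in RequiresMountsFor=, adds After= (and, if the mount unit
 * has a fragment, Requires=) dependencies on the mount units covering the
 * path and each of its prefixes. */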
1145 static int unit_add_mount_dependencies(Unit *u) {
1146 char **i;
1147 int r;
1148
1149 assert(u);
1150
1151 STRV_FOREACH(i, u->requires_mounts_for) {
1152 char prefix[strlen(*i) + 1];
1153
1154 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1155 _cleanup_free_ char *p = NULL;
1156 Unit *m;
1157
1158 r = unit_name_from_path(prefix, ".mount", &p);
1159 if (r < 0)
1160 return r;
1161
1162 m = manager_get_unit(u->manager, p);
1163 if (!m) {
1164 /* Make sure to load the mount unit if
1165 * it exists. If so the dependencies
1166 * on this unit will be added later
1167 * during the loading of the mount
1168 * unit. */
1169 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1170 continue;
1171 }
1172 if (m == u)
1173 continue;
1174
1175 if (m->load_state != UNIT_LOADED)
1176 continue;
1177
1178 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1179 if (r < 0)
1180 return r;
1181
1182 if (m->fragment_path) {
1183 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1184 if (r < 0)
1185 return r;
1186 }
1187 }
1188 }
1189
1190 return 0;
1191 }
1192
1193 static int unit_add_startup_units(Unit *u) {
1194 CGroupContext *c;
1195 int r;
1196
1197 c = unit_get_cgroup_context(u);
1198 if (!c)
1199 return 0;
1200
1201 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1202 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1203 return 0;
1204
1205 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1206 if (r < 0)
1207 return r;
1208
1209 return set_put(u->manager->startup_units, u);
1210 }
1211
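/* Loads the unit's configuration via the type-specific load hook, then sets
 * up the implicit target, slice and mount dependencies and registers the
 * unit in the manager's startup set if it carries startup resource
 * settings. On failure the load state becomes UNIT_ERROR or UNIT_NOT_FOUND. */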
1212 int unit_load(Unit *u) {
1213 int r;
1214
1215 assert(u);
1216
1217 if (u->in_load_queue) {
1218 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1219 u->in_load_queue = false;
1220 }
1221
1222 if (u->type == _UNIT_TYPE_INVALID)
1223 return -EINVAL;
1224
1225 if (u->load_state != UNIT_STUB)
1226 return 0;
1227
1228 if (UNIT_VTABLE(u)->load) {
1229 r = UNIT_VTABLE(u)->load(u);
1230 if (r < 0)
1231 goto fail;
1232 }
1233
1234 if (u->load_state == UNIT_STUB) {
1235 r = -ENOENT;
1236 goto fail;
1237 }
1238
1239 if (u->load_state == UNIT_LOADED) {
1240
1241 r = unit_add_target_dependencies(u);
1242 if (r < 0)
1243 goto fail;
1244
1245 r = unit_add_slice_dependencies(u);
1246 if (r < 0)
1247 goto fail;
1248
1249 r = unit_add_mount_dependencies(u);
1250 if (r < 0)
1251 goto fail;
1252
1253 r = unit_add_startup_units(u);
1254 if (r < 0)
1255 goto fail;
1256
1257 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1258                         log_unit_error(u, "More than one OnFailure= dependency specified but OnFailureJobMode=isolate set. Refusing.");
1259 r = -EINVAL;
1260 goto fail;
1261 }
1262
1263 unit_update_cgroup_members_masks(u);
1264
1265 /* If we are reloading, we need to wait for the deserializer
1266 * to restore the net_cls ids that have been set previously */
1267 if (u->manager->n_reloading <= 0) {
1268 r = unit_add_to_netclass_cgroup(u);
1269 if (r < 0)
1270 return r;
1271 }
1272 }
1273
1274 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1275
1276 unit_add_to_dbus_queue(unit_follow_merge(u));
1277 unit_add_to_gc_queue(u);
1278
1279 return 0;
1280
1281 fail:
1282 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1283 u->load_error = r;
1284 unit_add_to_dbus_queue(u);
1285 unit_add_to_gc_queue(u);
1286
1287 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1288
1289 return r;
1290 }
1291
1292 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1293 Condition *c;
1294 int triggered = -1;
1295
1296 assert(u);
1297 assert(to_string);
1298
1299 /* If the condition list is empty, then it is true */
1300 if (!first)
1301 return true;
1302
1303 /* Otherwise, if all of the non-trigger conditions apply and
1304 * if any of the trigger conditions apply (unless there are
1305 * none) we return true */
1306 LIST_FOREACH(conditions, c, first) {
1307 int r;
1308
1309 r = condition_test(c);
1310 if (r < 0)
1311 log_unit_warning(u,
1312 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1313 to_string(c->type),
1314 c->trigger ? "|" : "",
1315 c->negate ? "!" : "",
1316 c->parameter);
1317 else
1318 log_unit_debug(u,
1319 "%s=%s%s%s %s.",
1320 to_string(c->type),
1321 c->trigger ? "|" : "",
1322 c->negate ? "!" : "",
1323 c->parameter,
1324 condition_result_to_string(c->result));
1325
1326 if (!c->trigger && r <= 0)
1327 return false;
1328
1329 if (c->trigger && triggered <= 0)
1330 triggered = r > 0;
1331 }
1332
1333 return triggered != 0;
1334 }
1335
1336 static bool unit_condition_test(Unit *u) {
1337 assert(u);
1338
1339 dual_timestamp_get(&u->condition_timestamp);
1340 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1341
1342 return u->condition_result;
1343 }
1344
1345 static bool unit_assert_test(Unit *u) {
1346 assert(u);
1347
1348 dual_timestamp_get(&u->assert_timestamp);
1349 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1350
1351 return u->assert_result;
1352 }
1353
1354 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1355 const char *format;
1356 const UnitStatusMessageFormats *format_table;
1357
1358 assert(u);
1359 assert(t == JOB_START || t == JOB_STOP || t == JOB_RELOAD);
1360
1361 if (t != JOB_RELOAD) {
1362 format_table = &UNIT_VTABLE(u)->status_message_formats;
1363 if (format_table) {
1364 format = format_table->starting_stopping[t == JOB_STOP];
1365 if (format)
1366 return format;
1367 }
1368 }
1369
1370 /* Return generic strings */
1371 if (t == JOB_START)
1372 return "Starting %s.";
1373 else if (t == JOB_STOP)
1374 return "Stopping %s.";
1375 else
1376 return "Reloading %s.";
1377 }
1378
1379 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1380 const char *format;
1381
1382 assert(u);
1383
1384 format = unit_get_status_message_format(u, t);
1385
1386 DISABLE_WARNING_FORMAT_NONLITERAL;
1387 unit_status_printf(u, "", format);
1388 REENABLE_WARNING;
1389 }
1390
1391 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1392 const char *format;
1393 char buf[LINE_MAX];
1394 sd_id128_t mid;
1395
1396 assert(u);
1397
1398 if (t != JOB_START && t != JOB_STOP && t != JOB_RELOAD)
1399 return;
1400
1401 if (log_on_console())
1402 return;
1403
1404 /* We log status messages for all units and all operations. */
1405
1406 format = unit_get_status_message_format(u, t);
1407
1408 DISABLE_WARNING_FORMAT_NONLITERAL;
1409 snprintf(buf, sizeof(buf), format, unit_description(u));
1410 REENABLE_WARNING;
1411
1412 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1413 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1414 SD_MESSAGE_UNIT_RELOADING;
1415
1416 /* Note that we deliberately use LOG_MESSAGE() instead of
1417 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1418 * closely what is written to screen using the status output,
1419          * which is supposed to be the highest level, friendliest output
1420 * possible, which means we should avoid the low-level unit
1421 * name. */
1422 log_struct(LOG_INFO,
1423 LOG_MESSAGE_ID(mid),
1424 LOG_UNIT_ID(u),
1425 LOG_MESSAGE("%s", buf),
1426 NULL);
1427 }
1428
1429 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1430
1431 unit_status_log_starting_stopping_reloading(u, t);
1432
1433 /* Reload status messages have traditionally not been printed to console. */
1434 if (t != JOB_RELOAD)
1435 unit_status_print_starting_stopping(u, t);
1436 }
1437
1438 /* Errors:
1439 * -EBADR: This unit type does not support starting.
1440 * -EALREADY: Unit is already started.
1441 * -EAGAIN: An operation is already in progress. Retry later.
1442 * -ECANCELED: Too many requests for now.
1443 * -EPROTO: Assert failed
1444 */
1445 int unit_start(Unit *u) {
1446 UnitActiveState state;
1447 Unit *following;
1448
1449 assert(u);
1450
1451 /* Units that aren't loaded cannot be started */
1452 if (u->load_state != UNIT_LOADED)
1453 return -EINVAL;
1454
1455 /* If this is already started, then this will succeed. Note
1456 * that this will even succeed if this unit is not startable
1457 * by the user. This is relied on to detect when we need to
1458 * wait for units and when waiting is finished. */
1459 state = unit_active_state(u);
1460 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1461 return -EALREADY;
1462
1463 /* If the conditions failed, don't do anything at all. If we
1464 * already are activating this call might still be useful to
1465 * speed up activation in case there is some hold-off time,
1466 * but we don't want to recheck the condition in that case. */
1467 if (state != UNIT_ACTIVATING &&
1468 !unit_condition_test(u)) {
1469 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1470 return -EALREADY;
1471 }
1472
1473 /* If the asserts failed, fail the entire job */
1474 if (state != UNIT_ACTIVATING &&
1475 !unit_assert_test(u)) {
1476 log_unit_notice(u, "Starting requested but asserts failed.");
1477 return -EPROTO;
1478 }
1479
1480 /* Units of types that aren't supported cannot be
1481 * started. Note that we do this test only after the condition
1482          * checks, so that we return condition check errors
1483          * (which are usually not considered a true failure) rather
1484          * than "not supported" errors (which are considered a failure).
1485 */
1486 if (!unit_supported(u))
1487 return -EOPNOTSUPP;
1488
1489 /* Forward to the main object, if we aren't it. */
1490 following = unit_following(u);
1491 if (following) {
1492 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1493 return unit_start(following);
1494 }
1495
1496 /* If it is stopped, but we cannot start it, then fail */
1497 if (!UNIT_VTABLE(u)->start)
1498 return -EBADR;
1499
1500 /* We don't suppress calls to ->start() here when we are
1501 * already starting, to allow this request to be used as a
1502 * "hurry up" call, for example when the unit is in some "auto
1503 * restart" state where it waits for a holdoff timer to elapse
1504 * before it will start again. */
1505
1506 unit_add_to_dbus_queue(u);
1507
1508 return UNIT_VTABLE(u)->start(u);
1509 }
1510
1511 bool unit_can_start(Unit *u) {
1512 assert(u);
1513
1514 if (u->load_state != UNIT_LOADED)
1515 return false;
1516
1517 if (!unit_supported(u))
1518 return false;
1519
1520 return !!UNIT_VTABLE(u)->start;
1521 }
1522
1523 bool unit_can_isolate(Unit *u) {
1524 assert(u);
1525
1526 return unit_can_start(u) &&
1527 u->allow_isolate;
1528 }
1529
1530 /* Errors:
1531 * -EBADR: This unit type does not support stopping.
1532 * -EALREADY: Unit is already stopped.
1533 * -EAGAIN: An operation is already in progress. Retry later.
1534 */
1535 int unit_stop(Unit *u) {
1536 UnitActiveState state;
1537 Unit *following;
1538
1539 assert(u);
1540
1541 state = unit_active_state(u);
1542 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1543 return -EALREADY;
1544
1545 following = unit_following(u);
1546 if (following) {
1547 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1548 return unit_stop(following);
1549 }
1550
1551 if (!UNIT_VTABLE(u)->stop)
1552 return -EBADR;
1553
1554 unit_add_to_dbus_queue(u);
1555
1556 return UNIT_VTABLE(u)->stop(u);
1557 }
1558
1559 /* Errors:
1560 * -EBADR: This unit type does not support reloading.
1561 * -ENOEXEC: Unit is not started.
1562 * -EAGAIN: An operation is already in progress. Retry later.
1563 */
1564 int unit_reload(Unit *u) {
1565 UnitActiveState state;
1566 Unit *following;
1567
1568 assert(u);
1569
1570 if (u->load_state != UNIT_LOADED)
1571 return -EINVAL;
1572
1573 if (!unit_can_reload(u))
1574 return -EBADR;
1575
1576 state = unit_active_state(u);
1577 if (state == UNIT_RELOADING)
1578 return -EALREADY;
1579
1580 if (state != UNIT_ACTIVE) {
1581 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1582 return -ENOEXEC;
1583 }
1584
1585 following = unit_following(u);
1586 if (following) {
1587 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1588 return unit_reload(following);
1589 }
1590
1591 unit_add_to_dbus_queue(u);
1592
1593 return UNIT_VTABLE(u)->reload(u);
1594 }
1595
1596 bool unit_can_reload(Unit *u) {
1597 assert(u);
1598
1599 if (!UNIT_VTABLE(u)->reload)
1600 return false;
1601
1602 if (!UNIT_VTABLE(u)->can_reload)
1603 return true;
1604
1605 return UNIT_VTABLE(u)->can_reload(u);
1606 }
1607
1608 static void unit_check_unneeded(Unit *u) {
1609
1610 static const UnitDependency needed_dependencies[] = {
1611 UNIT_REQUIRED_BY,
1612 UNIT_REQUIRED_BY_OVERRIDABLE,
1613 UNIT_REQUISITE_OF,
1614 UNIT_REQUISITE_OF_OVERRIDABLE,
1615 UNIT_WANTED_BY,
1616 UNIT_BOUND_BY,
1617 };
1618
1619 Unit *other;
1620 Iterator i;
1621 unsigned j;
1622 int r;
1623
1624 assert(u);
1625
1626         /* If this unit shall be shut down when unneeded, then do
1627          * so. */
1628
1629 if (!u->stop_when_unneeded)
1630 return;
1631
1632 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1633 return;
1634
1635 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1636 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1637 if (unit_active_or_pending(other))
1638 return;
1639
1640         /* If stopping a unit fails continuously we might enter a stop
1641          * loop here, hence stop acting on the unit being unneeded
1642          * after a while. */
1643 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1644 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1645 return;
1646 }
1647
1648 log_unit_info(u, "Unit not needed anymore. Stopping.");
1649
1650 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1651 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
1652 if (r < 0)
1653 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %m");
1654 }
1655
1656 static void unit_check_binds_to(Unit *u) {
1657 bool stop = false;
1658 Unit *other;
1659 Iterator i;
1660 int r;
1661
1662 assert(u);
1663
1664 if (u->job)
1665 return;
1666
1667 if (unit_active_state(u) != UNIT_ACTIVE)
1668 return;
1669
1670 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1671 if (other->job)
1672 continue;
1673
1674 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1675 continue;
1676
1677 stop = true;
1678 break;
1679 }
1680
1681 if (!stop)
1682 return;
1683
1684         /* If stopping a unit fails continuously we might enter a stop
1685          * loop here, hence rate-limit these stop attempts after a
1686          * while. */
1687 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1688 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1689 return;
1690 }
1691
1692 assert(other);
1693 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1694
1695 /* A unit we need to run is gone. Sniff. Let's stop this. */
1696 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, true, NULL, NULL);
1697 if (r < 0)
1698 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %m");
1699 }
1700
1701 static void retroactively_start_dependencies(Unit *u) {
1702 Iterator i;
1703 Unit *other;
1704
1705 assert(u);
1706 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1707
1708 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1709 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1710 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1711 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1712
1713 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1714 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1715 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1716 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, true, NULL, NULL);
1717
1718 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1719 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1720 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1721 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1722
1723 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1724 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1725 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1726 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, false, NULL, NULL);
1727
1728 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1729 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1730 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1731
1732 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1733 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1734 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1735 }
1736
1737 static void retroactively_stop_dependencies(Unit *u) {
1738 Iterator i;
1739 Unit *other;
1740
1741 assert(u);
1742 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1743
1744 /* Pull down units which are bound to us recursively if enabled */
1745 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1746 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1747 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, true, NULL, NULL);
1748 }
1749
1750 static void check_unneeded_dependencies(Unit *u) {
1751 Iterator i;
1752 Unit *other;
1753
1754 assert(u);
1755 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1756
1757 /* Garbage collect services that might not be needed anymore, if enabled */
1758 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1759 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1760 unit_check_unneeded(other);
1761 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
1762 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1763 unit_check_unneeded(other);
1764 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1765 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1766 unit_check_unneeded(other);
1767 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1768 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1769 unit_check_unneeded(other);
1770 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
1771 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1772 unit_check_unneeded(other);
1773 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1774 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1775 unit_check_unneeded(other);
1776 }
1777
1778 void unit_start_on_failure(Unit *u) {
1779 Unit *other;
1780 Iterator i;
1781
1782 assert(u);
1783
1784 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1785 return;
1786
1787 log_unit_info(u, "Triggering OnFailure= dependencies.");
1788
1789 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1790 int r;
1791
1792 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, true, NULL, NULL);
1793 if (r < 0)
1794 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1795 }
1796 }
1797
1798 void unit_trigger_notify(Unit *u) {
1799 Unit *other;
1800 Iterator i;
1801
1802 assert(u);
1803
1804 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1805 if (UNIT_VTABLE(other)->trigger_notify)
1806 UNIT_VTABLE(other)->trigger_notify(other, u);
1807 }
1808
1809 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1810 Manager *m;
1811 bool unexpected;
1812
1813 assert(u);
1814 assert(os < _UNIT_ACTIVE_STATE_MAX);
1815 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1816
1817 /* Note that this is called for all low-level state changes,
1818 * even if they might map to the same high-level
1819 * UnitActiveState! That means that ns == os is an expected
1820 * behavior here. For example: if a mount point is remounted
1821 * this function will be called too! */
1822
1823 m = u->manager;
1824
1825 /* Update timestamps for state changes */
1826 if (m->n_reloading <= 0) {
1827 dual_timestamp ts;
1828
1829 dual_timestamp_get(&ts);
1830
1831 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1832 u->inactive_exit_timestamp = ts;
1833 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1834 u->inactive_enter_timestamp = ts;
1835
1836 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1837 u->active_enter_timestamp = ts;
1838 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1839 u->active_exit_timestamp = ts;
1840 }
1841
1842 /* Keep track of failed units */
1843 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1844
1845 /* Make sure the cgroup is always removed when we become inactive */
1846 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1847 unit_prune_cgroup(u);
1848
1849 /* Note that this doesn't apply to RemainAfterExit services exiting
1850          * successfully, since there's no change of state in that case,
1851          * which is why it is handled in service_set_state(). */
1852 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1853 ExecContext *ec;
1854
1855 ec = unit_get_exec_context(u);
1856 if (ec && exec_context_may_touch_console(ec)) {
1857 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1858 m->n_on_console --;
1859
1860 if (m->n_on_console == 0)
1861 /* unset no_console_output flag, since the console is free */
1862 m->no_console_output = false;
1863 } else
1864 m->n_on_console ++;
1865 }
1866 }
1867
1868 if (u->job) {
1869 unexpected = false;
1870
1871 if (u->job->state == JOB_WAITING)
1872
1873 /* So we reached a different state for this
1874 * job. Let's see if we can run it now if it
1875 * failed previously due to EAGAIN. */
1876 job_add_to_run_queue(u->job);
1877
1878 /* Let's check whether this state change constitutes a
1879 * finished job, or maybe contradicts a running job and
1880 * hence needs to invalidate jobs. */
1881
1882 switch (u->job->type) {
1883
1884 case JOB_START:
1885 case JOB_VERIFY_ACTIVE:
1886
1887 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1888 job_finish_and_invalidate(u->job, JOB_DONE, true);
1889 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1890 unexpected = true;
1891
1892 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1893 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1894 }
1895
1896 break;
1897
1898 case JOB_RELOAD:
1899 case JOB_RELOAD_OR_START:
1900
1901 if (u->job->state == JOB_RUNNING) {
1902 if (ns == UNIT_ACTIVE)
1903 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1904 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1905 unexpected = true;
1906
1907 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1908 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1909 }
1910 }
1911
1912 break;
1913
1914 case JOB_STOP:
1915 case JOB_RESTART:
1916 case JOB_TRY_RESTART:
1917
1918 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1919 job_finish_and_invalidate(u->job, JOB_DONE, true);
1920 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1921 unexpected = true;
1922 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1923 }
1924
1925 break;
1926
1927 default:
1928 assert_not_reached("Job type unknown");
1929 }
1930
1931 } else
1932 unexpected = true;
1933
1934 if (m->n_reloading <= 0) {
1935
1936 /* If this state change happened without being
1937 * requested by a job, then let's retroactively start
1938 * or stop dependencies. We skip that step when
1939 * deserializing, since we don't want to create any
1940 * additional jobs just because something is already
1941 * activated. */
1942
1943 if (unexpected) {
1944 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1945 retroactively_start_dependencies(u);
1946 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1947 retroactively_stop_dependencies(u);
1948 }
1949
1950                 /* Stop unneeded units regardless of whether going down was expected or not */
1951 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1952 check_unneeded_dependencies(u);
1953
1954 if (ns != os && ns == UNIT_FAILED) {
1955 log_unit_notice(u, "Unit entered failed state.");
1956 unit_start_on_failure(u);
1957 }
1958 }
1959
1960 /* Some names are special */
1961 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1962
1963 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1964 /* The bus might have just become available,
1965 * hence try to connect to it, if we aren't
1966 * yet connected. */
1967 bus_init(m, true);
1968
1969 if (u->type == UNIT_SERVICE &&
1970 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1971 m->n_reloading <= 0) {
1972 /* Write audit record if we have just finished starting up */
1973 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1974 u->in_audit = true;
1975 }
1976
1977 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1978 manager_send_unit_plymouth(m, u);
1979
1980 } else {
1981
1982 /* We don't care about D-Bus here, since we'll get an
1983 * asynchronous notification for it anyway. */
1984
1985 if (u->type == UNIT_SERVICE &&
1986 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1987 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1988 m->n_reloading <= 0) {
1989
1990                         /* Hmm, if there was no start record written,
1991 * write it now, so that we always have a nice
1992 * pair */
1993 if (!u->in_audit) {
1994 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1995
1996 if (ns == UNIT_INACTIVE)
1997 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1998 } else
1999 /* Write audit record if we have just finished shutting down */
2000 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2001
2002 u->in_audit = false;
2003 }
2004 }
2005
2006 manager_recheck_journal(m);
2007 unit_trigger_notify(u);
2008
2009 if (u->manager->n_reloading <= 0) {
2010 /* Maybe we finished startup and are now ready for
2011 * being stopped because unneeded? */
2012 unit_check_unneeded(u);
2013
2014 /* Maybe we finished startup, but something we needed
2015 * has vanished? Let's die then. (This happens when
2016 * something BindsTo= to a Type=oneshot unit, as these
2017 * units go directly from starting to inactive,
2018 * without ever entering started.) */
2019 unit_check_binds_to(u);
2020 }
2021
2022 unit_add_to_dbus_queue(u);
2023 unit_add_to_gc_queue(u);
2024 }
2025
2026 int unit_watch_pid(Unit *u, pid_t pid) {
2027 int q, r;
2028
2029 assert(u);
2030 assert(pid >= 1);
2031
2032 /* Watch a specific PID. We only support one or two units
2033 * watching each PID for now, not more. */
2034
2035 r = set_ensure_allocated(&u->pids, NULL);
2036 if (r < 0)
2037 return r;
2038
2039 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2040 if (r < 0)
2041 return r;
2042
2043 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2044 if (r == -EEXIST) {
2045 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2046 if (r < 0)
2047 return r;
2048
2049 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2050 }
2051
2052 q = set_put(u->pids, PID_TO_PTR(pid));
2053 if (q < 0)
2054 return q;
2055
2056 return r;
2057 }
2058
2059 void unit_unwatch_pid(Unit *u, pid_t pid) {
2060 assert(u);
2061 assert(pid >= 1);
2062
2063 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2064 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2065 (void) set_remove(u->pids, PID_TO_PTR(pid));
2066 }
2067
2068 void unit_unwatch_all_pids(Unit *u) {
2069 assert(u);
2070
2071 while (!set_isempty(u->pids))
2072 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2073
2074 u->pids = set_free(u->pids);
2075 }
2076
2077 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2078 Iterator i;
2079 void *e;
2080
2081 assert(u);
2082
2083 /* Cleans dead PIDs from our list */
2084
2085 SET_FOREACH(e, u->pids, i) {
2086 pid_t pid = PTR_TO_PID(e);
2087
2088 if (pid == except1 || pid == except2)
2089 continue;
2090
2091 if (!pid_is_unwaited(pid))
2092 unit_unwatch_pid(u, pid);
2093 }
2094 }
2095
2096 bool unit_job_is_applicable(Unit *u, JobType j) {
2097 assert(u);
2098 assert(j >= 0 && j < _JOB_TYPE_MAX);
2099
2100 switch (j) {
2101
2102 case JOB_VERIFY_ACTIVE:
2103 case JOB_START:
2104 case JOB_STOP:
2105 case JOB_NOP:
2106 return true;
2107
2108 case JOB_RESTART:
2109 case JOB_TRY_RESTART:
2110 return unit_can_start(u);
2111
2112 case JOB_RELOAD:
2113 return unit_can_reload(u);
2114
2115 case JOB_RELOAD_OR_START:
2116 return unit_can_reload(u) && unit_can_start(u);
2117
2118 default:
2119 assert_not_reached("Invalid job type");
2120 }
2121 }
2122
2123 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2124 assert(u);
2125
2126 /* Only warn about some unit types */
2127 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2128 return;
2129
2130 if (streq_ptr(u->id, other))
2131 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2132 else
2133 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2134 }
2135
2136 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2137
2138 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2139 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2140 [UNIT_REQUIRES_OVERRIDABLE] = UNIT_REQUIRED_BY_OVERRIDABLE,
2141 [UNIT_WANTS] = UNIT_WANTED_BY,
2142 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2143 [UNIT_REQUISITE_OVERRIDABLE] = UNIT_REQUISITE_OF_OVERRIDABLE,
2144 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2145 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2146 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2147 [UNIT_REQUIRED_BY_OVERRIDABLE] = UNIT_REQUIRES_OVERRIDABLE,
2148 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2149 [UNIT_REQUISITE_OF_OVERRIDABLE] = UNIT_REQUISITE_OVERRIDABLE,
2150 [UNIT_WANTED_BY] = UNIT_WANTS,
2151 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2152 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2153 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2154 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2155 [UNIT_BEFORE] = UNIT_AFTER,
2156 [UNIT_AFTER] = UNIT_BEFORE,
2157 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2158 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2159 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2160 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2161 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2162 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2163 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2164 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2165 };
2166 int r, q = 0, v = 0, w = 0;
2167 Unit *orig_u = u, *orig_other = other;
2168
2169 assert(u);
2170 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2171 assert(other);
2172
2173 u = unit_follow_merge(u);
2174 other = unit_follow_merge(other);
2175
2176 /* We won't allow dependencies on ourselves. We will not
2177 * consider them an error however. */
2178 if (u == other) {
2179 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2180 return 0;
2181 }
2182
2183 r = set_ensure_allocated(&u->dependencies[d], NULL);
2184 if (r < 0)
2185 return r;
2186
2187 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2188 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2189 if (r < 0)
2190 return r;
2191 }
2192
2193 if (add_reference) {
2194 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2195 if (r < 0)
2196 return r;
2197
2198 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2199 if (r < 0)
2200 return r;
2201 }
2202
2203 q = set_put(u->dependencies[d], other);
2204 if (q < 0)
2205 return q;
2206
2207 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2208 v = set_put(other->dependencies[inverse_table[d]], u);
2209 if (v < 0) {
2210 r = v;
2211 goto fail;
2212 }
2213 }
2214
2215 if (add_reference) {
2216 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2217 if (w < 0) {
2218 r = w;
2219 goto fail;
2220 }
2221
2222 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2223 if (r < 0)
2224 goto fail;
2225 }
2226
2227 unit_add_to_dbus_queue(u);
2228 return 0;
2229
2230 fail:
2231 if (q > 0)
2232 set_remove(u->dependencies[d], other);
2233
2234 if (v > 0)
2235 set_remove(other->dependencies[inverse_table[d]], u);
2236
2237 if (w > 0)
2238 set_remove(u->dependencies[UNIT_REFERENCES], other);
2239
2240 return r;
2241 }
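
/* A sketch of the inverse table at work (unit pointers a and b are assumed to exist):
 *
 *     r = unit_add_dependency(a, UNIT_WANTS, b, true);
 *
 * records b in a->dependencies[UNIT_WANTS] and a in b->dependencies[UNIT_WANTED_BY],
 * and, because add_reference is true, also the UNIT_REFERENCES/UNIT_REFERENCED_BY
 * pair. UNIT_ON_FAILURE maps to _UNIT_DEPENDENCY_INVALID above, i.e. it is the one
 * dependency type in the table without an inverse edge. */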
2242
2243 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2244 int r;
2245
2246 assert(u);
2247
2248 r = unit_add_dependency(u, d, other, add_reference);
2249 if (r < 0)
2250 return r;
2251
2252 return unit_add_dependency(u, e, other, add_reference);
2253 }
2254
2255 static int resolve_template(Unit *u, const char *name, const char *path, char **buf, const char **ret) {
2256 int r;
2257
2258 assert(u);
2259 assert(name || path);
2260 assert(buf);
2261 assert(ret);
2262
2263 if (!name)
2264 name = basename(path);
2265
2266 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2267 *buf = NULL;
2268 *ret = name;
2269 return 0;
2270 }
2271
2272 if (u->instance)
2273 r = unit_name_replace_instance(name, u->instance, buf);
2274 else {
2275 _cleanup_free_ char *i = NULL;
2276
2277 r = unit_name_to_prefix(u->id, &i);
2278 if (r < 0)
2279 return r;
2280
2281 r = unit_name_replace_instance(name, i, buf);
2282 }
2283 if (r < 0)
2284 return r;
2285
2286 *ret = *buf;
2287 return 0;
2288 }
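
/* Illustration with assumed names: for a unit whose u->instance is "tty1", a
 * dependency template like "getty@.service" resolves to "getty@tty1.service"
 * (allocated into *buf); a non-template name such as "dbus.service" is passed
 * through unchanged and *buf stays NULL. */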
2289
2290 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2291 _cleanup_free_ char *buf = NULL;
2292 Unit *other;
2293 int r;
2294
2295 assert(u);
2296 assert(name || path);
2297
2298 r = resolve_template(u, name, path, &buf, &name);
2299 if (r < 0)
2300 return r;
2301
2302 r = manager_load_unit(u->manager, name, path, NULL, &other);
2303 if (r < 0)
2304 return r;
2305
2306 return unit_add_dependency(u, d, other, add_reference);
2307 }
2308
2309 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2310 _cleanup_free_ char *buf = NULL;
2311 Unit *other;
2312 int r;
2313
2314 assert(u);
2315 assert(name || path);
2316
2317 r = resolve_template(u, name, path, &buf, &name);
2318 if (r < 0)
2319 return r;
2320
2321 r = manager_load_unit(u->manager, name, path, NULL, &other);
2322 if (r < 0)
2323 return r;
2324
2325 return unit_add_two_dependencies(u, d, e, other, add_reference);
2326 }
2327
2328 int set_unit_path(const char *p) {
2329 /* This is mostly for debug purposes */
2330 if (setenv("SYSTEMD_UNIT_PATH", p, 0) < 0)
2331 return -errno;
2332
2333 return 0;
2334 }
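
/* Sketch of the intended (test-style) use: set_unit_path("/tmp/test-units") makes a
 * manager created afterwards look for unit files there. Note that setenv() is called
 * with overwrite=0, so an already exported SYSTEMD_UNIT_PATH takes precedence. */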
2335
2336 char *unit_dbus_path(Unit *u) {
2337 assert(u);
2338
2339 if (!u->id)
2340 return NULL;
2341
2342 return unit_dbus_path_from_name(u->id);
2343 }
2344
2345 int unit_set_slice(Unit *u, Unit *slice) {
2346 assert(u);
2347 assert(slice);
2348
2349 /* Sets the unit slice if it has not been set before. We are
2350 * extra careful to only allow this for units that actually have
2351 * a cgroup context. Also, we don't allow setting this for slices
2352 * (since the parent slice is derived from the name). Make
2353 * sure the unit we set is actually a slice. */
2354
2355 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2356 return -EOPNOTSUPP;
2357
2358 if (u->type == UNIT_SLICE)
2359 return -EINVAL;
2360
2361 if (unit_active_state(u) != UNIT_INACTIVE)
2362 return -EBUSY;
2363
2364 if (slice->type != UNIT_SLICE)
2365 return -EINVAL;
2366
2367 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2368 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2369 return -EPERM;
2370
2371 if (UNIT_DEREF(u->slice) == slice)
2372 return 0;
2373
2374 if (UNIT_ISSET(u->slice))
2375 return -EBUSY;
2376
2377 unit_ref_set(&u->slice, slice);
2378 return 1;
2379 }
2380
2381 int unit_set_default_slice(Unit *u) {
2382 _cleanup_free_ char *b = NULL;
2383 const char *slice_name;
2384 Unit *slice;
2385 int r;
2386
2387 assert(u);
2388
2389 if (UNIT_ISSET(u->slice))
2390 return 0;
2391
2392 if (u->instance) {
2393 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2394
2395 /* Implicitly place all instantiated units in their
2396 * own per-template slice */
2397
2398 r = unit_name_to_prefix(u->id, &prefix);
2399 if (r < 0)
2400 return r;
2401
2402 /* The prefix is already escaped, but it might include
2403 * "-" which has a special meaning for slice units,
2404 * hence escape it here once more. */
2405 escaped = unit_name_escape(prefix);
2406 if (!escaped)
2407 return -ENOMEM;
2408
2409 if (u->manager->running_as == MANAGER_SYSTEM)
2410 b = strjoin("system-", escaped, ".slice", NULL);
2411 else
2412 b = strappend(escaped, ".slice");
2413 if (!b)
2414 return -ENOMEM;
2415
2416 slice_name = b;
2417 } else
2418 slice_name =
2419 u->manager->running_as == MANAGER_SYSTEM && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2420 ? SPECIAL_SYSTEM_SLICE
2421 : SPECIAL_ROOT_SLICE;
2422
2423 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2424 if (r < 0)
2425 return r;
2426
2427 return unit_set_slice(u, slice);
2428 }
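
/* Sketch of the resulting slice names (system manager assumed): an instantiated
 * unit like "getty@tty1.service" has prefix "getty" and is placed in
 * "system-getty.slice"; a plain "sshd.service" defaults to "system.slice"; and
 * "init.scope" stays directly in the root slice "-.slice". */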
2429
2430 const char *unit_slice_name(Unit *u) {
2431 assert(u);
2432
2433 if (!UNIT_ISSET(u->slice))
2434 return NULL;
2435
2436 return UNIT_DEREF(u->slice)->id;
2437 }
2438
2439 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2440 _cleanup_free_ char *t = NULL;
2441 int r;
2442
2443 assert(u);
2444 assert(type);
2445 assert(_found);
2446
2447 r = unit_name_change_suffix(u->id, type, &t);
2448 if (r < 0)
2449 return r;
2450 if (unit_has_name(u, t))
2451 return -EINVAL;
2452
2453 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2454 assert(r < 0 || *_found != u);
2455 return r;
2456 }
2457
2458 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2459 const char *name, *old_owner, *new_owner;
2460 Unit *u = userdata;
2461 int r;
2462
2463 assert(message);
2464 assert(u);
2465
2466 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2467 if (r < 0) {
2468 bus_log_parse_error(r);
2469 return 0;
2470 }
2471
2472 if (UNIT_VTABLE(u)->bus_name_owner_change)
2473 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2474
2475 return 0;
2476 }
2477
2478 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2479 const char *match;
2480
2481 assert(u);
2482 assert(bus);
2483 assert(name);
2484
2485 if (u->match_bus_slot)
2486 return -EBUSY;
2487
2488 match = strjoina("type='signal',"
2489 "sender='org.freedesktop.DBus',"
2490 "path='/org/freedesktop/DBus',"
2491 "interface='org.freedesktop.DBus',"
2492 "member='NameOwnerChanged',"
2493 "arg0='", name, "'",
2494 NULL);
2495
2496 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2497 }
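
/* For name == "org.freedesktop.hostname1" (an arbitrary example) the assembled
 * match string is, joined into one line:
 *
 *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.freedesktop.hostname1'
 *
 * i.e. the unit is notified whenever ownership of that well-known name changes. */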
2498
2499 int unit_watch_bus_name(Unit *u, const char *name) {
2500 int r;
2501
2502 assert(u);
2503 assert(name);
2504
2505 /* Watch a specific name on the bus. We only support one unit
2506 * watching each name for now. */
2507
2508 if (u->manager->api_bus) {
2509 /* If the bus is already available, install the match directly.
2510 * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
2511 r = unit_install_bus_match(u, u->manager->api_bus, name);
2512 if (r < 0)
2513 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal: %m");
2514 }
2515
2516 r = hashmap_put(u->manager->watch_bus, name, u);
2517 if (r < 0) {
2518 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2519 return log_warning_errno(r, "Failed to put bus name into hashmap: %m");
2520 }
2521
2522 return 0;
2523 }
2524
2525 void unit_unwatch_bus_name(Unit *u, const char *name) {
2526 assert(u);
2527 assert(name);
2528
2529 hashmap_remove_value(u->manager->watch_bus, name, u);
2530 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2531 }
2532
2533 bool unit_can_serialize(Unit *u) {
2534 assert(u);
2535
2536 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2537 }
2538
2539 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2540 int r;
2541
2542 assert(u);
2543 assert(f);
2544 assert(fds);
2545
2546 if (unit_can_serialize(u)) {
2547 ExecRuntime *rt;
2548
2549 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2550 if (r < 0)
2551 return r;
2552
2553 rt = unit_get_exec_runtime(u);
2554 if (rt) {
2555 r = exec_runtime_serialize(u, rt, f, fds);
2556 if (r < 0)
2557 return r;
2558 }
2559 }
2560
2561 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2562 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2563 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2564 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2565 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2566 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2567
2568 if (dual_timestamp_is_set(&u->condition_timestamp))
2569 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2570
2571 if (dual_timestamp_is_set(&u->assert_timestamp))
2572 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2573
2574 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2575 unit_serialize_item_format(u, f, "cpuacct-usage-base", "%" PRIu64, u->cpuacct_usage_base);
2576
2577 if (u->cgroup_path)
2578 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2579 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2580
2581 if (u->cgroup_netclass_id)
2582 unit_serialize_item_format(u, f, "netclass-id", "%" PRIu32, u->cgroup_netclass_id);
2583
2584 if (serialize_jobs) {
2585 if (u->job) {
2586 fprintf(f, "job\n");
2587 job_serialize(u->job, f, fds);
2588 }
2589
2590 if (u->nop_job) {
2591 fprintf(f, "job\n");
2592 job_serialize(u->nop_job, f, fds);
2593 }
2594 }
2595
2596 /* End marker */
2597 fputc('\n', f);
2598 return 0;
2599 }
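
/* The serialization stream produced above is plain "key=value" lines, closed by an
 * empty line; roughly (values are made up for illustration):
 *
 *     inactive-exit-timestamp=1455021234567890 1234567890
 *     condition-result=yes
 *     transient=no
 *     cpuacct-usage-base=0
 *     cgroup=/system.slice/foo.service
 *     cgroup-realized=yes
 *
 * unit_deserialize() below parses exactly this format back across daemon
 * reload/re-execution. */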
2600
2601 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2602 assert(u);
2603 assert(f);
2604 assert(key);
2605
2606 if (!value)
2607 return 0;
2608
2609 fputs(key, f);
2610 fputc('=', f);
2611 fputs(value, f);
2612 fputc('\n', f);
2613
2614 return 1;
2615 }
2616
2617 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2618 _cleanup_free_ char *c = NULL;
2619
2620 assert(u);
2621 assert(f);
2622 assert(key);
2623
2624 if (!value)
2625 return 0;
2626
2627 c = cescape(value);
2628 if (!c)
2629 return -ENOMEM;
2630
2631 fputs(key, f);
2632 fputc('=', f);
2633 fputs(c, f);
2634 fputc('\n', f);
2635
2636 return 1;
2637 }
2638
2639 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2640 int copy;
2641
2642 assert(u);
2643 assert(f);
2644 assert(key);
2645
2646 if (fd < 0)
2647 return 0;
2648
2649 copy = fdset_put_dup(fds, fd);
2650 if (copy < 0)
2651 return copy;
2652
2653 fprintf(f, "%s=%i\n", key, copy);
2654 return 1;
2655 }
2656
2657 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2658 va_list ap;
2659
2660 assert(u);
2661 assert(f);
2662 assert(key);
2663 assert(format);
2664
2665 fputs(key, f);
2666 fputc('=', f);
2667
2668 va_start(ap, format);
2669 vfprintf(f, format, ap);
2670 va_end(ap);
2671
2672 fputc('\n', f);
2673 }
2674
2675 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2676 ExecRuntime **rt = NULL;
2677 size_t offset;
2678 int r;
2679
2680 assert(u);
2681 assert(f);
2682 assert(fds);
2683
2684 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2685 if (offset > 0)
2686 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2687
2688 for (;;) {
2689 char line[LINE_MAX], *l, *v;
2690 size_t k;
2691
2692 if (!fgets(line, sizeof(line), f)) {
2693 if (feof(f))
2694 return 0;
2695 return -errno;
2696 }
2697
2698 char_array_0(line);
2699 l = strstrip(line);
2700
2701 /* End marker */
2702 if (isempty(l))
2703 return 0;
2704
2705 k = strcspn(l, "=");
2706
2707 if (l[k] == '=') {
2708 l[k] = 0;
2709 v = l+k+1;
2710 } else
2711 v = l+k;
2712
2713 if (streq(l, "job")) {
2714 if (v[0] == '\0') {
2715 /* new-style serialized job */
2716 Job *j;
2717
2718 j = job_new_raw(u);
2719 if (!j)
2720 return log_oom();
2721
2722 r = job_deserialize(j, f, fds);
2723 if (r < 0) {
2724 job_free(j);
2725 return r;
2726 }
2727
2728 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2729 if (r < 0) {
2730 job_free(j);
2731 return r;
2732 }
2733
2734 r = job_install_deserialized(j);
2735 if (r < 0) {
2736 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2737 job_free(j);
2738 return r;
2739 }
2740 } else /* legacy for pre-44 */
2741 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2742 continue;
2743 } else if (streq(l, "inactive-exit-timestamp")) {
2744 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2745 continue;
2746 } else if (streq(l, "active-enter-timestamp")) {
2747 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2748 continue;
2749 } else if (streq(l, "active-exit-timestamp")) {
2750 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2751 continue;
2752 } else if (streq(l, "inactive-enter-timestamp")) {
2753 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2754 continue;
2755 } else if (streq(l, "condition-timestamp")) {
2756 dual_timestamp_deserialize(v, &u->condition_timestamp);
2757 continue;
2758 } else if (streq(l, "assert-timestamp")) {
2759 dual_timestamp_deserialize(v, &u->assert_timestamp);
2760 continue;
2761 } else if (streq(l, "condition-result")) {
2762
2763 r = parse_boolean(v);
2764 if (r < 0)
2765 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2766 else
2767 u->condition_result = r;
2768
2769 continue;
2770
2771 } else if (streq(l, "assert-result")) {
2772
2773 r = parse_boolean(v);
2774 if (r < 0)
2775 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2776 else
2777 u->assert_result = r;
2778
2779 continue;
2780
2781 } else if (streq(l, "transient")) {
2782
2783 r = parse_boolean(v);
2784 if (r < 0)
2785 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2786 else
2787 u->transient = r;
2788
2789 continue;
2790
2791 } else if (streq(l, "cpuacct-usage-base")) {
2792
2793 r = safe_atou64(v, &u->cpuacct_usage_base);
2794 if (r < 0)
2795 log_unit_debug(u, "Failed to parse CPU usage %s, ignoring.", v);
2796
2797 continue;
2798
2799 } else if (streq(l, "cgroup")) {
2800
2801 r = unit_set_cgroup_path(u, v);
2802 if (r < 0)
2803 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2804
2805 (void) unit_watch_cgroup(u);
2806
2807 continue;
2808 } else if (streq(l, "cgroup-realized")) {
2809 int b;
2810
2811 b = parse_boolean(v);
2812 if (b < 0)
2813 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2814 else
2815 u->cgroup_realized = b;
2816
2817 continue;
2818 } else if (streq(l, "netclass-id")) {
2819 r = safe_atou32(v, &u->cgroup_netclass_id);
2820 if (r < 0)
2821 log_unit_debug(u, "Failed to parse netclass ID %s, ignoring.", v);
2822 else {
2823 r = unit_add_to_netclass_cgroup(u);
2824 if (r < 0)
2825 log_unit_debug_errno(u, r, "Failed to add unit to netclass cgroup, ignoring: %m");
2826 }
2827
2828 continue;
2829 }
2830
2831 if (unit_can_serialize(u)) {
2832 if (rt) {
2833 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
2834 if (r < 0) {
2835 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
2836 continue;
2837 }
2838
2839 /* Returns positive if key was handled by the call */
2840 if (r > 0)
2841 continue;
2842 }
2843
2844 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
2845 if (r < 0)
2846 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
2847 }
2848 }
2849 }
2850
2851 int unit_add_node_link(Unit *u, const char *what, bool wants) {
2852 Unit *device;
2853 _cleanup_free_ char *e = NULL;
2854 int r;
2855
2856 assert(u);
2857
2858 /* Adds in links to the device node that this unit is based on */
2859 if (isempty(what))
2860 return 0;
2861
2862 if (!is_device_path(what))
2863 return 0;
2864
2865 /* When device units aren't supported (such as in a
2866 * container), don't create dependencies on them. */
2867 if (!unit_type_supported(UNIT_DEVICE))
2868 return 0;
2869
2870 r = unit_name_from_path(what, ".device", &e);
2871 if (r < 0)
2872 return r;
2873
2874 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2875 if (r < 0)
2876 return r;
2877
2878 r = unit_add_two_dependencies(u, UNIT_AFTER, u->manager->running_as == MANAGER_SYSTEM ? UNIT_BINDS_TO : UNIT_WANTS, device, true);
2879 if (r < 0)
2880 return r;
2881
2882 if (wants) {
2883 r = unit_add_dependency(device, UNIT_WANTS, u, false);
2884 if (r < 0)
2885 return r;
2886 }
2887
2888 return 0;
2889 }
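
/* Example with an assumed device path: for what == "/dev/sda1" this loads
 * "dev-sda1.device" and adds After= plus BindsTo= (Wants= on non-system managers)
 * on it; with wants=true the device unit additionally gets a Wants= back on u, so
 * that the unit is pulled in when the device shows up. */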
2890
2891 int unit_coldplug(Unit *u) {
2892 int r;
2893
2894 assert(u);
2895
2896 /* Make sure we don't enter a loop when coldplugging
2897 * recursively. */
2898 if (u->coldplugged)
2899 return 0;
2900
2901 u->coldplugged = true;
2902
2903 if (UNIT_VTABLE(u)->coldplug) {
2904 r = UNIT_VTABLE(u)->coldplug(u);
2905 if (r < 0)
2906 return r;
2907 }
2908
2909 if (u->job) {
2910 r = job_coldplug(u->job);
2911 if (r < 0)
2912 return r;
2913 }
2914
2915 return 0;
2916 }
2917
2918 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
2919 DISABLE_WARNING_FORMAT_NONLITERAL;
2920 manager_status_printf(u->manager, STATUS_TYPE_NORMAL,
2921 status, unit_status_msg_format, unit_description(u));
2922 REENABLE_WARNING;
2923 }
2924
2925 bool unit_need_daemon_reload(Unit *u) {
2926 _cleanup_strv_free_ char **t = NULL;
2927 char **path;
2928 struct stat st;
2929 unsigned loaded_cnt, current_cnt;
2930
2931 assert(u);
2932
2933 if (u->fragment_path) {
2934 zero(st);
2935 if (stat(u->fragment_path, &st) < 0)
2936 /* What, cannot access this anymore? */
2937 return true;
2938
2939 if (u->fragment_mtime > 0 &&
2940 timespec_load(&st.st_mtim) != u->fragment_mtime)
2941 return true;
2942 }
2943
2944 if (u->source_path) {
2945 zero(st);
2946 if (stat(u->source_path, &st) < 0)
2947 return true;
2948
2949 if (u->source_mtime > 0 &&
2950 timespec_load(&st.st_mtim) != u->source_mtime)
2951 return true;
2952 }
2953
2954 (void) unit_find_dropin_paths(u, &t);
2955 loaded_cnt = strv_length(t);
2956 current_cnt = strv_length(u->dropin_paths);
2957
2958 if (loaded_cnt == current_cnt) {
2959 if (loaded_cnt == 0)
2960 return false;
2961
2962 if (strv_overlap(u->dropin_paths, t)) {
2963 STRV_FOREACH(path, u->dropin_paths) {
2964 zero(st);
2965 if (stat(*path, &st) < 0)
2966 return true;
2967
2968 if (u->dropin_mtime > 0 &&
2969 timespec_load(&st.st_mtim) > u->dropin_mtime)
2970 return true;
2971 }
2972
2973 return false;
2974 } else
2975 return true;
2976 } else
2977 return true;
2978 }
2979
2980 void unit_reset_failed(Unit *u) {
2981 assert(u);
2982
2983 if (UNIT_VTABLE(u)->reset_failed)
2984 UNIT_VTABLE(u)->reset_failed(u);
2985 }
2986
2987 Unit *unit_following(Unit *u) {
2988 assert(u);
2989
2990 if (UNIT_VTABLE(u)->following)
2991 return UNIT_VTABLE(u)->following(u);
2992
2993 return NULL;
2994 }
2995
2996 bool unit_stop_pending(Unit *u) {
2997 assert(u);
2998
2999 /* This call does not check the current state of the unit. It's
3000 * hence useful to be called from state change calls of the
3001 * unit itself, where the state isn't updated yet. This is
3002 * different from unit_inactive_or_pending() which checks both
3003 * the current state and for a queued job. */
3004
3005 return u->job && u->job->type == JOB_STOP;
3006 }
3007
3008 bool unit_inactive_or_pending(Unit *u) {
3009 assert(u);
3010
3011 /* Returns true if the unit is inactive or going down */
3012
3013 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3014 return true;
3015
3016 if (unit_stop_pending(u))
3017 return true;
3018
3019 return false;
3020 }
3021
3022 bool unit_active_or_pending(Unit *u) {
3023 assert(u);
3024
3025 /* Returns true if the unit is active or going up */
3026
3027 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3028 return true;
3029
3030 if (u->job &&
3031 (u->job->type == JOB_START ||
3032 u->job->type == JOB_RELOAD_OR_START ||
3033 u->job->type == JOB_RESTART))
3034 return true;
3035
3036 return false;
3037 }
3038
3039 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3040 assert(u);
3041 assert(w >= 0 && w < _KILL_WHO_MAX);
3042 assert(signo > 0);
3043 assert(signo < _NSIG);
3044
3045 if (!UNIT_VTABLE(u)->kill)
3046 return -EOPNOTSUPP;
3047
3048 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3049 }
3050
3051 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3052 Set *pid_set;
3053 int r;
3054
3055 pid_set = set_new(NULL);
3056 if (!pid_set)
3057 return NULL;
3058
3059 /* Exclude the main/control pids from being killed via the cgroup */
3060 if (main_pid > 0) {
3061 r = set_put(pid_set, PID_TO_PTR(main_pid));
3062 if (r < 0)
3063 goto fail;
3064 }
3065
3066 if (control_pid > 0) {
3067 r = set_put(pid_set, PID_TO_PTR(control_pid));
3068 if (r < 0)
3069 goto fail;
3070 }
3071
3072 return pid_set;
3073
3074 fail:
3075 set_free(pid_set);
3076 return NULL;
3077 }
3078
3079 int unit_kill_common(
3080 Unit *u,
3081 KillWho who,
3082 int signo,
3083 pid_t main_pid,
3084 pid_t control_pid,
3085 sd_bus_error *error) {
3086
3087 int r = 0;
3088 bool killed = false;
3089
3090 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3091 if (main_pid < 0)
3092 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3093 else if (main_pid == 0)
3094 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3095 }
3096
3097 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3098 if (control_pid < 0)
3099 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3100 else if (control_pid == 0)
3101 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3102 }
3103
3104 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3105 if (control_pid > 0) {
3106 if (kill(control_pid, signo) < 0)
3107 r = -errno;
3108 else
3109 killed = true;
3110 }
3111
3112 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3113 if (main_pid > 0) {
3114 if (kill(main_pid, signo) < 0)
3115 r = -errno;
3116 else
3117 killed = true;
3118 }
3119
3120 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3121 _cleanup_set_free_ Set *pid_set = NULL;
3122 int q;
3123
3124 /* Exclude the main/control pids from being killed via the cgroup */
3125 pid_set = unit_pid_set(main_pid, control_pid);
3126 if (!pid_set)
3127 return -ENOMEM;
3128
3129 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, false, false, pid_set);
3130 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3131 r = q;
3132 else
3133 killed = true;
3134 }
3135
3136 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
3137 return -ESRCH;
3138
3139 return r;
3140 }
3141
3142 int unit_following_set(Unit *u, Set **s) {
3143 assert(u);
3144 assert(s);
3145
3146 if (UNIT_VTABLE(u)->following_set)
3147 return UNIT_VTABLE(u)->following_set(u, s);
3148
3149 *s = NULL;
3150 return 0;
3151 }
3152
3153 UnitFileState unit_get_unit_file_state(Unit *u) {
3154 assert(u);
3155
3156 if (u->unit_file_state < 0 && u->fragment_path)
3157 u->unit_file_state = unit_file_get_state(
3158 u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3159 NULL, basename(u->fragment_path));
3160
3161 return u->unit_file_state;
3162 }
3163
3164 int unit_get_unit_file_preset(Unit *u) {
3165 assert(u);
3166
3167 if (u->unit_file_preset < 0 && u->fragment_path)
3168 u->unit_file_preset = unit_file_query_preset(
3169 u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3170 NULL, basename(u->fragment_path));
3171
3172 return u->unit_file_preset;
3173 }
3174
3175 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3176 assert(ref);
3177 assert(u);
3178
3179 if (ref->unit)
3180 unit_ref_unset(ref);
3181
3182 ref->unit = u;
3183 LIST_PREPEND(refs, u->refs, ref);
3184 return u;
3185 }
3186
3187 void unit_ref_unset(UnitRef *ref) {
3188 assert(ref);
3189
3190 if (!ref->unit)
3191 return;
3192
3193 LIST_REMOVE(refs, ref->unit->refs, ref);
3194 ref->unit = NULL;
3195 }
3196
3197 int unit_patch_contexts(Unit *u) {
3198 CGroupContext *cc;
3199 ExecContext *ec;
3200 unsigned i;
3201 int r;
3202
3203 assert(u);
3204
3205 /* Patch the manager defaults into the exec and cgroup
3206 * contexts, _after_ the rest of the settings have been
3207 * initialized. */
3208
3209 ec = unit_get_exec_context(u);
3210 if (ec) {
3211 /* This only copies in the ones that need memory */
3212 for (i = 0; i < _RLIMIT_MAX; i++)
3213 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3214 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3215 if (!ec->rlimit[i])
3216 return -ENOMEM;
3217 }
3218
3219 if (u->manager->running_as == MANAGER_USER &&
3220 !ec->working_directory) {
3221
3222 r = get_home_dir(&ec->working_directory);
3223 if (r < 0)
3224 return r;
3225
3226 /* Allow user services to run, even if the
3227 * home directory is missing */
3228 ec->working_directory_missing_ok = true;
3229 }
3230
3231 if (u->manager->running_as == MANAGER_USER &&
3232 (ec->syscall_whitelist ||
3233 !set_isempty(ec->syscall_filter) ||
3234 !set_isempty(ec->syscall_archs) ||
3235 ec->address_families_whitelist ||
3236 !set_isempty(ec->address_families)))
3237 ec->no_new_privileges = true;
3238
3239 if (ec->private_devices)
3240 ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3241 }
3242
3243 cc = unit_get_cgroup_context(u);
3244 if (cc) {
3245
3246 if (ec &&
3247 ec->private_devices &&
3248 cc->device_policy == CGROUP_AUTO)
3249 cc->device_policy = CGROUP_CLOSED;
3250 }
3251
3252 return 0;
3253 }
3254
3255 ExecContext *unit_get_exec_context(Unit *u) {
3256 size_t offset;
3257 assert(u);
3258
3259 if (u->type < 0)
3260 return NULL;
3261
3262 offset = UNIT_VTABLE(u)->exec_context_offset;
3263 if (offset <= 0)
3264 return NULL;
3265
3266 return (ExecContext*) ((uint8_t*) u + offset);
3267 }
3268
3269 KillContext *unit_get_kill_context(Unit *u) {
3270 size_t offset;
3271 assert(u);
3272
3273 if (u->type < 0)
3274 return NULL;
3275
3276 offset = UNIT_VTABLE(u)->kill_context_offset;
3277 if (offset <= 0)
3278 return NULL;
3279
3280 return (KillContext*) ((uint8_t*) u + offset);
3281 }
3282
3283 CGroupContext *unit_get_cgroup_context(Unit *u) {
3284 size_t offset;
3285
3286 if (u->type < 0)
3287 return NULL;
3288
3289 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3290 if (offset <= 0)
3291 return NULL;
3292
3293 return (CGroupContext*) ((uint8_t*) u + offset);
3294 }
3295
3296 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3297 size_t offset;
3298
3299 if (u->type < 0)
3300 return NULL;
3301
3302 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3303 if (offset <= 0)
3304 return NULL;
3305
3306 return *(ExecRuntime**) ((uint8_t*) u + offset);
3307 }
3308
3309 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3310 assert(u);
3311
3312 if (u->manager->running_as == MANAGER_USER) {
3313 int r;
3314
3315 if (mode == UNIT_PERSISTENT && !transient)
3316 r = user_config_home(dir);
3317 else
3318 r = user_runtime_dir(dir);
3319 if (r == 0)
3320 return -ENOENT;
3321
3322 return r;
3323 }
3324
3325 if (mode == UNIT_PERSISTENT && !transient)
3326 *dir = strdup("/etc/systemd/system");
3327 else
3328 *dir = strdup("/run/systemd/system");
3329 if (!*dir)
3330 return -ENOMEM;
3331
3332 return 0;
3333 }
3334
3335 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3336
3337 _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
3338 int r;
3339
3340 assert(u);
3341
3342 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3343 return 0;
3344
3345 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3346 if (r < 0)
3347 return r;
3348
3349 r = write_drop_in(dir, u->id, 50, name, data);
3350 if (r < 0)
3351 return r;
3352
3353 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3354 if (r < 0)
3355 return r;
3356
3357 r = strv_extend(&u->dropin_paths, q);
3358 if (r < 0)
3359 return r;
3360
3361 strv_sort(u->dropin_paths);
3362 strv_uniq(u->dropin_paths);
3363
3364 u->dropin_mtime = now(CLOCK_REALTIME);
3365
3366 return 0;
3367 }
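
/* Sketch of the result (system manager, UNIT_RUNTIME mode assumed): a call such as
 *
 *     unit_write_drop_in(u, UNIT_RUNTIME, "description", "[Unit]\nDescription=Test\n");
 *
 * for "foo.service" creates /run/systemd/system/foo.service.d/50-description.conf
 * and records that path in u->dropin_paths, so unit_need_daemon_reload() can detect
 * later modifications via u->dropin_mtime. */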
3368
3369 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3370 _cleanup_free_ char *p = NULL;
3371 va_list ap;
3372 int r;
3373
3374 assert(u);
3375 assert(name);
3376 assert(format);
3377
3378 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3379 return 0;
3380
3381 va_start(ap, format);
3382 r = vasprintf(&p, format, ap);
3383 va_end(ap);
3384
3385 if (r < 0)
3386 return -ENOMEM;
3387
3388 return unit_write_drop_in(u, mode, name, p);
3389 }
3390
3391 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3392 _cleanup_free_ char *ndata = NULL;
3393
3394 assert(u);
3395 assert(name);
3396 assert(data);
3397
3398 if (!UNIT_VTABLE(u)->private_section)
3399 return -EINVAL;
3400
3401 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3402 return 0;
3403
3404 ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3405 if (!ndata)
3406 return -ENOMEM;
3407
3408 return unit_write_drop_in(u, mode, name, ndata);
3409 }
3410
3411 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3412 _cleanup_free_ char *p = NULL;
3413 va_list ap;
3414 int r;
3415
3416 assert(u);
3417 assert(name);
3418 assert(format);
3419
3420 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3421 return 0;
3422
3423 va_start(ap, format);
3424 r = vasprintf(&p, format, ap);
3425 va_end(ap);
3426
3427 if (r < 0)
3428 return -ENOMEM;
3429
3430 return unit_write_drop_in_private(u, mode, name, p);
3431 }
3432
3433 int unit_make_transient(Unit *u) {
3434 assert(u);
3435
3436 if (!UNIT_VTABLE(u)->can_transient)
3437 return -EOPNOTSUPP;
3438
3439 u->load_state = UNIT_STUB;
3440 u->load_error = 0;
3441 u->transient = true;
3442 u->fragment_path = mfree(u->fragment_path);
3443
3444 return 0;
3445 }
3446
3447 int unit_kill_context(
3448 Unit *u,
3449 KillContext *c,
3450 KillOperation k,
3451 pid_t main_pid,
3452 pid_t control_pid,
3453 bool main_pid_alien) {
3454
3455 bool wait_for_exit = false;
3456 int sig, r;
3457
3458 assert(u);
3459 assert(c);
3460
3461 if (c->kill_mode == KILL_NONE)
3462 return 0;
3463
3464 switch (k) {
3465 case KILL_KILL:
3466 sig = SIGKILL;
3467 break;
3468 case KILL_ABORT:
3469 sig = SIGABRT;
3470 break;
3471 case KILL_TERMINATE:
3472 sig = c->kill_signal;
3473 break;
3474 default:
3475 assert_not_reached("KillOperation unknown");
3476 }
3477
3478 if (main_pid > 0) {
3479 r = kill_and_sigcont(main_pid, sig);
3480
3481 if (r < 0 && r != -ESRCH) {
3482 _cleanup_free_ char *comm = NULL;
3483 get_process_comm(main_pid, &comm);
3484
3485 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3486 } else {
3487 if (!main_pid_alien)
3488 wait_for_exit = true;
3489
3490 if (c->send_sighup && k == KILL_TERMINATE)
3491 (void) kill(main_pid, SIGHUP);
3492 }
3493 }
3494
3495 if (control_pid > 0) {
3496 r = kill_and_sigcont(control_pid, sig);
3497
3498 if (r < 0 && r != -ESRCH) {
3499 _cleanup_free_ char *comm = NULL;
3500 get_process_comm(control_pid, &comm);
3501
3502 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3503 } else {
3504 wait_for_exit = true;
3505
3506 if (c->send_sighup && k == KILL_TERMINATE)
3507 (void) kill(control_pid, SIGHUP);
3508 }
3509 }
3510
3511 if (u->cgroup_path &&
3512 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3513 _cleanup_set_free_ Set *pid_set = NULL;
3514
3515 /* Exclude the main/control pids from being killed via the cgroup */
3516 pid_set = unit_pid_set(main_pid, control_pid);
3517 if (!pid_set)
3518 return -ENOMEM;
3519
3520 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, k != KILL_TERMINATE, false, pid_set);
3521 if (r < 0) {
3522 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3523 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3524
3525 } else if (r > 0) {
3526
3527 /* FIXME: For now, on the legacy hierarchy, we
3528 * will not wait for the cgroup members to die
3529 * if we are running in a container or if this
3530 * is a delegation unit, simply because cgroup
3531 * notification is unreliable in these
3532 * cases. It doesn't work at all in
3533 * containers, and outside of containers it
3534 * can be confused easily by left-over
3535 * directories in the cgroup -- which however
3536 * should not exist in non-delegated units. On
3537 * the unified hierarchy that's different,
3538 * there we get proper events. Hence rely on
3539 * them. */
3540
3541 if (cg_unified() > 0 ||
3542 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3543 wait_for_exit = true;
3544
3545 if (c->send_sighup && k != KILL_KILL) {
3546 set_free(pid_set);
3547
3548 pid_set = unit_pid_set(main_pid, control_pid);
3549 if (!pid_set)
3550 return -ENOMEM;
3551
3552 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3553 }
3554 }
3555 }
3556
3557 return wait_for_exit;
3558 }
3559
3560 int unit_require_mounts_for(Unit *u, const char *path) {
3561 char prefix[strlen(path) + 1], *p;
3562 int r;
3563
3564 assert(u);
3565 assert(path);
3566
3567 /* Registers a unit for requiring a certain path and all its
3568 * prefixes. We keep a simple array of these paths in the
3569 * unit, since it's usually short. However, we build a prefix
3570 * table for all possible prefixes so that newly appearing mount
3571 * units can easily determine which units to make themselves a
3572 * dependency of. */
3573
3574 if (!path_is_absolute(path))
3575 return -EINVAL;
3576
3577 p = strdup(path);
3578 if (!p)
3579 return -ENOMEM;
3580
3581 path_kill_slashes(p);
3582
3583 if (!path_is_safe(p)) {
3584 free(p);
3585 return -EPERM;
3586 }
3587
3588 if (strv_contains(u->requires_mounts_for, p)) {
3589 free(p);
3590 return 0;
3591 }
3592
3593 r = strv_consume(&u->requires_mounts_for, p);
3594 if (r < 0)
3595 return r;
3596
3597 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3598 Set *x;
3599
3600 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3601 if (!x) {
3602 char *q;
3603
3604 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3605 if (r < 0)
3606 return r;
3607
3608 q = strdup(prefix);
3609 if (!q)
3610 return -ENOMEM;
3611
3612 x = set_new(NULL);
3613 if (!x) {
3614 free(q);
3615 return -ENOMEM;
3616 }
3617
3618 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3619 if (r < 0) {
3620 free(q);
3621 set_free(x);
3622 return r;
3623 }
3624 }
3625
3626 r = set_put(x, u);
3627 if (r < 0)
3628 return r;
3629 }
3630
3631 return 0;
3632 }
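
/* Example with a hypothetical path: after unit_require_mounts_for(u, "/home/lennart/foo"),
 * the manager's units_requiring_mounts_for hashmap maps "/home/lennart/foo",
 * "/home/lennart" and "/home" (and the root prefix) to sets containing u, so a mount
 * unit appearing for any of those paths can find u and order itself accordingly. */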
3633
3634 int unit_setup_exec_runtime(Unit *u) {
3635 ExecRuntime **rt;
3636 size_t offset;
3637 Iterator i;
3638 Unit *other;
3639
3640 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3641 assert(offset > 0);
3642
3643 /* Check whether there already is an ExecRuntime for this unit */
3644 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3645 if (*rt)
3646 return 0;
3647
3648 /* Try to get it from somebody else */
3649 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3650
3651 *rt = unit_get_exec_runtime(other);
3652 if (*rt) {
3653 exec_runtime_ref(*rt);
3654 return 0;
3655 }
3656 }
3657
3658 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
3659 }
3660
3661 bool unit_type_supported(UnitType t) {
3662 if (_unlikely_(t < 0))
3663 return false;
3664 if (_unlikely_(t >= _UNIT_TYPE_MAX))
3665 return false;
3666
3667 if (!unit_vtable[t]->supported)
3668 return true;
3669
3670 return unit_vtable[t]->supported();
3671 }
3672
3673 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
3674 int r;
3675
3676 assert(u);
3677 assert(where);
3678
3679 r = dir_is_empty(where);
3680 if (r > 0)
3681 return;
3682 if (r < 0) {
3683 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
3684 return;
3685 }
3686
3687 log_struct(LOG_NOTICE,
3688 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3689 LOG_UNIT_ID(u),
3690 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
3691 "WHERE=%s", where,
3692 NULL);
3693 }
3694
3695 int unit_fail_if_symlink(Unit *u, const char* where) {
3696 int r;
3697
3698 assert(u);
3699 assert(where);
3700
3701 r = is_symlink(where);
3702 if (r < 0) {
3703 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
3704 return 0;
3705 }
3706 if (r == 0)
3707 return 0;
3708
3709 log_struct(LOG_ERR,
3710 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3711 LOG_UNIT_ID(u),
3712 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
3713 "WHERE=%s", where,
3714 NULL);
3715
3716 return -ELOOP;
3717 }