src/core/unit.c (thirdparty/systemd.git)
1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2
3 /***
4 This file is part of systemd.
5
6 Copyright 2010 Lennart Poettering
7
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
12
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
17
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
20 ***/
21
22 #include <errno.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <unistd.h>
27
28 #include "sd-id128.h"
29 #include "sd-messages.h"
30
31 #include "alloc-util.h"
32 #include "bus-common-errors.h"
33 #include "bus-util.h"
34 #include "cgroup-util.h"
35 #include "dbus-unit.h"
36 #include "dbus.h"
37 #include "dropin.h"
38 #include "escape.h"
39 #include "execute.h"
40 #include "fileio-label.h"
41 #include "formats-util.h"
42 #include "load-dropin.h"
43 #include "load-fragment.h"
44 #include "log.h"
45 #include "macro.h"
46 #include "missing.h"
47 #include "mkdir.h"
48 #include "parse-util.h"
49 #include "path-util.h"
50 #include "process-util.h"
51 #include "set.h"
52 #include "special.h"
53 #include "stat-util.h"
54 #include "string-util.h"
55 #include "strv.h"
56 #include "unit-name.h"
57 #include "unit.h"
58 #include "user-util.h"
59 #include "virt.h"
60
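/* Table of per-type vtables, indexed by UnitType and looked up via the
 * UNIT_VTABLE() macro used throughout this file to dispatch to the
 * type-specific implementations. */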
61 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
62 [UNIT_SERVICE] = &service_vtable,
63 [UNIT_SOCKET] = &socket_vtable,
64 [UNIT_BUSNAME] = &busname_vtable,
65 [UNIT_TARGET] = &target_vtable,
66 [UNIT_DEVICE] = &device_vtable,
67 [UNIT_MOUNT] = &mount_vtable,
68 [UNIT_AUTOMOUNT] = &automount_vtable,
69 [UNIT_SWAP] = &swap_vtable,
70 [UNIT_TIMER] = &timer_vtable,
71 [UNIT_PATH] = &path_vtable,
72 [UNIT_SLICE] = &slice_vtable,
73 [UNIT_SCOPE] = &scope_vtable
74 };
75
76 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
77
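/* Allocates and minimally initializes a new Unit of at least sizeof(Unit)
 * bytes; the type stays _UNIT_TYPE_INVALID until a first name is added with
 * unit_add_name(). Returns NULL on allocation failure. */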
78 Unit *unit_new(Manager *m, size_t size) {
79 Unit *u;
80
81 assert(m);
82 assert(size >= sizeof(Unit));
83
84 u = malloc0(size);
85 if (!u)
86 return NULL;
87
88 u->names = set_new(&string_hash_ops);
89 if (!u->names) {
90 free(u);
91 return NULL;
92 }
93
94 u->manager = m;
95 u->type = _UNIT_TYPE_INVALID;
96 u->default_dependencies = true;
97 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
98 u->unit_file_preset = -1;
99 u->on_failure_job_mode = JOB_REPLACE;
100 u->cgroup_inotify_wd = -1;
101
102 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
103
104 return u;
105 }
106
107 bool unit_has_name(Unit *u, const char *name) {
108 assert(u);
109 assert(name);
110
111 return !!set_get(u->names, (char*) name);
112 }
113
114 static void unit_init(Unit *u) {
115 CGroupContext *cc;
116 ExecContext *ec;
117 KillContext *kc;
118
119 assert(u);
120 assert(u->manager);
121 assert(u->type >= 0);
122
123 cc = unit_get_cgroup_context(u);
124 if (cc) {
125 cgroup_context_init(cc);
126
127                 /* Copy the manager defaults into the cgroup
128 * context, _before_ the rest of the settings have
129 * been initialized */
130
131 cc->cpu_accounting = u->manager->default_cpu_accounting;
132 cc->blockio_accounting = u->manager->default_blockio_accounting;
133 cc->memory_accounting = u->manager->default_memory_accounting;
134 cc->tasks_accounting = u->manager->default_tasks_accounting;
135
136 if (u->type != UNIT_SLICE)
137 cc->tasks_max = u->manager->default_tasks_max;
138 }
139
140 ec = unit_get_exec_context(u);
141 if (ec)
142 exec_context_init(ec);
143
144 kc = unit_get_kill_context(u);
145 if (kc)
146 kill_context_init(kc);
147
148 if (UNIT_VTABLE(u)->init)
149 UNIT_VTABLE(u)->init(u);
150 }
151
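/* Registers an additional name for the unit: template names are instantiated
 * with u->instance, the result is validated and entered into the manager's
 * unit hashmap. The first successfully added name also fixes the unit's type,
 * id and instance and runs unit_init(). */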
152 int unit_add_name(Unit *u, const char *text) {
153 _cleanup_free_ char *s = NULL, *i = NULL;
154 UnitType t;
155 int r;
156
157 assert(u);
158 assert(text);
159
160 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
161
162 if (!u->instance)
163 return -EINVAL;
164
165 r = unit_name_replace_instance(text, u->instance, &s);
166 if (r < 0)
167 return r;
168 } else {
169 s = strdup(text);
170 if (!s)
171 return -ENOMEM;
172 }
173
174 if (set_contains(u->names, s))
175 return 0;
176 if (hashmap_contains(u->manager->units, s))
177 return -EEXIST;
178
179 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
180 return -EINVAL;
181
182 t = unit_name_to_type(s);
183 if (t < 0)
184 return -EINVAL;
185
186 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
187 return -EINVAL;
188
189 r = unit_name_to_instance(s, &i);
190 if (r < 0)
191 return r;
192
193 if (i && unit_vtable[t]->no_instances)
194 return -EINVAL;
195
196 /* Ensure that this unit is either instanced or not instanced,
197 * but not both. Note that we do allow names with different
198 * instance names however! */
199 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
200 return -EINVAL;
201
202 if (unit_vtable[t]->no_alias && !set_isempty(u->names))
203 return -EEXIST;
204
205 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
206 return -E2BIG;
207
208 r = set_put(u->names, s);
209 if (r < 0)
210 return r;
211 assert(r > 0);
212
213 r = hashmap_put(u->manager->units, s, u);
214 if (r < 0) {
215 (void) set_remove(u->names, s);
216 return r;
217 }
218
219 if (u->type == _UNIT_TYPE_INVALID) {
220 u->type = t;
221 u->id = s;
222 u->instance = i;
223
224 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
225
226 unit_init(u);
227
228 i = NULL;
229 }
230
231 s = NULL;
232
233 unit_add_to_dbus_queue(u);
234 return 0;
235 }
236
237 int unit_choose_id(Unit *u, const char *name) {
238 _cleanup_free_ char *t = NULL;
239 char *s, *i;
240 int r;
241
242 assert(u);
243 assert(name);
244
245 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
246
247 if (!u->instance)
248 return -EINVAL;
249
250 r = unit_name_replace_instance(name, u->instance, &t);
251 if (r < 0)
252 return r;
253
254 name = t;
255 }
256
257 /* Selects one of the names of this unit as the id */
258 s = set_get(u->names, (char*) name);
259 if (!s)
260 return -ENOENT;
261
262 /* Determine the new instance from the new id */
263 r = unit_name_to_instance(s, &i);
264 if (r < 0)
265 return r;
266
267 u->id = s;
268
269 free(u->instance);
270 u->instance = i;
271
272 unit_add_to_dbus_queue(u);
273
274 return 0;
275 }
276
277 int unit_set_description(Unit *u, const char *description) {
278 char *s;
279
280 assert(u);
281
282 if (isempty(description))
283 s = NULL;
284 else {
285 s = strdup(description);
286 if (!s)
287 return -ENOMEM;
288 }
289
290 free(u->description);
291 u->description = s;
292
293 unit_add_to_dbus_queue(u);
294 return 0;
295 }
296
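/* Returns true if the unit object shall be kept around: it has pending jobs,
 * is in any state other than plain inactive, has GC disabled (no_gc), is
 * still referenced, or its type-specific check_gc() hook claims it. As a side
 * effect, runtime resources of inactive or failed units are released here. */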
297 bool unit_check_gc(Unit *u) {
298 UnitActiveState state;
299 assert(u);
300
301 if (u->job)
302 return true;
303
304 if (u->nop_job)
305 return true;
306
307 state = unit_active_state(u);
308
309         /* If the unit is inactive or failed and no job is queued for
310 * it, then release its runtime resources */
311 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
312 UNIT_VTABLE(u)->release_resources)
313 UNIT_VTABLE(u)->release_resources(u);
314
315 /* But we keep the unit object around for longer when it is
316 * referenced or configured to not be gc'ed */
317 if (state != UNIT_INACTIVE)
318 return true;
319
320 if (u->no_gc)
321 return true;
322
323 if (u->refs)
324 return true;
325
326 if (UNIT_VTABLE(u)->check_gc)
327 if (UNIT_VTABLE(u)->check_gc(u))
328 return true;
329
330 return false;
331 }
332
333 void unit_add_to_load_queue(Unit *u) {
334 assert(u);
335 assert(u->type != _UNIT_TYPE_INVALID);
336
337 if (u->load_state != UNIT_STUB || u->in_load_queue)
338 return;
339
340 LIST_PREPEND(load_queue, u->manager->load_queue, u);
341 u->in_load_queue = true;
342 }
343
344 void unit_add_to_cleanup_queue(Unit *u) {
345 assert(u);
346
347 if (u->in_cleanup_queue)
348 return;
349
350 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
351 u->in_cleanup_queue = true;
352 }
353
354 void unit_add_to_gc_queue(Unit *u) {
355 assert(u);
356
357 if (u->in_gc_queue || u->in_cleanup_queue)
358 return;
359
360 if (unit_check_gc(u))
361 return;
362
363 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
364 u->in_gc_queue = true;
365
366 u->manager->n_in_gc_queue ++;
367 }
368
369 void unit_add_to_dbus_queue(Unit *u) {
370 assert(u);
371 assert(u->type != _UNIT_TYPE_INVALID);
372
373 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
374 return;
375
376 /* Shortcut things if nobody cares */
377 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
378 set_isempty(u->manager->private_buses)) {
379 u->sent_dbus_new_signal = true;
380 return;
381 }
382
383 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
384 u->in_dbus_queue = true;
385 }
386
387 static void bidi_set_free(Unit *u, Set *s) {
388 Iterator i;
389 Unit *other;
390
391 assert(u);
392
393 /* Frees the set and makes sure we are dropped from the
394 * inverse pointers */
395
396 SET_FOREACH(other, s, i) {
397 UnitDependency d;
398
399 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
400 set_remove(other->dependencies[d], u);
401
402 unit_add_to_gc_queue(other);
403 }
404
405 set_free(s);
406 }
407
408 static void unit_remove_transient(Unit *u) {
409 char **i;
410
411 assert(u);
412
413 if (!u->transient)
414 return;
415
416 if (u->fragment_path)
417 (void) unlink(u->fragment_path);
418
419 STRV_FOREACH(i, u->dropin_paths) {
420 _cleanup_free_ char *p = NULL;
421
422 (void) unlink(*i);
423
424 p = dirname_malloc(*i);
425 if (p)
426 (void) rmdir(p);
427 }
428 }
429
430 static void unit_free_requires_mounts_for(Unit *u) {
431 char **j;
432
433 STRV_FOREACH(j, u->requires_mounts_for) {
434 char s[strlen(*j) + 1];
435
436 PATH_FOREACH_PREFIX_MORE(s, *j) {
437 char *y;
438 Set *x;
439
440 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
441 if (!x)
442 continue;
443
444 set_remove(x, u);
445
446 if (set_isempty(x)) {
447 hashmap_remove(u->manager->units_requiring_mounts_for, y);
448 free(y);
449 set_free(x);
450 }
451 }
452 }
453
454 u->requires_mounts_for = strv_free(u->requires_mounts_for);
455 }
456
457 static void unit_done(Unit *u) {
458 ExecContext *ec;
459 CGroupContext *cc;
460 int r;
461
462 assert(u);
463
464 if (u->type < 0)
465 return;
466
467 if (UNIT_VTABLE(u)->done)
468 UNIT_VTABLE(u)->done(u);
469
470 ec = unit_get_exec_context(u);
471 if (ec)
472 exec_context_done(ec);
473
474 cc = unit_get_cgroup_context(u);
475 if (cc)
476 cgroup_context_done(cc);
477
478 r = unit_remove_from_netclass_cgroup(u);
479 if (r < 0)
480 log_warning_errno(r, "Unable to remove unit from netclass group: %m");
481 }
482
483 void unit_free(Unit *u) {
484 UnitDependency d;
485 Iterator i;
486 char *t;
487
488 assert(u);
489
490 if (u->manager->n_reloading <= 0)
491 unit_remove_transient(u);
492
493 bus_unit_send_removed_signal(u);
494
495 unit_done(u);
496
497 sd_bus_slot_unref(u->match_bus_slot);
498
499 unit_free_requires_mounts_for(u);
500
501 SET_FOREACH(t, u->names, i)
502 hashmap_remove_value(u->manager->units, t, u);
503
504 if (u->job) {
505 Job *j = u->job;
506 job_uninstall(j);
507 job_free(j);
508 }
509
510 if (u->nop_job) {
511 Job *j = u->nop_job;
512 job_uninstall(j);
513 job_free(j);
514 }
515
516 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
517 bidi_set_free(u, u->dependencies[d]);
518
519 if (u->type != _UNIT_TYPE_INVALID)
520 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
521
522 if (u->in_load_queue)
523 LIST_REMOVE(load_queue, u->manager->load_queue, u);
524
525 if (u->in_dbus_queue)
526 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
527
528 if (u->in_cleanup_queue)
529 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
530
531 if (u->in_gc_queue) {
532 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
533 u->manager->n_in_gc_queue--;
534 }
535
536 if (u->in_cgroup_queue)
537 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
538
539 unit_release_cgroup(u);
540
541 (void) manager_update_failed_units(u->manager, u, false);
542 set_remove(u->manager->startup_units, u);
543
544 free(u->description);
545 strv_free(u->documentation);
546 free(u->fragment_path);
547 free(u->source_path);
548 strv_free(u->dropin_paths);
549 free(u->instance);
550
551 free(u->job_timeout_reboot_arg);
552
553 set_free_free(u->names);
554
555 unit_unwatch_all_pids(u);
556
557 condition_free_list(u->conditions);
558 condition_free_list(u->asserts);
559
560 unit_ref_unset(&u->slice);
561
562 while (u->refs)
563 unit_ref_unset(u->refs);
564
565 free(u);
566 }
567
568 UnitActiveState unit_active_state(Unit *u) {
569 assert(u);
570
571 if (u->load_state == UNIT_MERGED)
572 return unit_active_state(unit_follow_merge(u));
573
574 /* After a reload it might happen that a unit is not correctly
575 * loaded but still has a process around. That's why we won't
576 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
577
578 return UNIT_VTABLE(u)->active_state(u);
579 }
580
581 const char* unit_sub_state_to_string(Unit *u) {
582 assert(u);
583
584 return UNIT_VTABLE(u)->sub_state_to_string(u);
585 }
586
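/* Moves all entries of *other into *s; if *s is not allocated yet, the other
 * set is simply taken over. Used below when merging two units. */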
587 static int complete_move(Set **s, Set **other) {
588 int r;
589
590 assert(s);
591 assert(other);
592
593 if (!*other)
594 return 0;
595
596 if (*s) {
597 r = set_move(*s, *other);
598 if (r < 0)
599 return r;
600 } else {
601 *s = *other;
602 *other = NULL;
603 }
604
605 return 0;
606 }
607
608 static int merge_names(Unit *u, Unit *other) {
609 char *t;
610 Iterator i;
611 int r;
612
613 assert(u);
614 assert(other);
615
616 r = complete_move(&u->names, &other->names);
617 if (r < 0)
618 return r;
619
620 set_free_free(other->names);
621 other->names = NULL;
622 other->id = NULL;
623
624 SET_FOREACH(t, u->names, i)
625 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
626
627 return 0;
628 }
629
630 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
631 unsigned n_reserve;
632
633 assert(u);
634 assert(other);
635 assert(d < _UNIT_DEPENDENCY_MAX);
636
637 /*
638 * If u does not have this dependency set allocated, there is no need
639 * to reserve anything. In that case other's set will be transferred
640 * as a whole to u by complete_move().
641 */
642 if (!u->dependencies[d])
643 return 0;
644
645 /* merge_dependencies() will skip a u-on-u dependency */
646 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
647
648 return set_reserve(u->dependencies[d], n_reserve);
649 }
650
651 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
652 Iterator i;
653 Unit *back;
654 int r;
655
656 assert(u);
657 assert(other);
658 assert(d < _UNIT_DEPENDENCY_MAX);
659
660 /* Fix backwards pointers */
661 SET_FOREACH(back, other->dependencies[d], i) {
662 UnitDependency k;
663
664 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
665 /* Do not add dependencies between u and itself */
666 if (back == u) {
667 if (set_remove(back->dependencies[k], other))
668 maybe_warn_about_dependency(u, other_id, k);
669 } else {
670 r = set_remove_and_put(back->dependencies[k], other, u);
671 if (r == -EEXIST)
672 set_remove(back->dependencies[k], other);
673 else
674 assert(r >= 0 || r == -ENOENT);
675 }
676 }
677 }
678
679 /* Also do not move dependencies on u to itself */
680 back = set_remove(other->dependencies[d], u);
681 if (back)
682 maybe_warn_about_dependency(u, other_id, d);
683
684 /* The move cannot fail. The caller must have performed a reservation. */
685 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
686
687 other->dependencies[d] = set_free(other->dependencies[d]);
688 }
689
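/* Merges 'other' into 'u': names, outstanding references and dependencies are
 * transferred, and 'other' is left in the UNIT_MERGED state pointing at 'u'
 * via merged_into. Refused if the units differ in type or instancing, or if
 * 'other' is already loaded, has jobs, or is not inactive or failed. */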
690 int unit_merge(Unit *u, Unit *other) {
691 UnitDependency d;
692 const char *other_id = NULL;
693 int r;
694
695 assert(u);
696 assert(other);
697 assert(u->manager == other->manager);
698 assert(u->type != _UNIT_TYPE_INVALID);
699
700 other = unit_follow_merge(other);
701
702 if (other == u)
703 return 0;
704
705 if (u->type != other->type)
706 return -EINVAL;
707
708 if (!u->instance != !other->instance)
709 return -EINVAL;
710
711 if (other->load_state != UNIT_STUB &&
712 other->load_state != UNIT_NOT_FOUND)
713 return -EEXIST;
714
715 if (other->job)
716 return -EEXIST;
717
718 if (other->nop_job)
719 return -EEXIST;
720
721 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
722 return -EEXIST;
723
724 if (other->id)
725 other_id = strdupa(other->id);
726
727 /* Make reservations to ensure merge_dependencies() won't fail */
728 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
729 r = reserve_dependencies(u, other, d);
730 /*
731                  * We don't roll back reservations if we fail. We don't have
732 * a way to undo reservations. A reservation is not a leak.
733 */
734 if (r < 0)
735 return r;
736 }
737
738 /* Merge names */
739 r = merge_names(u, other);
740 if (r < 0)
741 return r;
742
743 /* Redirect all references */
744 while (other->refs)
745 unit_ref_set(other->refs, u);
746
747 /* Merge dependencies */
748 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
749 merge_dependencies(u, other, other_id, d);
750
751 other->load_state = UNIT_MERGED;
752 other->merged_into = u;
753
754 /* If there is still some data attached to the other node, we
755 * don't need it anymore, and can free it. */
756 if (other->load_state != UNIT_STUB)
757 if (UNIT_VTABLE(other)->done)
758 UNIT_VTABLE(other)->done(other);
759
760 unit_add_to_dbus_queue(u);
761 unit_add_to_cleanup_queue(other);
762
763 return 0;
764 }
765
766 int unit_merge_by_name(Unit *u, const char *name) {
767 Unit *other;
768 int r;
769 _cleanup_free_ char *s = NULL;
770
771 assert(u);
772 assert(name);
773
774 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
775 if (!u->instance)
776 return -EINVAL;
777
778 r = unit_name_replace_instance(name, u->instance, &s);
779 if (r < 0)
780 return r;
781
782 name = s;
783 }
784
785 other = manager_get_unit(u->manager, name);
786 if (other)
787 return unit_merge(u, other);
788
789 return unit_add_name(u, name);
790 }
791
792 Unit* unit_follow_merge(Unit *u) {
793 assert(u);
794
795 while (u->load_state == UNIT_MERGED)
796 assert_se(u = u->merged_into);
797
798 return u;
799 }
800
801 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
802 int r;
803
804 assert(u);
805 assert(c);
806
807 if (c->working_directory) {
808 r = unit_require_mounts_for(u, c->working_directory);
809 if (r < 0)
810 return r;
811 }
812
813 if (c->root_directory) {
814 r = unit_require_mounts_for(u, c->root_directory);
815 if (r < 0)
816 return r;
817 }
818
819 if (u->manager->running_as != MANAGER_SYSTEM)
820 return 0;
821
822 if (c->private_tmp) {
823 r = unit_require_mounts_for(u, "/tmp");
824 if (r < 0)
825 return r;
826
827 r = unit_require_mounts_for(u, "/var/tmp");
828 if (r < 0)
829 return r;
830 }
831
832 if (c->std_output != EXEC_OUTPUT_KMSG &&
833 c->std_output != EXEC_OUTPUT_SYSLOG &&
834 c->std_output != EXEC_OUTPUT_JOURNAL &&
835 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
836 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
837 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
838 c->std_error != EXEC_OUTPUT_KMSG &&
839 c->std_error != EXEC_OUTPUT_SYSLOG &&
840 c->std_error != EXEC_OUTPUT_JOURNAL &&
841 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
842 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
843 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
844 return 0;
845
846 /* If syslog or kernel logging is requested, make sure our own
847 * logging daemon is run first. */
848
849 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
850 if (r < 0)
851 return r;
852
853 return 0;
854 }
855
856 const char *unit_description(Unit *u) {
857 assert(u);
858
859 if (u->description)
860 return u->description;
861
862 return strna(u->id);
863 }
864
865 void unit_dump(Unit *u, FILE *f, const char *prefix) {
866 char *t, **j;
867 UnitDependency d;
868 Iterator i;
869 const char *prefix2;
870 char
871 timestamp1[FORMAT_TIMESTAMP_MAX],
872 timestamp2[FORMAT_TIMESTAMP_MAX],
873 timestamp3[FORMAT_TIMESTAMP_MAX],
874 timestamp4[FORMAT_TIMESTAMP_MAX],
875 timespan[FORMAT_TIMESPAN_MAX];
876 Unit *following;
877 _cleanup_set_free_ Set *following_set = NULL;
878 int r;
879
880 assert(u);
881 assert(u->type >= 0);
882
883 prefix = strempty(prefix);
884 prefix2 = strjoina(prefix, "\t");
885
886 fprintf(f,
887 "%s-> Unit %s:\n"
888 "%s\tDescription: %s\n"
889 "%s\tInstance: %s\n"
890 "%s\tUnit Load State: %s\n"
891 "%s\tUnit Active State: %s\n"
892 "%s\tInactive Exit Timestamp: %s\n"
893 "%s\tActive Enter Timestamp: %s\n"
894 "%s\tActive Exit Timestamp: %s\n"
895 "%s\tInactive Enter Timestamp: %s\n"
896 "%s\tGC Check Good: %s\n"
897 "%s\tNeed Daemon Reload: %s\n"
898 "%s\tTransient: %s\n"
899 "%s\tSlice: %s\n"
900 "%s\tCGroup: %s\n"
901 "%s\tCGroup realized: %s\n"
902 "%s\tCGroup mask: 0x%x\n"
903 "%s\tCGroup members mask: 0x%x\n",
904 prefix, u->id,
905 prefix, unit_description(u),
906 prefix, strna(u->instance),
907 prefix, unit_load_state_to_string(u->load_state),
908 prefix, unit_active_state_to_string(unit_active_state(u)),
909 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
910 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
911 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
912 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
913 prefix, yes_no(unit_check_gc(u)),
914 prefix, yes_no(unit_need_daemon_reload(u)),
915 prefix, yes_no(u->transient),
916 prefix, strna(unit_slice_name(u)),
917 prefix, strna(u->cgroup_path),
918 prefix, yes_no(u->cgroup_realized),
919 prefix, u->cgroup_realized_mask,
920 prefix, u->cgroup_members_mask);
921
922 SET_FOREACH(t, u->names, i)
923 fprintf(f, "%s\tName: %s\n", prefix, t);
924
925 STRV_FOREACH(j, u->documentation)
926 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
927
928 following = unit_following(u);
929 if (following)
930 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
931
932 r = unit_following_set(u, &following_set);
933 if (r >= 0) {
934 Unit *other;
935
936 SET_FOREACH(other, following_set, i)
937 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
938 }
939
940 if (u->fragment_path)
941 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
942
943 if (u->source_path)
944 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
945
946 STRV_FOREACH(j, u->dropin_paths)
947 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
948
949 if (u->job_timeout > 0)
950 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
951
952 if (u->job_timeout_action != FAILURE_ACTION_NONE)
953 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
954
955 if (u->job_timeout_reboot_arg)
956 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
957
958 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
959 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
960
961 if (dual_timestamp_is_set(&u->condition_timestamp))
962 fprintf(f,
963 "%s\tCondition Timestamp: %s\n"
964 "%s\tCondition Result: %s\n",
965 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
966 prefix, yes_no(u->condition_result));
967
968 if (dual_timestamp_is_set(&u->assert_timestamp))
969 fprintf(f,
970 "%s\tAssert Timestamp: %s\n"
971 "%s\tAssert Result: %s\n",
972 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
973 prefix, yes_no(u->assert_result));
974
975 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
976 Unit *other;
977
978 SET_FOREACH(other, u->dependencies[d], i)
979 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
980 }
981
982 if (!strv_isempty(u->requires_mounts_for)) {
983 fprintf(f,
984 "%s\tRequiresMountsFor:", prefix);
985
986 STRV_FOREACH(j, u->requires_mounts_for)
987 fprintf(f, " %s", *j);
988
989 fputs("\n", f);
990 }
991
992 if (u->load_state == UNIT_LOADED) {
993
994 fprintf(f,
995 "%s\tStopWhenUnneeded: %s\n"
996 "%s\tRefuseManualStart: %s\n"
997 "%s\tRefuseManualStop: %s\n"
998 "%s\tDefaultDependencies: %s\n"
999 "%s\tOnFailureJobMode: %s\n"
1000 "%s\tIgnoreOnIsolate: %s\n",
1001 prefix, yes_no(u->stop_when_unneeded),
1002 prefix, yes_no(u->refuse_manual_start),
1003 prefix, yes_no(u->refuse_manual_stop),
1004 prefix, yes_no(u->default_dependencies),
1005 prefix, job_mode_to_string(u->on_failure_job_mode),
1006 prefix, yes_no(u->ignore_on_isolate));
1007
1008 if (UNIT_VTABLE(u)->dump)
1009 UNIT_VTABLE(u)->dump(u, f, prefix2);
1010
1011 } else if (u->load_state == UNIT_MERGED)
1012 fprintf(f,
1013 "%s\tMerged into: %s\n",
1014 prefix, u->merged_into->id);
1015 else if (u->load_state == UNIT_ERROR)
1016 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1017
1018
1019 if (u->job)
1020 job_dump(u->job, f, prefix2);
1021
1022 if (u->nop_job)
1023 job_dump(u->nop_job, f, prefix2);
1024
1025 }
1026
1027 /* Common implementation for multiple backends */
1028 int unit_load_fragment_and_dropin(Unit *u) {
1029 int r;
1030
1031 assert(u);
1032
1033 /* Load a .{service,socket,...} file */
1034 r = unit_load_fragment(u);
1035 if (r < 0)
1036 return r;
1037
1038 if (u->load_state == UNIT_STUB)
1039 return -ENOENT;
1040
1041 /* Load drop-in directory data */
1042 r = unit_load_dropin(unit_follow_merge(u));
1043 if (r < 0)
1044 return r;
1045
1046 return 0;
1047 }
1048
1049 /* Common implementation for multiple backends */
1050 int unit_load_fragment_and_dropin_optional(Unit *u) {
1051 int r;
1052
1053 assert(u);
1054
1055         /* Same as unit_load_fragment_and_dropin(), but it is not
1056          * an error if no unit fragment can be found. */
1057
1058 /* Load a .service file */
1059 r = unit_load_fragment(u);
1060 if (r < 0)
1061 return r;
1062
1063 if (u->load_state == UNIT_STUB)
1064 u->load_state = UNIT_LOADED;
1065
1066 /* Load drop-in directory data */
1067 r = unit_load_dropin(unit_follow_merge(u));
1068 if (r < 0)
1069 return r;
1070
1071 return 0;
1072 }
1073
1074 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1075 assert(u);
1076 assert(target);
1077
1078 if (target->type != UNIT_TARGET)
1079 return 0;
1080
1081 /* Only add the dependency if both units are loaded, so that
1082          * the loop check below is reliable */
1083 if (u->load_state != UNIT_LOADED ||
1084 target->load_state != UNIT_LOADED)
1085 return 0;
1086
1087 /* If either side wants no automatic dependencies, then let's
1088 * skip this */
1089 if (!u->default_dependencies ||
1090 !target->default_dependencies)
1091 return 0;
1092
1093 /* Don't create loops */
1094 if (set_get(target->dependencies[UNIT_BEFORE], u))
1095 return 0;
1096
1097 return unit_add_dependency(target, UNIT_AFTER, u, true);
1098 }
1099
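/* For every target that pulls this unit in (RequiredBy=, RequisiteOf=,
 * WantedBy=, BoundBy=), add the implicit After= ordering on the target, so
 * that the target only becomes active once its member units are up, unless
 * default dependencies are disabled on either side. */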
1100 static int unit_add_target_dependencies(Unit *u) {
1101
1102 static const UnitDependency deps[] = {
1103 UNIT_REQUIRED_BY,
1104 UNIT_REQUISITE_OF,
1105 UNIT_WANTED_BY,
1106 UNIT_BOUND_BY
1107 };
1108
1109 Unit *target;
1110 Iterator i;
1111 unsigned k;
1112 int r = 0;
1113
1114 assert(u);
1115
1116 for (k = 0; k < ELEMENTSOF(deps); k++)
1117 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1118 r = unit_add_default_target_dependency(u, target);
1119 if (r < 0)
1120 return r;
1121 }
1122
1123 return r;
1124 }
1125
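/* Units with a cgroup context are ordered after and require the slice they
 * are placed in (or the root slice, SPECIAL_ROOT_SLICE, when none is set
 * explicitly). */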
1126 static int unit_add_slice_dependencies(Unit *u) {
1127 assert(u);
1128
1129 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1130 return 0;
1131
1132 if (UNIT_ISSET(u->slice))
1133 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1134
1135 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1136 return 0;
1137
1138 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1139 }
1140
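/* For every path listed in RequiresMountsFor=, add After= (and, for mount
 * units backed by a fragment, Requires=) dependencies on the .mount units
 * covering the path and each of its prefixes. */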
1141 static int unit_add_mount_dependencies(Unit *u) {
1142 char **i;
1143 int r;
1144
1145 assert(u);
1146
1147 STRV_FOREACH(i, u->requires_mounts_for) {
1148 char prefix[strlen(*i) + 1];
1149
1150 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1151 _cleanup_free_ char *p = NULL;
1152 Unit *m;
1153
1154 r = unit_name_from_path(prefix, ".mount", &p);
1155 if (r < 0)
1156 return r;
1157
1158 m = manager_get_unit(u->manager, p);
1159 if (!m) {
1160                                 /* Make sure to load the mount unit if
1161                                  * it exists. If it does, the dependencies
1162                                  * on this unit will be added later,
1163                                  * during the loading of the mount
1164                                  * unit. */
1165 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1166 continue;
1167 }
1168 if (m == u)
1169 continue;
1170
1171 if (m->load_state != UNIT_LOADED)
1172 continue;
1173
1174 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1175 if (r < 0)
1176 return r;
1177
1178 if (m->fragment_path) {
1179 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1180 if (r < 0)
1181 return r;
1182 }
1183 }
1184 }
1185
1186 return 0;
1187 }
1188
1189 static int unit_add_startup_units(Unit *u) {
1190 CGroupContext *c;
1191 int r;
1192
1193 c = unit_get_cgroup_context(u);
1194 if (!c)
1195 return 0;
1196
1197 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1198 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1199 return 0;
1200
1201 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1202 if (r < 0)
1203 return r;
1204
1205 return set_put(u->manager->startup_units, u);
1206 }
1207
1208 int unit_load(Unit *u) {
1209 int r;
1210
1211 assert(u);
1212
1213 if (u->in_load_queue) {
1214 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1215 u->in_load_queue = false;
1216 }
1217
1218 if (u->type == _UNIT_TYPE_INVALID)
1219 return -EINVAL;
1220
1221 if (u->load_state != UNIT_STUB)
1222 return 0;
1223
1224 if (UNIT_VTABLE(u)->load) {
1225 r = UNIT_VTABLE(u)->load(u);
1226 if (r < 0)
1227 goto fail;
1228 }
1229
1230 if (u->load_state == UNIT_STUB) {
1231 r = -ENOENT;
1232 goto fail;
1233 }
1234
1235 if (u->load_state == UNIT_LOADED) {
1236
1237 r = unit_add_target_dependencies(u);
1238 if (r < 0)
1239 goto fail;
1240
1241 r = unit_add_slice_dependencies(u);
1242 if (r < 0)
1243 goto fail;
1244
1245 r = unit_add_mount_dependencies(u);
1246 if (r < 0)
1247 goto fail;
1248
1249 r = unit_add_startup_units(u);
1250 if (r < 0)
1251 goto fail;
1252
1253 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1254 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1255 r = -EINVAL;
1256 goto fail;
1257 }
1258
1259 unit_update_cgroup_members_masks(u);
1260
1261 /* If we are reloading, we need to wait for the deserializer
1262 * to restore the net_cls ids that have been set previously */
1263 if (u->manager->n_reloading <= 0) {
1264 r = unit_add_to_netclass_cgroup(u);
1265 if (r < 0)
1266 return r;
1267 }
1268 }
1269
1270 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1271
1272 unit_add_to_dbus_queue(unit_follow_merge(u));
1273 unit_add_to_gc_queue(u);
1274
1275 return 0;
1276
1277 fail:
1278 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1279 u->load_error = r;
1280 unit_add_to_dbus_queue(u);
1281 unit_add_to_gc_queue(u);
1282
1283 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1284
1285 return r;
1286 }
1287
1288 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1289 Condition *c;
1290 int triggered = -1;
1291
1292 assert(u);
1293 assert(to_string);
1294
1295 /* If the condition list is empty, then it is true */
1296 if (!first)
1297 return true;
1298
1299 /* Otherwise, if all of the non-trigger conditions apply and
1300 * if any of the trigger conditions apply (unless there are
1301 * none) we return true */
1302 LIST_FOREACH(conditions, c, first) {
1303 int r;
1304
1305 r = condition_test(c);
1306 if (r < 0)
1307 log_unit_warning(u,
1308 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1309 to_string(c->type),
1310 c->trigger ? "|" : "",
1311 c->negate ? "!" : "",
1312 c->parameter);
1313 else
1314 log_unit_debug(u,
1315 "%s=%s%s%s %s.",
1316 to_string(c->type),
1317 c->trigger ? "|" : "",
1318 c->negate ? "!" : "",
1319 c->parameter,
1320 condition_result_to_string(c->result));
1321
1322 if (!c->trigger && r <= 0)
1323 return false;
1324
1325 if (c->trigger && triggered <= 0)
1326 triggered = r > 0;
1327 }
1328
1329 return triggered != 0;
1330 }
1331
1332 static bool unit_condition_test(Unit *u) {
1333 assert(u);
1334
1335 dual_timestamp_get(&u->condition_timestamp);
1336 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1337
1338 return u->condition_result;
1339 }
1340
1341 static bool unit_assert_test(Unit *u) {
1342 assert(u);
1343
1344 dual_timestamp_get(&u->assert_timestamp);
1345 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1346
1347 return u->assert_result;
1348 }
1349
1350 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1351 DISABLE_WARNING_FORMAT_NONLITERAL;
1352 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1353 REENABLE_WARNING;
1354 }
1355
1356 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1357 const char *format;
1358 const UnitStatusMessageFormats *format_table;
1359
1360 assert(u);
1361 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1362
1363 if (t != JOB_RELOAD) {
1364 format_table = &UNIT_VTABLE(u)->status_message_formats;
1365 if (format_table) {
1366 format = format_table->starting_stopping[t == JOB_STOP];
1367 if (format)
1368 return format;
1369 }
1370 }
1371
1372 /* Return generic strings */
1373 if (t == JOB_START)
1374 return "Starting %s.";
1375 else if (t == JOB_STOP)
1376 return "Stopping %s.";
1377 else
1378 return "Reloading %s.";
1379 }
1380
1381 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1382 const char *format;
1383
1384 assert(u);
1385
1386 /* Reload status messages have traditionally not been printed to console. */
1387 if (!IN_SET(t, JOB_START, JOB_STOP))
1388 return;
1389
1390 format = unit_get_status_message_format(u, t);
1391
1392 DISABLE_WARNING_FORMAT_NONLITERAL;
1393 unit_status_printf(u, "", format);
1394 REENABLE_WARNING;
1395 }
1396
1397 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1398 const char *format;
1399 char buf[LINE_MAX];
1400 sd_id128_t mid;
1401
1402 assert(u);
1403
1404 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1405 return;
1406
1407 if (log_on_console())
1408 return;
1409
1410 /* We log status messages for all units and all operations. */
1411
1412 format = unit_get_status_message_format(u, t);
1413
1414 DISABLE_WARNING_FORMAT_NONLITERAL;
1415 snprintf(buf, sizeof(buf), format, unit_description(u));
1416 REENABLE_WARNING;
1417
1418 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1419 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1420 SD_MESSAGE_UNIT_RELOADING;
1421
1422 /* Note that we deliberately use LOG_MESSAGE() instead of
1423 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1424 * closely what is written to screen using the status output,
1425          * which is supposed to be the highest level, friendliest output
1426 * possible, which means we should avoid the low-level unit
1427 * name. */
1428 log_struct(LOG_INFO,
1429 LOG_MESSAGE_ID(mid),
1430 LOG_UNIT_ID(u),
1431 LOG_MESSAGE("%s", buf),
1432 NULL);
1433 }
1434
1435 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1436 assert(u);
1437 assert(t >= 0);
1438 assert(t < _JOB_TYPE_MAX);
1439
1440 unit_status_log_starting_stopping_reloading(u, t);
1441 unit_status_print_starting_stopping(u, t);
1442 }
1443
1444 /* Errors:
1445 * -EBADR: This unit type does not support starting.
1446 * -EALREADY: Unit is already started.
1447 * -EAGAIN: An operation is already in progress. Retry later.
1448 * -ECANCELED: Too many requests for now.
1449 * -EPROTO: Assert failed
1450 */
1451 int unit_start(Unit *u) {
1452 UnitActiveState state;
1453 Unit *following;
1454
1455 assert(u);
1456
1457 /* Units that aren't loaded cannot be started */
1458 if (u->load_state != UNIT_LOADED)
1459 return -EINVAL;
1460
1461 /* If this is already started, then this will succeed. Note
1462 * that this will even succeed if this unit is not startable
1463 * by the user. This is relied on to detect when we need to
1464 * wait for units and when waiting is finished. */
1465 state = unit_active_state(u);
1466 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1467 return -EALREADY;
1468
1469 /* If the conditions failed, don't do anything at all. If we
1470          * are already activating, this call might still be useful to
1471 * speed up activation in case there is some hold-off time,
1472 * but we don't want to recheck the condition in that case. */
1473 if (state != UNIT_ACTIVATING &&
1474 !unit_condition_test(u)) {
1475 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1476 return -EALREADY;
1477 }
1478
1479 /* If the asserts failed, fail the entire job */
1480 if (state != UNIT_ACTIVATING &&
1481 !unit_assert_test(u)) {
1482 log_unit_notice(u, "Starting requested but asserts failed.");
1483 return -EPROTO;
1484 }
1485
1486 /* Units of types that aren't supported cannot be
1487 * started. Note that we do this test only after the condition
1488          * checks, so that we return condition check errors (which are
1489          * usually not considered a true failure) rather than "not
1490          * supported" errors (which are considered a failure).
1491 */
1492 if (!unit_supported(u))
1493 return -EOPNOTSUPP;
1494
1495 /* Forward to the main object, if we aren't it. */
1496 following = unit_following(u);
1497 if (following) {
1498 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1499 return unit_start(following);
1500 }
1501
1502 /* If it is stopped, but we cannot start it, then fail */
1503 if (!UNIT_VTABLE(u)->start)
1504 return -EBADR;
1505
1506 /* We don't suppress calls to ->start() here when we are
1507 * already starting, to allow this request to be used as a
1508 * "hurry up" call, for example when the unit is in some "auto
1509 * restart" state where it waits for a holdoff timer to elapse
1510 * before it will start again. */
1511
1512 unit_add_to_dbus_queue(u);
1513
1514 return UNIT_VTABLE(u)->start(u);
1515 }
1516
1517 bool unit_can_start(Unit *u) {
1518 assert(u);
1519
1520 if (u->load_state != UNIT_LOADED)
1521 return false;
1522
1523 if (!unit_supported(u))
1524 return false;
1525
1526 return !!UNIT_VTABLE(u)->start;
1527 }
1528
1529 bool unit_can_isolate(Unit *u) {
1530 assert(u);
1531
1532 return unit_can_start(u) &&
1533 u->allow_isolate;
1534 }
1535
1536 /* Errors:
1537 * -EBADR: This unit type does not support stopping.
1538 * -EALREADY: Unit is already stopped.
1539 * -EAGAIN: An operation is already in progress. Retry later.
1540 */
1541 int unit_stop(Unit *u) {
1542 UnitActiveState state;
1543 Unit *following;
1544
1545 assert(u);
1546
1547 state = unit_active_state(u);
1548 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1549 return -EALREADY;
1550
1551 following = unit_following(u);
1552 if (following) {
1553 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1554 return unit_stop(following);
1555 }
1556
1557 if (!UNIT_VTABLE(u)->stop)
1558 return -EBADR;
1559
1560 unit_add_to_dbus_queue(u);
1561
1562 return UNIT_VTABLE(u)->stop(u);
1563 }
1564
1565 /* Errors:
1566 * -EBADR: This unit type does not support reloading.
1567 * -ENOEXEC: Unit is not started.
1568 * -EAGAIN: An operation is already in progress. Retry later.
1569 */
1570 int unit_reload(Unit *u) {
1571 UnitActiveState state;
1572 Unit *following;
1573
1574 assert(u);
1575
1576 if (u->load_state != UNIT_LOADED)
1577 return -EINVAL;
1578
1579 if (!unit_can_reload(u))
1580 return -EBADR;
1581
1582 state = unit_active_state(u);
1583 if (state == UNIT_RELOADING)
1584 return -EALREADY;
1585
1586 if (state != UNIT_ACTIVE) {
1587 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1588 return -ENOEXEC;
1589 }
1590
1591 following = unit_following(u);
1592 if (following) {
1593 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1594 return unit_reload(following);
1595 }
1596
1597 unit_add_to_dbus_queue(u);
1598
1599 return UNIT_VTABLE(u)->reload(u);
1600 }
1601
1602 bool unit_can_reload(Unit *u) {
1603 assert(u);
1604
1605 if (!UNIT_VTABLE(u)->reload)
1606 return false;
1607
1608 if (!UNIT_VTABLE(u)->can_reload)
1609 return true;
1610
1611 return UNIT_VTABLE(u)->can_reload(u);
1612 }
1613
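/* If the unit has StopWhenUnneeded= set and nothing active or pending still
 * requires, wants or binds to it, enqueue a stop job for it, rate-limited to
 * avoid stop loops. */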
1614 static void unit_check_unneeded(Unit *u) {
1615
1616 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1617
1618 static const UnitDependency needed_dependencies[] = {
1619 UNIT_REQUIRED_BY,
1620 UNIT_REQUISITE_OF,
1621 UNIT_WANTED_BY,
1622 UNIT_BOUND_BY,
1623 };
1624
1625 Unit *other;
1626 Iterator i;
1627 unsigned j;
1628 int r;
1629
1630 assert(u);
1631
1632 /* If this service shall be shut down when unneeded then do
1633 * so. */
1634
1635 if (!u->stop_when_unneeded)
1636 return;
1637
1638 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1639 return;
1640
1641 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1642 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1643 if (unit_active_or_pending(other))
1644 return;
1645
1646         /* If stopping a unit fails continuously we might enter a stop
1647 * loop here, hence stop acting on the service being
1648 * unnecessary after a while. */
1649 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1650 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1651 return;
1652 }
1653
1654 log_unit_info(u, "Unit not needed anymore. Stopping.");
1655
1656 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1657 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1658 if (r < 0)
1659 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1660 }
1661
1662 static void unit_check_binds_to(Unit *u) {
1663 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1664 bool stop = false;
1665 Unit *other;
1666 Iterator i;
1667 int r;
1668
1669 assert(u);
1670
1671 if (u->job)
1672 return;
1673
1674 if (unit_active_state(u) != UNIT_ACTIVE)
1675 return;
1676
1677 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1678 if (other->job)
1679 continue;
1680
1681 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1682 continue;
1683
1684 stop = true;
1685 break;
1686 }
1687
1688 if (!stop)
1689 return;
1690
1691         /* If stopping a unit fails continuously we might enter a stop
1692 * loop here, hence stop acting on the service being
1693 * unnecessary after a while. */
1694 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1695 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1696 return;
1697 }
1698
1699 assert(other);
1700 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1701
1702 /* A unit we need to run is gone. Sniff. Let's stop this. */
1703 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1704 if (r < 0)
1705 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1706 }
1707
1708 static void retroactively_start_dependencies(Unit *u) {
1709 Iterator i;
1710 Unit *other;
1711
1712 assert(u);
1713 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1714
1715 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1716 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1717 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1718 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1719
1720 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1721 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1722 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1723 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1724
1725 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1726 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1727 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1728 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1729
1730 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1731 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1732 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1733
1734 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1735 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1736 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1737 }
1738
1739 static void retroactively_stop_dependencies(Unit *u) {
1740 Iterator i;
1741 Unit *other;
1742
1743 assert(u);
1744 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1745
1746 /* Pull down units which are bound to us recursively if enabled */
1747 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1748 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1749 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1750 }
1751
1752 static void check_unneeded_dependencies(Unit *u) {
1753 Iterator i;
1754 Unit *other;
1755
1756 assert(u);
1757 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1758
1759 /* Garbage collect services that might not be needed anymore, if enabled */
1760 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1761 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1762 unit_check_unneeded(other);
1763 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1764 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1765 unit_check_unneeded(other);
1766 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1767 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1768 unit_check_unneeded(other);
1769 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1770 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1771 unit_check_unneeded(other);
1772 }
1773
1774 void unit_start_on_failure(Unit *u) {
1775 Unit *other;
1776 Iterator i;
1777
1778 assert(u);
1779
1780 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1781 return;
1782
1783 log_unit_info(u, "Triggering OnFailure= dependencies.");
1784
1785 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1786 int r;
1787
1788 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1789 if (r < 0)
1790 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1791 }
1792 }
1793
1794 void unit_trigger_notify(Unit *u) {
1795 Unit *other;
1796 Iterator i;
1797
1798 assert(u);
1799
1800 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1801 if (UNIT_VTABLE(other)->trigger_notify)
1802 UNIT_VTABLE(other)->trigger_notify(other, u);
1803 }
1804
1805 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1806 Manager *m;
1807 bool unexpected;
1808
1809 assert(u);
1810 assert(os < _UNIT_ACTIVE_STATE_MAX);
1811 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1812
1813 /* Note that this is called for all low-level state changes,
1814 * even if they might map to the same high-level
1815 * UnitActiveState! That means that ns == os is an expected
1816 * behavior here. For example: if a mount point is remounted
1817 * this function will be called too! */
1818
1819 m = u->manager;
1820
1821 /* Update timestamps for state changes */
1822 if (m->n_reloading <= 0) {
1823 dual_timestamp ts;
1824
1825 dual_timestamp_get(&ts);
1826
1827 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1828 u->inactive_exit_timestamp = ts;
1829 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1830 u->inactive_enter_timestamp = ts;
1831
1832 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1833 u->active_enter_timestamp = ts;
1834 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1835 u->active_exit_timestamp = ts;
1836 }
1837
1838 /* Keep track of failed units */
1839 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1840
1841 /* Make sure the cgroup is always removed when we become inactive */
1842 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1843 unit_prune_cgroup(u);
1844
1845 /* Note that this doesn't apply to RemainAfterExit services exiting
1846          * successfully, since there's no change of state in that case, which is
1847 * why it is handled in service_set_state() */
1848 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1849 ExecContext *ec;
1850
1851 ec = unit_get_exec_context(u);
1852 if (ec && exec_context_may_touch_console(ec)) {
1853 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1854 m->n_on_console --;
1855
1856 if (m->n_on_console == 0)
1857 /* unset no_console_output flag, since the console is free */
1858 m->no_console_output = false;
1859 } else
1860 m->n_on_console ++;
1861 }
1862 }
1863
1864 if (u->job) {
1865 unexpected = false;
1866
1867 if (u->job->state == JOB_WAITING)
1868
1869 /* So we reached a different state for this
1870 * job. Let's see if we can run it now if it
1871 * failed previously due to EAGAIN. */
1872 job_add_to_run_queue(u->job);
1873
1874 /* Let's check whether this state change constitutes a
1875 * finished job, or maybe contradicts a running job and
1876 * hence needs to invalidate jobs. */
1877
1878 switch (u->job->type) {
1879
1880 case JOB_START:
1881 case JOB_VERIFY_ACTIVE:
1882
1883 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1884 job_finish_and_invalidate(u->job, JOB_DONE, true);
1885 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1886 unexpected = true;
1887
1888 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1889 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1890 }
1891
1892 break;
1893
1894 case JOB_RELOAD:
1895 case JOB_RELOAD_OR_START:
1896
1897 if (u->job->state == JOB_RUNNING) {
1898 if (ns == UNIT_ACTIVE)
1899 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
1900 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1901 unexpected = true;
1902
1903 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1904 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
1905 }
1906 }
1907
1908 break;
1909
1910 case JOB_STOP:
1911 case JOB_RESTART:
1912 case JOB_TRY_RESTART:
1913
1914 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1915 job_finish_and_invalidate(u->job, JOB_DONE, true);
1916 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1917 unexpected = true;
1918 job_finish_and_invalidate(u->job, JOB_FAILED, true);
1919 }
1920
1921 break;
1922
1923 default:
1924 assert_not_reached("Job type unknown");
1925 }
1926
1927 } else
1928 unexpected = true;
1929
1930 if (m->n_reloading <= 0) {
1931
1932 /* If this state change happened without being
1933 * requested by a job, then let's retroactively start
1934 * or stop dependencies. We skip that step when
1935 * deserializing, since we don't want to create any
1936 * additional jobs just because something is already
1937 * activated. */
1938
1939 if (unexpected) {
1940 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1941 retroactively_start_dependencies(u);
1942 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1943 retroactively_stop_dependencies(u);
1944 }
1945
1946                 /* Stop unneeded units regardless of whether going down was expected or not */
1947 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1948 check_unneeded_dependencies(u);
1949
1950 if (ns != os && ns == UNIT_FAILED) {
1951 log_unit_notice(u, "Unit entered failed state.");
1952 unit_start_on_failure(u);
1953 }
1954 }
1955
1956 /* Some names are special */
1957 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
1958
1959 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
1960 /* The bus might have just become available,
1961 * hence try to connect to it, if we aren't
1962 * yet connected. */
1963 bus_init(m, true);
1964
1965 if (u->type == UNIT_SERVICE &&
1966 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
1967 m->n_reloading <= 0) {
1968 /* Write audit record if we have just finished starting up */
1969 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
1970 u->in_audit = true;
1971 }
1972
1973 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
1974 manager_send_unit_plymouth(m, u);
1975
1976 } else {
1977
1978 /* We don't care about D-Bus here, since we'll get an
1979 * asynchronous notification for it anyway. */
1980
1981 if (u->type == UNIT_SERVICE &&
1982 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
1983 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
1984 m->n_reloading <= 0) {
1985
1986                         /* Hmm, if there was no start record written,
1987 * write it now, so that we always have a nice
1988 * pair */
1989 if (!u->in_audit) {
1990 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
1991
1992 if (ns == UNIT_INACTIVE)
1993 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
1994 } else
1995 /* Write audit record if we have just finished shutting down */
1996 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
1997
1998 u->in_audit = false;
1999 }
2000 }
2001
2002 manager_recheck_journal(m);
2003 unit_trigger_notify(u);
2004
2005 if (u->manager->n_reloading <= 0) {
2006 /* Maybe we finished startup and are now ready for
2007 * being stopped because unneeded? */
2008 unit_check_unneeded(u);
2009
2010 /* Maybe we finished startup, but something we needed
2011 * has vanished? Let's die then. (This happens when
2012 * something BindsTo= to a Type=oneshot unit, as these
2013 * units go directly from starting to inactive,
2014 * without ever entering started.) */
2015 unit_check_binds_to(u);
2016 }
2017
2018 unit_add_to_dbus_queue(u);
2019 unit_add_to_gc_queue(u);
2020 }
2021
2022 int unit_watch_pid(Unit *u, pid_t pid) {
2023 int q, r;
2024
2025 assert(u);
2026 assert(pid >= 1);
2027
2028 /* Watch a specific PID. We only support one or two units
2029 * watching each PID for now, not more. */
2030
2031 r = set_ensure_allocated(&u->pids, NULL);
2032 if (r < 0)
2033 return r;
2034
2035 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2036 if (r < 0)
2037 return r;
2038
2039 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2040 if (r == -EEXIST) {
2041 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2042 if (r < 0)
2043 return r;
2044
2045 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2046 }
2047
2048 q = set_put(u->pids, PID_TO_PTR(pid));
2049 if (q < 0)
2050 return q;
2051
2052 return r;
2053 }
2054
2055 void unit_unwatch_pid(Unit *u, pid_t pid) {
2056 assert(u);
2057 assert(pid >= 1);
2058
2059 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2060 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2061 (void) set_remove(u->pids, PID_TO_PTR(pid));
2062 }
2063
2064 void unit_unwatch_all_pids(Unit *u) {
2065 assert(u);
2066
2067 while (!set_isempty(u->pids))
2068 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2069
2070 u->pids = set_free(u->pids);
2071 }
2072
2073 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2074 Iterator i;
2075 void *e;
2076
2077 assert(u);
2078
2079 /* Cleans dead PIDs from our list */
2080
2081 SET_FOREACH(e, u->pids, i) {
2082 pid_t pid = PTR_TO_PID(e);
2083
2084 if (pid == except1 || pid == except2)
2085 continue;
2086
2087 if (!pid_is_unwaited(pid))
2088 unit_unwatch_pid(u, pid);
2089 }
2090 }
2091
2092 bool unit_job_is_applicable(Unit *u, JobType j) {
2093 assert(u);
2094 assert(j >= 0 && j < _JOB_TYPE_MAX);
2095
2096 switch (j) {
2097
2098 case JOB_VERIFY_ACTIVE:
2099 case JOB_START:
2100 case JOB_STOP:
2101 case JOB_NOP:
2102 return true;
2103
2104 case JOB_RESTART:
2105 case JOB_TRY_RESTART:
2106 return unit_can_start(u);
2107
2108 case JOB_RELOAD:
2109 return unit_can_reload(u);
2110
2111 case JOB_RELOAD_OR_START:
2112 return unit_can_reload(u) && unit_can_start(u);
2113
2114 default:
2115 assert_not_reached("Invalid job type");
2116 }
2117 }
2118
2119 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2120 assert(u);
2121
2122         /* Only warn about some dependency types */
2123 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2124 return;
2125
2126 if (streq_ptr(u->id, other))
2127 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2128 else
2129 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2130 }
2131
2132 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2133
2134 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2135 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2136 [UNIT_WANTS] = UNIT_WANTED_BY,
2137 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2138 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2139 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2140 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2141 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2142 [UNIT_WANTED_BY] = UNIT_WANTS,
2143 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2144 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2145 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2146 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2147 [UNIT_BEFORE] = UNIT_AFTER,
2148 [UNIT_AFTER] = UNIT_BEFORE,
2149 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2150 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2151 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2152 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2153 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2154 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2155 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2156 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2157 };
2158 int r, q = 0, v = 0, w = 0;
2159 Unit *orig_u = u, *orig_other = other;
2160
2161 assert(u);
2162 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2163 assert(other);
2164
2165 u = unit_follow_merge(u);
2166 other = unit_follow_merge(other);
2167
2168         /* We don't allow dependencies on ourselves, but we don't
2169          * consider them an error either. */
2170 if (u == other) {
2171 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2172 return 0;
2173 }
2174
2175 r = set_ensure_allocated(&u->dependencies[d], NULL);
2176 if (r < 0)
2177 return r;
2178
2179 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2180 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2181 if (r < 0)
2182 return r;
2183 }
2184
2185 if (add_reference) {
2186 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2187 if (r < 0)
2188 return r;
2189
2190 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2191 if (r < 0)
2192 return r;
2193 }
2194
2195 q = set_put(u->dependencies[d], other);
2196 if (q < 0)
2197 return q;
2198
2199 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2200 v = set_put(other->dependencies[inverse_table[d]], u);
2201 if (v < 0) {
2202 r = v;
2203 goto fail;
2204 }
2205 }
2206
2207 if (add_reference) {
2208 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2209 if (w < 0) {
2210 r = w;
2211 goto fail;
2212 }
2213
2214 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2215 if (r < 0)
2216 goto fail;
2217 }
2218
2219 unit_add_to_dbus_queue(u);
2220 return 0;
2221
2222 fail:
2223 if (q > 0)
2224 set_remove(u->dependencies[d], other);
2225
2226 if (v > 0)
2227 set_remove(other->dependencies[inverse_table[d]], u);
2228
2229 if (w > 0)
2230 set_remove(u->dependencies[UNIT_REFERENCES], other);
2231
2232 return r;
2233 }
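
/* Illustrative example (unit names hypothetical): for units foo and bar, a
 * call such as
 *
 *         unit_add_dependency(foo, UNIT_REQUIRES, bar, true);
 *
 * records bar in foo->dependencies[UNIT_REQUIRES], foo in
 * bar->dependencies[UNIT_REQUIRED_BY] (via inverse_table above) and, since
 * add_reference is true, additionally links the two units through
 * UNIT_REFERENCES/UNIT_REFERENCED_BY. */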
2234
2235 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2236 int r;
2237
2238 assert(u);
2239
2240 r = unit_add_dependency(u, d, other, add_reference);
2241 if (r < 0)
2242 return r;
2243
2244 return unit_add_dependency(u, e, other, add_reference);
2245 }
2246
2247 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2248 int r;
2249
2250 assert(u);
2251 assert(name || path);
2252 assert(buf);
2253 assert(ret);
2254
2255 if (!name)
2256 name = basename(path);
2257
2258 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2259 *buf = NULL;
2260 *ret = name;
2261 return 0;
2262 }
2263
2264 if (u->instance)
2265 r = unit_name_replace_instance(name, u->instance, buf);
2266 else {
2267 _cleanup_free_ char *i = NULL;
2268
2269 r = unit_name_to_prefix(u->id, &i);
2270 if (r < 0)
2271 return r;
2272
2273 r = unit_name_replace_instance(name, i, buf);
2274 }
2275 if (r < 0)
2276 return r;
2277
2278 *ret = *buf;
2279 return 0;
2280 }
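
/* For example: if u is an instance like "getty@tty1.service" and name is the
 * template "foo@.service" (hypothetical), this resolves to "foo@tty1.service";
 * a non-template name such as "bar.service" is passed through unchanged, with
 * *buf left as NULL. */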
2281
2282 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2283 _cleanup_free_ char *buf = NULL;
2284 Unit *other;
2285 int r;
2286
2287 assert(u);
2288 assert(name || path);
2289
2290 r = resolve_template(u, name, path, &buf, &name);
2291 if (r < 0)
2292 return r;
2293
2294 r = manager_load_unit(u->manager, name, path, NULL, &other);
2295 if (r < 0)
2296 return r;
2297
2298 return unit_add_dependency(u, d, other, add_reference);
2299 }
2300
2301 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2302 _cleanup_free_ char *buf = NULL;
2303 Unit *other;
2304 int r;
2305
2306 assert(u);
2307 assert(name || path);
2308
2309 r = resolve_template(u, name, path, &buf, &name);
2310 if (r < 0)
2311 return r;
2312
2313 r = manager_load_unit(u->manager, name, path, NULL, &other);
2314 if (r < 0)
2315 return r;
2316
2317 return unit_add_two_dependencies(u, d, e, other, add_reference);
2318 }
2319
2320 int set_unit_path(const char *p) {
2321 /* This is mostly for debug purposes */
2322 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2323 return -errno;
2324
2325 return 0;
2326 }
2327
2328 char *unit_dbus_path(Unit *u) {
2329 assert(u);
2330
2331 if (!u->id)
2332 return NULL;
2333
2334 return unit_dbus_path_from_name(u->id);
2335 }
2336
2337 int unit_set_slice(Unit *u, Unit *slice) {
2338 assert(u);
2339 assert(slice);
2340
2341         /* Sets the unit slice if it has not been set before. We are
2342          * extra careful to only allow this for units that actually
2343          * have a cgroup context. Also, we don't allow setting this
2344          * for slices (since the parent slice is derived from the
2345          * name). Make sure the unit we set is actually a slice. */
2346
2347 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2348 return -EOPNOTSUPP;
2349
2350 if (u->type == UNIT_SLICE)
2351 return -EINVAL;
2352
2353 if (unit_active_state(u) != UNIT_INACTIVE)
2354 return -EBUSY;
2355
2356 if (slice->type != UNIT_SLICE)
2357 return -EINVAL;
2358
2359 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2360 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2361 return -EPERM;
2362
2363 if (UNIT_DEREF(u->slice) == slice)
2364 return 0;
2365
2366 if (UNIT_ISSET(u->slice))
2367 return -EBUSY;
2368
2369 unit_ref_set(&u->slice, slice);
2370 return 1;
2371 }
2372
2373 int unit_set_default_slice(Unit *u) {
2374 _cleanup_free_ char *b = NULL;
2375 const char *slice_name;
2376 Unit *slice;
2377 int r;
2378
2379 assert(u);
2380
2381 if (UNIT_ISSET(u->slice))
2382 return 0;
2383
2384 if (u->instance) {
2385 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2386
2387 /* Implicitly place all instantiated units in their
2388 * own per-template slice */
2389
2390 r = unit_name_to_prefix(u->id, &prefix);
2391 if (r < 0)
2392 return r;
2393
2394                 /* The prefix is already escaped, but it might include
2395                  * "-" which has a special meaning for slice units,
2396                  * hence escape it once more here. */
2397 escaped = unit_name_escape(prefix);
2398 if (!escaped)
2399 return -ENOMEM;
2400
2401 if (u->manager->running_as == MANAGER_SYSTEM)
2402 b = strjoin("system-", escaped, ".slice", NULL);
2403 else
2404 b = strappend(escaped, ".slice");
2405 if (!b)
2406 return -ENOMEM;
2407
2408 slice_name = b;
2409 } else
2410 slice_name =
2411 u->manager->running_as == MANAGER_SYSTEM && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2412 ? SPECIAL_SYSTEM_SLICE
2413 : SPECIAL_ROOT_SLICE;
2414
2415 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2416 if (r < 0)
2417 return r;
2418
2419 return unit_set_slice(u, slice);
2420 }
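
/* For instance: under the system manager an instantiated unit such as
 * "getty@tty1.service" is placed in "system-getty.slice", while a plain
 * "foo.service" (hypothetical) ends up in the default system.slice. */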
2421
2422 const char *unit_slice_name(Unit *u) {
2423 assert(u);
2424
2425 if (!UNIT_ISSET(u->slice))
2426 return NULL;
2427
2428 return UNIT_DEREF(u->slice)->id;
2429 }
2430
2431 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2432 _cleanup_free_ char *t = NULL;
2433 int r;
2434
2435 assert(u);
2436 assert(type);
2437 assert(_found);
2438
2439 r = unit_name_change_suffix(u->id, type, &t);
2440 if (r < 0)
2441 return r;
2442 if (unit_has_name(u, t))
2443 return -EINVAL;
2444
2445 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2446 assert(r < 0 || *_found != u);
2447 return r;
2448 }
2449
2450 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2451 const char *name, *old_owner, *new_owner;
2452 Unit *u = userdata;
2453 int r;
2454
2455 assert(message);
2456 assert(u);
2457
2458 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2459 if (r < 0) {
2460 bus_log_parse_error(r);
2461 return 0;
2462 }
2463
2464 if (UNIT_VTABLE(u)->bus_name_owner_change)
2465 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2466
2467 return 0;
2468 }
2469
2470 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2471 const char *match;
2472
2473 assert(u);
2474 assert(bus);
2475 assert(name);
2476
2477 if (u->match_bus_slot)
2478 return -EBUSY;
2479
2480 match = strjoina("type='signal',"
2481 "sender='org.freedesktop.DBus',"
2482 "path='/org/freedesktop/DBus',"
2483 "interface='org.freedesktop.DBus',"
2484 "member='NameOwnerChanged',"
2485 "arg0='", name, "'",
2486 NULL);
2487
2488 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2489 }
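
/* The resulting match rule for a watched name of, say, "org.example.Foo"
 * (hypothetical) is the single string
 *
 *         type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *         interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.example.Foo'
 *
 * (wrapped here for readability), i.e. we are notified whenever ownership of
 * that well-known bus name changes. */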
2490
2491 int unit_watch_bus_name(Unit *u, const char *name) {
2492 int r;
2493
2494 assert(u);
2495 assert(name);
2496
2497 /* Watch a specific name on the bus. We only support one unit
2498 * watching each name for now. */
2499
2500 if (u->manager->api_bus) {
2501                 /* If the bus is already available, install the match directly.
2502                  * Otherwise, just put the name in the list. bus_setup_api() will take care of it later. */
2503 r = unit_install_bus_match(u, u->manager->api_bus, name);
2504 if (r < 0)
2505 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2506 }
2507
2508 r = hashmap_put(u->manager->watch_bus, name, u);
2509 if (r < 0) {
2510 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2511 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
2512 }
2513
2514 return 0;
2515 }
2516
2517 void unit_unwatch_bus_name(Unit *u, const char *name) {
2518 assert(u);
2519 assert(name);
2520
2521 hashmap_remove_value(u->manager->watch_bus, name, u);
2522 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2523 }
2524
2525 bool unit_can_serialize(Unit *u) {
2526 assert(u);
2527
2528 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2529 }
2530
2531 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2532 int r;
2533
2534 assert(u);
2535 assert(f);
2536 assert(fds);
2537
2538 if (unit_can_serialize(u)) {
2539 ExecRuntime *rt;
2540
2541 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2542 if (r < 0)
2543 return r;
2544
2545 rt = unit_get_exec_runtime(u);
2546 if (rt) {
2547 r = exec_runtime_serialize(u, rt, f, fds);
2548 if (r < 0)
2549 return r;
2550 }
2551 }
2552
2553 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2554 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2555 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2556 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2557 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2558 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2559
2560 if (dual_timestamp_is_set(&u->condition_timestamp))
2561 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2562
2563 if (dual_timestamp_is_set(&u->assert_timestamp))
2564 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2565
2566 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2567 unit_serialize_item_format(u, f, "cpuacct-usage-base", "%" PRIu64, u->cpuacct_usage_base);
2568
2569 if (u->cgroup_path)
2570 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2571 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2572
2573 if (u->cgroup_netclass_id)
2574 unit_serialize_item_format(u, f, "netclass-id", "%" PRIu32, u->cgroup_netclass_id);
2575
2576 if (serialize_jobs) {
2577 if (u->job) {
2578 fprintf(f, "job\n");
2579 job_serialize(u->job, f, fds);
2580 }
2581
2582 if (u->nop_job) {
2583 fprintf(f, "job\n");
2584 job_serialize(u->nop_job, f, fds);
2585 }
2586 }
2587
2588 /* End marker */
2589 fputc('\n', f);
2590 return 0;
2591 }
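
/* The serialization format produced above is a plain sequence of "key=value"
 * lines terminated by an empty line, roughly like this (values illustrative):
 *
 *         inactive-exit-timestamp=1452000000000000 1000000
 *         condition-result=yes
 *         transient=no
 *         cgroup=/system.slice/foo.service
 *         cgroup-realized=yes
 *
 * unit_deserialize() below parses this format back. */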
2592
2593 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2594 assert(u);
2595 assert(f);
2596 assert(key);
2597
2598 if (!value)
2599 return 0;
2600
2601 fputs(key, f);
2602 fputc('=', f);
2603 fputs(value, f);
2604 fputc('\n', f);
2605
2606 return 1;
2607 }
2608
2609 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2610 _cleanup_free_ char *c = NULL;
2611
2612 assert(u);
2613 assert(f);
2614 assert(key);
2615
2616 if (!value)
2617 return 0;
2618
2619 c = cescape(value);
2620 if (!c)
2621 return -ENOMEM;
2622
2623 fputs(key, f);
2624 fputc('=', f);
2625 fputs(c, f);
2626 fputc('\n', f);
2627
2628 return 1;
2629 }
2630
2631 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2632 int copy;
2633
2634 assert(u);
2635 assert(f);
2636 assert(key);
2637
2638 if (fd < 0)
2639 return 0;
2640
2641 copy = fdset_put_dup(fds, fd);
2642 if (copy < 0)
2643 return copy;
2644
2645 fprintf(f, "%s=%i\n", key, copy);
2646 return 1;
2647 }
2648
2649 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2650 va_list ap;
2651
2652 assert(u);
2653 assert(f);
2654 assert(key);
2655 assert(format);
2656
2657 fputs(key, f);
2658 fputc('=', f);
2659
2660 va_start(ap, format);
2661 vfprintf(f, format, ap);
2662 va_end(ap);
2663
2664 fputc('\n', f);
2665 }
2666
2667 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2668 ExecRuntime **rt = NULL;
2669 size_t offset;
2670 int r;
2671
2672 assert(u);
2673 assert(f);
2674 assert(fds);
2675
2676 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2677 if (offset > 0)
2678 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2679
2680 for (;;) {
2681 char line[LINE_MAX], *l, *v;
2682 size_t k;
2683
2684 if (!fgets(line, sizeof(line), f)) {
2685 if (feof(f))
2686 return 0;
2687 return -errno;
2688 }
2689
2690 char_array_0(line);
2691 l = strstrip(line);
2692
2693 /* End marker */
2694 if (isempty(l))
2695 return 0;
2696
2697 k = strcspn(l, "=");
2698
2699 if (l[k] == '=') {
2700 l[k] = 0;
2701 v = l+k+1;
2702 } else
2703 v = l+k;
2704
2705 if (streq(l, "job")) {
2706 if (v[0] == '\0') {
2707 /* new-style serialized job */
2708 Job *j;
2709
2710 j = job_new_raw(u);
2711 if (!j)
2712 return log_oom();
2713
2714 r = job_deserialize(j, f, fds);
2715 if (r < 0) {
2716 job_free(j);
2717 return r;
2718 }
2719
2720 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2721 if (r < 0) {
2722 job_free(j);
2723 return r;
2724 }
2725
2726 r = job_install_deserialized(j);
2727 if (r < 0) {
2728 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2729 job_free(j);
2730 return r;
2731 }
2732 } else /* legacy for pre-44 */
2733                                 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2734 continue;
2735 } else if (streq(l, "inactive-exit-timestamp")) {
2736 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2737 continue;
2738 } else if (streq(l, "active-enter-timestamp")) {
2739 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2740 continue;
2741 } else if (streq(l, "active-exit-timestamp")) {
2742 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2743 continue;
2744 } else if (streq(l, "inactive-enter-timestamp")) {
2745 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2746 continue;
2747 } else if (streq(l, "condition-timestamp")) {
2748 dual_timestamp_deserialize(v, &u->condition_timestamp);
2749 continue;
2750 } else if (streq(l, "assert-timestamp")) {
2751 dual_timestamp_deserialize(v, &u->assert_timestamp);
2752 continue;
2753 } else if (streq(l, "condition-result")) {
2754
2755 r = parse_boolean(v);
2756 if (r < 0)
2757 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2758 else
2759 u->condition_result = r;
2760
2761 continue;
2762
2763 } else if (streq(l, "assert-result")) {
2764
2765 r = parse_boolean(v);
2766 if (r < 0)
2767 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2768 else
2769 u->assert_result = r;
2770
2771 continue;
2772
2773 } else if (streq(l, "transient")) {
2774
2775 r = parse_boolean(v);
2776 if (r < 0)
2777 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2778 else
2779 u->transient = r;
2780
2781 continue;
2782
2783 } else if (streq(l, "cpuacct-usage-base")) {
2784
2785 r = safe_atou64(v, &u->cpuacct_usage_base);
2786 if (r < 0)
2787 log_unit_debug(u, "Failed to parse CPU usage %s, ignoring.", v);
2788
2789 continue;
2790
2791 } else if (streq(l, "cgroup")) {
2792
2793 r = unit_set_cgroup_path(u, v);
2794 if (r < 0)
2795 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2796
2797 (void) unit_watch_cgroup(u);
2798
2799 continue;
2800 } else if (streq(l, "cgroup-realized")) {
2801 int b;
2802
2803 b = parse_boolean(v);
2804 if (b < 0)
2805 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2806 else
2807 u->cgroup_realized = b;
2808
2809 continue;
2810 } else if (streq(l, "netclass-id")) {
2811 r = safe_atou32(v, &u->cgroup_netclass_id);
2812 if (r < 0)
2813 log_unit_debug(u, "Failed to parse netclass ID %s, ignoring.", v);
2814 else {
2815 r = unit_add_to_netclass_cgroup(u);
2816 if (r < 0)
2817 log_unit_debug_errno(u, r, "Failed to add unit to netclass cgroup, ignoring: %m");
2818 }
2819
2820 continue;
2821 }
2822
2823 if (unit_can_serialize(u)) {
2824 if (rt) {
2825 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
2826 if (r < 0) {
2827 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
2828 continue;
2829 }
2830
2831 /* Returns positive if key was handled by the call */
2832 if (r > 0)
2833 continue;
2834 }
2835
2836 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
2837 if (r < 0)
2838 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
2839 }
2840 }
2841 }
2842
2843 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
2844 Unit *device;
2845 _cleanup_free_ char *e = NULL;
2846 int r;
2847
2848 assert(u);
2849
2850 /* Adds in links to the device node that this unit is based on */
2851 if (isempty(what))
2852 return 0;
2853
2854 if (!is_device_path(what))
2855 return 0;
2856
2857 /* When device units aren't supported (such as in a
2858 * container), don't create dependencies on them. */
2859 if (!unit_type_supported(UNIT_DEVICE))
2860 return 0;
2861
2862 r = unit_name_from_path(what, ".device", &e);
2863 if (r < 0)
2864 return r;
2865
2866 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2867 if (r < 0)
2868 return r;
2869
2870 r = unit_add_two_dependencies(u, UNIT_AFTER,
2871 u->manager->running_as == MANAGER_SYSTEM ? dep : UNIT_WANTS,
2872 device, true);
2873 if (r < 0)
2874 return r;
2875
2876 if (wants) {
2877 r = unit_add_dependency(device, UNIT_WANTS, u, false);
2878 if (r < 0)
2879 return r;
2880 }
2881
2882 return 0;
2883 }
2884
2885 int unit_coldplug(Unit *u) {
2886 int r = 0, q = 0;
2887
2888 assert(u);
2889
2890         /* Make sure we don't enter a loop when coldplugging
2891          * recursively. */
2892 if (u->coldplugged)
2893 return 0;
2894
2895 u->coldplugged = true;
2896
2897 if (UNIT_VTABLE(u)->coldplug)
2898 r = UNIT_VTABLE(u)->coldplug(u);
2899
2900 if (u->job)
2901 q = job_coldplug(u->job);
2902
2903 if (r < 0)
2904 return r;
2905 if (q < 0)
2906 return q;
2907
2908 return 0;
2909 }
2910
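/* Returns true if the unit's configuration on disk (fragment, source or
 * drop-in files) has changed since it was loaded, i.e. if a daemon-reload is
 * needed for the loaded state to match the files again. */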
2911 bool unit_need_daemon_reload(Unit *u) {
2912 _cleanup_strv_free_ char **t = NULL;
2913 char **path;
2914 struct stat st;
2915 unsigned loaded_cnt, current_cnt;
2916
2917 assert(u);
2918
2919 if (u->fragment_path) {
2920 zero(st);
2921 if (stat(u->fragment_path, &st) < 0)
2922 /* What, cannot access this anymore? */
2923 return true;
2924
2925 if (u->fragment_mtime > 0 &&
2926 timespec_load(&st.st_mtim) != u->fragment_mtime)
2927 return true;
2928 }
2929
2930 if (u->source_path) {
2931 zero(st);
2932 if (stat(u->source_path, &st) < 0)
2933 return true;
2934
2935 if (u->source_mtime > 0 &&
2936 timespec_load(&st.st_mtim) != u->source_mtime)
2937 return true;
2938 }
2939
2940 (void) unit_find_dropin_paths(u, &t);
2941 loaded_cnt = strv_length(t);
2942 current_cnt = strv_length(u->dropin_paths);
2943
2944 if (loaded_cnt == current_cnt) {
2945 if (loaded_cnt == 0)
2946 return false;
2947
2948 if (strv_overlap(u->dropin_paths, t)) {
2949 STRV_FOREACH(path, u->dropin_paths) {
2950 zero(st);
2951 if (stat(*path, &st) < 0)
2952 return true;
2953
2954 if (u->dropin_mtime > 0 &&
2955 timespec_load(&st.st_mtim) > u->dropin_mtime)
2956 return true;
2957 }
2958
2959 return false;
2960 } else
2961 return true;
2962 } else
2963 return true;
2964 }
2965
2966 void unit_reset_failed(Unit *u) {
2967 assert(u);
2968
2969 if (UNIT_VTABLE(u)->reset_failed)
2970 UNIT_VTABLE(u)->reset_failed(u);
2971 }
2972
2973 Unit *unit_following(Unit *u) {
2974 assert(u);
2975
2976 if (UNIT_VTABLE(u)->following)
2977 return UNIT_VTABLE(u)->following(u);
2978
2979 return NULL;
2980 }
2981
2982 bool unit_stop_pending(Unit *u) {
2983 assert(u);
2984
2985         /* This call does not check the current state of the unit. It's
2986 * hence useful to be called from state change calls of the
2987 * unit itself, where the state isn't updated yet. This is
2988 * different from unit_inactive_or_pending() which checks both
2989 * the current state and for a queued job. */
2990
2991 return u->job && u->job->type == JOB_STOP;
2992 }
2993
2994 bool unit_inactive_or_pending(Unit *u) {
2995 assert(u);
2996
2997 /* Returns true if the unit is inactive or going down */
2998
2999 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3000 return true;
3001
3002 if (unit_stop_pending(u))
3003 return true;
3004
3005 return false;
3006 }
3007
3008 bool unit_active_or_pending(Unit *u) {
3009 assert(u);
3010
3011 /* Returns true if the unit is active or going up */
3012
3013 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3014 return true;
3015
3016 if (u->job &&
3017 (u->job->type == JOB_START ||
3018 u->job->type == JOB_RELOAD_OR_START ||
3019 u->job->type == JOB_RESTART))
3020 return true;
3021
3022 return false;
3023 }
3024
3025 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3026 assert(u);
3027 assert(w >= 0 && w < _KILL_WHO_MAX);
3028 assert(signo > 0);
3029 assert(signo < _NSIG);
3030
3031 if (!UNIT_VTABLE(u)->kill)
3032 return -EOPNOTSUPP;
3033
3034 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3035 }
3036
3037 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3038 Set *pid_set;
3039 int r;
3040
3041 pid_set = set_new(NULL);
3042 if (!pid_set)
3043 return NULL;
3044
3045 /* Exclude the main/control pids from being killed via the cgroup */
3046 if (main_pid > 0) {
3047 r = set_put(pid_set, PID_TO_PTR(main_pid));
3048 if (r < 0)
3049 goto fail;
3050 }
3051
3052 if (control_pid > 0) {
3053 r = set_put(pid_set, PID_TO_PTR(control_pid));
3054 if (r < 0)
3055 goto fail;
3056 }
3057
3058 return pid_set;
3059
3060 fail:
3061 set_free(pid_set);
3062 return NULL;
3063 }
3064
3065 int unit_kill_common(
3066 Unit *u,
3067 KillWho who,
3068 int signo,
3069 pid_t main_pid,
3070 pid_t control_pid,
3071 sd_bus_error *error) {
3072
3073 int r = 0;
3074 bool killed = false;
3075
3076 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3077 if (main_pid < 0)
3078 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3079 else if (main_pid == 0)
3080 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3081 }
3082
3083 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3084 if (control_pid < 0)
3085 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3086 else if (control_pid == 0)
3087 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3088 }
3089
3090 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3091 if (control_pid > 0) {
3092 if (kill(control_pid, signo) < 0)
3093 r = -errno;
3094 else
3095 killed = true;
3096 }
3097
3098 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3099 if (main_pid > 0) {
3100 if (kill(main_pid, signo) < 0)
3101 r = -errno;
3102 else
3103 killed = true;
3104 }
3105
3106 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3107 _cleanup_set_free_ Set *pid_set = NULL;
3108 int q;
3109
3110 /* Exclude the main/control pids from being killed via the cgroup */
3111 pid_set = unit_pid_set(main_pid, control_pid);
3112 if (!pid_set)
3113 return -ENOMEM;
3114
3115 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, false, false, false, pid_set);
3116 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3117 r = q;
3118 else
3119 killed = true;
3120 }
3121
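        /* For KILL_ALL_FAIL and KILL_CONTROL_FAIL the caller expects an error
         * if nothing was actually killed, hence return -ESRCH in that case. */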
3122 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3123 return -ESRCH;
3124
3125 return r;
3126 }
3127
3128 int unit_following_set(Unit *u, Set **s) {
3129 assert(u);
3130 assert(s);
3131
3132 if (UNIT_VTABLE(u)->following_set)
3133 return UNIT_VTABLE(u)->following_set(u, s);
3134
3135 *s = NULL;
3136 return 0;
3137 }
3138
3139 UnitFileState unit_get_unit_file_state(Unit *u) {
3140 int r;
3141
3142 assert(u);
3143
3144 if (u->unit_file_state < 0 && u->fragment_path) {
3145 r = unit_file_get_state(
3146 u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3147 NULL,
3148 basename(u->fragment_path),
3149 &u->unit_file_state);
3150 if (r < 0)
3151 u->unit_file_state = UNIT_FILE_BAD;
3152 }
3153
3154 return u->unit_file_state;
3155 }
3156
3157 int unit_get_unit_file_preset(Unit *u) {
3158 assert(u);
3159
3160 if (u->unit_file_preset < 0 && u->fragment_path)
3161 u->unit_file_preset = unit_file_query_preset(
3162 u->manager->running_as == MANAGER_SYSTEM ? UNIT_FILE_SYSTEM : UNIT_FILE_USER,
3163 NULL,
3164 basename(u->fragment_path));
3165
3166 return u->unit_file_preset;
3167 }
3168
3169 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3170 assert(ref);
3171 assert(u);
3172
3173 if (ref->unit)
3174 unit_ref_unset(ref);
3175
3176 ref->unit = u;
3177 LIST_PREPEND(refs, u->refs, ref);
3178 return u;
3179 }
3180
3181 void unit_ref_unset(UnitRef *ref) {
3182 assert(ref);
3183
3184 if (!ref->unit)
3185 return;
3186
3187 LIST_REMOVE(refs, ref->unit->refs, ref);
3188 ref->unit = NULL;
3189 }
3190
3191 int unit_patch_contexts(Unit *u) {
3192 CGroupContext *cc;
3193 ExecContext *ec;
3194 unsigned i;
3195 int r;
3196
3197 assert(u);
3198
3199 /* Patch in the manager defaults into the exec and cgroup
3200 * contexts, _after_ the rest of the settings have been
3201 * initialized */
3202
3203 ec = unit_get_exec_context(u);
3204 if (ec) {
3205 /* This only copies in the ones that need memory */
3206 for (i = 0; i < _RLIMIT_MAX; i++)
3207 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3208 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3209 if (!ec->rlimit[i])
3210 return -ENOMEM;
3211 }
3212
3213 if (u->manager->running_as == MANAGER_USER &&
3214 !ec->working_directory) {
3215
3216 r = get_home_dir(&ec->working_directory);
3217 if (r < 0)
3218 return r;
3219
3220 /* Allow user services to run, even if the
3221 * home directory is missing */
3222 ec->working_directory_missing_ok = true;
3223 }
3224
3225 if (u->manager->running_as == MANAGER_USER &&
3226 (ec->syscall_whitelist ||
3227 !set_isempty(ec->syscall_filter) ||
3228 !set_isempty(ec->syscall_archs) ||
3229 ec->address_families_whitelist ||
3230 !set_isempty(ec->address_families)))
3231 ec->no_new_privileges = true;
3232
3233 if (ec->private_devices)
3234 ec->capability_bounding_set_drop |= (uint64_t) 1ULL << (uint64_t) CAP_MKNOD;
3235 }
3236
3237 cc = unit_get_cgroup_context(u);
3238 if (cc) {
3239
3240 if (ec &&
3241 ec->private_devices &&
3242 cc->device_policy == CGROUP_AUTO)
3243 cc->device_policy = CGROUP_CLOSED;
3244 }
3245
3246 return 0;
3247 }
3248
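/* The accessors below all work the same way: each unit type records in its
 * vtable the byte offset within its type-specific structure at which the
 * ExecContext, KillContext, CGroupContext or ExecRuntime pointer lives; an
 * offset of 0 means the type does not carry that context at all. */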
3249 ExecContext *unit_get_exec_context(Unit *u) {
3250 size_t offset;
3251 assert(u);
3252
3253 if (u->type < 0)
3254 return NULL;
3255
3256 offset = UNIT_VTABLE(u)->exec_context_offset;
3257 if (offset <= 0)
3258 return NULL;
3259
3260 return (ExecContext*) ((uint8_t*) u + offset);
3261 }
3262
3263 KillContext *unit_get_kill_context(Unit *u) {
3264 size_t offset;
3265 assert(u);
3266
3267 if (u->type < 0)
3268 return NULL;
3269
3270 offset = UNIT_VTABLE(u)->kill_context_offset;
3271 if (offset <= 0)
3272 return NULL;
3273
3274 return (KillContext*) ((uint8_t*) u + offset);
3275 }
3276
3277 CGroupContext *unit_get_cgroup_context(Unit *u) {
3278 size_t offset;
3279
3280 if (u->type < 0)
3281 return NULL;
3282
3283 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3284 if (offset <= 0)
3285 return NULL;
3286
3287 return (CGroupContext*) ((uint8_t*) u + offset);
3288 }
3289
3290 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3291 size_t offset;
3292
3293 if (u->type < 0)
3294 return NULL;
3295
3296 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3297 if (offset <= 0)
3298 return NULL;
3299
3300 return *(ExecRuntime**) ((uint8_t*) u + offset);
3301 }
3302
3303 static int unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode, bool transient, char **dir) {
3304 assert(u);
3305
3306 if (u->manager->running_as == MANAGER_USER) {
3307 int r;
3308
3309 if (mode == UNIT_PERSISTENT && !transient)
3310 r = user_config_home(dir);
3311 else
3312 r = user_runtime_dir(dir);
3313 if (r == 0)
3314 return -ENOENT;
3315
3316 return r;
3317 }
3318
3319 if (mode == UNIT_PERSISTENT && !transient)
3320 *dir = strdup("/etc/systemd/system");
3321 else
3322 *dir = strdup("/run/systemd/system");
3323 if (!*dir)
3324 return -ENOMEM;
3325
3326 return 0;
3327 }
3328
3329 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3330
3331 _cleanup_free_ char *dir = NULL, *p = NULL, *q = NULL;
3332 int r;
3333
3334 assert(u);
3335
3336 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3337 return 0;
3338
3339 r = unit_drop_in_dir(u, mode, u->transient, &dir);
3340 if (r < 0)
3341 return r;
3342
3343 r = write_drop_in(dir, u->id, 50, name, data);
3344 if (r < 0)
3345 return r;
3346
3347 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3348 if (r < 0)
3349 return r;
3350
3351 r = strv_extend(&u->dropin_paths, q);
3352 if (r < 0)
3353 return r;
3354
3355 strv_sort(u->dropin_paths);
3356 strv_uniq(u->dropin_paths);
3357
3358 u->dropin_mtime = now(CLOCK_REALTIME);
3359
3360 return 0;
3361 }
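
/* Rough sketch of where this ends up (unit name hypothetical): for
 * "foo.service" with mode UNIT_RUNTIME on the system manager, a drop-in
 * called "Description" would land in something like
 * /run/systemd/system/foo.service.d/50-Description.conf, and the resulting
 * path is appended to u->dropin_paths so that unit_need_daemon_reload() can
 * keep track of it. */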
3362
3363 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3364 _cleanup_free_ char *p = NULL;
3365 va_list ap;
3366 int r;
3367
3368 assert(u);
3369 assert(name);
3370 assert(format);
3371
3372 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3373 return 0;
3374
3375 va_start(ap, format);
3376 r = vasprintf(&p, format, ap);
3377 va_end(ap);
3378
3379 if (r < 0)
3380 return -ENOMEM;
3381
3382 return unit_write_drop_in(u, mode, name, p);
3383 }
3384
3385 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3386 _cleanup_free_ char *ndata = NULL;
3387
3388 assert(u);
3389 assert(name);
3390 assert(data);
3391
3392 if (!UNIT_VTABLE(u)->private_section)
3393 return -EINVAL;
3394
3395 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3396 return 0;
3397
3398 ndata = strjoin("[", UNIT_VTABLE(u)->private_section, "]\n", data, NULL);
3399 if (!ndata)
3400 return -ENOMEM;
3401
3402 return unit_write_drop_in(u, mode, name, ndata);
3403 }
3404
3405 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3406 _cleanup_free_ char *p = NULL;
3407 va_list ap;
3408 int r;
3409
3410 assert(u);
3411 assert(name);
3412 assert(format);
3413
3414 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3415 return 0;
3416
3417 va_start(ap, format);
3418 r = vasprintf(&p, format, ap);
3419 va_end(ap);
3420
3421 if (r < 0)
3422 return -ENOMEM;
3423
3424 return unit_write_drop_in_private(u, mode, name, p);
3425 }
3426
3427 int unit_make_transient(Unit *u) {
3428 assert(u);
3429
3430 if (!UNIT_VTABLE(u)->can_transient)
3431 return -EOPNOTSUPP;
3432
3433 u->load_state = UNIT_STUB;
3434 u->load_error = 0;
3435 u->transient = true;
3436
3437 u->fragment_path = mfree(u->fragment_path);
3438 u->source_path = mfree(u->source_path);
3439 u->dropin_paths = strv_free(u->dropin_paths);
3440 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3441
3442 unit_add_to_dbus_queue(u);
3443 unit_add_to_gc_queue(u);
3444 unit_add_to_load_queue(u);
3445
3446 return 0;
3447 }
3448
3449 int unit_kill_context(
3450 Unit *u,
3451 KillContext *c,
3452 KillOperation k,
3453 pid_t main_pid,
3454 pid_t control_pid,
3455 bool main_pid_alien) {
3456
3457 bool wait_for_exit = false;
3458 int sig, r;
3459
3460 assert(u);
3461 assert(c);
3462
3463 if (c->kill_mode == KILL_NONE)
3464 return 0;
3465
3466 switch (k) {
3467 case KILL_KILL:
3468 sig = SIGKILL;
3469 break;
3470 case KILL_ABORT:
3471 sig = SIGABRT;
3472 break;
3473 case KILL_TERMINATE:
3474 sig = c->kill_signal;
3475 break;
3476 default:
3477 assert_not_reached("KillOperation unknown");
3478 }
3479
3480 if (main_pid > 0) {
3481 r = kill_and_sigcont(main_pid, sig);
3482
3483 if (r < 0 && r != -ESRCH) {
3484 _cleanup_free_ char *comm = NULL;
3485 get_process_comm(main_pid, &comm);
3486
3487 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3488 } else {
3489 if (!main_pid_alien)
3490 wait_for_exit = true;
3491
3492 if (c->send_sighup && k == KILL_TERMINATE)
3493 (void) kill(main_pid, SIGHUP);
3494 }
3495 }
3496
3497 if (control_pid > 0) {
3498 r = kill_and_sigcont(control_pid, sig);
3499
3500 if (r < 0 && r != -ESRCH) {
3501 _cleanup_free_ char *comm = NULL;
3502 get_process_comm(control_pid, &comm);
3503
3504 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3505 } else {
3506 wait_for_exit = true;
3507
3508 if (c->send_sighup && k == KILL_TERMINATE)
3509 (void) kill(control_pid, SIGHUP);
3510 }
3511 }
3512
3513 if (u->cgroup_path &&
3514 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3515 _cleanup_set_free_ Set *pid_set = NULL;
3516
3517 /* Exclude the main/control pids from being killed via the cgroup */
3518 pid_set = unit_pid_set(main_pid, control_pid);
3519 if (!pid_set)
3520 return -ENOMEM;
3521
3522 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, sig, true, k != KILL_TERMINATE, false, pid_set);
3523 if (r < 0) {
3524 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3525 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3526
3527 } else if (r > 0) {
3528
3529 /* FIXME: For now, on the legacy hierarchy, we
3530 * will not wait for the cgroup members to die
3531 * if we are running in a container or if this
3532 * is a delegation unit, simply because cgroup
3533 * notification is unreliable in these
3534 * cases. It doesn't work at all in
3535 * containers, and outside of containers it
3536 * can be confused easily by left-over
3537 * directories in the cgroup -- which however
3538 * should not exist in non-delegated units. On
3539 * the unified hierarchy that's different,
3540 * there we get proper events. Hence rely on
3541                          * them. */
3542
3543 if (cg_unified() > 0 ||
3544 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3545 wait_for_exit = true;
3546
3547 if (c->send_sighup && k != KILL_KILL) {
3548 set_free(pid_set);
3549
3550 pid_set = unit_pid_set(main_pid, control_pid);
3551 if (!pid_set)
3552 return -ENOMEM;
3553
3554 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, SIGHUP, false, true, false, pid_set);
3555 }
3556 }
3557 }
3558
3559 return wait_for_exit;
3560 }
3561
3562 int unit_require_mounts_for(Unit *u, const char *path) {
3563 char prefix[strlen(path) + 1], *p;
3564 int r;
3565
3566 assert(u);
3567 assert(path);
3568
3569         /* Registers a unit for requiring a certain path and all its
3570          * prefixes. We keep a simple array of these paths in the
3571          * unit, since it's usually short. However, we build a prefix
3572          * table for all possible prefixes so that newly appearing
3573          * mount units can easily determine which units to make
3574          * themselves a dependency of. */
3575
3576 if (!path_is_absolute(path))
3577 return -EINVAL;
3578
3579 p = strdup(path);
3580 if (!p)
3581 return -ENOMEM;
3582
3583 path_kill_slashes(p);
3584
3585 if (!path_is_safe(p)) {
3586 free(p);
3587 return -EPERM;
3588 }
3589
3590 if (strv_contains(u->requires_mounts_for, p)) {
3591 free(p);
3592 return 0;
3593 }
3594
3595 r = strv_consume(&u->requires_mounts_for, p);
3596 if (r < 0)
3597 return r;
3598
3599 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3600 Set *x;
3601
3602 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3603 if (!x) {
3604 char *q;
3605
3606 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3607 if (r < 0)
3608 return r;
3609
3610 q = strdup(prefix);
3611 if (!q)
3612 return -ENOMEM;
3613
3614 x = set_new(NULL);
3615 if (!x) {
3616 free(q);
3617 return -ENOMEM;
3618 }
3619
3620 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3621 if (r < 0) {
3622 free(q);
3623 set_free(x);
3624 return r;
3625 }
3626 }
3627
3628 r = set_put(x, u);
3629 if (r < 0)
3630 return r;
3631 }
3632
3633 return 0;
3634 }
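
/* Example (path hypothetical): requiring mounts for "/var/lib/foo" enters the
 * path itself and each of its parent prefixes into the manager-wide
 * units_requiring_mounts_for table, so that a mount unit showing up for, say,
 * /var can look up which units require it and make itself a dependency of
 * them. */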
3635
3636 int unit_setup_exec_runtime(Unit *u) {
3637 ExecRuntime **rt;
3638 size_t offset;
3639 Iterator i;
3640 Unit *other;
3641
3642 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3643 assert(offset > 0);
3644
3645         /* Check whether there already is an ExecRuntime for this unit */
3646 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3647 if (*rt)
3648 return 0;
3649
3650 /* Try to get it from somebody else */
3651 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3652
3653 *rt = unit_get_exec_runtime(other);
3654 if (*rt) {
3655 exec_runtime_ref(*rt);
3656 return 0;
3657 }
3658 }
3659
3660 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
3661 }
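
/* Units connected via JoinsNamespaceOf= hence end up sharing a single,
 * reference-counted ExecRuntime, which is what lets them share resources such
 * as a common private /tmp. */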
3662
3663 bool unit_type_supported(UnitType t) {
3664 if (_unlikely_(t < 0))
3665 return false;
3666 if (_unlikely_(t >= _UNIT_TYPE_MAX))
3667 return false;
3668
3669 if (!unit_vtable[t]->supported)
3670 return true;
3671
3672 return unit_vtable[t]->supported();
3673 }
3674
3675 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
3676 int r;
3677
3678 assert(u);
3679 assert(where);
3680
3681 r = dir_is_empty(where);
3682 if (r > 0)
3683 return;
3684 if (r < 0) {
3685 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
3686 return;
3687 }
3688
3689 log_struct(LOG_NOTICE,
3690 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3691 LOG_UNIT_ID(u),
3692 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
3693 "WHERE=%s", where,
3694 NULL);
3695 }
3696
3697 int unit_fail_if_symlink(Unit *u, const char* where) {
3698 int r;
3699
3700 assert(u);
3701 assert(where);
3702
3703 r = is_symlink(where);
3704 if (r < 0) {
3705 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
3706 return 0;
3707 }
3708 if (r == 0)
3709 return 0;
3710
3711 log_struct(LOG_ERR,
3712 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3713 LOG_UNIT_ID(u),
3714 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
3715 "WHERE=%s", where,
3716 NULL);
3717
3718 return -ELOOP;
3719 }
3720
3721 bool unit_is_pristine(Unit *u) {
3722 assert(u);
3723
3724 /* Check if the unit already exists or is already around,
3725 * in a number of different ways. Note that to cater for unit
3726 * types such as slice, we are generally fine with units that
3727          * are marked UNIT_LOADED even though nothing was
3728 * actually loaded, as those unit types don't require a file
3729 * on disk to validly load. */
3730
3731 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
3732 u->fragment_path ||
3733 u->source_path ||
3734 !strv_isempty(u->dropin_paths) ||
3735 u->job ||
3736 u->merged_into);
3737 }