1/***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18***/
19
20#include <errno.h>
21#include <stdlib.h>
22#include <string.h>
23#include <sys/stat.h>
24#include <unistd.h>
25
26#include "sd-id128.h"
27#include "sd-messages.h"
28
29#include "alloc-util.h"
30#include "bus-common-errors.h"
31#include "bus-util.h"
32#include "cgroup-util.h"
33#include "dbus-unit.h"
34#include "dbus.h"
35#include "dropin.h"
36#include "escape.h"
37#include "execute.h"
38#include "fileio-label.h"
39#include "format-util.h"
40#include "id128-util.h"
41#include "load-dropin.h"
42#include "load-fragment.h"
43#include "log.h"
44#include "macro.h"
45#include "missing.h"
46#include "mkdir.h"
47#include "parse-util.h"
48#include "path-util.h"
49#include "process-util.h"
50#include "set.h"
51#include "signal-util.h"
52#include "special.h"
53#include "stat-util.h"
54#include "stdio-util.h"
55#include "string-util.h"
56#include "strv.h"
57#include "umask-util.h"
58#include "unit-name.h"
59#include "unit.h"
60#include "user-util.h"
61#include "virt.h"
62
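/* Table of per-type implementations, indexed by unit type. */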
63const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
64 [UNIT_SERVICE] = &service_vtable,
65 [UNIT_SOCKET] = &socket_vtable,
66 [UNIT_BUSNAME] = &busname_vtable,
67 [UNIT_TARGET] = &target_vtable,
68 [UNIT_DEVICE] = &device_vtable,
69 [UNIT_MOUNT] = &mount_vtable,
70 [UNIT_AUTOMOUNT] = &automount_vtable,
71 [UNIT_SWAP] = &swap_vtable,
72 [UNIT_TIMER] = &timer_vtable,
73 [UNIT_PATH] = &path_vtable,
74 [UNIT_SLICE] = &slice_vtable,
75 [UNIT_SCOPE] = &scope_vtable
76};
77
78static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
79
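/* Allocates a new unit object of the given size (at least sizeof(Unit))
 * and initializes it with the default settings. Returns NULL if memory
 * allocation fails. */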
80Unit *unit_new(Manager *m, size_t size) {
81 Unit *u;
82
83 assert(m);
84 assert(size >= sizeof(Unit));
85
86 u = malloc0(size);
87 if (!u)
88 return NULL;
89
90 u->names = set_new(&string_hash_ops);
91 if (!u->names)
92 return mfree(u);
93
94 u->manager = m;
95 u->type = _UNIT_TYPE_INVALID;
96 u->default_dependencies = true;
97 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
98 u->unit_file_preset = -1;
99 u->on_failure_job_mode = JOB_REPLACE;
100 u->cgroup_inotify_wd = -1;
101 u->job_timeout = USEC_INFINITY;
102 u->ref_uid = UID_INVALID;
103 u->ref_gid = GID_INVALID;
104 u->cpu_usage_last = NSEC_INFINITY;
105
106 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
107 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
108
109 return u;
110}
111
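/* Convenience helper: allocates a new unit and immediately registers it
 * under the specified name. */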
112int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
113 Unit *u;
114 int r;
115
116 u = unit_new(m, size);
117 if (!u)
118 return -ENOMEM;
119
120 r = unit_add_name(u, name);
121 if (r < 0) {
122 unit_free(u);
123 return r;
124 }
125
126 *ret = u;
127 return r;
128}
129
130bool unit_has_name(Unit *u, const char *name) {
131 assert(u);
132 assert(name);
133
134 return set_contains(u->names, (char*) name);
135}
136
137static void unit_init(Unit *u) {
138 CGroupContext *cc;
139 ExecContext *ec;
140 KillContext *kc;
141
142 assert(u);
143 assert(u->manager);
144 assert(u->type >= 0);
145
146 cc = unit_get_cgroup_context(u);
147 if (cc) {
148 cgroup_context_init(cc);
149
150 /* Copy the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
153
154 cc->cpu_accounting = u->manager->default_cpu_accounting;
155 cc->io_accounting = u->manager->default_io_accounting;
156 cc->blockio_accounting = u->manager->default_blockio_accounting;
157 cc->memory_accounting = u->manager->default_memory_accounting;
158 cc->tasks_accounting = u->manager->default_tasks_accounting;
159
160 if (u->type != UNIT_SLICE)
161 cc->tasks_max = u->manager->default_tasks_max;
162 }
163
164 ec = unit_get_exec_context(u);
165 if (ec)
166 exec_context_init(ec);
167
168 kc = unit_get_kill_context(u);
169 if (kc)
170 kill_context_init(kc);
171
172 if (UNIT_VTABLE(u)->init)
173 UNIT_VTABLE(u)->init(u);
174}
175
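/* Adds a (possibly templated) name to the unit. The first name added
 * also determines the unit's type and becomes its id. */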
176int unit_add_name(Unit *u, const char *text) {
177 _cleanup_free_ char *s = NULL, *i = NULL;
178 UnitType t;
179 int r;
180
181 assert(u);
182 assert(text);
183
184 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
185
186 if (!u->instance)
187 return -EINVAL;
188
189 r = unit_name_replace_instance(text, u->instance, &s);
190 if (r < 0)
191 return r;
192 } else {
193 s = strdup(text);
194 if (!s)
195 return -ENOMEM;
196 }
197
198 if (set_contains(u->names, s))
199 return 0;
200 if (hashmap_contains(u->manager->units, s))
201 return -EEXIST;
202
203 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
204 return -EINVAL;
205
206 t = unit_name_to_type(s);
207 if (t < 0)
208 return -EINVAL;
209
210 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
211 return -EINVAL;
212
213 r = unit_name_to_instance(s, &i);
214 if (r < 0)
215 return r;
216
217 if (i && !unit_type_may_template(t))
218 return -EINVAL;
219
220 /* Ensure that this unit is either instanced or not instanced,
221 * but not both. Note that we do allow names with different
222 * instance names however! */
223 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
224 return -EINVAL;
225
226 if (!unit_type_may_alias(t) && !set_isempty(u->names))
227 return -EEXIST;
228
229 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
230 return -E2BIG;
231
232 r = set_put(u->names, s);
233 if (r < 0)
234 return r;
235 assert(r > 0);
236
237 r = hashmap_put(u->manager->units, s, u);
238 if (r < 0) {
239 (void) set_remove(u->names, s);
240 return r;
241 }
242
243 if (u->type == _UNIT_TYPE_INVALID) {
244 u->type = t;
245 u->id = s;
246 u->instance = i;
247
248 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
249
250 unit_init(u);
251
252 i = NULL;
253 }
254
255 s = NULL;
256
257 unit_add_to_dbus_queue(u);
258 return 0;
259}
260
261int unit_choose_id(Unit *u, const char *name) {
262 _cleanup_free_ char *t = NULL;
263 char *s, *i;
264 int r;
265
266 assert(u);
267 assert(name);
268
269 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
270
271 if (!u->instance)
272 return -EINVAL;
273
274 r = unit_name_replace_instance(name, u->instance, &t);
275 if (r < 0)
276 return r;
277
278 name = t;
279 }
280
281 /* Selects one of the names of this unit as the id */
282 s = set_get(u->names, (char*) name);
283 if (!s)
284 return -ENOENT;
285
286 /* Determine the new instance from the new id */
287 r = unit_name_to_instance(s, &i);
288 if (r < 0)
289 return r;
290
291 u->id = s;
292
293 free(u->instance);
294 u->instance = i;
295
296 unit_add_to_dbus_queue(u);
297
298 return 0;
299}
300
301int unit_set_description(Unit *u, const char *description) {
302 char *s;
303
304 assert(u);
305
306 if (isempty(description))
307 s = NULL;
308 else {
309 s = strdup(description);
310 if (!s)
311 return -ENOMEM;
312 }
313
314 free(u->description);
315 u->description = s;
316
317 unit_add_to_dbus_queue(u);
318 return 0;
319}
320
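/* Returns true if the unit shall be kept around, false if it may be
 * garbage collected. As a side effect this gives the unit type a chance
 * to release runtime resources once the unit is inactive or failed. */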
321bool unit_check_gc(Unit *u) {
322 UnitActiveState state;
323 bool inactive;
324 assert(u);
325
326 if (u->job)
327 return true;
328
329 if (u->nop_job)
330 return true;
331
332 state = unit_active_state(u);
333 inactive = state == UNIT_INACTIVE;
334
335 /* If the unit is inactive and failed and no job is queued for
336 * it, then release its runtime resources */
337 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
338 UNIT_VTABLE(u)->release_resources)
339 UNIT_VTABLE(u)->release_resources(u, inactive);
340
341 /* But we keep the unit object around for longer when it is
342 * referenced or configured to not be gc'ed */
343 if (!inactive)
344 return true;
345
346 if (u->perpetual)
347 return true;
348
349 if (u->refs)
350 return true;
351
352 if (sd_bus_track_count(u->bus_track) > 0)
353 return true;
354
355 if (UNIT_VTABLE(u)->check_gc)
356 if (UNIT_VTABLE(u)->check_gc(u))
357 return true;
358
359 return false;
360}
361
362void unit_add_to_load_queue(Unit *u) {
363 assert(u);
364 assert(u->type != _UNIT_TYPE_INVALID);
365
366 if (u->load_state != UNIT_STUB || u->in_load_queue)
367 return;
368
369 LIST_PREPEND(load_queue, u->manager->load_queue, u);
370 u->in_load_queue = true;
371}
372
373void unit_add_to_cleanup_queue(Unit *u) {
374 assert(u);
375
376 if (u->in_cleanup_queue)
377 return;
378
379 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
380 u->in_cleanup_queue = true;
381}
382
383void unit_add_to_gc_queue(Unit *u) {
384 assert(u);
385
386 if (u->in_gc_queue || u->in_cleanup_queue)
387 return;
388
389 if (unit_check_gc(u))
390 return;
391
392 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
393 u->in_gc_queue = true;
394}
395
396void unit_add_to_dbus_queue(Unit *u) {
397 assert(u);
398 assert(u->type != _UNIT_TYPE_INVALID);
399
400 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
401 return;
402
403 /* Shortcut things if nobody cares */
404 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
405 sd_bus_track_count(u->bus_track) <= 0 &&
406 set_isempty(u->manager->private_buses)) {
407 u->sent_dbus_new_signal = true;
408 return;
409 }
410
411 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
412 u->in_dbus_queue = true;
413}
414
415static void bidi_set_free(Unit *u, Set *s) {
416 Iterator i;
417 Unit *other;
418
419 assert(u);
420
421 /* Frees the set and makes sure we are dropped from the
422 * inverse pointers */
423
424 SET_FOREACH(other, s, i) {
425 UnitDependency d;
426
427 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
428 set_remove(other->dependencies[d], u);
429
430 unit_add_to_gc_queue(other);
431 }
432
433 set_free(s);
434}
435
436static void unit_remove_transient(Unit *u) {
437 char **i;
438
439 assert(u);
440
441 if (!u->transient)
442 return;
443
444 if (u->fragment_path)
445 (void) unlink(u->fragment_path);
446
447 STRV_FOREACH(i, u->dropin_paths) {
448 _cleanup_free_ char *p = NULL, *pp = NULL;
449
450 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
451 if (!p)
452 continue;
453
454 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
455 if (!pp)
456 continue;
457
458 /* Only drop transient drop-ins */
459 if (!path_equal(u->manager->lookup_paths.transient, pp))
460 continue;
461
462 (void) unlink(*i);
463 (void) rmdir(p);
464 }
465}
466
467static void unit_free_requires_mounts_for(Unit *u) {
468 char **j;
469
470 STRV_FOREACH(j, u->requires_mounts_for) {
471 char s[strlen(*j) + 1];
472
473 PATH_FOREACH_PREFIX_MORE(s, *j) {
474 char *y;
475 Set *x;
476
477 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
478 if (!x)
479 continue;
480
481 set_remove(x, u);
482
483 if (set_isempty(x)) {
484 hashmap_remove(u->manager->units_requiring_mounts_for, y);
485 free(y);
486 set_free(x);
487 }
488 }
489 }
490
491 u->requires_mounts_for = strv_free(u->requires_mounts_for);
492}
493
494static void unit_done(Unit *u) {
495 ExecContext *ec;
496 CGroupContext *cc;
497
498 assert(u);
499
500 if (u->type < 0)
501 return;
502
503 if (UNIT_VTABLE(u)->done)
504 UNIT_VTABLE(u)->done(u);
505
506 ec = unit_get_exec_context(u);
507 if (ec)
508 exec_context_done(ec);
509
510 cc = unit_get_cgroup_context(u);
511 if (cc)
512 cgroup_context_done(cc);
513}
514
515void unit_free(Unit *u) {
516 UnitDependency d;
517 Iterator i;
518 char *t;
519
520 if (!u)
521 return;
522
523 if (u->transient_file)
524 fclose(u->transient_file);
525
526 if (!MANAGER_IS_RELOADING(u->manager))
527 unit_remove_transient(u);
528
529 bus_unit_send_removed_signal(u);
530
531 unit_done(u);
532
533 sd_bus_slot_unref(u->match_bus_slot);
534
535 sd_bus_track_unref(u->bus_track);
536 u->deserialized_refs = strv_free(u->deserialized_refs);
537
538 unit_free_requires_mounts_for(u);
539
540 SET_FOREACH(t, u->names, i)
541 hashmap_remove_value(u->manager->units, t, u);
542
543 if (!sd_id128_is_null(u->invocation_id))
544 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
545
546 if (u->job) {
547 Job *j = u->job;
548 job_uninstall(j);
549 job_free(j);
550 }
551
552 if (u->nop_job) {
553 Job *j = u->nop_job;
554 job_uninstall(j);
555 job_free(j);
556 }
557
558 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
559 bidi_set_free(u, u->dependencies[d]);
560
561 if (u->type != _UNIT_TYPE_INVALID)
562 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
563
564 if (u->in_load_queue)
565 LIST_REMOVE(load_queue, u->manager->load_queue, u);
566
567 if (u->in_dbus_queue)
568 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
569
570 if (u->in_cleanup_queue)
571 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
572
573 if (u->in_gc_queue)
574 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
575
576 if (u->in_cgroup_queue)
577 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
578
579 unit_release_cgroup(u);
580
581 unit_unref_uid_gid(u, false);
582
583 (void) manager_update_failed_units(u->manager, u, false);
584 set_remove(u->manager->startup_units, u);
585
586 free(u->description);
587 strv_free(u->documentation);
588 free(u->fragment_path);
589 free(u->source_path);
590 strv_free(u->dropin_paths);
591 free(u->instance);
592
593 free(u->job_timeout_reboot_arg);
594
595 set_free_free(u->names);
596
597 unit_unwatch_all_pids(u);
598
599 condition_free_list(u->conditions);
600 condition_free_list(u->asserts);
601
602 free(u->reboot_arg);
603
604 unit_ref_unset(&u->slice);
605
606 while (u->refs)
607 unit_ref_unset(u->refs);
608
609 free(u);
610}
611
612UnitActiveState unit_active_state(Unit *u) {
613 assert(u);
614
615 if (u->load_state == UNIT_MERGED)
616 return unit_active_state(unit_follow_merge(u));
617
618 /* After a reload it might happen that a unit is not correctly
619 * loaded but still has a process around. That's why we won't
620 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
621
622 return UNIT_VTABLE(u)->active_state(u);
623}
624
625const char* unit_sub_state_to_string(Unit *u) {
626 assert(u);
627
628 return UNIT_VTABLE(u)->sub_state_to_string(u);
629}
630
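/* Moves all entries of *other into *s. If *s is not allocated yet,
 * simply takes over the other set. */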
631static int complete_move(Set **s, Set **other) {
632 int r;
633
634 assert(s);
635 assert(other);
636
637 if (!*other)
638 return 0;
639
640 if (*s) {
641 r = set_move(*s, *other);
642 if (r < 0)
643 return r;
644 } else {
645 *s = *other;
646 *other = NULL;
647 }
648
649 return 0;
650}
651
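/* Moves all names of 'other' over to 'u' and updates the manager's unit
 * table to point at 'u' for each of them. */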
652static int merge_names(Unit *u, Unit *other) {
653 char *t;
654 Iterator i;
655 int r;
656
657 assert(u);
658 assert(other);
659
660 r = complete_move(&u->names, &other->names);
661 if (r < 0)
662 return r;
663
664 set_free_free(other->names);
665 other->names = NULL;
666 other->id = NULL;
667
668 SET_FOREACH(t, u->names, i)
669 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
670
671 return 0;
672}
673
674static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
675 unsigned n_reserve;
676
677 assert(u);
678 assert(other);
679 assert(d < _UNIT_DEPENDENCY_MAX);
680
681 /*
682 * If u does not have this dependency set allocated, there is no need
683 * to reserve anything. In that case other's set will be transferred
684 * as a whole to u by complete_move().
685 */
686 if (!u->dependencies[d])
687 return 0;
688
689 /* merge_dependencies() will skip a u-on-u dependency */
690 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
691
692 return set_reserve(u->dependencies[d], n_reserve);
693}
694
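/* Moves the dependencies of type 'd' from 'other' over to 'u', fixing up
 * the reverse pointers in the affected units and dropping any
 * self-dependencies that would result. */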
695static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
696 Iterator i;
697 Unit *back;
698 int r;
699
700 assert(u);
701 assert(other);
702 assert(d < _UNIT_DEPENDENCY_MAX);
703
704 /* Fix backwards pointers */
705 SET_FOREACH(back, other->dependencies[d], i) {
706 UnitDependency k;
707
708 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
709 /* Do not add dependencies between u and itself */
710 if (back == u) {
711 if (set_remove(back->dependencies[k], other))
712 maybe_warn_about_dependency(u, other_id, k);
713 } else {
714 r = set_remove_and_put(back->dependencies[k], other, u);
715 if (r == -EEXIST)
716 set_remove(back->dependencies[k], other);
717 else
718 assert(r >= 0 || r == -ENOENT);
719 }
720 }
721 }
722
723 /* Also do not move dependencies on u to itself */
724 back = set_remove(other->dependencies[d], u);
725 if (back)
726 maybe_warn_about_dependency(u, other_id, d);
727
728 /* The move cannot fail. The caller must have performed a reservation. */
729 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
730
731 other->dependencies[d] = set_free(other->dependencies[d]);
732}
733
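/* Merges 'other' into 'u': all names, outstanding references and
 * dependencies of 'other' are transferred to 'u', and 'other' is marked
 * UNIT_MERGED so that future lookups get redirected to 'u'. */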
734int unit_merge(Unit *u, Unit *other) {
735 UnitDependency d;
736 const char *other_id = NULL;
737 int r;
738
739 assert(u);
740 assert(other);
741 assert(u->manager == other->manager);
742 assert(u->type != _UNIT_TYPE_INVALID);
743
744 other = unit_follow_merge(other);
745
746 if (other == u)
747 return 0;
748
749 if (u->type != other->type)
750 return -EINVAL;
751
752 if (!u->instance != !other->instance)
753 return -EINVAL;
754
755 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
756 return -EEXIST;
757
758 if (other->load_state != UNIT_STUB &&
759 other->load_state != UNIT_NOT_FOUND)
760 return -EEXIST;
761
762 if (other->job)
763 return -EEXIST;
764
765 if (other->nop_job)
766 return -EEXIST;
767
768 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
769 return -EEXIST;
770
771 if (other->id)
772 other_id = strdupa(other->id);
773
774 /* Make reservations to ensure merge_dependencies() won't fail */
775 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
776 r = reserve_dependencies(u, other, d);
777 /*
778 * We don't roll back reservations if we fail. We don't have
779 * a way to undo reservations. A reservation is not a leak.
780 */
781 if (r < 0)
782 return r;
783 }
784
785 /* Merge names */
786 r = merge_names(u, other);
787 if (r < 0)
788 return r;
789
790 /* Redirect all references */
791 while (other->refs)
792 unit_ref_set(other->refs, u);
793
794 /* Merge dependencies */
795 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
796 merge_dependencies(u, other, other_id, d);
797
798 other->load_state = UNIT_MERGED;
799 other->merged_into = u;
800
801 /* If there is still some data attached to the other node, we
802 * don't need it anymore, and can free it. */
803 if (other->load_state != UNIT_STUB)
804 if (UNIT_VTABLE(other)->done)
805 UNIT_VTABLE(other)->done(other);
806
807 unit_add_to_dbus_queue(u);
808 unit_add_to_cleanup_queue(other);
809
810 return 0;
811}
812
813int unit_merge_by_name(Unit *u, const char *name) {
814 _cleanup_free_ char *s = NULL;
815 Unit *other;
816 int r;
817
818 assert(u);
819 assert(name);
820
821 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
822 if (!u->instance)
823 return -EINVAL;
824
825 r = unit_name_replace_instance(name, u->instance, &s);
826 if (r < 0)
827 return r;
828
829 name = s;
830 }
831
832 other = manager_get_unit(u->manager, name);
833 if (other)
834 return unit_merge(u, other);
835
836 return unit_add_name(u, name);
837}
838
839Unit* unit_follow_merge(Unit *u) {
840 assert(u);
841
842 while (u->load_state == UNIT_MERGED)
843 assert_se(u = u->merged_into);
844
845 return u;
846}
847
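/* Adds the implicit dependencies an ExecContext implies: mount
 * dependencies for the working/root directory and the root image, and
 * (on the system manager) tmpfiles-setup plus /tmp mounts for
 * PrivateTmp=, as well as an ordering after journald's socket when
 * output goes to the journal, syslog or kmsg. */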
848int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
849 int r;
850
851 assert(u);
852 assert(c);
853
854 if (c->working_directory) {
855 r = unit_require_mounts_for(u, c->working_directory);
856 if (r < 0)
857 return r;
858 }
859
860 if (c->root_directory) {
861 r = unit_require_mounts_for(u, c->root_directory);
862 if (r < 0)
863 return r;
864 }
865
866 if (c->root_image) {
867 r = unit_require_mounts_for(u, c->root_image);
868 if (r < 0)
869 return r;
870 }
871
872 if (!MANAGER_IS_SYSTEM(u->manager))
873 return 0;
874
875 if (c->private_tmp) {
876 const char *p;
877
878 FOREACH_STRING(p, "/tmp", "/var/tmp") {
879 r = unit_require_mounts_for(u, p);
880 if (r < 0)
881 return r;
882 }
883
884 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, NULL, true);
885 if (r < 0)
886 return r;
887 }
888
889 if (!IN_SET(c->std_output,
890 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
891 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
892 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
893 !IN_SET(c->std_error,
894 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
895 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
896 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
897 return 0;
898
899 /* If syslog or kernel logging is requested, make sure our own
900 * logging daemon is run first. */
901
902 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
903 if (r < 0)
904 return r;
905
906 return 0;
907}
908
909const char *unit_description(Unit *u) {
910 assert(u);
911
912 if (u->description)
913 return u->description;
914
915 return strna(u->id);
916}
917
918void unit_dump(Unit *u, FILE *f, const char *prefix) {
919 char *t, **j;
920 UnitDependency d;
921 Iterator i;
922 const char *prefix2;
923 char
924 timestamp0[FORMAT_TIMESTAMP_MAX],
925 timestamp1[FORMAT_TIMESTAMP_MAX],
926 timestamp2[FORMAT_TIMESTAMP_MAX],
927 timestamp3[FORMAT_TIMESTAMP_MAX],
928 timestamp4[FORMAT_TIMESTAMP_MAX],
929 timespan[FORMAT_TIMESPAN_MAX];
930 Unit *following;
931 _cleanup_set_free_ Set *following_set = NULL;
932 int r;
933 const char *n;
934
935 assert(u);
936 assert(u->type >= 0);
937
938 prefix = strempty(prefix);
939 prefix2 = strjoina(prefix, "\t");
940
941 fprintf(f,
942 "%s-> Unit %s:\n"
943 "%s\tDescription: %s\n"
944 "%s\tInstance: %s\n"
945 "%s\tUnit Load State: %s\n"
946 "%s\tUnit Active State: %s\n"
947 "%s\tState Change Timestamp: %s\n"
948 "%s\tInactive Exit Timestamp: %s\n"
949 "%s\tActive Enter Timestamp: %s\n"
950 "%s\tActive Exit Timestamp: %s\n"
951 "%s\tInactive Enter Timestamp: %s\n"
952 "%s\tGC Check Good: %s\n"
953 "%s\tNeed Daemon Reload: %s\n"
954 "%s\tTransient: %s\n"
955 "%s\tPerpetual: %s\n"
956 "%s\tSlice: %s\n"
957 "%s\tCGroup: %s\n"
958 "%s\tCGroup realized: %s\n"
959 "%s\tCGroup mask: 0x%x\n"
960 "%s\tCGroup members mask: 0x%x\n",
961 prefix, u->id,
962 prefix, unit_description(u),
963 prefix, strna(u->instance),
964 prefix, unit_load_state_to_string(u->load_state),
965 prefix, unit_active_state_to_string(unit_active_state(u)),
966 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
967 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
968 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
969 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
970 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
971 prefix, yes_no(unit_check_gc(u)),
972 prefix, yes_no(unit_need_daemon_reload(u)),
973 prefix, yes_no(u->transient),
974 prefix, yes_no(u->perpetual),
975 prefix, strna(unit_slice_name(u)),
976 prefix, strna(u->cgroup_path),
977 prefix, yes_no(u->cgroup_realized),
978 prefix, u->cgroup_realized_mask,
979 prefix, u->cgroup_members_mask);
980
981 SET_FOREACH(t, u->names, i)
982 fprintf(f, "%s\tName: %s\n", prefix, t);
983
984 if (!sd_id128_is_null(u->invocation_id))
985 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
986 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
987
988 STRV_FOREACH(j, u->documentation)
989 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
990
991 following = unit_following(u);
992 if (following)
993 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
994
995 r = unit_following_set(u, &following_set);
996 if (r >= 0) {
997 Unit *other;
998
999 SET_FOREACH(other, following_set, i)
1000 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1001 }
1002
1003 if (u->fragment_path)
1004 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1005
1006 if (u->source_path)
1007 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1008
1009 STRV_FOREACH(j, u->dropin_paths)
1010 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1011
1012 if (u->job_timeout != USEC_INFINITY)
1013 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1014
1015 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1016 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1017
1018 if (u->job_timeout_reboot_arg)
1019 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1020
1021 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1022 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1023
1024 if (dual_timestamp_is_set(&u->condition_timestamp))
1025 fprintf(f,
1026 "%s\tCondition Timestamp: %s\n"
1027 "%s\tCondition Result: %s\n",
1028 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1029 prefix, yes_no(u->condition_result));
1030
1031 if (dual_timestamp_is_set(&u->assert_timestamp))
1032 fprintf(f,
1033 "%s\tAssert Timestamp: %s\n"
1034 "%s\tAssert Result: %s\n",
1035 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1036 prefix, yes_no(u->assert_result));
1037
1038 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1039 Unit *other;
1040
1041 SET_FOREACH(other, u->dependencies[d], i)
1042 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1043 }
1044
1045 if (!strv_isempty(u->requires_mounts_for)) {
1046 fprintf(f,
1047 "%s\tRequiresMountsFor:", prefix);
1048
1049 STRV_FOREACH(j, u->requires_mounts_for)
1050 fprintf(f, " %s", *j);
1051
1052 fputs("\n", f);
1053 }
1054
1055 if (u->load_state == UNIT_LOADED) {
1056
1057 fprintf(f,
1058 "%s\tStopWhenUnneeded: %s\n"
1059 "%s\tRefuseManualStart: %s\n"
1060 "%s\tRefuseManualStop: %s\n"
1061 "%s\tDefaultDependencies: %s\n"
1062 "%s\tOnFailureJobMode: %s\n"
1063 "%s\tIgnoreOnIsolate: %s\n",
1064 prefix, yes_no(u->stop_when_unneeded),
1065 prefix, yes_no(u->refuse_manual_start),
1066 prefix, yes_no(u->refuse_manual_stop),
1067 prefix, yes_no(u->default_dependencies),
1068 prefix, job_mode_to_string(u->on_failure_job_mode),
1069 prefix, yes_no(u->ignore_on_isolate));
1070
1071 if (UNIT_VTABLE(u)->dump)
1072 UNIT_VTABLE(u)->dump(u, f, prefix2);
1073
1074 } else if (u->load_state == UNIT_MERGED)
1075 fprintf(f,
1076 "%s\tMerged into: %s\n",
1077 prefix, u->merged_into->id);
1078 else if (u->load_state == UNIT_ERROR)
1079 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1080
1081 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1082 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1083
1084 if (u->job)
1085 job_dump(u->job, f, prefix2);
1086
1087 if (u->nop_job)
1088 job_dump(u->nop_job, f, prefix2);
1089}
1090
1091/* Common implementation for multiple backends */
1092int unit_load_fragment_and_dropin(Unit *u) {
1093 Unit *t;
1094 int r;
1095
1096 assert(u);
1097
1098 /* Load a .{service,socket,...} file */
1099 r = unit_load_fragment(u);
1100 if (r < 0)
1101 return r;
1102
1103 if (u->load_state == UNIT_STUB)
1104 return -ENOENT;
1105
1106 /* If the unit is an alias and the final unit has already been
1107 * loaded, there's no point in reloading the dropins one more time. */
1108 t = unit_follow_merge(u);
1109 if (t != u && t->load_state != UNIT_STUB)
1110 return 0;
1111
1112 return unit_load_dropin(t);
1113}
1114
1115/* Common implementation for multiple backends */
1116int unit_load_fragment_and_dropin_optional(Unit *u) {
1117 Unit *t;
1118 int r;
1119
1120 assert(u);
1121
1122 /* Same as unit_load_fragment_and_dropin(), but whether
1123 * something can be loaded or not doesn't matter. */
1124
1125 /* Load a .service file */
1126 r = unit_load_fragment(u);
1127 if (r < 0)
1128 return r;
1129
1130 if (u->load_state == UNIT_STUB)
1131 u->load_state = UNIT_LOADED;
1132
1133 /* If the unit is an alias and the final unit has already been
1134 * loaded, there's no point in reloading the dropins one more time. */
1135 t = unit_follow_merge(u);
1136 if (t != u && t->load_state != UNIT_STUB)
1137 return 0;
1138
1139 return unit_load_dropin(t);
1140}
1141
1142int unit_add_default_target_dependency(Unit *u, Unit *target) {
1143 assert(u);
1144 assert(target);
1145
1146 if (target->type != UNIT_TARGET)
1147 return 0;
1148
1149 /* Only add the dependency if both units are loaded, so that
1150 * the loop check below is reliable */
1151 if (u->load_state != UNIT_LOADED ||
1152 target->load_state != UNIT_LOADED)
1153 return 0;
1154
1155 /* If either side wants no automatic dependencies, then let's
1156 * skip this */
1157 if (!u->default_dependencies ||
1158 !target->default_dependencies)
1159 return 0;
1160
1161 /* Don't create loops */
1162 if (set_get(target->dependencies[UNIT_BEFORE], u))
1163 return 0;
1164
1165 return unit_add_dependency(target, UNIT_AFTER, u, true);
1166}
1167
1168static int unit_add_target_dependencies(Unit *u) {
1169
1170 static const UnitDependency deps[] = {
1171 UNIT_REQUIRED_BY,
1172 UNIT_REQUISITE_OF,
1173 UNIT_WANTED_BY,
1174 UNIT_BOUND_BY
1175 };
1176
1177 Unit *target;
1178 Iterator i;
1179 unsigned k;
1180 int r = 0;
1181
1182 assert(u);
1183
1184 for (k = 0; k < ELEMENTSOF(deps); k++)
1185 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1186 r = unit_add_default_target_dependency(u, target);
1187 if (r < 0)
1188 return r;
1189 }
1190
1191 return r;
1192}
1193
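/* Units with a cgroup context are ordered after and require their
 * configured slice, or the root slice if none was set explicitly. */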
1194static int unit_add_slice_dependencies(Unit *u) {
1195 assert(u);
1196
1197 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1198 return 0;
1199
1200 if (UNIT_ISSET(u->slice))
1201 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1202
1203 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1204 return 0;
1205
1206 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1207}
1208
1209static int unit_add_mount_dependencies(Unit *u) {
1210 char **i;
1211 int r;
1212
1213 assert(u);
1214
1215 STRV_FOREACH(i, u->requires_mounts_for) {
1216 char prefix[strlen(*i) + 1];
1217
1218 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1219 _cleanup_free_ char *p = NULL;
1220 Unit *m;
1221
1222 r = unit_name_from_path(prefix, ".mount", &p);
1223 if (r < 0)
1224 return r;
1225
1226 m = manager_get_unit(u->manager, p);
1227 if (!m) {
1228 /* Make sure to load the mount unit if
1229 * it exists. If so the dependencies
1230 * on this unit will be added later
1231 * during the loading of the mount
1232 * unit. */
1233 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1234 continue;
1235 }
1236 if (m == u)
1237 continue;
1238
1239 if (m->load_state != UNIT_LOADED)
1240 continue;
1241
1242 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1243 if (r < 0)
1244 return r;
1245
1246 if (m->fragment_path) {
1247 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1248 if (r < 0)
1249 return r;
1250 }
1251 }
1252 }
1253
1254 return 0;
1255}
1256
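/* Units that set any Startup*= resource control knob are tracked in the
 * manager's startup_units set. */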
1257static int unit_add_startup_units(Unit *u) {
1258 CGroupContext *c;
1259 int r;
1260
1261 c = unit_get_cgroup_context(u);
1262 if (!c)
1263 return 0;
1264
1265 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1266 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1267 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1268 return 0;
1269
1270 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1271 if (r < 0)
1272 return r;
1273
1274 return set_put(u->manager->startup_units, u);
1275}
1276
1277int unit_load(Unit *u) {
1278 int r;
1279
1280 assert(u);
1281
1282 if (u->in_load_queue) {
1283 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1284 u->in_load_queue = false;
1285 }
1286
1287 if (u->type == _UNIT_TYPE_INVALID)
1288 return -EINVAL;
1289
1290 if (u->load_state != UNIT_STUB)
1291 return 0;
1292
1293 if (u->transient_file) {
1294 r = fflush_and_check(u->transient_file);
1295 if (r < 0)
1296 goto fail;
1297
1298 fclose(u->transient_file);
1299 u->transient_file = NULL;
1300
1301 u->fragment_mtime = now(CLOCK_REALTIME);
1302 }
1303
1304 if (UNIT_VTABLE(u)->load) {
1305 r = UNIT_VTABLE(u)->load(u);
1306 if (r < 0)
1307 goto fail;
1308 }
1309
1310 if (u->load_state == UNIT_STUB) {
1311 r = -ENOENT;
1312 goto fail;
1313 }
1314
1315 if (u->load_state == UNIT_LOADED) {
1316
1317 r = unit_add_target_dependencies(u);
1318 if (r < 0)
1319 goto fail;
1320
1321 r = unit_add_slice_dependencies(u);
1322 if (r < 0)
1323 goto fail;
1324
1325 r = unit_add_mount_dependencies(u);
1326 if (r < 0)
1327 goto fail;
1328
1329 r = unit_add_startup_units(u);
1330 if (r < 0)
1331 goto fail;
1332
1333 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1334 log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
1335 r = -EINVAL;
1336 goto fail;
1337 }
1338
1339 unit_update_cgroup_members_masks(u);
1340 }
1341
1342 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1343
1344 unit_add_to_dbus_queue(unit_follow_merge(u));
1345 unit_add_to_gc_queue(u);
1346
1347 return 0;
1348
1349fail:
1350 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1351 u->load_error = r;
1352 unit_add_to_dbus_queue(u);
1353 unit_add_to_gc_queue(u);
1354
1355 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1356
1357 return r;
1358}
1359
1360static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1361 Condition *c;
1362 int triggered = -1;
1363
1364 assert(u);
1365 assert(to_string);
1366
1367 /* If the condition list is empty, then it is true */
1368 if (!first)
1369 return true;
1370
1371 /* Otherwise, if all of the non-trigger conditions apply and
1372 * if any of the trigger conditions apply (unless there are
1373 * none) we return true */
1374 LIST_FOREACH(conditions, c, first) {
1375 int r;
1376
1377 r = condition_test(c);
1378 if (r < 0)
1379 log_unit_warning(u,
1380 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1381 to_string(c->type),
1382 c->trigger ? "|" : "",
1383 c->negate ? "!" : "",
1384 c->parameter);
1385 else
1386 log_unit_debug(u,
1387 "%s=%s%s%s %s.",
1388 to_string(c->type),
1389 c->trigger ? "|" : "",
1390 c->negate ? "!" : "",
1391 c->parameter,
1392 condition_result_to_string(c->result));
1393
1394 if (!c->trigger && r <= 0)
1395 return false;
1396
1397 if (c->trigger && triggered <= 0)
1398 triggered = r > 0;
1399 }
1400
1401 return triggered != 0;
1402}
1403
1404static bool unit_condition_test(Unit *u) {
1405 assert(u);
1406
1407 dual_timestamp_get(&u->condition_timestamp);
1408 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1409
1410 return u->condition_result;
1411}
1412
1413static bool unit_assert_test(Unit *u) {
1414 assert(u);
1415
1416 dual_timestamp_get(&u->assert_timestamp);
1417 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1418
1419 return u->assert_result;
1420}
1421
1422void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1423 DISABLE_WARNING_FORMAT_NONLITERAL;
1424 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1425 REENABLE_WARNING;
1426}
1427
1428_pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1429 const char *format;
1430 const UnitStatusMessageFormats *format_table;
1431
1432 assert(u);
1433 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1434
1435 if (t != JOB_RELOAD) {
1436 format_table = &UNIT_VTABLE(u)->status_message_formats;
1437 if (format_table) {
1438 format = format_table->starting_stopping[t == JOB_STOP];
1439 if (format)
1440 return format;
1441 }
1442 }
1443
1444 /* Return generic strings */
1445 if (t == JOB_START)
1446 return "Starting %s.";
1447 else if (t == JOB_STOP)
1448 return "Stopping %s.";
1449 else
1450 return "Reloading %s.";
1451}
1452
1453static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1454 const char *format;
1455
1456 assert(u);
1457
1458 /* Reload status messages have traditionally not been printed to console. */
1459 if (!IN_SET(t, JOB_START, JOB_STOP))
1460 return;
1461
1462 format = unit_get_status_message_format(u, t);
1463
1464 DISABLE_WARNING_FORMAT_NONLITERAL;
1465 unit_status_printf(u, "", format);
1466 REENABLE_WARNING;
1467}
1468
1469static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1470 const char *format, *mid;
1471 char buf[LINE_MAX];
1472
1473 assert(u);
1474
1475 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1476 return;
1477
1478 if (log_on_console())
1479 return;
1480
1481 /* We log status messages for all units and all operations. */
1482
1483 format = unit_get_status_message_format(u, t);
1484
1485 DISABLE_WARNING_FORMAT_NONLITERAL;
1486 snprintf(buf, sizeof buf, format, unit_description(u));
1487 REENABLE_WARNING;
1488
1489 mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
1490 t == JOB_STOP ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
1491 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;
1492
1493 /* Note that we deliberately use LOG_MESSAGE() instead of
1494 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1495 * closely what is written to screen using the status output,
1496 * which is supposed to be the highest level, friendliest output
1497 * possible, which means we should avoid the low-level unit
1498 * name. */
1499 log_struct(LOG_INFO,
1500 mid,
1501 LOG_UNIT_ID(u),
1502 LOG_MESSAGE("%s", buf),
1503 NULL);
1504}
1505
1506void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1507 assert(u);
1508 assert(t >= 0);
1509 assert(t < _JOB_TYPE_MAX);
1510
1511 unit_status_log_starting_stopping_reloading(u, t);
1512 unit_status_print_starting_stopping(u, t);
1513}
1514
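/* Checks the unit's start rate limit. If the limit has been hit, this
 * records the fact and triggers the configured start limit action. */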
1515int unit_start_limit_test(Unit *u) {
1516 assert(u);
1517
1518 if (ratelimit_test(&u->start_limit)) {
1519 u->start_limit_hit = false;
1520 return 0;
1521 }
1522
1523 log_unit_warning(u, "Start request repeated too quickly.");
1524 u->start_limit_hit = true;
1525
1526 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1527}
1528
1529bool unit_shall_confirm_spawn(Unit *u) {
1530 assert(u);
1531
1532 if (manager_is_confirm_spawn_disabled(u->manager))
1533 return false;
1534
1535 /* For some reason units remaining in the same process group
1536 * as PID 1 fail to acquire the console even if it's not used
1537 * by any process. So skip the confirmation question for them. */
1538 return !unit_get_exec_context(u)->same_pgrp;
1539}
1540
1541static bool unit_verify_deps(Unit *u) {
1542 Unit *other;
1543 Iterator j;
1544
1545 assert(u);
1546
1547 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1548 * After=. We do not check Requires= or Requisite= here as they should only have an effect on the job
1549 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1550 * conjunction with After= as for them any such check would make things entirely racy. */
1551
1552 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], j) {
1553
1554 if (!set_contains(u->dependencies[UNIT_AFTER], other))
1555 continue;
1556
1557 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1558 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1559 return false;
1560 }
1561 }
1562
1563 return true;
1564}
1565
1566/* Errors:
1567 * -EBADR: This unit type does not support starting.
1568 * -EALREADY: Unit is already started.
1569 * -EAGAIN: An operation is already in progress. Retry later.
1570 * -ECANCELED: Too many requests for now.
1571 * -EPROTO: Assert failed
1572 * -EINVAL: Unit not loaded
1573 * -EOPNOTSUPP: Unit type not supported
1574 * -ENOLINK: The necessary dependencies are not fulfilled.
1575 */
1576int unit_start(Unit *u) {
1577 UnitActiveState state;
1578 Unit *following;
1579
1580 assert(u);
1581
1582 /* If this is already started, then this will succeed. Note
1583 * that this will even succeed if this unit is not startable
1584 * by the user. This is relied on to detect when we need to
1585 * wait for units and when waiting is finished. */
1586 state = unit_active_state(u);
1587 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1588 return -EALREADY;
1589
1590 /* Units that aren't loaded cannot be started */
1591 if (u->load_state != UNIT_LOADED)
1592 return -EINVAL;
1593
1594 /* If the conditions failed, don't do anything at all. If we
1595 * already are activating this call might still be useful to
1596 * speed up activation in case there is some hold-off time,
1597 * but we don't want to recheck the condition in that case. */
1598 if (state != UNIT_ACTIVATING &&
1599 !unit_condition_test(u)) {
1600 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1601 return -EALREADY;
1602 }
1603
1604 /* If the asserts failed, fail the entire job */
1605 if (state != UNIT_ACTIVATING &&
1606 !unit_assert_test(u)) {
1607 log_unit_notice(u, "Starting requested but asserts failed.");
1608 return -EPROTO;
1609 }
1610
1611 /* Units of types that aren't supported cannot be
1612 * started. Note that we do this test only after the condition
1613 * checks, so that we rather return condition check errors
1614 * (which are usually not considered a true failure) than "not
1615 * supported" errors (which are considered a failure).
1616 */
1617 if (!unit_supported(u))
1618 return -EOPNOTSUPP;
1619
1620 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1621 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1622 * effect anymore, due to a reload or due to a failed condition. */
1623 if (!unit_verify_deps(u))
1624 return -ENOLINK;
1625
1626 /* Forward to the main object, if we aren't it. */
1627 following = unit_following(u);
1628 if (following) {
1629 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1630 return unit_start(following);
1631 }
1632
1633 /* If it is stopped, but we cannot start it, then fail */
1634 if (!UNIT_VTABLE(u)->start)
1635 return -EBADR;
1636
1637 /* We don't suppress calls to ->start() here when we are
1638 * already starting, to allow this request to be used as a
1639 * "hurry up" call, for example when the unit is in some "auto
1640 * restart" state where it waits for a holdoff timer to elapse
1641 * before it will start again. */
1642
1643 unit_add_to_dbus_queue(u);
1644
1645 return UNIT_VTABLE(u)->start(u);
1646}
1647
1648bool unit_can_start(Unit *u) {
1649 assert(u);
1650
1651 if (u->load_state != UNIT_LOADED)
1652 return false;
1653
1654 if (!unit_supported(u))
1655 return false;
1656
1657 return !!UNIT_VTABLE(u)->start;
1658}
1659
1660bool unit_can_isolate(Unit *u) {
1661 assert(u);
1662
1663 return unit_can_start(u) &&
1664 u->allow_isolate;
1665}
1666
1667/* Errors:
1668 * -EBADR: This unit type does not support stopping.
1669 * -EALREADY: Unit is already stopped.
1670 * -EAGAIN: An operation is already in progress. Retry later.
1671 */
1672int unit_stop(Unit *u) {
1673 UnitActiveState state;
1674 Unit *following;
1675
1676 assert(u);
1677
1678 state = unit_active_state(u);
1679 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1680 return -EALREADY;
1681
1682 following = unit_following(u);
1683 if (following) {
1684 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1685 return unit_stop(following);
1686 }
1687
1688 if (!UNIT_VTABLE(u)->stop)
1689 return -EBADR;
1690
1691 unit_add_to_dbus_queue(u);
1692
1693 return UNIT_VTABLE(u)->stop(u);
1694}
1695
1696bool unit_can_stop(Unit *u) {
1697 assert(u);
1698
1699 if (!unit_supported(u))
1700 return false;
1701
1702 if (u->perpetual)
1703 return false;
1704
1705 return !!UNIT_VTABLE(u)->stop;
1706}
1707
1708/* Errors:
1709 * -EBADR: This unit type does not support reloading.
1710 * -ENOEXEC: Unit is not started.
1711 * -EAGAIN: An operation is already in progress. Retry later.
1712 */
1713int unit_reload(Unit *u) {
1714 UnitActiveState state;
1715 Unit *following;
1716
1717 assert(u);
1718
1719 if (u->load_state != UNIT_LOADED)
1720 return -EINVAL;
1721
1722 if (!unit_can_reload(u))
1723 return -EBADR;
1724
1725 state = unit_active_state(u);
1726 if (state == UNIT_RELOADING)
1727 return -EALREADY;
1728
1729 if (state != UNIT_ACTIVE) {
1730 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1731 return -ENOEXEC;
1732 }
1733
1734 following = unit_following(u);
1735 if (following) {
1736 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1737 return unit_reload(following);
1738 }
1739
1740 unit_add_to_dbus_queue(u);
1741
1742 return UNIT_VTABLE(u)->reload(u);
1743}
1744
1745bool unit_can_reload(Unit *u) {
1746 assert(u);
1747
1748 if (!UNIT_VTABLE(u)->reload)
1749 return false;
1750
1751 if (!UNIT_VTABLE(u)->can_reload)
1752 return true;
1753
1754 return UNIT_VTABLE(u)->can_reload(u);
1755}
1756
1757static void unit_check_unneeded(Unit *u) {
1758
1759 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1760
1761 static const UnitDependency needed_dependencies[] = {
1762 UNIT_REQUIRED_BY,
1763 UNIT_REQUISITE_OF,
1764 UNIT_WANTED_BY,
1765 UNIT_BOUND_BY,
1766 };
1767
1768 Unit *other;
1769 Iterator i;
1770 unsigned j;
1771 int r;
1772
1773 assert(u);
1774
1775 /* If this service shall be shut down when unneeded then do
1776 * so. */
1777
1778 if (!u->stop_when_unneeded)
1779 return;
1780
1781 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1782 return;
1783
1784 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1785 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1786 if (unit_active_or_pending(other))
1787 return;
1788
1789 /* If stopping a unit fails continuously we might enter a stop
1790 * loop here, hence rate-limit how often we act on the unit
1791 * being unneeded. */
1792 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1793 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1794 return;
1795 }
1796
1797 log_unit_info(u, "Unit not needed anymore. Stopping.");
1798
1799 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1800 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1801 if (r < 0)
1802 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1803}
1804
1805static void unit_check_binds_to(Unit *u) {
1806 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1807 bool stop = false;
1808 Unit *other;
1809 Iterator i;
1810 int r;
1811
1812 assert(u);
1813
1814 if (u->job)
1815 return;
1816
1817 if (unit_active_state(u) != UNIT_ACTIVE)
1818 return;
1819
1820 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1821 if (other->job)
1822 continue;
1823
1824 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1825 continue;
1826
1827 stop = true;
1828 break;
1829 }
1830
1831 if (!stop)
1832 return;
1833
1834 /* If stopping a unit fails continuously we might enter a stop
1835 * loop here, hence rate-limit how often we act on the unit
1836 * being bound to an inactive unit. */
1837 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1838 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1839 return;
1840 }
1841
1842 assert(other);
1843 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1844
1845 /* A unit we need to run is gone. Sniff. Let's stop this. */
1846 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1847 if (r < 0)
1848 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1849}
1850
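/* Called when a unit became active without a job requesting it: pulls in
 * its requirement and wants dependencies and stops the units it
 * conflicts with. */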
1851static void retroactively_start_dependencies(Unit *u) {
1852 Iterator i;
1853 Unit *other;
1854
1855 assert(u);
1856 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1857
1858 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1859 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1860 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1861 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1862
1863 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1864 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1865 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1866 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1867
1868 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1869 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1870 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1871 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1872
1873 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1874 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1875 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1876
1877 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1878 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1879 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1880}
1881
1882static void retroactively_stop_dependencies(Unit *u) {
1883 Iterator i;
1884 Unit *other;
1885
1886 assert(u);
1887 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1888
1889 /* Pull down units which are bound to us recursively if enabled */
1890 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1891 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1892 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1893}
1894
1895static void check_unneeded_dependencies(Unit *u) {
1896 Iterator i;
1897 Unit *other;
1898
1899 assert(u);
1900 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1901
1902 /* Garbage collect services that might not be needed anymore, if enabled */
1903 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1904 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1905 unit_check_unneeded(other);
1906 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1907 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1908 unit_check_unneeded(other);
1909 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1910 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1911 unit_check_unneeded(other);
1912 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1913 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1914 unit_check_unneeded(other);
1915}
1916
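/* Enqueues start jobs for all units listed in OnFailure=, using the
 * configured OnFailureJobMode=. */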
1917void unit_start_on_failure(Unit *u) {
1918 Unit *other;
1919 Iterator i;
1920
1921 assert(u);
1922
1923 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1924 return;
1925
1926 log_unit_info(u, "Triggering OnFailure= dependencies.");
1927
1928 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1929 int r;
1930
1931 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1932 if (r < 0)
1933 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1934 }
1935}
1936
1937void unit_trigger_notify(Unit *u) {
1938 Unit *other;
1939 Iterator i;
1940
1941 assert(u);
1942
1943 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1944 if (UNIT_VTABLE(other)->trigger_notify)
1945 UNIT_VTABLE(other)->trigger_notify(other, u);
1946}
1947
1948void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1949 Manager *m;
1950 bool unexpected;
1951
1952 assert(u);
1953 assert(os < _UNIT_ACTIVE_STATE_MAX);
1954 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1955
1956 /* Note that this is called for all low-level state changes,
1957 * even if they might map to the same high-level
1958 * UnitActiveState! That means that ns == os is an expected
1959 * behavior here. For example: if a mount point is remounted
1960 * this function will be called too! */
1961
1962 m = u->manager;
1963
1964 /* Update timestamps for state changes */
1965 if (!MANAGER_IS_RELOADING(m)) {
1966 dual_timestamp_get(&u->state_change_timestamp);
1967
1968 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1969 u->inactive_exit_timestamp = u->state_change_timestamp;
1970 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1971 u->inactive_enter_timestamp = u->state_change_timestamp;
1972
1973 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1974 u->active_enter_timestamp = u->state_change_timestamp;
1975 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1976 u->active_exit_timestamp = u->state_change_timestamp;
1977 }
1978
1979 /* Keep track of failed units */
1980 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1981
1982 /* Make sure the cgroup is always removed when we become inactive */
1983 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1984 unit_prune_cgroup(u);
1985
1986 /* Note that this doesn't apply to RemainAfterExit services exiting
1987 * successfully, since there's no change of state in that case. Which is
1988 * why it is handled in service_set_state() */
1989 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1990 ExecContext *ec;
1991
1992 ec = unit_get_exec_context(u);
1993 if (ec && exec_context_may_touch_console(ec)) {
1994 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1995 m->n_on_console--;
1996
1997 if (m->n_on_console == 0)
1998 /* unset no_console_output flag, since the console is free */
1999 m->no_console_output = false;
2000 } else
2001 m->n_on_console++;
2002 }
2003 }
2004
2005 if (u->job) {
2006 unexpected = false;
2007
2008 if (u->job->state == JOB_WAITING)
2009
2010 /* So we reached a different state for this
2011 * job. Let's see if we can run it now if it
2012 * failed previously due to EAGAIN. */
2013 job_add_to_run_queue(u->job);
2014
2015 /* Let's check whether this state change constitutes a
2016 * finished job, or maybe contradicts a running job and
2017 * hence needs to invalidate jobs. */
2018
2019 switch (u->job->type) {
2020
2021 case JOB_START:
2022 case JOB_VERIFY_ACTIVE:
2023
2024 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2025 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2026 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2027 unexpected = true;
2028
2029 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2030 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2031 }
2032
2033 break;
2034
2035 case JOB_RELOAD:
2036 case JOB_RELOAD_OR_START:
2037 case JOB_TRY_RELOAD:
2038
2039 if (u->job->state == JOB_RUNNING) {
2040 if (ns == UNIT_ACTIVE)
2041 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
2042 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
2043 unexpected = true;
2044
2045 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2046 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2047 }
2048 }
2049
2050 break;
2051
2052 case JOB_STOP:
2053 case JOB_RESTART:
2054 case JOB_TRY_RESTART:
2055
2056 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2057 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2058 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2059 unexpected = true;
2060 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2061 }
2062
2063 break;
2064
2065 default:
2066 assert_not_reached("Job type unknown");
2067 }
2068
2069 } else
2070 unexpected = true;
2071
2072 if (!MANAGER_IS_RELOADING(m)) {
2073
2074 /* If this state change happened without being
2075 * requested by a job, then let's retroactively start
2076 * or stop dependencies. We skip that step when
2077 * deserializing, since we don't want to create any
2078 * additional jobs just because something is already
2079 * activated. */
2080
2081 if (unexpected) {
2082 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2083 retroactively_start_dependencies(u);
2084 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2085 retroactively_stop_dependencies(u);
2086 }
2087
2088                 /* stop unneeded units regardless of whether going down was expected or not */
2089 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2090 check_unneeded_dependencies(u);
2091
2092 if (ns != os && ns == UNIT_FAILED) {
2093 log_unit_notice(u, "Unit entered failed state.");
2094 unit_start_on_failure(u);
2095 }
2096 }
2097
2098 /* Some names are special */
2099 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2100
2101 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2102 /* The bus might have just become available,
2103 * hence try to connect to it, if we aren't
2104 * yet connected. */
2105 bus_init(m, true);
2106
2107 if (u->type == UNIT_SERVICE &&
2108 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2109 !MANAGER_IS_RELOADING(m)) {
2110 /* Write audit record if we have just finished starting up */
2111 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2112 u->in_audit = true;
2113 }
2114
2115 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2116 manager_send_unit_plymouth(m, u);
2117
2118 } else {
2119
2120 /* We don't care about D-Bus here, since we'll get an
2121 * asynchronous notification for it anyway. */
2122
2123 if (u->type == UNIT_SERVICE &&
2124 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2125 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2126 !MANAGER_IS_RELOADING(m)) {
2127
2128                         /* Hmm, if there was no start record written,
2129                          * write it now, so that we always have a nice
2130                          * pair. */
2131 if (!u->in_audit) {
2132 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2133
2134 if (ns == UNIT_INACTIVE)
2135 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2136 } else
2137 /* Write audit record if we have just finished shutting down */
2138 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2139
2140 u->in_audit = false;
2141 }
2142 }
2143
2144 manager_recheck_journal(m);
2145 unit_trigger_notify(u);
2146
2147 if (!MANAGER_IS_RELOADING(u->manager)) {
2148                 /* Maybe we finished startup and are now ready to be
2149                  * stopped because we are no longer needed? */
2150 unit_check_unneeded(u);
2151
2152                 /* Maybe we finished startup, but something we needed
2153                  * has vanished? Let's die then. (This happens when
2154                  * something binds via BindsTo= to a Type=oneshot unit,
2155                  * as these units go directly from starting to inactive,
2156                  * without ever entering started.) */
2157 unit_check_binds_to(u);
2158 }
2159
2160 unit_add_to_dbus_queue(u);
2161 unit_add_to_gc_queue(u);
2162}
2163
2164int unit_watch_pid(Unit *u, pid_t pid) {
2165 int q, r;
2166
2167 assert(u);
2168 assert(pid >= 1);
2169
2170 /* Watch a specific PID. We only support one or two units
2171 * watching each PID for now, not more. */
2172
2173 r = set_ensure_allocated(&u->pids, NULL);
2174 if (r < 0)
2175 return r;
2176
2177 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2178 if (r < 0)
2179 return r;
2180
2181 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2182 if (r == -EEXIST) {
2183 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2184 if (r < 0)
2185 return r;
2186
2187 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2188 }
2189
2190 q = set_put(u->pids, PID_TO_PTR(pid));
2191 if (q < 0)
2192 return q;
2193
2194 return r;
2195}
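
/* Illustrative sketch (not part of the original source): a unit type
 * implementation that just forked off a worker would typically register the
 * child roughly like this (where 's' stands for some hypothetical
 * type-specific unit object), so that the manager's SIGCHLD handling can be
 * routed back to the unit via the watch_pids1/watch_pids2 hashmaps above:
 *
 *         r = unit_watch_pid(UNIT(s), pid);
 *         if (r < 0)
 *                 log_unit_warning_errno(UNIT(s), r, "Failed to watch PID: %m");
 */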
2196
2197void unit_unwatch_pid(Unit *u, pid_t pid) {
2198 assert(u);
2199 assert(pid >= 1);
2200
2201 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2202 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2203 (void) set_remove(u->pids, PID_TO_PTR(pid));
2204}
2205
2206void unit_unwatch_all_pids(Unit *u) {
2207 assert(u);
2208
2209 while (!set_isempty(u->pids))
2210 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2211
2212 u->pids = set_free(u->pids);
2213}
2214
2215void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2216 Iterator i;
2217 void *e;
2218
2219 assert(u);
2220
2221 /* Cleans dead PIDs from our list */
2222
2223 SET_FOREACH(e, u->pids, i) {
2224 pid_t pid = PTR_TO_PID(e);
2225
2226 if (pid == except1 || pid == except2)
2227 continue;
2228
2229 if (!pid_is_unwaited(pid))
2230 unit_unwatch_pid(u, pid);
2231 }
2232}
2233
2234bool unit_job_is_applicable(Unit *u, JobType j) {
2235 assert(u);
2236 assert(j >= 0 && j < _JOB_TYPE_MAX);
2237
2238 switch (j) {
2239
2240 case JOB_VERIFY_ACTIVE:
2241 case JOB_START:
2242 case JOB_NOP:
2243                 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2244                  * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2245                  * jobs for them. */
2246 return true;
2247
2248 case JOB_STOP:
2249                 /* Similarly to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2250                  * external events), hence it makes no sense to permit enqueuing such a request either. */
2251 return !u->perpetual;
2252
2253 case JOB_RESTART:
2254 case JOB_TRY_RESTART:
2255 return unit_can_stop(u) && unit_can_start(u);
2256
2257 case JOB_RELOAD:
2258 case JOB_TRY_RELOAD:
2259 return unit_can_reload(u);
2260
2261 case JOB_RELOAD_OR_START:
2262 return unit_can_reload(u) && unit_can_start(u);
2263
2264 default:
2265 assert_not_reached("Invalid job type");
2266 }
2267}
2268
2269static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2270 assert(u);
2271
2272         /* Only warn about some dependency types */
2273 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2274 return;
2275
2276 if (streq_ptr(u->id, other))
2277 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2278 else
2279 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2280}
2281
2282int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2283
2284 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2285 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2286 [UNIT_WANTS] = UNIT_WANTED_BY,
2287 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2288 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2289 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2290 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2291 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2292 [UNIT_WANTED_BY] = UNIT_WANTS,
2293 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2294 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2295 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2296 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2297 [UNIT_BEFORE] = UNIT_AFTER,
2298 [UNIT_AFTER] = UNIT_BEFORE,
2299 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2300 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2301 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2302 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2303 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2304 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2305 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2306 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2307 };
2308 int r, q = 0, v = 0, w = 0;
2309 Unit *orig_u = u, *orig_other = other;
2310
2311 assert(u);
2312 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2313 assert(other);
2314
2315 u = unit_follow_merge(u);
2316 other = unit_follow_merge(other);
2317
2318         /* We won't allow dependencies on ourselves. However, we will
2319          * not consider them an error. */
2320 if (u == other) {
2321 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2322 return 0;
2323 }
2324
2325 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2326 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2327 return 0;
2328 }
2329
2330 r = set_ensure_allocated(&u->dependencies[d], NULL);
2331 if (r < 0)
2332 return r;
2333
2334 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2335 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2336 if (r < 0)
2337 return r;
2338 }
2339
2340 if (add_reference) {
2341 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2342 if (r < 0)
2343 return r;
2344
2345 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2346 if (r < 0)
2347 return r;
2348 }
2349
2350 q = set_put(u->dependencies[d], other);
2351 if (q < 0)
2352 return q;
2353
2354 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2355 v = set_put(other->dependencies[inverse_table[d]], u);
2356 if (v < 0) {
2357 r = v;
2358 goto fail;
2359 }
2360 }
2361
2362 if (add_reference) {
2363 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2364 if (w < 0) {
2365 r = w;
2366 goto fail;
2367 }
2368
2369 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2370 if (r < 0)
2371 goto fail;
2372 }
2373
2374 unit_add_to_dbus_queue(u);
2375 return 0;
2376
2377fail:
2378 if (q > 0)
2379 set_remove(u->dependencies[d], other);
2380
2381 if (v > 0)
2382 set_remove(other->dependencies[inverse_table[d]], u);
2383
2384 if (w > 0)
2385 set_remove(u->dependencies[UNIT_REFERENCES], other);
2386
2387 return r;
2388}
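
/* Illustrative example (hypothetical units a and b, not part of the original
 * source): registering a Wants= dependency of a on b,
 *
 *         r = unit_add_dependency(a, UNIT_WANTS, b, true);
 *
 * puts b into a->dependencies[UNIT_WANTS] and, per the inverse_table above,
 * a into b->dependencies[UNIT_WANTED_BY]; with add_reference=true the two
 * units are additionally linked via UNIT_REFERENCES/UNIT_REFERENCED_BY. */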
2389
2390int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2391 int r;
2392
2393 assert(u);
2394
2395 r = unit_add_dependency(u, d, other, add_reference);
2396 if (r < 0)
2397 return r;
2398
2399 return unit_add_dependency(u, e, other, add_reference);
2400}
2401
2402static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2403 int r;
2404
2405 assert(u);
2406 assert(name || path);
2407 assert(buf);
2408 assert(ret);
2409
2410 if (!name)
2411 name = basename(path);
2412
2413 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2414 *buf = NULL;
2415 *ret = name;
2416 return 0;
2417 }
2418
2419 if (u->instance)
2420 r = unit_name_replace_instance(name, u->instance, buf);
2421 else {
2422 _cleanup_free_ char *i = NULL;
2423
2424 r = unit_name_to_prefix(u->id, &i);
2425 if (r < 0)
2426 return r;
2427
2428 r = unit_name_replace_instance(name, i, buf);
2429 }
2430 if (r < 0)
2431 return r;
2432
2433 *ret = *buf;
2434 return 0;
2435}
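
/* Illustrative example (not part of the original source): for a unit named
 * "foo@bar.service" that refers to the template name "getty@.service",
 * resolve_template() fills in the instance and yields "getty@bar.service";
 * a non-template name such as "dbus.service" is passed through unchanged. */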
2436
2437int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2438 _cleanup_free_ char *buf = NULL;
2439 Unit *other;
2440 int r;
2441
2442 assert(u);
2443 assert(name || path);
2444
2445 r = resolve_template(u, name, path, &buf, &name);
2446 if (r < 0)
2447 return r;
2448
2449 r = manager_load_unit(u->manager, name, path, NULL, &other);
2450 if (r < 0)
2451 return r;
2452
2453 return unit_add_dependency(u, d, other, add_reference);
2454}
2455
2456int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2457 _cleanup_free_ char *buf = NULL;
2458 Unit *other;
2459 int r;
2460
2461 assert(u);
2462 assert(name || path);
2463
2464 r = resolve_template(u, name, path, &buf, &name);
2465 if (r < 0)
2466 return r;
2467
2468 r = manager_load_unit(u->manager, name, path, NULL, &other);
2469 if (r < 0)
2470 return r;
2471
2472 return unit_add_two_dependencies(u, d, e, other, add_reference);
2473}
2474
2475int set_unit_path(const char *p) {
2476 /* This is mostly for debug purposes */
2477 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2478 return -errno;
2479
2480 return 0;
2481}
2482
2483char *unit_dbus_path(Unit *u) {
2484 assert(u);
2485
2486 if (!u->id)
2487 return NULL;
2488
2489 return unit_dbus_path_from_name(u->id);
2490}
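
/* Illustrative example (not part of the original source): the D-Bus object
 * path is derived by escaping the unit name, so "ssh.service" should map to
 * something like /org/freedesktop/systemd1/unit/ssh_2eservice, with the dot
 * encoded as "_2e". */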
2491
2492char *unit_dbus_path_invocation_id(Unit *u) {
2493 assert(u);
2494
2495 if (sd_id128_is_null(u->invocation_id))
2496 return NULL;
2497
2498 return unit_dbus_path_from_name(u->invocation_id_string);
2499}
2500
2501int unit_set_slice(Unit *u, Unit *slice) {
2502 assert(u);
2503 assert(slice);
2504
2505         /* Sets the unit slice if it has not been set before. We are
2506          * extra careful to only allow this for units that actually have
2507          * a cgroup context. Also, we don't allow setting this for slices
2508          * (since the parent slice is derived from the name), and we make
2509          * sure the unit we set is actually a slice. */
2510
2511 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2512 return -EOPNOTSUPP;
2513
2514 if (u->type == UNIT_SLICE)
2515 return -EINVAL;
2516
2517 if (unit_active_state(u) != UNIT_INACTIVE)
2518 return -EBUSY;
2519
2520 if (slice->type != UNIT_SLICE)
2521 return -EINVAL;
2522
2523 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2524 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2525 return -EPERM;
2526
2527 if (UNIT_DEREF(u->slice) == slice)
2528 return 0;
2529
2530 /* Disallow slice changes if @u is already bound to cgroups */
2531 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2532 return -EBUSY;
2533
2534 unit_ref_unset(&u->slice);
2535 unit_ref_set(&u->slice, slice);
2536 return 1;
2537}
2538
2539int unit_set_default_slice(Unit *u) {
2540 _cleanup_free_ char *b = NULL;
2541 const char *slice_name;
2542 Unit *slice;
2543 int r;
2544
2545 assert(u);
2546
2547 if (UNIT_ISSET(u->slice))
2548 return 0;
2549
2550 if (u->instance) {
2551 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2552
2553 /* Implicitly place all instantiated units in their
2554 * own per-template slice */
2555
2556 r = unit_name_to_prefix(u->id, &prefix);
2557 if (r < 0)
2558 return r;
2559
2560                 /* The prefix is already escaped, but it might include
2561                  * "-", which has a special meaning for slice units,
2562                  * hence escape it once more here. */
2563 escaped = unit_name_escape(prefix);
2564 if (!escaped)
2565 return -ENOMEM;
2566
2567 if (MANAGER_IS_SYSTEM(u->manager))
2568 b = strjoin("system-", escaped, ".slice");
2569 else
2570 b = strappend(escaped, ".slice");
2571 if (!b)
2572 return -ENOMEM;
2573
2574 slice_name = b;
2575 } else
2576 slice_name =
2577 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2578 ? SPECIAL_SYSTEM_SLICE
2579 : SPECIAL_ROOT_SLICE;
2580
2581 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2582 if (r < 0)
2583 return r;
2584
2585 return unit_set_slice(u, slice);
2586}
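
/* Illustrative example (not part of the original source): with the logic
 * above, an instantiated unit such as "getty@tty1.service" on the system
 * manager is implicitly placed in "system-getty.slice", a plain
 * "ssh.service" ends up in system.slice, and non-instantiated units of the
 * user manager (as well as init.scope) fall back to the root slice
 * "-.slice". */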
2587
2588const char *unit_slice_name(Unit *u) {
2589 assert(u);
2590
2591 if (!UNIT_ISSET(u->slice))
2592 return NULL;
2593
2594 return UNIT_DEREF(u->slice)->id;
2595}
2596
2597int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2598 _cleanup_free_ char *t = NULL;
2599 int r;
2600
2601 assert(u);
2602 assert(type);
2603 assert(_found);
2604
2605 r = unit_name_change_suffix(u->id, type, &t);
2606 if (r < 0)
2607 return r;
2608 if (unit_has_name(u, t))
2609 return -EINVAL;
2610
2611 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2612 assert(r < 0 || *_found != u);
2613 return r;
2614}
2615
2616static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2617 const char *name, *old_owner, *new_owner;
2618 Unit *u = userdata;
2619 int r;
2620
2621 assert(message);
2622 assert(u);
2623
2624 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2625 if (r < 0) {
2626 bus_log_parse_error(r);
2627 return 0;
2628 }
2629
2630 if (UNIT_VTABLE(u)->bus_name_owner_change)
2631 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2632
2633 return 0;
2634}
2635
2636int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2637 const char *match;
2638
2639 assert(u);
2640 assert(bus);
2641 assert(name);
2642
2643 if (u->match_bus_slot)
2644 return -EBUSY;
2645
2646 match = strjoina("type='signal',"
2647 "sender='org.freedesktop.DBus',"
2648 "path='/org/freedesktop/DBus',"
2649 "interface='org.freedesktop.DBus',"
2650 "member='NameOwnerChanged',"
2651 "arg0='", name, "'");
2652
2653 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2654}
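
/* Illustrative example (hypothetical bus name, not part of the original
 * source): for a service with BusName=org.freedesktop.hostname1 the match
 * installed above reads (one match string, wrapped here for readability)
 *
 *         type='signal',sender='org.freedesktop.DBus',
 *         path='/org/freedesktop/DBus',interface='org.freedesktop.DBus',
 *         member='NameOwnerChanged',arg0='org.freedesktop.hostname1'
 *
 * so signal_name_owner_changed() fires whenever that name is acquired or
 * released on the bus. */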
2655
2656int unit_watch_bus_name(Unit *u, const char *name) {
2657 int r;
2658
2659 assert(u);
2660 assert(name);
2661
2662 /* Watch a specific name on the bus. We only support one unit
2663 * watching each name for now. */
2664
2665 if (u->manager->api_bus) {
2666                 /* If the bus is already available, install the match directly.
2667                  * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
2668 r = unit_install_bus_match(u, u->manager->api_bus, name);
2669 if (r < 0)
2670 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2671 }
2672
2673 r = hashmap_put(u->manager->watch_bus, name, u);
2674 if (r < 0) {
2675 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2676 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
2677 }
2678
2679 return 0;
2680}
2681
2682void unit_unwatch_bus_name(Unit *u, const char *name) {
2683 assert(u);
2684 assert(name);
2685
2686 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
2687 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2688}
2689
2690bool unit_can_serialize(Unit *u) {
2691 assert(u);
2692
2693 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2694}
2695
2696int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2697 int r;
2698
2699 assert(u);
2700 assert(f);
2701 assert(fds);
2702
2703 if (unit_can_serialize(u)) {
2704 ExecRuntime *rt;
2705
2706 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2707 if (r < 0)
2708 return r;
2709
2710 rt = unit_get_exec_runtime(u);
2711 if (rt) {
2712 r = exec_runtime_serialize(u, rt, f, fds);
2713 if (r < 0)
2714 return r;
2715 }
2716 }
2717
2718 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2719
2720 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2721 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2722 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2723 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2724
2725 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2726 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2727
2728 if (dual_timestamp_is_set(&u->condition_timestamp))
2729 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2730
2731 if (dual_timestamp_is_set(&u->assert_timestamp))
2732 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2733
2734 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2735
2736 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2737 if (u->cpu_usage_last != NSEC_INFINITY)
2738 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2739
2740 if (u->cgroup_path)
2741 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2742 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2743
2744 if (uid_is_valid(u->ref_uid))
2745 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2746 if (gid_is_valid(u->ref_gid))
2747 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2748
2749 if (!sd_id128_is_null(u->invocation_id))
2750 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
2751
2752 bus_track_serialize(u->bus_track, f, "ref");
2753
2754 if (serialize_jobs) {
2755 if (u->job) {
2756 fprintf(f, "job\n");
2757 job_serialize(u->job, f);
2758 }
2759
2760 if (u->nop_job) {
2761 fprintf(f, "job\n");
2762 job_serialize(u->nop_job, f);
2763 }
2764 }
2765
2766 /* End marker */
2767 fputc('\n', f);
2768 return 0;
2769}
2770
2771int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2772 assert(u);
2773 assert(f);
2774 assert(key);
2775
2776 if (!value)
2777 return 0;
2778
2779 fputs(key, f);
2780 fputc('=', f);
2781 fputs(value, f);
2782 fputc('\n', f);
2783
2784 return 1;
2785}
2786
2787int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2788 _cleanup_free_ char *c = NULL;
2789
2790 assert(u);
2791 assert(f);
2792 assert(key);
2793
2794 if (!value)
2795 return 0;
2796
2797 c = cescape(value);
2798 if (!c)
2799 return -ENOMEM;
2800
2801 fputs(key, f);
2802 fputc('=', f);
2803 fputs(c, f);
2804 fputc('\n', f);
2805
2806 return 1;
2807}
2808
2809int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2810 int copy;
2811
2812 assert(u);
2813 assert(f);
2814 assert(key);
2815
2816 if (fd < 0)
2817 return 0;
2818
2819 copy = fdset_put_dup(fds, fd);
2820 if (copy < 0)
2821 return copy;
2822
2823 fprintf(f, "%s=%i\n", key, copy);
2824 return 1;
2825}
2826
2827void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2828 va_list ap;
2829
2830 assert(u);
2831 assert(f);
2832 assert(key);
2833 assert(format);
2834
2835 fputs(key, f);
2836 fputc('=', f);
2837
2838 va_start(ap, format);
2839 vfprintf(f, format, ap);
2840 va_end(ap);
2841
2842 fputc('\n', f);
2843}
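
/* Illustrative example (made-up values, not part of the original source):
 * the helpers above all emit simple "key=value" lines, terminated by the
 * empty-line end marker written in unit_serialize(). A serialized unit might
 * therefore contain lines such as
 *
 *         cpu-usage-base=0
 *         cgroup=/system.slice/ssh.service
 *         cgroup-realized=yes
 *
 * which unit_deserialize() below parses back by splitting each line at the
 * first '='. */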
2844
2845int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2846 ExecRuntime **rt = NULL;
2847 size_t offset;
2848 int r;
2849
2850 assert(u);
2851 assert(f);
2852 assert(fds);
2853
2854 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2855 if (offset > 0)
2856 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2857
2858 for (;;) {
2859 char line[LINE_MAX], *l, *v;
2860 size_t k;
2861
2862 if (!fgets(line, sizeof(line), f)) {
2863 if (feof(f))
2864 return 0;
2865 return -errno;
2866 }
2867
2868 char_array_0(line);
2869 l = strstrip(line);
2870
2871 /* End marker */
2872 if (isempty(l))
2873 break;
2874
2875 k = strcspn(l, "=");
2876
2877 if (l[k] == '=') {
2878 l[k] = 0;
2879 v = l+k+1;
2880 } else
2881 v = l+k;
2882
2883 if (streq(l, "job")) {
2884 if (v[0] == '\0') {
2885 /* new-style serialized job */
2886 Job *j;
2887
2888 j = job_new_raw(u);
2889 if (!j)
2890 return log_oom();
2891
2892 r = job_deserialize(j, f);
2893 if (r < 0) {
2894 job_free(j);
2895 return r;
2896 }
2897
2898 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2899 if (r < 0) {
2900 job_free(j);
2901 return r;
2902 }
2903
2904 r = job_install_deserialized(j);
2905 if (r < 0) {
2906 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2907 job_free(j);
2908 return r;
2909 }
2910 } else /* legacy for pre-44 */
2911                                 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2912 continue;
2913 } else if (streq(l, "state-change-timestamp")) {
2914 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2915 continue;
2916 } else if (streq(l, "inactive-exit-timestamp")) {
2917 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2918 continue;
2919 } else if (streq(l, "active-enter-timestamp")) {
2920 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2921 continue;
2922 } else if (streq(l, "active-exit-timestamp")) {
2923 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2924 continue;
2925 } else if (streq(l, "inactive-enter-timestamp")) {
2926 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2927 continue;
2928 } else if (streq(l, "condition-timestamp")) {
2929 dual_timestamp_deserialize(v, &u->condition_timestamp);
2930 continue;
2931 } else if (streq(l, "assert-timestamp")) {
2932 dual_timestamp_deserialize(v, &u->assert_timestamp);
2933 continue;
2934 } else if (streq(l, "condition-result")) {
2935
2936 r = parse_boolean(v);
2937 if (r < 0)
2938 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2939 else
2940 u->condition_result = r;
2941
2942 continue;
2943
2944 } else if (streq(l, "assert-result")) {
2945
2946 r = parse_boolean(v);
2947 if (r < 0)
2948 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2949 else
2950 u->assert_result = r;
2951
2952 continue;
2953
2954 } else if (streq(l, "transient")) {
2955
2956 r = parse_boolean(v);
2957 if (r < 0)
2958 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2959 else
2960 u->transient = r;
2961
2962 continue;
2963
2964 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2965
2966 r = safe_atou64(v, &u->cpu_usage_base);
2967 if (r < 0)
2968 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
2969
2970 continue;
2971
2972 } else if (streq(l, "cpu-usage-last")) {
2973
2974 r = safe_atou64(v, &u->cpu_usage_last);
2975 if (r < 0)
2976 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
2977
2978 continue;
2979
2980 } else if (streq(l, "cgroup")) {
2981
2982 r = unit_set_cgroup_path(u, v);
2983 if (r < 0)
2984 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2985
2986 (void) unit_watch_cgroup(u);
2987
2988 continue;
2989 } else if (streq(l, "cgroup-realized")) {
2990 int b;
2991
2992 b = parse_boolean(v);
2993 if (b < 0)
2994 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2995 else
2996 u->cgroup_realized = b;
2997
2998 continue;
2999
3000 } else if (streq(l, "ref-uid")) {
3001 uid_t uid;
3002
3003 r = parse_uid(v, &uid);
3004 if (r < 0)
3005 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3006 else
3007 unit_ref_uid_gid(u, uid, GID_INVALID);
3008
3009 continue;
3010
3011 } else if (streq(l, "ref-gid")) {
3012 gid_t gid;
3013
3014 r = parse_gid(v, &gid);
3015 if (r < 0)
3016 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3017 else
3018 unit_ref_uid_gid(u, UID_INVALID, gid);
3019
                                continue;

3020 } else if (streq(l, "ref")) {
3021
3022 r = strv_extend(&u->deserialized_refs, v);
3023 if (r < 0)
3024 log_oom();
3025
3026 continue;
3027 } else if (streq(l, "invocation-id")) {
3028 sd_id128_t id;
3029
3030 r = sd_id128_from_string(v, &id);
3031 if (r < 0)
3032 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3033 else {
3034 r = unit_set_invocation_id(u, id);
3035 if (r < 0)
3036 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3037 }
3038
3039 continue;
3040 }
3041
3042 if (unit_can_serialize(u)) {
3043 if (rt) {
3044 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
3045 if (r < 0) {
3046 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3047 continue;
3048 }
3049
3050 /* Returns positive if key was handled by the call */
3051 if (r > 0)
3052 continue;
3053 }
3054
3055 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3056 if (r < 0)
3057 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3058 }
3059 }
3060
3061         /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3062          * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3063          * before 228, where the base for timeouts was not persistent across reboots. */
3064
3065 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3066 dual_timestamp_get(&u->state_change_timestamp);
3067
3068 return 0;
3069}
3070
3071int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
3072 Unit *device;
3073 _cleanup_free_ char *e = NULL;
3074 int r;
3075
3076 assert(u);
3077
3078 /* Adds in links to the device node that this unit is based on */
3079 if (isempty(what))
3080 return 0;
3081
3082 if (!is_device_path(what))
3083 return 0;
3084
3085 /* When device units aren't supported (such as in a
3086 * container), don't create dependencies on them. */
3087 if (!unit_type_supported(UNIT_DEVICE))
3088 return 0;
3089
3090 r = unit_name_from_path(what, ".device", &e);
3091 if (r < 0)
3092 return r;
3093
3094 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3095 if (r < 0)
3096 return r;
3097
3098 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3099 dep = UNIT_BINDS_TO;
3100
3101 r = unit_add_two_dependencies(u, UNIT_AFTER,
3102 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3103 device, true);
3104 if (r < 0)
3105 return r;
3106
3107 if (wants) {
3108 r = unit_add_dependency(device, UNIT_WANTS, u, false);
3109 if (r < 0)
3110 return r;
3111 }
3112
3113 return 0;
3114}
3115
3116int unit_coldplug(Unit *u) {
3117 int r = 0, q;
3118 char **i;
3119
3120 assert(u);
3121
3122         /* Make sure we don't enter a loop when coldplugging
3123          * recursively. */
3124 if (u->coldplugged)
3125 return 0;
3126
3127 u->coldplugged = true;
3128
3129 STRV_FOREACH(i, u->deserialized_refs) {
3130 q = bus_unit_track_add_name(u, *i);
3131 if (q < 0 && r >= 0)
3132 r = q;
3133 }
3134 u->deserialized_refs = strv_free(u->deserialized_refs);
3135
3136 if (UNIT_VTABLE(u)->coldplug) {
3137 q = UNIT_VTABLE(u)->coldplug(u);
3138 if (q < 0 && r >= 0)
3139 r = q;
3140 }
3141
3142 if (u->job) {
3143 q = job_coldplug(u->job);
3144 if (q < 0 && r >= 0)
3145 r = q;
3146 }
3147
3148 return r;
3149}
3150
3151static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3152 struct stat st;
3153
3154 if (!path)
3155 return false;
3156
3157 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3158 * are never out-of-date. */
3159 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3160 return false;
3161
3162 if (stat(path, &st) < 0)
3163 /* What, cannot access this anymore? */
3164 return true;
3165
3166 if (path_masked)
3167                 /* For masked files check whether they are still masked */
3168 return !null_or_empty(&st);
3169 else
3170                 /* For all other files check the mtime */
3171 return timespec_load(&st.st_mtim) > mtime;
3174}
3175
3176bool unit_need_daemon_reload(Unit *u) {
3177 _cleanup_strv_free_ char **t = NULL;
3178 char **path;
3179
3180 assert(u);
3181
3182 /* For unit files, we allow masking… */
3183 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3184 u->load_state == UNIT_MASKED))
3185 return true;
3186
3187 /* Source paths should not be masked… */
3188 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3189 return true;
3190
3191 (void) unit_find_dropin_paths(u, &t);
3192 if (!strv_equal(u->dropin_paths, t))
3193 return true;
3194
3195 /* … any drop-ins that are masked are simply omitted from the list. */
3196 STRV_FOREACH(path, u->dropin_paths)
3197 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3198 return true;
3199
3200 return false;
3201}
3202
3203void unit_reset_failed(Unit *u) {
3204 assert(u);
3205
3206 if (UNIT_VTABLE(u)->reset_failed)
3207 UNIT_VTABLE(u)->reset_failed(u);
3208
3209 RATELIMIT_RESET(u->start_limit);
3210 u->start_limit_hit = false;
3211}
3212
3213Unit *unit_following(Unit *u) {
3214 assert(u);
3215
3216 if (UNIT_VTABLE(u)->following)
3217 return UNIT_VTABLE(u)->following(u);
3218
3219 return NULL;
3220}
3221
3222bool unit_stop_pending(Unit *u) {
3223 assert(u);
3224
3225 /* This call does check the current state of the unit. It's
3226 * hence useful to be called from state change calls of the
3227 * unit itself, where the state isn't updated yet. This is
3228 * different from unit_inactive_or_pending() which checks both
3229 * the current state and for a queued job. */
3230
3231 return u->job && u->job->type == JOB_STOP;
3232}
3233
3234bool unit_inactive_or_pending(Unit *u) {
3235 assert(u);
3236
3237 /* Returns true if the unit is inactive or going down */
3238
3239 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3240 return true;
3241
3242 if (unit_stop_pending(u))
3243 return true;
3244
3245 return false;
3246}
3247
3248bool unit_active_or_pending(Unit *u) {
3249 assert(u);
3250
3251 /* Returns true if the unit is active or going up */
3252
3253 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3254 return true;
3255
3256 if (u->job &&
3257 (u->job->type == JOB_START ||
3258 u->job->type == JOB_RELOAD_OR_START ||
3259 u->job->type == JOB_RESTART))
3260 return true;
3261
3262 return false;
3263}
3264
3265int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3266 assert(u);
3267 assert(w >= 0 && w < _KILL_WHO_MAX);
3268 assert(SIGNAL_VALID(signo));
3269
3270 if (!UNIT_VTABLE(u)->kill)
3271 return -EOPNOTSUPP;
3272
3273 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3274}
3275
3276static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3277 Set *pid_set;
3278 int r;
3279
3280 pid_set = set_new(NULL);
3281 if (!pid_set)
3282 return NULL;
3283
3284 /* Exclude the main/control pids from being killed via the cgroup */
3285 if (main_pid > 0) {
3286 r = set_put(pid_set, PID_TO_PTR(main_pid));
3287 if (r < 0)
3288 goto fail;
3289 }
3290
3291 if (control_pid > 0) {
3292 r = set_put(pid_set, PID_TO_PTR(control_pid));
3293 if (r < 0)
3294 goto fail;
3295 }
3296
3297 return pid_set;
3298
3299fail:
3300 set_free(pid_set);
3301 return NULL;
3302}
3303
3304int unit_kill_common(
3305 Unit *u,
3306 KillWho who,
3307 int signo,
3308 pid_t main_pid,
3309 pid_t control_pid,
3310 sd_bus_error *error) {
3311
3312 int r = 0;
3313 bool killed = false;
3314
3315 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3316 if (main_pid < 0)
3317 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3318 else if (main_pid == 0)
3319 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3320 }
3321
3322 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3323 if (control_pid < 0)
3324 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3325 else if (control_pid == 0)
3326 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3327 }
3328
3329 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3330 if (control_pid > 0) {
3331 if (kill(control_pid, signo) < 0)
3332 r = -errno;
3333 else
3334 killed = true;
3335 }
3336
3337 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3338 if (main_pid > 0) {
3339 if (kill(main_pid, signo) < 0)
3340 r = -errno;
3341 else
3342 killed = true;
3343 }
3344
3345 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3346 _cleanup_set_free_ Set *pid_set = NULL;
3347 int q;
3348
3349 /* Exclude the main/control pids from being killed via the cgroup */
3350 pid_set = unit_pid_set(main_pid, control_pid);
3351 if (!pid_set)
3352 return -ENOMEM;
3353
3354 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3355 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3356 r = q;
3357 else
3358 killed = true;
3359 }
3360
3361 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3362 return -ESRCH;
3363
3364 return r;
3365}
3366
3367int unit_following_set(Unit *u, Set **s) {
3368 assert(u);
3369 assert(s);
3370
3371 if (UNIT_VTABLE(u)->following_set)
3372 return UNIT_VTABLE(u)->following_set(u, s);
3373
3374 *s = NULL;
3375 return 0;
3376}
3377
3378UnitFileState unit_get_unit_file_state(Unit *u) {
3379 int r;
3380
3381 assert(u);
3382
3383 if (u->unit_file_state < 0 && u->fragment_path) {
3384 r = unit_file_get_state(
3385 u->manager->unit_file_scope,
3386 NULL,
3387 basename(u->fragment_path),
3388 &u->unit_file_state);
3389 if (r < 0)
3390 u->unit_file_state = UNIT_FILE_BAD;
3391 }
3392
3393 return u->unit_file_state;
3394}
3395
3396int unit_get_unit_file_preset(Unit *u) {
3397 assert(u);
3398
3399 if (u->unit_file_preset < 0 && u->fragment_path)
3400 u->unit_file_preset = unit_file_query_preset(
3401 u->manager->unit_file_scope,
3402 NULL,
3403 basename(u->fragment_path));
3404
3405 return u->unit_file_preset;
3406}
3407
3408Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3409 assert(ref);
3410 assert(u);
3411
3412 if (ref->unit)
3413 unit_ref_unset(ref);
3414
3415 ref->unit = u;
3416 LIST_PREPEND(refs, u->refs, ref);
3417 return u;
3418}
3419
3420void unit_ref_unset(UnitRef *ref) {
3421 assert(ref);
3422
3423 if (!ref->unit)
3424 return;
3425
3426 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3427 * be unreferenced now. */
3428 unit_add_to_gc_queue(ref->unit);
3429
3430 LIST_REMOVE(refs, ref->unit->refs, ref);
3431 ref->unit = NULL;
3432}
3433
3434static int user_from_unit_name(Unit *u, char **ret) {
3435
3436 static const uint8_t hash_key[] = {
3437 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3438 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3439 };
3440
3441 _cleanup_free_ char *n = NULL;
3442 int r;
3443
3444 r = unit_name_to_prefix(u->id, &n);
3445 if (r < 0)
3446 return r;
3447
3448 if (valid_user_group_name(n)) {
3449 *ret = n;
3450 n = NULL;
3451 return 0;
3452 }
3453
3454 /* If we can't use the unit name as a user name, then let's hash it and use that */
3455 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3456 return -ENOMEM;
3457
3458 return 0;
3459}
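
/* Illustrative example (not part of the original source): for a unit named
 * "foo.service" this simply yields the user name "foo"; for a prefix that is
 * not a valid user name (e.g. one starting with a digit) it falls back to a
 * hashed name of the form "_du0123456789abcdef" (made-up hash value). */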
3460
3461int unit_patch_contexts(Unit *u) {
3462 CGroupContext *cc;
3463 ExecContext *ec;
3464 unsigned i;
3465 int r;
3466
3467 assert(u);
3468
3469 /* Patch in the manager defaults into the exec and cgroup
3470 * contexts, _after_ the rest of the settings have been
3471 * initialized */
3472
3473 ec = unit_get_exec_context(u);
3474 if (ec) {
3475 /* This only copies in the ones that need memory */
3476 for (i = 0; i < _RLIMIT_MAX; i++)
3477 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3478 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3479 if (!ec->rlimit[i])
3480 return -ENOMEM;
3481 }
3482
3483 if (MANAGER_IS_USER(u->manager) &&
3484 !ec->working_directory) {
3485
3486 r = get_home_dir(&ec->working_directory);
3487 if (r < 0)
3488 return r;
3489
3490 /* Allow user services to run, even if the
3491 * home directory is missing */
3492 ec->working_directory_missing_ok = true;
3493 }
3494
3495 if (ec->private_devices)
3496 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
3497
3498 if (ec->protect_kernel_modules)
3499 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
3500
3501 if (ec->dynamic_user) {
3502 if (!ec->user) {
3503 r = user_from_unit_name(u, &ec->user);
3504 if (r < 0)
3505 return r;
3506 }
3507
3508 if (!ec->group) {
3509 ec->group = strdup(ec->user);
3510 if (!ec->group)
3511 return -ENOMEM;
3512 }
3513
3514 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3515 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3516
3517 ec->private_tmp = true;
3518 ec->remove_ipc = true;
3519 ec->protect_system = PROTECT_SYSTEM_STRICT;
3520 if (ec->protect_home == PROTECT_HOME_NO)
3521 ec->protect_home = PROTECT_HOME_READ_ONLY;
3522 }
3523 }
3524
3525 cc = unit_get_cgroup_context(u);
3526 if (cc) {
3527
3528 if (ec &&
3529 ec->private_devices &&
3530 cc->device_policy == CGROUP_AUTO)
3531 cc->device_policy = CGROUP_CLOSED;
3532 }
3533
3534 return 0;
3535}
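
/* Illustrative example (not part of the original source): per the code above,
 * a service with DynamicUser=yes and no explicit User=/Group= gets a user
 * name derived from its own unit name, plus PrivateTmp=yes, RemoveIPC=yes,
 * ProtectSystem=strict and at least ProtectHome=read-only patched in before
 * it is started. */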
3536
3537ExecContext *unit_get_exec_context(Unit *u) {
3538 size_t offset;
3539 assert(u);
3540
3541 if (u->type < 0)
3542 return NULL;
3543
3544 offset = UNIT_VTABLE(u)->exec_context_offset;
3545 if (offset <= 0)
3546 return NULL;
3547
3548 return (ExecContext*) ((uint8_t*) u + offset);
3549}
3550
3551KillContext *unit_get_kill_context(Unit *u) {
3552 size_t offset;
3553 assert(u);
3554
3555 if (u->type < 0)
3556 return NULL;
3557
3558 offset = UNIT_VTABLE(u)->kill_context_offset;
3559 if (offset <= 0)
3560 return NULL;
3561
3562 return (KillContext*) ((uint8_t*) u + offset);
3563}
3564
3565CGroupContext *unit_get_cgroup_context(Unit *u) {
3566 size_t offset;
3567
3568 if (u->type < 0)
3569 return NULL;
3570
3571 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3572 if (offset <= 0)
3573 return NULL;
3574
3575 return (CGroupContext*) ((uint8_t*) u + offset);
3576}
3577
3578ExecRuntime *unit_get_exec_runtime(Unit *u) {
3579 size_t offset;
3580
3581 if (u->type < 0)
3582 return NULL;
3583
3584 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3585 if (offset <= 0)
3586 return NULL;
3587
3588 return *(ExecRuntime**) ((uint8_t*) u + offset);
3589}
3590
3591static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3592 assert(u);
3593
3594 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3595 return NULL;
3596
3597 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3598 return u->manager->lookup_paths.transient;
3599
3600 if (mode == UNIT_RUNTIME)
3601 return u->manager->lookup_paths.runtime_control;
3602
3603 if (mode == UNIT_PERSISTENT)
3604 return u->manager->lookup_paths.persistent_control;
3605
3606 return NULL;
3607}
3608
3609int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3610 _cleanup_free_ char *p = NULL, *q = NULL;
3611 const char *dir, *wrapped;
3612 int r;
3613
3614 assert(u);
3615
3616 if (u->transient_file) {
3617                 /* While this transient unit file is still being created, let's not create a new drop-in but instead
3618                  * write to the transient unit file directly. */
3619 fputs(data, u->transient_file);
3620 fputc('\n', u->transient_file);
3621 return 0;
3622 }
3623
3624 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3625 return 0;
3626
3627 dir = unit_drop_in_dir(u, mode);
3628 if (!dir)
3629 return -EINVAL;
3630
3631 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3632 "# or an equivalent operation. Do not edit.\n",
3633 data,
3634 "\n");
3635
3636 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3637 if (r < 0)
3638 return r;
3639
3640 (void) mkdir_p(p, 0755);
3641 r = write_string_file_atomic_label(q, wrapped);
3642 if (r < 0)
3643 return r;
3644
3645 r = strv_push(&u->dropin_paths, q);
3646 if (r < 0)
3647 return r;
3648 q = NULL;
3649
3650 strv_uniq(u->dropin_paths);
3651
3652 u->dropin_mtime = now(CLOCK_REALTIME);
3653
3654 return 0;
3655}
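
/* Illustrative sketch (hypothetical property and paths, not part of the
 * original source): a runtime property change might be persisted roughly
 * like this:
 *
 *         r = unit_write_drop_in_format(u, UNIT_RUNTIME, "description",
 *                                       "[Unit]\nDescription=%s", d);
 *
 * For a non-transient unit in UNIT_RUNTIME mode this should end up as a
 * drop-in named 50-description.conf in the <unit>.d/ directory below the
 * runtime control lookup path, with u->dropin_mtime bumped accordingly. */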
3656
3657int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3658 _cleanup_free_ char *p = NULL;
3659 va_list ap;
3660 int r;
3661
3662 assert(u);
3663 assert(name);
3664 assert(format);
3665
3666 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3667 return 0;
3668
3669 va_start(ap, format);
3670 r = vasprintf(&p, format, ap);
3671 va_end(ap);
3672
3673 if (r < 0)
3674 return -ENOMEM;
3675
3676 return unit_write_drop_in(u, mode, name, p);
3677}
3678
3679int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3680 const char *ndata;
3681
3682 assert(u);
3683 assert(name);
3684 assert(data);
3685
3686 if (!UNIT_VTABLE(u)->private_section)
3687 return -EINVAL;
3688
3689 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3690 return 0;
3691
3692 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3693
3694 return unit_write_drop_in(u, mode, name, ndata);
3695}
3696
3697int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3698 _cleanup_free_ char *p = NULL;
3699 va_list ap;
3700 int r;
3701
3702 assert(u);
3703 assert(name);
3704 assert(format);
3705
3706 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3707 return 0;
3708
3709 va_start(ap, format);
3710 r = vasprintf(&p, format, ap);
3711 va_end(ap);
3712
3713 if (r < 0)
3714 return -ENOMEM;
3715
3716 return unit_write_drop_in_private(u, mode, name, p);
3717}
3718
3719int unit_make_transient(Unit *u) {
3720 FILE *f;
3721 char *path;
3722
3723 assert(u);
3724
3725 if (!UNIT_VTABLE(u)->can_transient)
3726 return -EOPNOTSUPP;
3727
3728 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
3729 if (!path)
3730 return -ENOMEM;
3731
3732         /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3733          * creating the transient unit, and is closed in unit_load() as soon as we start loading the file. */
3734
3735 RUN_WITH_UMASK(0022) {
3736 f = fopen(path, "we");
3737 if (!f) {
3738 free(path);
3739 return -errno;
3740 }
3741 }
3742
3743 if (u->transient_file)
3744 fclose(u->transient_file);
3745 u->transient_file = f;
3746
3747 free(u->fragment_path);
3748 u->fragment_path = path;
3749
3750 u->source_path = mfree(u->source_path);
3751 u->dropin_paths = strv_free(u->dropin_paths);
3752 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3753
3754 u->load_state = UNIT_STUB;
3755 u->load_error = 0;
3756 u->transient = true;
3757
3758 unit_add_to_dbus_queue(u);
3759 unit_add_to_gc_queue(u);
3760
3761 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3762 u->transient_file);
3763
3764 return 0;
3765}
3766
3767static void log_kill(pid_t pid, int sig, void *userdata) {
3768 _cleanup_free_ char *comm = NULL;
3769
3770 (void) get_process_comm(pid, &comm);
3771
3772         /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3773          * only, such as systemd's own PAM stub process. */
3774 if (comm && comm[0] == '(')
3775 return;
3776
3777 log_unit_notice(userdata,
3778 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3779 pid,
3780 strna(comm),
3781 signal_to_string(sig));
3782}
3783
3784static int operation_to_signal(KillContext *c, KillOperation k) {
3785 assert(c);
3786
3787 switch (k) {
3788
3789 case KILL_TERMINATE:
3790 case KILL_TERMINATE_AND_LOG:
3791 return c->kill_signal;
3792
3793 case KILL_KILL:
3794 return SIGKILL;
3795
3796 case KILL_ABORT:
3797 return SIGABRT;
3798
3799 default:
3800 assert_not_reached("KillOperation unknown");
3801 }
3802}
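
/* Illustrative example (not part of the original source): for a unit with the
 * default KillSignal=SIGTERM a normal stop maps KILL_TERMINATE to SIGTERM
 * here, while the final KILL_KILL phase and KILL_ABORT always map to SIGKILL
 * and SIGABRT respectively, regardless of the configured kill signal. */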
3803
3804int unit_kill_context(
3805 Unit *u,
3806 KillContext *c,
3807 KillOperation k,
3808 pid_t main_pid,
3809 pid_t control_pid,
3810 bool main_pid_alien) {
3811
3812 bool wait_for_exit = false, send_sighup;
3813 cg_kill_log_func_t log_func = NULL;
3814 int sig, r;
3815
3816 assert(u);
3817 assert(c);
3818
3819 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
3820 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
3821
3822 if (c->kill_mode == KILL_NONE)
3823 return 0;
3824
3825 sig = operation_to_signal(c, k);
3826
3827 send_sighup =
3828 c->send_sighup &&
3829 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3830 sig != SIGHUP;
3831
3832 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
3833 log_func = log_kill;
3834
3835 if (main_pid > 0) {
3836 if (log_func)
3837 log_func(main_pid, sig, u);
3838
3839 r = kill_and_sigcont(main_pid, sig);
3840 if (r < 0 && r != -ESRCH) {
3841 _cleanup_free_ char *comm = NULL;
3842 (void) get_process_comm(main_pid, &comm);
3843
3844 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3845 } else {
3846 if (!main_pid_alien)
3847 wait_for_exit = true;
3848
3849 if (r != -ESRCH && send_sighup)
3850 (void) kill(main_pid, SIGHUP);
3851 }
3852 }
3853
3854 if (control_pid > 0) {
3855 if (log_func)
3856 log_func(control_pid, sig, u);
3857
3858 r = kill_and_sigcont(control_pid, sig);
3859 if (r < 0 && r != -ESRCH) {
3860 _cleanup_free_ char *comm = NULL;
3861 (void) get_process_comm(control_pid, &comm);
3862
3863 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3864 } else {
3865 wait_for_exit = true;
3866
3867 if (r != -ESRCH && send_sighup)
3868 (void) kill(control_pid, SIGHUP);
3869 }
3870 }
3871
3872 if (u->cgroup_path &&
3873 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3874 _cleanup_set_free_ Set *pid_set = NULL;
3875
3876 /* Exclude the main/control pids from being killed via the cgroup */
3877 pid_set = unit_pid_set(main_pid, control_pid);
3878 if (!pid_set)
3879 return -ENOMEM;
3880
3881 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3882 sig,
3883 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3884 pid_set,
3885 log_func, u);
3886 if (r < 0) {
3887 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3888 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3889
3890 } else if (r > 0) {
3891
3892 /* FIXME: For now, on the legacy hierarchy, we
3893 * will not wait for the cgroup members to die
3894 * if we are running in a container or if this
3895 * is a delegation unit, simply because cgroup
3896 * notification is unreliable in these
3897 * cases. It doesn't work at all in
3898 * containers, and outside of containers it
3899 * can be confused easily by left-over
3900 * directories in the cgroup — which however
3901 * should not exist in non-delegated units. On
3902 * the unified hierarchy that's different,
3903 * there we get proper events. Hence rely on
3904 * them. */
3905
3906 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3907 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3908 wait_for_exit = true;
3909
3910 if (send_sighup) {
3911 set_free(pid_set);
3912
3913 pid_set = unit_pid_set(main_pid, control_pid);
3914 if (!pid_set)
3915 return -ENOMEM;
3916
3917 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3918 SIGHUP,
3919 CGROUP_IGNORE_SELF,
3920 pid_set,
3921 NULL, NULL);
3922 }
3923 }
3924 }
3925
3926 return wait_for_exit;
3927}
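
/* Illustrative example (not part of the original source): with the default
 * KillMode=control-group a stop request signals the main and control process
 * directly and then sweeps the remaining cgroup members via
 * cg_kill_recursive(); with KillMode=mixed the cgroup-wide sweep above is
 * only done for the final SIGKILL phase (k == KILL_KILL). */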
3928
3929int unit_require_mounts_for(Unit *u, const char *path) {
3930 char prefix[strlen(path) + 1], *p;
3931 int r;
3932
3933 assert(u);
3934 assert(path);
3935
3936         /* Registers a unit as requiring a certain path and all its
3937          * prefixes. We keep a simple array of these paths in the
3938          * unit, since it's usually short. However, we build a prefix
3939          * table for all possible prefixes so that newly appearing mount
3940          * units can easily determine which units to make themselves a
3941          * dependency of. */
3942
3943 if (!path_is_absolute(path))
3944 return -EINVAL;
3945
3946 p = strdup(path);
3947 if (!p)
3948 return -ENOMEM;
3949
3950 path_kill_slashes(p);
3951
3952 if (!path_is_safe(p)) {
3953 free(p);
3954 return -EPERM;
3955 }
3956
3957 if (strv_contains(u->requires_mounts_for, p)) {
3958 free(p);
3959 return 0;
3960 }
3961
3962 r = strv_consume(&u->requires_mounts_for, p);
3963 if (r < 0)
3964 return r;
3965
3966 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3967 Set *x;
3968
3969 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3970 if (!x) {
3971 char *q;
3972
3973 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3974 if (r < 0)
3975 return r;
3976
3977 q = strdup(prefix);
3978 if (!q)
3979 return -ENOMEM;
3980
3981 x = set_new(NULL);
3982 if (!x) {
3983 free(q);
3984 return -ENOMEM;
3985 }
3986
3987 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3988 if (r < 0) {
3989 free(q);
3990 set_free(x);
3991 return r;
3992 }
3993 }
3994
3995 r = set_put(x, u);
3996 if (r < 0)
3997 return r;
3998 }
3999
4000 return 0;
4001}
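
/* Illustrative example (hypothetical path, not part of the original source):
 * calling
 *
 *         unit_require_mounts_for(u, "/var/lib/machines/foo");
 *
 * records the path in u->requires_mounts_for and adds u to the
 * units_requiring_mounts_for sets for the path itself and each of its parent
 * prefixes ("/var/lib/machines", "/var/lib", "/var", ...), so that a mount
 * unit appearing for one of these prefixes can look up which units require
 * it and order them accordingly. */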
4002
4003int unit_setup_exec_runtime(Unit *u) {
4004 ExecRuntime **rt;
4005 size_t offset;
4006 Iterator i;
4007 Unit *other;
4008
4009 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4010 assert(offset > 0);
4011
4012         /* Check whether there already is an ExecRuntime for this unit. */
4013 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4014 if (*rt)
4015 return 0;
4016
4017 /* Try to get it from somebody else */
4018 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4019
4020 *rt = unit_get_exec_runtime(other);
4021 if (*rt) {
4022 exec_runtime_ref(*rt);
4023 return 0;
4024 }
4025 }
4026
4027 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
4028}
4029
4030int unit_setup_dynamic_creds(Unit *u) {
4031 ExecContext *ec;
4032 DynamicCreds *dcreds;
4033 size_t offset;
4034
4035 assert(u);
4036
4037 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4038 assert(offset > 0);
4039 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4040
4041 ec = unit_get_exec_context(u);
4042 assert(ec);
4043
4044 if (!ec->dynamic_user)
4045 return 0;
4046
4047 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4048}
4049
4050bool unit_type_supported(UnitType t) {
4051 if (_unlikely_(t < 0))
4052 return false;
4053 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4054 return false;
4055
4056 if (!unit_vtable[t]->supported)
4057 return true;
4058
4059 return unit_vtable[t]->supported();
4060}
4061
4062void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4063 int r;
4064
4065 assert(u);
4066 assert(where);
4067
4068 r = dir_is_empty(where);
4069 if (r > 0)
4070 return;
4071 if (r < 0) {
4072 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4073 return;
4074 }
4075
4076 log_struct(LOG_NOTICE,
4077 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4078 LOG_UNIT_ID(u),
4079 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4080 "WHERE=%s", where,
4081 NULL);
4082}
4083
4084int unit_fail_if_symlink(Unit *u, const char* where) {
4085 int r;
4086
4087 assert(u);
4088 assert(where);
4089
4090 r = is_symlink(where);
4091 if (r < 0) {
4092 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
4093 return 0;
4094 }
4095 if (r == 0)
4096 return 0;
4097
4098 log_struct(LOG_ERR,
4099 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4100 LOG_UNIT_ID(u),
4101 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
4102 "WHERE=%s", where,
4103 NULL);
4104
4105 return -ELOOP;
4106}
4107
4108bool unit_is_pristine(Unit *u) {
4109 assert(u);
4110
4111 /* Check if the unit already exists or is already around,
4112 * in a number of different ways. Note that to cater for unit
4113 * types such as slice, we are generally fine with units that
4114 * are marked UNIT_LOADED even though nothing was
4115 * actually loaded, as those unit types don't require a file
4116 * on disk to validly load. */
4117
4118 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4119 u->fragment_path ||
4120 u->source_path ||
4121 !strv_isempty(u->dropin_paths) ||
4122 u->job ||
4123 u->merged_into);
4124}
4125
4126pid_t unit_control_pid(Unit *u) {
4127 assert(u);
4128
4129 if (UNIT_VTABLE(u)->control_pid)
4130 return UNIT_VTABLE(u)->control_pid(u);
4131
4132 return 0;
4133}
4134
4135pid_t unit_main_pid(Unit *u) {
4136 assert(u);
4137
4138 if (UNIT_VTABLE(u)->main_pid)
4139 return UNIT_VTABLE(u)->main_pid(u);
4140
4141 return 0;
4142}
4143
4144static void unit_unref_uid_internal(
4145 Unit *u,
4146 uid_t *ref_uid,
4147 bool destroy_now,
4148 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4149
4150 assert(u);
4151 assert(ref_uid);
4152 assert(_manager_unref_uid);
4153
4154 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4155          * gid_t are actually the same type, with the same validity rules.
4156 *
4157 * Drops a reference to UID/GID from a unit. */
4158
4159 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4160 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4161
4162 if (!uid_is_valid(*ref_uid))
4163 return;
4164
4165 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4166 *ref_uid = UID_INVALID;
4167}
4168
4169void unit_unref_uid(Unit *u, bool destroy_now) {
4170 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4171}
4172
4173void unit_unref_gid(Unit *u, bool destroy_now) {
4174 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4175}
4176
4177static int unit_ref_uid_internal(
4178 Unit *u,
4179 uid_t *ref_uid,
4180 uid_t uid,
4181 bool clean_ipc,
4182 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4183
4184 int r;
4185
4186 assert(u);
4187 assert(ref_uid);
4188 assert(uid_is_valid(uid));
4189 assert(_manager_ref_uid);
4190
4191         /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4192 * are actually the same type, and have the same validity rules.
4193 *
4194 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4195 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4196 * drops to zero. */
4197
4198 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4199 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4200
4201 if (*ref_uid == uid)
4202 return 0;
4203
4204 if (uid_is_valid(*ref_uid)) /* Already set? */
4205 return -EBUSY;
4206
4207 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4208 if (r < 0)
4209 return r;
4210
4211 *ref_uid = uid;
4212 return 1;
4213}
4214
4215int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4216 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4217}
4218
4219int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4220 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4221}
4222
4223static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4224 int r = 0, q = 0;
4225
4226 assert(u);
4227
4228 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4229
4230 if (uid_is_valid(uid)) {
4231 r = unit_ref_uid(u, uid, clean_ipc);
4232 if (r < 0)
4233 return r;
4234 }
4235
4236 if (gid_is_valid(gid)) {
4237 q = unit_ref_gid(u, gid, clean_ipc);
4238 if (q < 0) {
4239 if (r > 0)
4240 unit_unref_uid(u, false);
4241
4242 return q;
4243 }
4244 }
4245
4246 return r > 0 || q > 0;
4247}
4248
4249int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4250 ExecContext *c;
4251 int r;
4252
4253 assert(u);
4254
4255 c = unit_get_exec_context(u);
4256
4257 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4258 if (r < 0)
4259 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4260
4261 return r;
4262}
4263
4264void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4265 assert(u);
4266
4267 unit_unref_uid(u, destroy_now);
4268 unit_unref_gid(u, destroy_now);
4269}
4270
4271void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4272 int r;
4273
4274 assert(u);
4275
4276         /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group name
4277          * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4278          * objects when no service references the UID/GID anymore. */
4279
4280 r = unit_ref_uid_gid(u, uid, gid);
4281 if (r > 0)
4282 bus_unit_send_change_signal(u);
4283}
4284
4285int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4286 int r;
4287
4288 assert(u);
4289
4290 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4291
4292 if (sd_id128_equal(u->invocation_id, id))
4293 return 0;
4294
4295 if (!sd_id128_is_null(u->invocation_id))
4296 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4297
4298 if (sd_id128_is_null(id)) {
4299 r = 0;
4300 goto reset;
4301 }
4302
4303 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4304 if (r < 0)
4305 goto reset;
4306
4307 u->invocation_id = id;
4308 sd_id128_to_string(id, u->invocation_id_string);
4309
4310 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4311 if (r < 0)
4312 goto reset;
4313
4314 return 0;
4315
4316reset:
4317 u->invocation_id = SD_ID128_NULL;
4318 u->invocation_id_string[0] = 0;
4319 return r;
4320}
4321
4322int unit_acquire_invocation_id(Unit *u) {
4323 sd_id128_t id;
4324 int r;
4325
4326 assert(u);
4327
4328 r = sd_id128_randomize(&id);
4329 if (r < 0)
4330 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4331
4332 r = unit_set_invocation_id(u, id);
4333 if (r < 0)
4334 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4335
4336 return 0;
4337}