src/core/unit.c
1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fileio-label.h"
39 #include "formats-util.h"
40 #include "load-dropin.h"
41 #include "load-fragment.h"
42 #include "log.h"
43 #include "macro.h"
44 #include "missing.h"
45 #include "mkdir.h"
46 #include "parse-util.h"
47 #include "path-util.h"
48 #include "process-util.h"
49 #include "set.h"
50 #include "signal-util.h"
51 #include "special.h"
52 #include "stat-util.h"
53 #include "stdio-util.h"
54 #include "string-util.h"
55 #include "strv.h"
56 #include "umask-util.h"
57 #include "unit-name.h"
58 #include "unit.h"
59 #include "user-util.h"
60 #include "virt.h"
61
62 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
63 [UNIT_SERVICE] = &service_vtable,
64 [UNIT_SOCKET] = &socket_vtable,
65 [UNIT_BUSNAME] = &busname_vtable,
66 [UNIT_TARGET] = &target_vtable,
67 [UNIT_DEVICE] = &device_vtable,
68 [UNIT_MOUNT] = &mount_vtable,
69 [UNIT_AUTOMOUNT] = &automount_vtable,
70 [UNIT_SWAP] = &swap_vtable,
71 [UNIT_TIMER] = &timer_vtable,
72 [UNIT_PATH] = &path_vtable,
73 [UNIT_SLICE] = &slice_vtable,
74 [UNIT_SCOPE] = &scope_vtable
75 };
76
77 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
78
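/* Allocates and zero-initializes a new unit object. 'size' is the size of the
 * type-specific structure which embeds Unit as its first member, so callers
 * normally pass the object size advertised by the type's vtable, roughly:
 *
 *     Unit *u = unit_new(m, unit_vtable[UNIT_SERVICE]->object_size);
 *
 * The generic fields initialized below are hence shared by all unit types. */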
79 Unit *unit_new(Manager *m, size_t size) {
80 Unit *u;
81
82 assert(m);
83 assert(size >= sizeof(Unit));
84
85 u = malloc0(size);
86 if (!u)
87 return NULL;
88
89 u->names = set_new(&string_hash_ops);
90 if (!u->names) {
91 free(u);
92 return NULL;
93 }
94
95 u->manager = m;
96 u->type = _UNIT_TYPE_INVALID;
97 u->default_dependencies = true;
98 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
99 u->unit_file_preset = -1;
100 u->on_failure_job_mode = JOB_REPLACE;
101 u->cgroup_inotify_wd = -1;
102 u->job_timeout = USEC_INFINITY;
103 u->ref_uid = UID_INVALID;
104 u->ref_gid = GID_INVALID;
105 u->cpu_usage_last = NSEC_INFINITY;
106
107 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
108 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
109
110 return u;
111 }
112
113 bool unit_has_name(Unit *u, const char *name) {
114 assert(u);
115 assert(name);
116
117 return set_contains(u->names, (char*) name);
118 }
119
120 static void unit_init(Unit *u) {
121 CGroupContext *cc;
122 ExecContext *ec;
123 KillContext *kc;
124
125 assert(u);
126 assert(u->manager);
127 assert(u->type >= 0);
128
129 cc = unit_get_cgroup_context(u);
130 if (cc) {
131 cgroup_context_init(cc);
132
133 /* Copy the manager defaults into the cgroup
134 * context, _before_ the rest of the settings have
135 * been initialized */
136
137 cc->cpu_accounting = u->manager->default_cpu_accounting;
138 cc->io_accounting = u->manager->default_io_accounting;
139 cc->blockio_accounting = u->manager->default_blockio_accounting;
140 cc->memory_accounting = u->manager->default_memory_accounting;
141 cc->tasks_accounting = u->manager->default_tasks_accounting;
142
143 if (u->type != UNIT_SLICE)
144 cc->tasks_max = u->manager->default_tasks_max;
145 }
146
147 ec = unit_get_exec_context(u);
148 if (ec)
149 exec_context_init(ec);
150
151 kc = unit_get_kill_context(u);
152 if (kc)
153 kill_context_init(kc);
154
155 if (UNIT_VTABLE(u)->init)
156 UNIT_VTABLE(u)->init(u);
157 }
158
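/* Registers an additional name for this unit. A template name is first
 * instantiated with the unit's instance string; the result is then validated,
 * checked for collisions in the manager's unit table and inserted into both
 * u->names and u->manager->units. The first name successfully added also
 * fixes the unit's type, id and instance. */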
159 int unit_add_name(Unit *u, const char *text) {
160 _cleanup_free_ char *s = NULL, *i = NULL;
161 UnitType t;
162 int r;
163
164 assert(u);
165 assert(text);
166
167 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
168
169 if (!u->instance)
170 return -EINVAL;
171
172 r = unit_name_replace_instance(text, u->instance, &s);
173 if (r < 0)
174 return r;
175 } else {
176 s = strdup(text);
177 if (!s)
178 return -ENOMEM;
179 }
180
181 if (set_contains(u->names, s))
182 return 0;
183 if (hashmap_contains(u->manager->units, s))
184 return -EEXIST;
185
186 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
187 return -EINVAL;
188
189 t = unit_name_to_type(s);
190 if (t < 0)
191 return -EINVAL;
192
193 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
194 return -EINVAL;
195
196 r = unit_name_to_instance(s, &i);
197 if (r < 0)
198 return r;
199
200 if (i && !unit_type_may_template(t))
201 return -EINVAL;
202
203 /* Ensure that this unit is either instanced or not instanced,
204 * but not both. Note that names with different instance
205 * strings are still allowed, however. */
206 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
207 return -EINVAL;
208
209 if (!unit_type_may_alias(t) && !set_isempty(u->names))
210 return -EEXIST;
211
212 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
213 return -E2BIG;
214
215 r = set_put(u->names, s);
216 if (r < 0)
217 return r;
218 assert(r > 0);
219
220 r = hashmap_put(u->manager->units, s, u);
221 if (r < 0) {
222 (void) set_remove(u->names, s);
223 return r;
224 }
225
226 if (u->type == _UNIT_TYPE_INVALID) {
227 u->type = t;
228 u->id = s;
229 u->instance = i;
230
231 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
232
233 unit_init(u);
234
235 i = NULL;
236 }
237
238 s = NULL;
239
240 unit_add_to_dbus_queue(u);
241 return 0;
242 }
243
244 int unit_choose_id(Unit *u, const char *name) {
245 _cleanup_free_ char *t = NULL;
246 char *s, *i;
247 int r;
248
249 assert(u);
250 assert(name);
251
252 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
253
254 if (!u->instance)
255 return -EINVAL;
256
257 r = unit_name_replace_instance(name, u->instance, &t);
258 if (r < 0)
259 return r;
260
261 name = t;
262 }
263
264 /* Selects one of the names of this unit as the id */
265 s = set_get(u->names, (char*) name);
266 if (!s)
267 return -ENOENT;
268
269 /* Determine the new instance from the new id */
270 r = unit_name_to_instance(s, &i);
271 if (r < 0)
272 return r;
273
274 u->id = s;
275
276 free(u->instance);
277 u->instance = i;
278
279 unit_add_to_dbus_queue(u);
280
281 return 0;
282 }
283
284 int unit_set_description(Unit *u, const char *description) {
285 char *s;
286
287 assert(u);
288
289 if (isempty(description))
290 s = NULL;
291 else {
292 s = strdup(description);
293 if (!s)
294 return -ENOMEM;
295 }
296
297 free(u->description);
298 u->description = s;
299
300 unit_add_to_dbus_queue(u);
301 return 0;
302 }
303
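/* Returns true if the unit still needs to be kept around, false if it may be
 * garbage collected. As a side effect, runtime resources of units that are
 * inactive or failed and have no queued job are released here. */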
304 bool unit_check_gc(Unit *u) {
305 UnitActiveState state;
306 assert(u);
307
308 if (u->job)
309 return true;
310
311 if (u->nop_job)
312 return true;
313
314 state = unit_active_state(u);
315
316 /* If the unit is inactive or failed and no job is queued for
317 * it, then release its runtime resources */
318 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
319 UNIT_VTABLE(u)->release_resources)
320 UNIT_VTABLE(u)->release_resources(u);
321
322 /* But we keep the unit object around for longer when it is
323 * referenced or configured to not be gc'ed */
324 if (state != UNIT_INACTIVE)
325 return true;
326
327 if (u->no_gc)
328 return true;
329
330 if (u->refs)
331 return true;
332
333 if (sd_bus_track_count(u->bus_track) > 0)
334 return true;
335
336 if (UNIT_VTABLE(u)->check_gc)
337 if (UNIT_VTABLE(u)->check_gc(u))
338 return true;
339
340 return false;
341 }
342
343 void unit_add_to_load_queue(Unit *u) {
344 assert(u);
345 assert(u->type != _UNIT_TYPE_INVALID);
346
347 if (u->load_state != UNIT_STUB || u->in_load_queue)
348 return;
349
350 LIST_PREPEND(load_queue, u->manager->load_queue, u);
351 u->in_load_queue = true;
352 }
353
354 void unit_add_to_cleanup_queue(Unit *u) {
355 assert(u);
356
357 if (u->in_cleanup_queue)
358 return;
359
360 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
361 u->in_cleanup_queue = true;
362 }
363
364 void unit_add_to_gc_queue(Unit *u) {
365 assert(u);
366
367 if (u->in_gc_queue || u->in_cleanup_queue)
368 return;
369
370 if (unit_check_gc(u))
371 return;
372
373 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
374 u->in_gc_queue = true;
375
376 u->manager->n_in_gc_queue++;
377 }
378
379 void unit_add_to_dbus_queue(Unit *u) {
380 assert(u);
381 assert(u->type != _UNIT_TYPE_INVALID);
382
383 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
384 return;
385
386 /* Shortcut things if nobody cares */
387 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
388 set_isempty(u->manager->private_buses)) {
389 u->sent_dbus_new_signal = true;
390 return;
391 }
392
393 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
394 u->in_dbus_queue = true;
395 }
396
397 static void bidi_set_free(Unit *u, Set *s) {
398 Iterator i;
399 Unit *other;
400
401 assert(u);
402
403 /* Frees the set and makes sure we are dropped from the
404 * inverse pointers */
405
406 SET_FOREACH(other, s, i) {
407 UnitDependency d;
408
409 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
410 set_remove(other->dependencies[d], u);
411
412 unit_add_to_gc_queue(other);
413 }
414
415 set_free(s);
416 }
417
418 static void unit_remove_transient(Unit *u) {
419 char **i;
420
421 assert(u);
422
423 if (!u->transient)
424 return;
425
426 if (u->fragment_path)
427 (void) unlink(u->fragment_path);
428
429 STRV_FOREACH(i, u->dropin_paths) {
430 _cleanup_free_ char *p = NULL, *pp = NULL;
431
432 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
433 if (!p)
434 continue;
435
436 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
437 if (!pp)
438 continue;
439
440 /* Only drop transient drop-ins */
441 if (!path_equal(u->manager->lookup_paths.transient, pp))
442 continue;
443
444 (void) unlink(*i);
445 (void) rmdir(p);
446 }
447 }
448
449 static void unit_free_requires_mounts_for(Unit *u) {
450 char **j;
451
452 STRV_FOREACH(j, u->requires_mounts_for) {
453 char s[strlen(*j) + 1];
454
455 PATH_FOREACH_PREFIX_MORE(s, *j) {
456 char *y;
457 Set *x;
458
459 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
460 if (!x)
461 continue;
462
463 set_remove(x, u);
464
465 if (set_isempty(x)) {
466 hashmap_remove(u->manager->units_requiring_mounts_for, y);
467 free(y);
468 set_free(x);
469 }
470 }
471 }
472
473 u->requires_mounts_for = strv_free(u->requires_mounts_for);
474 }
475
476 static void unit_done(Unit *u) {
477 ExecContext *ec;
478 CGroupContext *cc;
479
480 assert(u);
481
482 if (u->type < 0)
483 return;
484
485 if (UNIT_VTABLE(u)->done)
486 UNIT_VTABLE(u)->done(u);
487
488 ec = unit_get_exec_context(u);
489 if (ec)
490 exec_context_done(ec);
491
492 cc = unit_get_cgroup_context(u);
493 if (cc)
494 cgroup_context_done(cc);
495 }
496
497 void unit_free(Unit *u) {
498 UnitDependency d;
499 Iterator i;
500 char *t;
501
502 assert(u);
503
504 if (u->transient_file)
505 fclose(u->transient_file);
506
507 if (!MANAGER_IS_RELOADING(u->manager))
508 unit_remove_transient(u);
509
510 bus_unit_send_removed_signal(u);
511
512 unit_done(u);
513
514 sd_bus_slot_unref(u->match_bus_slot);
515
516 sd_bus_track_unref(u->bus_track);
517 u->deserialized_refs = strv_free(u->deserialized_refs);
518
519 unit_free_requires_mounts_for(u);
520
521 SET_FOREACH(t, u->names, i)
522 hashmap_remove_value(u->manager->units, t, u);
523
524 if (u->job) {
525 Job *j = u->job;
526 job_uninstall(j);
527 job_free(j);
528 }
529
530 if (u->nop_job) {
531 Job *j = u->nop_job;
532 job_uninstall(j);
533 job_free(j);
534 }
535
536 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
537 bidi_set_free(u, u->dependencies[d]);
538
539 if (u->type != _UNIT_TYPE_INVALID)
540 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
541
542 if (u->in_load_queue)
543 LIST_REMOVE(load_queue, u->manager->load_queue, u);
544
545 if (u->in_dbus_queue)
546 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
547
548 if (u->in_cleanup_queue)
549 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
550
551 if (u->in_gc_queue) {
552 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
553 u->manager->n_in_gc_queue--;
554 }
555
556 if (u->in_cgroup_queue)
557 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
558
559 unit_release_cgroup(u);
560
561 unit_unref_uid_gid(u, false);
562
563 (void) manager_update_failed_units(u->manager, u, false);
564 set_remove(u->manager->startup_units, u);
565
566 free(u->description);
567 strv_free(u->documentation);
568 free(u->fragment_path);
569 free(u->source_path);
570 strv_free(u->dropin_paths);
571 free(u->instance);
572
573 free(u->job_timeout_reboot_arg);
574
575 set_free_free(u->names);
576
577 unit_unwatch_all_pids(u);
578
579 condition_free_list(u->conditions);
580 condition_free_list(u->asserts);
581
582 free(u->reboot_arg);
583
584 unit_ref_unset(&u->slice);
585
586 while (u->refs)
587 unit_ref_unset(u->refs);
588
589 free(u);
590 }
591
592 UnitActiveState unit_active_state(Unit *u) {
593 assert(u);
594
595 if (u->load_state == UNIT_MERGED)
596 return unit_active_state(unit_follow_merge(u));
597
598 /* After a reload it might happen that a unit is not correctly
599 * loaded but still has a process around. That's why we won't
600 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
601
602 return UNIT_VTABLE(u)->active_state(u);
603 }
604
605 const char* unit_sub_state_to_string(Unit *u) {
606 assert(u);
607
608 return UNIT_VTABLE(u)->sub_state_to_string(u);
609 }
610
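/* Moves all entries of *other into *s. If *s is not allocated yet, the whole
 * set is stolen instead of moving entry by entry; either way the entries end
 * up owned by *s. */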
611 static int complete_move(Set **s, Set **other) {
612 int r;
613
614 assert(s);
615 assert(other);
616
617 if (!*other)
618 return 0;
619
620 if (*s) {
621 r = set_move(*s, *other);
622 if (r < 0)
623 return r;
624 } else {
625 *s = *other;
626 *other = NULL;
627 }
628
629 return 0;
630 }
631
632 static int merge_names(Unit *u, Unit *other) {
633 char *t;
634 Iterator i;
635 int r;
636
637 assert(u);
638 assert(other);
639
640 r = complete_move(&u->names, &other->names);
641 if (r < 0)
642 return r;
643
644 set_free_free(other->names);
645 other->names = NULL;
646 other->id = NULL;
647
648 SET_FOREACH(t, u->names, i)
649 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
650
651 return 0;
652 }
653
654 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
655 unsigned n_reserve;
656
657 assert(u);
658 assert(other);
659 assert(d < _UNIT_DEPENDENCY_MAX);
660
661 /*
662 * If u does not have this dependency set allocated, there is no need
663 * to reserve anything. In that case other's set will be transferred
664 * as a whole to u by complete_move().
665 */
666 if (!u->dependencies[d])
667 return 0;
668
669 /* merge_dependencies() will skip a u-on-u dependency */
670 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
671
672 return set_reserve(u->dependencies[d], n_reserve);
673 }
674
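/* Transfers the dependency set 'd' from 'other' to 'u', dropping anything
 * that would make u depend on itself, and rewrites the inverse dependency
 * pointers on all affected units so that they reference u instead of other. */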
675 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
676 Iterator i;
677 Unit *back;
678 int r;
679
680 assert(u);
681 assert(other);
682 assert(d < _UNIT_DEPENDENCY_MAX);
683
684 /* Fix backwards pointers */
685 SET_FOREACH(back, other->dependencies[d], i) {
686 UnitDependency k;
687
688 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
689 /* Do not add dependencies between u and itself */
690 if (back == u) {
691 if (set_remove(back->dependencies[k], other))
692 maybe_warn_about_dependency(u, other_id, k);
693 } else {
694 r = set_remove_and_put(back->dependencies[k], other, u);
695 if (r == -EEXIST)
696 set_remove(back->dependencies[k], other);
697 else
698 assert(r >= 0 || r == -ENOENT);
699 }
700 }
701 }
702
703 /* Also do not move dependencies on u to itself */
704 back = set_remove(other->dependencies[d], u);
705 if (back)
706 maybe_warn_about_dependency(u, other_id, d);
707
708 /* The move cannot fail. The caller must have performed a reservation. */
709 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
710
711 other->dependencies[d] = set_free(other->dependencies[d]);
712 }
713
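/* Merges 'other' into 'u': names, references and dependencies are transferred
 * to 'u', and 'other' is left behind as a UNIT_MERGED stub whose merged_into
 * pointer unit_follow_merge() resolves to 'u'. This is only permitted while
 * 'other' is still a stub (or not found), carries no jobs and is inactive. */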
714 int unit_merge(Unit *u, Unit *other) {
715 UnitDependency d;
716 const char *other_id = NULL;
717 int r;
718
719 assert(u);
720 assert(other);
721 assert(u->manager == other->manager);
722 assert(u->type != _UNIT_TYPE_INVALID);
723
724 other = unit_follow_merge(other);
725
726 if (other == u)
727 return 0;
728
729 if (u->type != other->type)
730 return -EINVAL;
731
732 if (!u->instance != !other->instance)
733 return -EINVAL;
734
735 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
736 return -EEXIST;
737
738 if (other->load_state != UNIT_STUB &&
739 other->load_state != UNIT_NOT_FOUND)
740 return -EEXIST;
741
742 if (other->job)
743 return -EEXIST;
744
745 if (other->nop_job)
746 return -EEXIST;
747
748 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
749 return -EEXIST;
750
751 if (other->id)
752 other_id = strdupa(other->id);
753
754 /* Make reservations to ensure merge_dependencies() won't fail */
755 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
756 r = reserve_dependencies(u, other, d);
757 /*
758 * We don't rollback reservations if we fail. We don't have
759 * a way to undo reservations. A reservation is not a leak.
760 */
761 if (r < 0)
762 return r;
763 }
764
765 /* Merge names */
766 r = merge_names(u, other);
767 if (r < 0)
768 return r;
769
770 /* Redirect all references */
771 while (other->refs)
772 unit_ref_set(other->refs, u);
773
774 /* Merge dependencies */
775 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
776 merge_dependencies(u, other, other_id, d);
777
778 other->load_state = UNIT_MERGED;
779 other->merged_into = u;
780
781 /* If there is still some data attached to the other node, we
782 * don't need it anymore, and can free it. */
783 if (other->load_state != UNIT_STUB)
784 if (UNIT_VTABLE(other)->done)
785 UNIT_VTABLE(other)->done(other);
786
787 unit_add_to_dbus_queue(u);
788 unit_add_to_cleanup_queue(other);
789
790 return 0;
791 }
792
793 int unit_merge_by_name(Unit *u, const char *name) {
794 _cleanup_free_ char *s = NULL;
795 Unit *other;
796 int r;
797
798 assert(u);
799 assert(name);
800
801 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
802 if (!u->instance)
803 return -EINVAL;
804
805 r = unit_name_replace_instance(name, u->instance, &s);
806 if (r < 0)
807 return r;
808
809 name = s;
810 }
811
812 other = manager_get_unit(u->manager, name);
813 if (other)
814 return unit_merge(u, other);
815
816 return unit_add_name(u, name);
817 }
818
819 Unit* unit_follow_merge(Unit *u) {
820 assert(u);
821
822 while (u->load_state == UNIT_MERGED)
823 assert_se(u = u->merged_into);
824
825 return u;
826 }
827
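/* Adds the implicit dependencies that follow from an exec context: mount
 * dependencies for WorkingDirectory= and RootDirectory=, and, in the system
 * instance, for /tmp and /var/tmp when PrivateTmp= is on, plus an After=
 * dependency on journald's socket when output goes to the journal, syslog or
 * kmsg. */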
828 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
829 int r;
830
831 assert(u);
832 assert(c);
833
834 if (c->working_directory) {
835 r = unit_require_mounts_for(u, c->working_directory);
836 if (r < 0)
837 return r;
838 }
839
840 if (c->root_directory) {
841 r = unit_require_mounts_for(u, c->root_directory);
842 if (r < 0)
843 return r;
844 }
845
846 if (!MANAGER_IS_SYSTEM(u->manager))
847 return 0;
848
849 if (c->private_tmp) {
850 r = unit_require_mounts_for(u, "/tmp");
851 if (r < 0)
852 return r;
853
854 r = unit_require_mounts_for(u, "/var/tmp");
855 if (r < 0)
856 return r;
857 }
858
859 if (c->std_output != EXEC_OUTPUT_KMSG &&
860 c->std_output != EXEC_OUTPUT_SYSLOG &&
861 c->std_output != EXEC_OUTPUT_JOURNAL &&
862 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
863 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
864 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
865 c->std_error != EXEC_OUTPUT_KMSG &&
866 c->std_error != EXEC_OUTPUT_SYSLOG &&
867 c->std_error != EXEC_OUTPUT_JOURNAL &&
868 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
869 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
870 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
871 return 0;
872
873 /* If syslog or kernel logging is requested, make sure our own
874 * logging daemon is run first. */
875
876 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
877 if (r < 0)
878 return r;
879
880 return 0;
881 }
882
883 const char *unit_description(Unit *u) {
884 assert(u);
885
886 if (u->description)
887 return u->description;
888
889 return strna(u->id);
890 }
891
892 void unit_dump(Unit *u, FILE *f, const char *prefix) {
893 char *t, **j;
894 UnitDependency d;
895 Iterator i;
896 const char *prefix2;
897 char
898 timestamp0[FORMAT_TIMESTAMP_MAX],
899 timestamp1[FORMAT_TIMESTAMP_MAX],
900 timestamp2[FORMAT_TIMESTAMP_MAX],
901 timestamp3[FORMAT_TIMESTAMP_MAX],
902 timestamp4[FORMAT_TIMESTAMP_MAX],
903 timespan[FORMAT_TIMESPAN_MAX];
904 Unit *following;
905 _cleanup_set_free_ Set *following_set = NULL;
906 int r;
907 const char *n;
908
909 assert(u);
910 assert(u->type >= 0);
911
912 prefix = strempty(prefix);
913 prefix2 = strjoina(prefix, "\t");
914
915 fprintf(f,
916 "%s-> Unit %s:\n"
917 "%s\tDescription: %s\n"
918 "%s\tInstance: %s\n"
919 "%s\tUnit Load State: %s\n"
920 "%s\tUnit Active State: %s\n"
921 "%s\tState Change Timestamp: %s\n"
922 "%s\tInactive Exit Timestamp: %s\n"
923 "%s\tActive Enter Timestamp: %s\n"
924 "%s\tActive Exit Timestamp: %s\n"
925 "%s\tInactive Enter Timestamp: %s\n"
926 "%s\tGC Check Good: %s\n"
927 "%s\tNeed Daemon Reload: %s\n"
928 "%s\tTransient: %s\n"
929 "%s\tSlice: %s\n"
930 "%s\tCGroup: %s\n"
931 "%s\tCGroup realized: %s\n"
932 "%s\tCGroup mask: 0x%x\n"
933 "%s\tCGroup members mask: 0x%x\n",
934 prefix, u->id,
935 prefix, unit_description(u),
936 prefix, strna(u->instance),
937 prefix, unit_load_state_to_string(u->load_state),
938 prefix, unit_active_state_to_string(unit_active_state(u)),
939 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
940 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
941 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
942 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
943 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
944 prefix, yes_no(unit_check_gc(u)),
945 prefix, yes_no(unit_need_daemon_reload(u)),
946 prefix, yes_no(u->transient),
947 prefix, strna(unit_slice_name(u)),
948 prefix, strna(u->cgroup_path),
949 prefix, yes_no(u->cgroup_realized),
950 prefix, u->cgroup_realized_mask,
951 prefix, u->cgroup_members_mask);
952
953 SET_FOREACH(t, u->names, i)
954 fprintf(f, "%s\tName: %s\n", prefix, t);
955
956 STRV_FOREACH(j, u->documentation)
957 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
958
959 following = unit_following(u);
960 if (following)
961 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
962
963 r = unit_following_set(u, &following_set);
964 if (r >= 0) {
965 Unit *other;
966
967 SET_FOREACH(other, following_set, i)
968 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
969 }
970
971 if (u->fragment_path)
972 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
973
974 if (u->source_path)
975 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
976
977 STRV_FOREACH(j, u->dropin_paths)
978 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
979
980 if (u->job_timeout != USEC_INFINITY)
981 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
982
983 if (u->job_timeout_action != FAILURE_ACTION_NONE)
984 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
985
986 if (u->job_timeout_reboot_arg)
987 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
988
989 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
990 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
991
992 if (dual_timestamp_is_set(&u->condition_timestamp))
993 fprintf(f,
994 "%s\tCondition Timestamp: %s\n"
995 "%s\tCondition Result: %s\n",
996 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
997 prefix, yes_no(u->condition_result));
998
999 if (dual_timestamp_is_set(&u->assert_timestamp))
1000 fprintf(f,
1001 "%s\tAssert Timestamp: %s\n"
1002 "%s\tAssert Result: %s\n",
1003 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1004 prefix, yes_no(u->assert_result));
1005
1006 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1007 Unit *other;
1008
1009 SET_FOREACH(other, u->dependencies[d], i)
1010 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1011 }
1012
1013 if (!strv_isempty(u->requires_mounts_for)) {
1014 fprintf(f,
1015 "%s\tRequiresMountsFor:", prefix);
1016
1017 STRV_FOREACH(j, u->requires_mounts_for)
1018 fprintf(f, " %s", *j);
1019
1020 fputs("\n", f);
1021 }
1022
1023 if (u->load_state == UNIT_LOADED) {
1024
1025 fprintf(f,
1026 "%s\tStopWhenUnneeded: %s\n"
1027 "%s\tRefuseManualStart: %s\n"
1028 "%s\tRefuseManualStop: %s\n"
1029 "%s\tDefaultDependencies: %s\n"
1030 "%s\tOnFailureJobMode: %s\n"
1031 "%s\tIgnoreOnIsolate: %s\n",
1032 prefix, yes_no(u->stop_when_unneeded),
1033 prefix, yes_no(u->refuse_manual_start),
1034 prefix, yes_no(u->refuse_manual_stop),
1035 prefix, yes_no(u->default_dependencies),
1036 prefix, job_mode_to_string(u->on_failure_job_mode),
1037 prefix, yes_no(u->ignore_on_isolate));
1038
1039 if (UNIT_VTABLE(u)->dump)
1040 UNIT_VTABLE(u)->dump(u, f, prefix2);
1041
1042 } else if (u->load_state == UNIT_MERGED)
1043 fprintf(f,
1044 "%s\tMerged into: %s\n",
1045 prefix, u->merged_into->id);
1046 else if (u->load_state == UNIT_ERROR)
1047 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1048
1049 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1050 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1051
1052 if (u->job)
1053 job_dump(u->job, f, prefix2);
1054
1055 if (u->nop_job)
1056 job_dump(u->nop_job, f, prefix2);
1057
1058 }
1059
1060 /* Common implementation for multiple backends */
1061 int unit_load_fragment_and_dropin(Unit *u) {
1062 int r;
1063
1064 assert(u);
1065
1066 /* Load a .{service,socket,...} file */
1067 r = unit_load_fragment(u);
1068 if (r < 0)
1069 return r;
1070
1071 if (u->load_state == UNIT_STUB)
1072 return -ENOENT;
1073
1074 /* Load drop-in directory data */
1075 r = unit_load_dropin(unit_follow_merge(u));
1076 if (r < 0)
1077 return r;
1078
1079 return 0;
1080 }
1081
1082 /* Common implementation for multiple backends */
1083 int unit_load_fragment_and_dropin_optional(Unit *u) {
1084 int r;
1085
1086 assert(u);
1087
1088 /* Same as unit_load_fragment_and_dropin(), but whether
1089 * something can be loaded or not doesn't matter. */
1090
1091 /* Load a .service file */
1092 r = unit_load_fragment(u);
1093 if (r < 0)
1094 return r;
1095
1096 if (u->load_state == UNIT_STUB)
1097 u->load_state = UNIT_LOADED;
1098
1099 /* Load drop-in directory data */
1100 r = unit_load_dropin(unit_follow_merge(u));
1101 if (r < 0)
1102 return r;
1103
1104 return 0;
1105 }
1106
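/* Ensures that a target unit is ordered After= the units it pulls in, so the
 * target only becomes active once its members have been started. The ordering
 * is skipped if either side disables default dependencies, if either unit is
 * not fully loaded, or if it would introduce an ordering loop. */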
1107 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1108 assert(u);
1109 assert(target);
1110
1111 if (target->type != UNIT_TARGET)
1112 return 0;
1113
1114 /* Only add the dependency if both units are loaded, so that
1115 * the loop check below is reliable */
1116 if (u->load_state != UNIT_LOADED ||
1117 target->load_state != UNIT_LOADED)
1118 return 0;
1119
1120 /* If either side wants no automatic dependencies, then let's
1121 * skip this */
1122 if (!u->default_dependencies ||
1123 !target->default_dependencies)
1124 return 0;
1125
1126 /* Don't create loops */
1127 if (set_get(target->dependencies[UNIT_BEFORE], u))
1128 return 0;
1129
1130 return unit_add_dependency(target, UNIT_AFTER, u, true);
1131 }
1132
1133 static int unit_add_target_dependencies(Unit *u) {
1134
1135 static const UnitDependency deps[] = {
1136 UNIT_REQUIRED_BY,
1137 UNIT_REQUISITE_OF,
1138 UNIT_WANTED_BY,
1139 UNIT_BOUND_BY
1140 };
1141
1142 Unit *target;
1143 Iterator i;
1144 unsigned k;
1145 int r = 0;
1146
1147 assert(u);
1148
1149 for (k = 0; k < ELEMENTSOF(deps); k++)
1150 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1151 r = unit_add_default_target_dependency(u, target);
1152 if (r < 0)
1153 return r;
1154 }
1155
1156 return r;
1157 }
1158
1159 static int unit_add_slice_dependencies(Unit *u) {
1160 assert(u);
1161
1162 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1163 return 0;
1164
1165 if (UNIT_ISSET(u->slice))
1166 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1167
1168 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1169 return 0;
1170
1171 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1172 }
1173
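/* Turns RequiresMountsFor= into concrete dependencies: for each listed path
 * and each of its prefixes (e.g. /var/lib/foo, /var/lib, /var, ...) the
 * matching .mount unit gets an After= dependency, plus a Requires= if a
 * fragment file exists for that mount. Mount units that aren't loaded yet are
 * queued for loading so they can add the reverse dependencies themselves. */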
1174 static int unit_add_mount_dependencies(Unit *u) {
1175 char **i;
1176 int r;
1177
1178 assert(u);
1179
1180 STRV_FOREACH(i, u->requires_mounts_for) {
1181 char prefix[strlen(*i) + 1];
1182
1183 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1184 _cleanup_free_ char *p = NULL;
1185 Unit *m;
1186
1187 r = unit_name_from_path(prefix, ".mount", &p);
1188 if (r < 0)
1189 return r;
1190
1191 m = manager_get_unit(u->manager, p);
1192 if (!m) {
1193 /* Make sure to load the mount unit if
1194 * it exists. If so, the dependencies
1195 * on this unit will be added later
1196 * during the loading of the mount
1197 * unit. */
1198 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1199 continue;
1200 }
1201 if (m == u)
1202 continue;
1203
1204 if (m->load_state != UNIT_LOADED)
1205 continue;
1206
1207 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1208 if (r < 0)
1209 return r;
1210
1211 if (m->fragment_path) {
1212 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1213 if (r < 0)
1214 return r;
1215 }
1216 }
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int unit_add_startup_units(Unit *u) {
1223 CGroupContext *c;
1224 int r;
1225
1226 c = unit_get_cgroup_context(u);
1227 if (!c)
1228 return 0;
1229
1230 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1231 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1232 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1233 return 0;
1234
1235 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1236 if (r < 0)
1237 return r;
1238
1239 return set_put(u->manager->startup_units, u);
1240 }
1241
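/* Runs the actual loading of a unit: removes it from the load queue, flushes
 * a pending transient file if there is one, invokes the type-specific load()
 * hook and, on success, adds the implicit target, slice, mount and startup
 * dependencies. On failure the load state becomes UNIT_NOT_FOUND or
 * UNIT_ERROR and the error code is stored in u->load_error. */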
1242 int unit_load(Unit *u) {
1243 int r;
1244
1245 assert(u);
1246
1247 if (u->in_load_queue) {
1248 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1249 u->in_load_queue = false;
1250 }
1251
1252 if (u->type == _UNIT_TYPE_INVALID)
1253 return -EINVAL;
1254
1255 if (u->load_state != UNIT_STUB)
1256 return 0;
1257
1258 if (u->transient_file) {
1259 r = fflush_and_check(u->transient_file);
1260 if (r < 0)
1261 goto fail;
1262
1263 fclose(u->transient_file);
1264 u->transient_file = NULL;
1265
1266 u->fragment_mtime = now(CLOCK_REALTIME);
1267 }
1268
1269 if (UNIT_VTABLE(u)->load) {
1270 r = UNIT_VTABLE(u)->load(u);
1271 if (r < 0)
1272 goto fail;
1273 }
1274
1275 if (u->load_state == UNIT_STUB) {
1276 r = -ENOENT;
1277 goto fail;
1278 }
1279
1280 if (u->load_state == UNIT_LOADED) {
1281
1282 r = unit_add_target_dependencies(u);
1283 if (r < 0)
1284 goto fail;
1285
1286 r = unit_add_slice_dependencies(u);
1287 if (r < 0)
1288 goto fail;
1289
1290 r = unit_add_mount_dependencies(u);
1291 if (r < 0)
1292 goto fail;
1293
1294 r = unit_add_startup_units(u);
1295 if (r < 0)
1296 goto fail;
1297
1298 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1299 log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
1300 r = -EINVAL;
1301 goto fail;
1302 }
1303
1304 unit_update_cgroup_members_masks(u);
1305 }
1306
1307 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1308
1309 unit_add_to_dbus_queue(unit_follow_merge(u));
1310 unit_add_to_gc_queue(u);
1311
1312 return 0;
1313
1314 fail:
1315 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1316 u->load_error = r;
1317 unit_add_to_dbus_queue(u);
1318 unit_add_to_gc_queue(u);
1319
1320 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1321
1322 return r;
1323 }
1324
1325 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1326 Condition *c;
1327 int triggered = -1;
1328
1329 assert(u);
1330 assert(to_string);
1331
1332 /* If the condition list is empty, then it is true */
1333 if (!first)
1334 return true;
1335
1336 /* Otherwise, if all of the non-trigger conditions apply and
1337 * if any of the trigger conditions apply (unless there are
1338 * none) we return true */
1339 LIST_FOREACH(conditions, c, first) {
1340 int r;
1341
1342 r = condition_test(c);
1343 if (r < 0)
1344 log_unit_warning(u,
1345 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1346 to_string(c->type),
1347 c->trigger ? "|" : "",
1348 c->negate ? "!" : "",
1349 c->parameter);
1350 else
1351 log_unit_debug(u,
1352 "%s=%s%s%s %s.",
1353 to_string(c->type),
1354 c->trigger ? "|" : "",
1355 c->negate ? "!" : "",
1356 c->parameter,
1357 condition_result_to_string(c->result));
1358
1359 if (!c->trigger && r <= 0)
1360 return false;
1361
1362 if (c->trigger && triggered <= 0)
1363 triggered = r > 0;
1364 }
1365
1366 return triggered != 0;
1367 }
1368
1369 static bool unit_condition_test(Unit *u) {
1370 assert(u);
1371
1372 dual_timestamp_get(&u->condition_timestamp);
1373 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1374
1375 return u->condition_result;
1376 }
1377
1378 static bool unit_assert_test(Unit *u) {
1379 assert(u);
1380
1381 dual_timestamp_get(&u->assert_timestamp);
1382 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1383
1384 return u->assert_result;
1385 }
1386
1387 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1388 DISABLE_WARNING_FORMAT_NONLITERAL;
1389 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1390 REENABLE_WARNING;
1391 }
1392
1393 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1394 const char *format;
1395 const UnitStatusMessageFormats *format_table;
1396
1397 assert(u);
1398 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1399
1400 if (t != JOB_RELOAD) {
1401 format_table = &UNIT_VTABLE(u)->status_message_formats;
1402 if (format_table) {
1403 format = format_table->starting_stopping[t == JOB_STOP];
1404 if (format)
1405 return format;
1406 }
1407 }
1408
1409 /* Return generic strings */
1410 if (t == JOB_START)
1411 return "Starting %s.";
1412 else if (t == JOB_STOP)
1413 return "Stopping %s.";
1414 else
1415 return "Reloading %s.";
1416 }
1417
1418 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1419 const char *format;
1420
1421 assert(u);
1422
1423 /* Reload status messages have traditionally not been printed to console. */
1424 if (!IN_SET(t, JOB_START, JOB_STOP))
1425 return;
1426
1427 format = unit_get_status_message_format(u, t);
1428
1429 DISABLE_WARNING_FORMAT_NONLITERAL;
1430 unit_status_printf(u, "", format);
1431 REENABLE_WARNING;
1432 }
1433
1434 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1435 const char *format;
1436 char buf[LINE_MAX];
1437 sd_id128_t mid;
1438
1439 assert(u);
1440
1441 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1442 return;
1443
1444 if (log_on_console())
1445 return;
1446
1447 /* We log status messages for all units and all operations. */
1448
1449 format = unit_get_status_message_format(u, t);
1450
1451 DISABLE_WARNING_FORMAT_NONLITERAL;
1452 xsprintf(buf, format, unit_description(u));
1453 REENABLE_WARNING;
1454
1455 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1456 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1457 SD_MESSAGE_UNIT_RELOADING;
1458
1459 /* Note that we deliberately use LOG_MESSAGE() instead of
1460 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1461 * closely what is written to screen using the status output,
1462 * which is supposed to be the highest-level, friendliest
1463 * output possible, which means we should avoid the low-level
1464 * unit name. */
1465 log_struct(LOG_INFO,
1466 LOG_MESSAGE_ID(mid),
1467 LOG_UNIT_ID(u),
1468 LOG_MESSAGE("%s", buf),
1469 NULL);
1470 }
1471
1472 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1473 assert(u);
1474 assert(t >= 0);
1475 assert(t < _JOB_TYPE_MAX);
1476
1477 unit_status_log_starting_stopping_reloading(u, t);
1478 unit_status_print_starting_stopping(u, t);
1479 }
1480
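/* Checks the per-unit start rate limit. Returns 0 if another start is still
 * allowed; otherwise records that the limit was hit and invokes the
 * configured start limit action. */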
1481 int unit_start_limit_test(Unit *u) {
1482 assert(u);
1483
1484 if (ratelimit_test(&u->start_limit)) {
1485 u->start_limit_hit = false;
1486 return 0;
1487 }
1488
1489 log_unit_warning(u, "Start request repeated too quickly.");
1490 u->start_limit_hit = true;
1491
1492 return failure_action(u->manager, u->start_limit_action, u->reboot_arg);
1493 }
1494
1495 /* Errors:
1496 * -EBADR: This unit type does not support starting.
1497 * -EALREADY: Unit is already started.
1498 * -EAGAIN: An operation is already in progress. Retry later.
1499 * -ECANCELED: Too many requests for now.
1500 * -EPROTO: Assert failed
1501 * -EINVAL: Unit not loaded
1502 * -EOPNOTSUPP: Unit type not supported
1503 */
1504 int unit_start(Unit *u) {
1505 UnitActiveState state;
1506 Unit *following;
1507
1508 assert(u);
1509
1510 /* If this is already started, then this will succeed. Note
1511 * that this will even succeed if this unit is not startable
1512 * by the user. This is relied on to detect when we need to
1513 * wait for units and when waiting is finished. */
1514 state = unit_active_state(u);
1515 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1516 return -EALREADY;
1517
1518 /* Units that aren't loaded cannot be started */
1519 if (u->load_state != UNIT_LOADED)
1520 return -EINVAL;
1521
1522 /* If the conditions failed, don't do anything at all. If we
1523 * already are activating this call might still be useful to
1524 * speed up activation in case there is some hold-off time,
1525 * but we don't want to recheck the condition in that case. */
1526 if (state != UNIT_ACTIVATING &&
1527 !unit_condition_test(u)) {
1528 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1529 return -EALREADY;
1530 }
1531
1532 /* If the asserts failed, fail the entire job */
1533 if (state != UNIT_ACTIVATING &&
1534 !unit_assert_test(u)) {
1535 log_unit_notice(u, "Starting requested but asserts failed.");
1536 return -EPROTO;
1537 }
1538
1539 /* Units of types that aren't supported cannot be
1540 * started. Note that we do this test only after the condition
1541 * checks, so that we return condition check errors
1542 * (which are usually not considered a true failure) rather
1543 * than "not supported" errors (which are considered a failure).
1544 */
1545 if (!unit_supported(u))
1546 return -EOPNOTSUPP;
1547
1548 /* Forward to the main object, if we aren't it. */
1549 following = unit_following(u);
1550 if (following) {
1551 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1552 return unit_start(following);
1553 }
1554
1555 /* If it is stopped, but we cannot start it, then fail */
1556 if (!UNIT_VTABLE(u)->start)
1557 return -EBADR;
1558
1559 /* We don't suppress calls to ->start() here when we are
1560 * already starting, to allow this request to be used as a
1561 * "hurry up" call, for example when the unit is in some "auto
1562 * restart" state where it waits for a holdoff timer to elapse
1563 * before it will start again. */
1564
1565 unit_add_to_dbus_queue(u);
1566
1567 return UNIT_VTABLE(u)->start(u);
1568 }
1569
1570 bool unit_can_start(Unit *u) {
1571 assert(u);
1572
1573 if (u->load_state != UNIT_LOADED)
1574 return false;
1575
1576 if (!unit_supported(u))
1577 return false;
1578
1579 return !!UNIT_VTABLE(u)->start;
1580 }
1581
1582 bool unit_can_isolate(Unit *u) {
1583 assert(u);
1584
1585 return unit_can_start(u) &&
1586 u->allow_isolate;
1587 }
1588
1589 /* Errors:
1590 * -EBADR: This unit type does not support stopping.
1591 * -EALREADY: Unit is already stopped.
1592 * -EAGAIN: An operation is already in progress. Retry later.
1593 */
1594 int unit_stop(Unit *u) {
1595 UnitActiveState state;
1596 Unit *following;
1597
1598 assert(u);
1599
1600 state = unit_active_state(u);
1601 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1602 return -EALREADY;
1603
1604 following = unit_following(u);
1605 if (following) {
1606 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1607 return unit_stop(following);
1608 }
1609
1610 if (!UNIT_VTABLE(u)->stop)
1611 return -EBADR;
1612
1613 unit_add_to_dbus_queue(u);
1614
1615 return UNIT_VTABLE(u)->stop(u);
1616 }
1617
1618 /* Errors:
1619 * -EBADR: This unit type does not support reloading.
1620 * -ENOEXEC: Unit is not started.
1621 * -EAGAIN: An operation is already in progress. Retry later.
1622 */
1623 int unit_reload(Unit *u) {
1624 UnitActiveState state;
1625 Unit *following;
1626
1627 assert(u);
1628
1629 if (u->load_state != UNIT_LOADED)
1630 return -EINVAL;
1631
1632 if (!unit_can_reload(u))
1633 return -EBADR;
1634
1635 state = unit_active_state(u);
1636 if (state == UNIT_RELOADING)
1637 return -EALREADY;
1638
1639 if (state != UNIT_ACTIVE) {
1640 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1641 return -ENOEXEC;
1642 }
1643
1644 following = unit_following(u);
1645 if (following) {
1646 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1647 return unit_reload(following);
1648 }
1649
1650 unit_add_to_dbus_queue(u);
1651
1652 return UNIT_VTABLE(u)->reload(u);
1653 }
1654
1655 bool unit_can_reload(Unit *u) {
1656 assert(u);
1657
1658 if (!UNIT_VTABLE(u)->reload)
1659 return false;
1660
1661 if (!UNIT_VTABLE(u)->can_reload)
1662 return true;
1663
1664 return UNIT_VTABLE(u)->can_reload(u);
1665 }
1666
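/* Implements StopWhenUnneeded=: if nothing that requires, wants, binds to or
 * requisites this unit is active or pending anymore, a stop job is enqueued,
 * rate-limited so that repeated stop failures can't turn into a busy loop. */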
1667 static void unit_check_unneeded(Unit *u) {
1668
1669 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1670
1671 static const UnitDependency needed_dependencies[] = {
1672 UNIT_REQUIRED_BY,
1673 UNIT_REQUISITE_OF,
1674 UNIT_WANTED_BY,
1675 UNIT_BOUND_BY,
1676 };
1677
1678 Unit *other;
1679 Iterator i;
1680 unsigned j;
1681 int r;
1682
1683 assert(u);
1684
1685 /* If this service shall be shut down when unneeded then do
1686 * so. */
1687
1688 if (!u->stop_when_unneeded)
1689 return;
1690
1691 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1692 return;
1693
1694 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1695 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1696 if (unit_active_or_pending(other))
1697 return;
1698
1699 /* If stopping a unit fails continuously we might enter a stop
1700 * loop here, hence after a while stop acting on the unit
1701 * being unneeded. */
1702 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1703 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1704 return;
1705 }
1706
1707 log_unit_info(u, "Unit not needed anymore. Stopping.");
1708
1709 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1710 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1711 if (r < 0)
1712 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1713 }
1714
1715 static void unit_check_binds_to(Unit *u) {
1716 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1717 bool stop = false;
1718 Unit *other;
1719 Iterator i;
1720 int r;
1721
1722 assert(u);
1723
1724 if (u->job)
1725 return;
1726
1727 if (unit_active_state(u) != UNIT_ACTIVE)
1728 return;
1729
1730 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1731 if (other->job)
1732 continue;
1733
1734 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1735 continue;
1736
1737 stop = true;
1738 break;
1739 }
1740
1741 if (!stop)
1742 return;
1743
1744 /* If stopping a unit fails continuously we might enter a stop
1745 * loop here, hence after a while stop reacting to the unit
1746 * we are bound to being inactive. */
1747 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1748 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1749 return;
1750 }
1751
1752 assert(other);
1753 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1754
1755 /* A unit we need to run is gone. Sniff. Let's stop this. */
1756 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1757 if (r < 0)
1758 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1759 }
1760
1761 static void retroactively_start_dependencies(Unit *u) {
1762 Iterator i;
1763 Unit *other;
1764
1765 assert(u);
1766 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1767
1768 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1769 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1770 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1771 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1772
1773 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1774 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1775 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1776 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1777
1778 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1779 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1780 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1781 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1782
1783 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1784 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1785 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1786
1787 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1788 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1789 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1790 }
1791
1792 static void retroactively_stop_dependencies(Unit *u) {
1793 Iterator i;
1794 Unit *other;
1795
1796 assert(u);
1797 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1798
1799 /* Pull down units which are bound to us recursively if enabled */
1800 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1801 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1802 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1803 }
1804
1805 static void check_unneeded_dependencies(Unit *u) {
1806 Iterator i;
1807 Unit *other;
1808
1809 assert(u);
1810 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1811
1812 /* Garbage collect services that might not be needed anymore, if enabled */
1813 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1814 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1815 unit_check_unneeded(other);
1816 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1817 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1818 unit_check_unneeded(other);
1819 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1820 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1821 unit_check_unneeded(other);
1822 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1823 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1824 unit_check_unneeded(other);
1825 }
1826
1827 void unit_start_on_failure(Unit *u) {
1828 Unit *other;
1829 Iterator i;
1830
1831 assert(u);
1832
1833 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1834 return;
1835
1836 log_unit_info(u, "Triggering OnFailure= dependencies.");
1837
1838 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1839 int r;
1840
1841 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1842 if (r < 0)
1843 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1844 }
1845 }
1846
1847 void unit_trigger_notify(Unit *u) {
1848 Unit *other;
1849 Iterator i;
1850
1851 assert(u);
1852
1853 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1854 if (UNIT_VTABLE(other)->trigger_notify)
1855 UNIT_VTABLE(other)->trigger_notify(other, u);
1856 }
1857
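/* Central state-change hook, called by the type-specific code whenever the
 * unit's (possibly unchanged) active state was recomputed: updates the state
 * timestamps, completes or invalidates the pending job, retroactively starts
 * or stops dependencies for unexpected changes, fires OnFailure=, emits audit
 * and plymouth notifications and queues the unit for D-Bus and GC processing. */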
1858 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1859 Manager *m;
1860 bool unexpected;
1861
1862 assert(u);
1863 assert(os < _UNIT_ACTIVE_STATE_MAX);
1864 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1865
1866 /* Note that this is called for all low-level state changes,
1867 * even if they might map to the same high-level
1868 * UnitActiveState! That means that ns == os is an expected
1869 * behavior here. For example: if a mount point is remounted
1870 * this function will be called too! */
1871
1872 m = u->manager;
1873
1874 /* Update timestamps for state changes */
1875 if (!MANAGER_IS_RELOADING(m)) {
1876 dual_timestamp_get(&u->state_change_timestamp);
1877
1878 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1879 u->inactive_exit_timestamp = u->state_change_timestamp;
1880 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1881 u->inactive_enter_timestamp = u->state_change_timestamp;
1882
1883 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1884 u->active_enter_timestamp = u->state_change_timestamp;
1885 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1886 u->active_exit_timestamp = u->state_change_timestamp;
1887 }
1888
1889 /* Keep track of failed units */
1890 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1891
1892 /* Make sure the cgroup is always removed when we become inactive */
1893 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1894 unit_prune_cgroup(u);
1895
1896 /* Note that this doesn't apply to RemainAfterExit services exiting
1897 * successfully, since there's no change of state in that case, which is
1898 * why it is handled in service_set_state() instead. */
1899 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1900 ExecContext *ec;
1901
1902 ec = unit_get_exec_context(u);
1903 if (ec && exec_context_may_touch_console(ec)) {
1904 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1905 m->n_on_console--;
1906
1907 if (m->n_on_console == 0)
1908 /* unset no_console_output flag, since the console is free */
1909 m->no_console_output = false;
1910 } else
1911 m->n_on_console++;
1912 }
1913 }
1914
1915 if (u->job) {
1916 unexpected = false;
1917
1918 if (u->job->state == JOB_WAITING)
1919
1920 /* So we reached a different state for this
1921 * job. Let's see if we can run it now if it
1922 * failed previously due to EAGAIN. */
1923 job_add_to_run_queue(u->job);
1924
1925 /* Let's check whether this state change constitutes a
1926 * finished job, or maybe contradicts a running job and
1927 * hence needs to invalidate jobs. */
1928
1929 switch (u->job->type) {
1930
1931 case JOB_START:
1932 case JOB_VERIFY_ACTIVE:
1933
1934 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1935 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
1936 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1937 unexpected = true;
1938
1939 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1940 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1941 }
1942
1943 break;
1944
1945 case JOB_RELOAD:
1946 case JOB_RELOAD_OR_START:
1947 case JOB_TRY_RELOAD:
1948
1949 if (u->job->state == JOB_RUNNING) {
1950 if (ns == UNIT_ACTIVE)
1951 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
1952 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1953 unexpected = true;
1954
1955 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1956 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1957 }
1958 }
1959
1960 break;
1961
1962 case JOB_STOP:
1963 case JOB_RESTART:
1964 case JOB_TRY_RESTART:
1965
1966 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1967 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
1968 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1969 unexpected = true;
1970 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
1971 }
1972
1973 break;
1974
1975 default:
1976 assert_not_reached("Job type unknown");
1977 }
1978
1979 } else
1980 unexpected = true;
1981
1982 if (!MANAGER_IS_RELOADING(m)) {
1983
1984 /* If this state change happened without being
1985 * requested by a job, then let's retroactively start
1986 * or stop dependencies. We skip that step when
1987 * deserializing, since we don't want to create any
1988 * additional jobs just because something is already
1989 * activated. */
1990
1991 if (unexpected) {
1992 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1993 retroactively_start_dependencies(u);
1994 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1995 retroactively_stop_dependencies(u);
1996 }
1997
1998 /* stop unneeded units regardless if going down was expected or not */
1999 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2000 check_unneeded_dependencies(u);
2001
2002 if (ns != os && ns == UNIT_FAILED) {
2003 log_unit_notice(u, "Unit entered failed state.");
2004 unit_start_on_failure(u);
2005 }
2006 }
2007
2008 /* Some names are special */
2009 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2010
2011 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2012 /* The bus might have just become available,
2013 * hence try to connect to it, if we aren't
2014 * yet connected. */
2015 bus_init(m, true);
2016
2017 if (u->type == UNIT_SERVICE &&
2018 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2019 !MANAGER_IS_RELOADING(m)) {
2020 /* Write audit record if we have just finished starting up */
2021 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2022 u->in_audit = true;
2023 }
2024
2025 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2026 manager_send_unit_plymouth(m, u);
2027
2028 } else {
2029
2030 /* We don't care about D-Bus here, since we'll get an
2031 * asynchronous notification for it anyway. */
2032
2033 if (u->type == UNIT_SERVICE &&
2034 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2035 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2036 !MANAGER_IS_RELOADING(m)) {
2037
2038 /* Hmm, if there was no start record written,
2039 * write it now, so that we always have a nice
2040 * pair */
2041 if (!u->in_audit) {
2042 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2043
2044 if (ns == UNIT_INACTIVE)
2045 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2046 } else
2047 /* Write audit record if we have just finished shutting down */
2048 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2049
2050 u->in_audit = false;
2051 }
2052 }
2053
2054 manager_recheck_journal(m);
2055 unit_trigger_notify(u);
2056
2057 if (!MANAGER_IS_RELOADING(u->manager)) {
2058 /* Maybe we finished startup and are now ready for
2059 * being stopped because unneeded? */
2060 unit_check_unneeded(u);
2061
2062 /* Maybe we finished startup, but something we needed
2063 * has vanished? Let's die then. (This happens when
2064 * something BindsTo= to a Type=oneshot unit, as these
2065 * units go directly from starting to inactive,
2066 * without ever entering started.) */
2067 unit_check_binds_to(u);
2068 }
2069
2070 unit_add_to_dbus_queue(u);
2071 unit_add_to_gc_queue(u);
2072 }
2073
2074 int unit_watch_pid(Unit *u, pid_t pid) {
2075 int q, r;
2076
2077 assert(u);
2078 assert(pid >= 1);
2079
2080 /* Watch a specific PID. We only support one or two units
2081 * watching each PID for now, not more. */
2082
2083 r = set_ensure_allocated(&u->pids, NULL);
2084 if (r < 0)
2085 return r;
2086
2087 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2088 if (r < 0)
2089 return r;
2090
2091 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2092 if (r == -EEXIST) {
2093 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2094 if (r < 0)
2095 return r;
2096
2097 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2098 }
2099
2100 q = set_put(u->pids, PID_TO_PTR(pid));
2101 if (q < 0)
2102 return q;
2103
2104 return r;
2105 }
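
/* Illustrative sketch (not part of the upstream file; unit pointers and the
 * PID are hypothetical): how the two-level watch maps behave when two units
 * watch the same PID.
 *
 *     r = unit_watch_pid(a, 4711);   // stored in manager->watch_pids1
 *     r = unit_watch_pid(b, 4711);   // watch_pids1 reports -EEXIST, so the
 *                                    // entry lands in manager->watch_pids2
 *
 * A third unit watching PID 4711 would get -EEXIST back, matching the
 * "one or two units per PID" limitation documented above. */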
2106
2107 void unit_unwatch_pid(Unit *u, pid_t pid) {
2108 assert(u);
2109 assert(pid >= 1);
2110
2111 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2112 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2113 (void) set_remove(u->pids, PID_TO_PTR(pid));
2114 }
2115
2116 void unit_unwatch_all_pids(Unit *u) {
2117 assert(u);
2118
2119 while (!set_isempty(u->pids))
2120 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2121
2122 u->pids = set_free(u->pids);
2123 }
2124
2125 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2126 Iterator i;
2127 void *e;
2128
2129 assert(u);
2130
2131 /* Cleans dead PIDs from our list */
2132
2133 SET_FOREACH(e, u->pids, i) {
2134 pid_t pid = PTR_TO_PID(e);
2135
2136 if (pid == except1 || pid == except2)
2137 continue;
2138
2139 if (!pid_is_unwaited(pid))
2140 unit_unwatch_pid(u, pid);
2141 }
2142 }
2143
2144 bool unit_job_is_applicable(Unit *u, JobType j) {
2145 assert(u);
2146 assert(j >= 0 && j < _JOB_TYPE_MAX);
2147
2148 switch (j) {
2149
2150 case JOB_VERIFY_ACTIVE:
2151 case JOB_START:
2152 case JOB_STOP:
2153 case JOB_NOP:
2154 return true;
2155
2156 case JOB_RESTART:
2157 case JOB_TRY_RESTART:
2158 return unit_can_start(u);
2159
2160 case JOB_RELOAD:
2161 case JOB_TRY_RELOAD:
2162 return unit_can_reload(u);
2163
2164 case JOB_RELOAD_OR_START:
2165 return unit_can_reload(u) && unit_can_start(u);
2166
2167 default:
2168 assert_not_reached("Invalid job type");
2169 }
2170 }
2171
2172 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2173 assert(u);
2174
2175 /* Only warn about some unit types */
2176 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2177 return;
2178
2179 if (streq_ptr(u->id, other))
2180 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2181 else
2182 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2183 }
2184
2185 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2186
2187 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2188 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2189 [UNIT_WANTS] = UNIT_WANTED_BY,
2190 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2191 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2192 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2193 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2194 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2195 [UNIT_WANTED_BY] = UNIT_WANTS,
2196 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2197 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2198 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2199 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2200 [UNIT_BEFORE] = UNIT_AFTER,
2201 [UNIT_AFTER] = UNIT_BEFORE,
2202 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2203 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2204 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2205 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2206 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2207 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2208 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2209 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2210 };
2211 int r, q = 0, v = 0, w = 0;
2212 Unit *orig_u = u, *orig_other = other;
2213
2214 assert(u);
2215 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2216 assert(other);
2217
2218 u = unit_follow_merge(u);
2219 other = unit_follow_merge(other);
2220
2221 /* We won't allow dependencies on ourselves. We will not
2222          * consider them an error, however. */
2223 if (u == other) {
2224 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2225 return 0;
2226 }
2227
2228 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2229 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2230 return 0;
2231 }
2232
2233 r = set_ensure_allocated(&u->dependencies[d], NULL);
2234 if (r < 0)
2235 return r;
2236
2237 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2238 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2239 if (r < 0)
2240 return r;
2241 }
2242
2243 if (add_reference) {
2244 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2245 if (r < 0)
2246 return r;
2247
2248 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2249 if (r < 0)
2250 return r;
2251 }
2252
2253 q = set_put(u->dependencies[d], other);
2254 if (q < 0)
2255 return q;
2256
2257 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2258 v = set_put(other->dependencies[inverse_table[d]], u);
2259 if (v < 0) {
2260 r = v;
2261 goto fail;
2262 }
2263 }
2264
2265 if (add_reference) {
2266 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2267 if (w < 0) {
2268 r = w;
2269 goto fail;
2270 }
2271
2272 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2273 if (r < 0)
2274 goto fail;
2275 }
2276
2277 unit_add_to_dbus_queue(u);
2278 return 0;
2279
2280 fail:
2281 if (q > 0)
2282 set_remove(u->dependencies[d], other);
2283
2284 if (v > 0)
2285 set_remove(other->dependencies[inverse_table[d]], u);
2286
2287 if (w > 0)
2288 set_remove(u->dependencies[UNIT_REFERENCES], other);
2289
2290 return r;
2291 }
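
/* Illustrative sketch (hypothetical unit pointers): registering a single
 * dependency records both directions, as per inverse_table above.
 *
 *     r = unit_add_dependency(foo, UNIT_WANTS, bar, true);
 *     // foo->dependencies[UNIT_WANTS]         now contains bar
 *     // bar->dependencies[UNIT_WANTED_BY]     now contains foo
 *     // foo->dependencies[UNIT_REFERENCES] and
 *     // bar->dependencies[UNIT_REFERENCED_BY] are updated too, because
 *     // add_reference is true
 *
 * UNIT_ON_FAILURE is the one type with no inverse, so only the forward
 * direction is stored for it. */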
2292
2293 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2294 int r;
2295
2296 assert(u);
2297
2298 r = unit_add_dependency(u, d, other, add_reference);
2299 if (r < 0)
2300 return r;
2301
2302 return unit_add_dependency(u, e, other, add_reference);
2303 }
2304
2305 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2306 int r;
2307
2308 assert(u);
2309 assert(name || path);
2310 assert(buf);
2311 assert(ret);
2312
2313 if (!name)
2314 name = basename(path);
2315
2316 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2317 *buf = NULL;
2318 *ret = name;
2319 return 0;
2320 }
2321
2322 if (u->instance)
2323 r = unit_name_replace_instance(name, u->instance, buf);
2324 else {
2325 _cleanup_free_ char *i = NULL;
2326
2327 r = unit_name_to_prefix(u->id, &i);
2328 if (r < 0)
2329 return r;
2330
2331 r = unit_name_replace_instance(name, i, buf);
2332 }
2333 if (r < 0)
2334 return r;
2335
2336 *ret = *buf;
2337 return 0;
2338 }
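
/* Illustrative sketch (unit names are made up): resolve_template() only
 * rewrites the name if it is a valid template. For example, given the name
 * "getty@.service" and u->instance "tty1" this should yield
 * "getty@tty1.service"; a non-template name such as "dbus.service" is
 * returned unchanged, with *buf left at NULL. */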
2339
2340 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2341 _cleanup_free_ char *buf = NULL;
2342 Unit *other;
2343 int r;
2344
2345 assert(u);
2346 assert(name || path);
2347
2348 r = resolve_template(u, name, path, &buf, &name);
2349 if (r < 0)
2350 return r;
2351
2352 r = manager_load_unit(u->manager, name, path, NULL, &other);
2353 if (r < 0)
2354 return r;
2355
2356 return unit_add_dependency(u, d, other, add_reference);
2357 }
2358
2359 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2360 _cleanup_free_ char *buf = NULL;
2361 Unit *other;
2362 int r;
2363
2364 assert(u);
2365 assert(name || path);
2366
2367 r = resolve_template(u, name, path, &buf, &name);
2368 if (r < 0)
2369 return r;
2370
2371 r = manager_load_unit(u->manager, name, path, NULL, &other);
2372 if (r < 0)
2373 return r;
2374
2375 return unit_add_two_dependencies(u, d, e, other, add_reference);
2376 }
2377
2378 int set_unit_path(const char *p) {
2379 /* This is mostly for debug purposes */
2380 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2381 return -errno;
2382
2383 return 0;
2384 }
2385
2386 char *unit_dbus_path(Unit *u) {
2387 assert(u);
2388
2389 if (!u->id)
2390 return NULL;
2391
2392 return unit_dbus_path_from_name(u->id);
2393 }
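
/* Illustrative sketch (assuming the usual bus-label escaping performed by
 * unit_dbus_path_from_name()): a unit named "foo.service" would map to an
 * object path along the lines of
 * "/org/freedesktop/systemd1/unit/foo_2eservice". */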
2394
2395 int unit_set_slice(Unit *u, Unit *slice) {
2396 assert(u);
2397 assert(slice);
2398
2399         /* Sets the unit slice if it has not been set before. Is extra
2400          * careful to only allow this for units that actually have a
2401          * cgroup context. Also, we don't allow setting this for slices
2402          * (since the parent slice is derived from the name). Make
2403          * sure the unit we set is actually a slice. */
2404
2405 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2406 return -EOPNOTSUPP;
2407
2408 if (u->type == UNIT_SLICE)
2409 return -EINVAL;
2410
2411 if (unit_active_state(u) != UNIT_INACTIVE)
2412 return -EBUSY;
2413
2414 if (slice->type != UNIT_SLICE)
2415 return -EINVAL;
2416
2417 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2418 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2419 return -EPERM;
2420
2421 if (UNIT_DEREF(u->slice) == slice)
2422 return 0;
2423
2424 /* Disallow slice changes if @u is already bound to cgroups */
2425 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2426 return -EBUSY;
2427
2428 unit_ref_unset(&u->slice);
2429 unit_ref_set(&u->slice, slice);
2430 return 1;
2431 }
2432
2433 int unit_set_default_slice(Unit *u) {
2434 _cleanup_free_ char *b = NULL;
2435 const char *slice_name;
2436 Unit *slice;
2437 int r;
2438
2439 assert(u);
2440
2441 if (UNIT_ISSET(u->slice))
2442 return 0;
2443
2444 if (u->instance) {
2445 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2446
2447 /* Implicitly place all instantiated units in their
2448 * own per-template slice */
2449
2450 r = unit_name_to_prefix(u->id, &prefix);
2451 if (r < 0)
2452 return r;
2453
2454 /* The prefix is already escaped, but it might include
2455 * "-" which has a special meaning for slice units,
2456                  * hence escape it once more here. */
2457 escaped = unit_name_escape(prefix);
2458 if (!escaped)
2459 return -ENOMEM;
2460
2461 if (MANAGER_IS_SYSTEM(u->manager))
2462 b = strjoin("system-", escaped, ".slice", NULL);
2463 else
2464 b = strappend(escaped, ".slice");
2465 if (!b)
2466 return -ENOMEM;
2467
2468 slice_name = b;
2469 } else
2470 slice_name =
2471 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2472 ? SPECIAL_SYSTEM_SLICE
2473 : SPECIAL_ROOT_SLICE;
2474
2475 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2476 if (r < 0)
2477 return r;
2478
2479 return unit_set_slice(u, slice);
2480 }
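
/* Illustrative sketch (unit names are made up): on the system manager an
 * instantiated unit such as "getty@tty1.service" has the prefix "getty",
 * so it would be placed in "system-getty.slice"; units without an instance
 * end up in SPECIAL_SYSTEM_SLICE (or SPECIAL_ROOT_SLICE for user managers
 * and init.scope). */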
2481
2482 const char *unit_slice_name(Unit *u) {
2483 assert(u);
2484
2485 if (!UNIT_ISSET(u->slice))
2486 return NULL;
2487
2488 return UNIT_DEREF(u->slice)->id;
2489 }
2490
2491 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2492 _cleanup_free_ char *t = NULL;
2493 int r;
2494
2495 assert(u);
2496 assert(type);
2497 assert(_found);
2498
2499 r = unit_name_change_suffix(u->id, type, &t);
2500 if (r < 0)
2501 return r;
2502 if (unit_has_name(u, t))
2503 return -EINVAL;
2504
2505 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2506 assert(r < 0 || *_found != u);
2507 return r;
2508 }
2509
2510 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2511 const char *name, *old_owner, *new_owner;
2512 Unit *u = userdata;
2513 int r;
2514
2515 assert(message);
2516 assert(u);
2517
2518 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2519 if (r < 0) {
2520 bus_log_parse_error(r);
2521 return 0;
2522 }
2523
2524 if (UNIT_VTABLE(u)->bus_name_owner_change)
2525 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2526
2527 return 0;
2528 }
2529
2530 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2531 const char *match;
2532
2533 assert(u);
2534 assert(bus);
2535 assert(name);
2536
2537 if (u->match_bus_slot)
2538 return -EBUSY;
2539
2540 match = strjoina("type='signal',"
2541 "sender='org.freedesktop.DBus',"
2542 "path='/org/freedesktop/DBus',"
2543 "interface='org.freedesktop.DBus',"
2544 "member='NameOwnerChanged',"
2545 "arg0='", name, "'");
2546
2547 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2548 }
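
/* Illustrative sketch (the bus name is hypothetical): for the name
 * "org.example.Foo" the constructed match would read
 *
 *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.example.Foo'
 *
 * so that signal_name_owner_changed() above fires whenever that name
 * changes ownership. */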
2549
2550 int unit_watch_bus_name(Unit *u, const char *name) {
2551 int r;
2552
2553 assert(u);
2554 assert(name);
2555
2556 /* Watch a specific name on the bus. We only support one unit
2557 * watching each name for now. */
2558
2559 if (u->manager->api_bus) {
2560 /* If the bus is already available, install the match directly.
2561                  * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
2562 r = unit_install_bus_match(u, u->manager->api_bus, name);
2563 if (r < 0)
2564 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2565 }
2566
2567 r = hashmap_put(u->manager->watch_bus, name, u);
2568 if (r < 0) {
2569 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2570                 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
2571 }
2572
2573 return 0;
2574 }
2575
2576 void unit_unwatch_bus_name(Unit *u, const char *name) {
2577 assert(u);
2578 assert(name);
2579
2580 hashmap_remove_value(u->manager->watch_bus, name, u);
2581 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2582 }
2583
2584 bool unit_can_serialize(Unit *u) {
2585 assert(u);
2586
2587 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2588 }
2589
2590 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2591 int r;
2592
2593 assert(u);
2594 assert(f);
2595 assert(fds);
2596
2597 if (unit_can_serialize(u)) {
2598 ExecRuntime *rt;
2599
2600 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2601 if (r < 0)
2602 return r;
2603
2604 rt = unit_get_exec_runtime(u);
2605 if (rt) {
2606 r = exec_runtime_serialize(u, rt, f, fds);
2607 if (r < 0)
2608 return r;
2609 }
2610 }
2611
2612 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2613
2614 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2615 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2616 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2617 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2618
2619 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2620 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2621
2622 if (dual_timestamp_is_set(&u->condition_timestamp))
2623 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2624
2625 if (dual_timestamp_is_set(&u->assert_timestamp))
2626 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2627
2628 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2629
2630 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2631 if (u->cpu_usage_last != NSEC_INFINITY)
2632 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2633
2634 if (u->cgroup_path)
2635 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2636 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2637
2638 if (uid_is_valid(u->ref_uid))
2639 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2640 if (gid_is_valid(u->ref_gid))
2641 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2642
2643 bus_track_serialize(u->bus_track, f, "ref");
2644
2645 if (serialize_jobs) {
2646 if (u->job) {
2647 fprintf(f, "job\n");
2648 job_serialize(u->job, f);
2649 }
2650
2651 if (u->nop_job) {
2652 fprintf(f, "job\n");
2653 job_serialize(u->nop_job, f);
2654 }
2655 }
2656
2657 /* End marker */
2658 fputc('\n', f);
2659 return 0;
2660 }
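
/* Illustrative sketch of the stream produced above (values are made up):
 * each entry is a "key=value" line, optionally followed by serialized jobs,
 * and the whole block is closed by an empty line that acts as the end
 * marker unit_deserialize() looks for.
 *
 *     condition-result=yes
 *     transient=no
 *     cpu-usage-base=0
 *     cgroup-realized=yes
 *     <empty line>
 */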
2661
2662 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2663 assert(u);
2664 assert(f);
2665 assert(key);
2666
2667 if (!value)
2668 return 0;
2669
2670 fputs(key, f);
2671 fputc('=', f);
2672 fputs(value, f);
2673 fputc('\n', f);
2674
2675 return 1;
2676 }
2677
2678 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2679 _cleanup_free_ char *c = NULL;
2680
2681 assert(u);
2682 assert(f);
2683 assert(key);
2684
2685 if (!value)
2686 return 0;
2687
2688 c = cescape(value);
2689 if (!c)
2690 return -ENOMEM;
2691
2692 fputs(key, f);
2693 fputc('=', f);
2694 fputs(c, f);
2695 fputc('\n', f);
2696
2697 return 1;
2698 }
2699
2700 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2701 int copy;
2702
2703 assert(u);
2704 assert(f);
2705 assert(key);
2706
2707 if (fd < 0)
2708 return 0;
2709
2710 copy = fdset_put_dup(fds, fd);
2711 if (copy < 0)
2712 return copy;
2713
2714 fprintf(f, "%s=%i\n", key, copy);
2715 return 1;
2716 }
2717
2718 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2719 va_list ap;
2720
2721 assert(u);
2722 assert(f);
2723 assert(key);
2724 assert(format);
2725
2726 fputs(key, f);
2727 fputc('=', f);
2728
2729 va_start(ap, format);
2730 vfprintf(f, format, ap);
2731 va_end(ap);
2732
2733 fputc('\n', f);
2734 }
2735
2736 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2737 ExecRuntime **rt = NULL;
2738 size_t offset;
2739 int r;
2740
2741 assert(u);
2742 assert(f);
2743 assert(fds);
2744
2745 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2746 if (offset > 0)
2747 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2748
2749 for (;;) {
2750 char line[LINE_MAX], *l, *v;
2751 size_t k;
2752
2753 if (!fgets(line, sizeof(line), f)) {
2754 if (feof(f))
2755 return 0;
2756 return -errno;
2757 }
2758
2759 char_array_0(line);
2760 l = strstrip(line);
2761
2762 /* End marker */
2763 if (isempty(l))
2764 break;
2765
2766 k = strcspn(l, "=");
2767
2768 if (l[k] == '=') {
2769 l[k] = 0;
2770 v = l+k+1;
2771 } else
2772 v = l+k;
2773
2774 if (streq(l, "job")) {
2775 if (v[0] == '\0') {
2776 /* new-style serialized job */
2777 Job *j;
2778
2779 j = job_new_raw(u);
2780 if (!j)
2781 return log_oom();
2782
2783 r = job_deserialize(j, f);
2784 if (r < 0) {
2785 job_free(j);
2786 return r;
2787 }
2788
2789 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2790 if (r < 0) {
2791 job_free(j);
2792 return r;
2793 }
2794
2795 r = job_install_deserialized(j);
2796 if (r < 0) {
2797 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2798 job_free(j);
2799 return r;
2800 }
2801 } else /* legacy for pre-44 */
2802                                 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2803 continue;
2804 } else if (streq(l, "state-change-timestamp")) {
2805 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2806 continue;
2807 } else if (streq(l, "inactive-exit-timestamp")) {
2808 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2809 continue;
2810 } else if (streq(l, "active-enter-timestamp")) {
2811 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2812 continue;
2813 } else if (streq(l, "active-exit-timestamp")) {
2814 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2815 continue;
2816 } else if (streq(l, "inactive-enter-timestamp")) {
2817 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2818 continue;
2819 } else if (streq(l, "condition-timestamp")) {
2820 dual_timestamp_deserialize(v, &u->condition_timestamp);
2821 continue;
2822 } else if (streq(l, "assert-timestamp")) {
2823 dual_timestamp_deserialize(v, &u->assert_timestamp);
2824 continue;
2825 } else if (streq(l, "condition-result")) {
2826
2827 r = parse_boolean(v);
2828 if (r < 0)
2829 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2830 else
2831 u->condition_result = r;
2832
2833 continue;
2834
2835 } else if (streq(l, "assert-result")) {
2836
2837 r = parse_boolean(v);
2838 if (r < 0)
2839 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2840 else
2841 u->assert_result = r;
2842
2843 continue;
2844
2845 } else if (streq(l, "transient")) {
2846
2847 r = parse_boolean(v);
2848 if (r < 0)
2849 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2850 else
2851 u->transient = r;
2852
2853 continue;
2854
2855 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2856
2857 r = safe_atou64(v, &u->cpu_usage_base);
2858 if (r < 0)
2859 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
2860
2861 continue;
2862
2863 } else if (streq(l, "cpu-usage-last")) {
2864
2865 r = safe_atou64(v, &u->cpu_usage_last);
2866 if (r < 0)
2867 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
2868
2869 continue;
2870
2871 } else if (streq(l, "cgroup")) {
2872
2873 r = unit_set_cgroup_path(u, v);
2874 if (r < 0)
2875 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2876
2877 (void) unit_watch_cgroup(u);
2878
2879 continue;
2880 } else if (streq(l, "cgroup-realized")) {
2881 int b;
2882
2883 b = parse_boolean(v);
2884 if (b < 0)
2885 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2886 else
2887 u->cgroup_realized = b;
2888
2889 continue;
2890
2891 } else if (streq(l, "ref-uid")) {
2892 uid_t uid;
2893
2894 r = parse_uid(v, &uid);
2895 if (r < 0)
2896 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
2897 else
2898 unit_ref_uid_gid(u, uid, GID_INVALID);
2899
2900 continue;
2901
2902 } else if (streq(l, "ref-gid")) {
2903 gid_t gid;
2904
2905 r = parse_gid(v, &gid);
2906 if (r < 0)
2907 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
2908 else
2909 unit_ref_uid_gid(u, UID_INVALID, gid);
2910
2911 } else if (streq(l, "ref")) {
2912
2913 r = strv_extend(&u->deserialized_refs, v);
2914 if (r < 0)
2915 log_oom();
2916
2917 continue;
2918 }
2919
2920 if (unit_can_serialize(u)) {
2921 if (rt) {
2922 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
2923 if (r < 0) {
2924 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
2925 continue;
2926 }
2927
2928 /* Returns positive if key was handled by the call */
2929 if (r > 0)
2930 continue;
2931 }
2932
2933 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
2934 if (r < 0)
2935 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
2936 }
2937 }
2938
2939 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
2940          * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
2941 * before 228 where the base for timeouts was not persistent across reboots. */
2942
2943 if (!dual_timestamp_is_set(&u->state_change_timestamp))
2944 dual_timestamp_get(&u->state_change_timestamp);
2945
2946 return 0;
2947 }
2948
2949 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
2950 Unit *device;
2951 _cleanup_free_ char *e = NULL;
2952 int r;
2953
2954 assert(u);
2955
2956 /* Adds in links to the device node that this unit is based on */
2957 if (isempty(what))
2958 return 0;
2959
2960 if (!is_device_path(what))
2961 return 0;
2962
2963 /* When device units aren't supported (such as in a
2964 * container), don't create dependencies on them. */
2965 if (!unit_type_supported(UNIT_DEVICE))
2966 return 0;
2967
2968 r = unit_name_from_path(what, ".device", &e);
2969 if (r < 0)
2970 return r;
2971
2972 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2973 if (r < 0)
2974 return r;
2975
2976 r = unit_add_two_dependencies(u, UNIT_AFTER,
2977 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
2978 device, true);
2979 if (r < 0)
2980 return r;
2981
2982 if (wants) {
2983 r = unit_add_dependency(device, UNIT_WANTS, u, false);
2984 if (r < 0)
2985 return r;
2986 }
2987
2988 return 0;
2989 }
2990
2991 int unit_coldplug(Unit *u) {
2992 int r = 0, q;
2993 char **i;
2994
2995 assert(u);
2996
2997         /* Make sure we don't enter a loop when coldplugging
2998 * recursively. */
2999 if (u->coldplugged)
3000 return 0;
3001
3002 u->coldplugged = true;
3003
3004 STRV_FOREACH(i, u->deserialized_refs) {
3005 q = bus_unit_track_add_name(u, *i);
3006 if (q < 0 && r >= 0)
3007 r = q;
3008 }
3009 u->deserialized_refs = strv_free(u->deserialized_refs);
3010
3011 if (UNIT_VTABLE(u)->coldplug) {
3012 q = UNIT_VTABLE(u)->coldplug(u);
3013 if (q < 0 && r >= 0)
3014 r = q;
3015 }
3016
3017 if (u->job) {
3018 q = job_coldplug(u->job);
3019 if (q < 0 && r >= 0)
3020 r = q;
3021 }
3022
3023 return r;
3024 }
3025
3026 static bool fragment_mtime_newer(const char *path, usec_t mtime) {
3027 struct stat st;
3028
3029 if (!path)
3030 return false;
3031
3032 if (stat(path, &st) < 0)
3033 /* What, cannot access this anymore? */
3034 return true;
3035
3036 if (mtime > 0)
3037 /* For non-empty files check the mtime */
3038 return timespec_load(&st.st_mtim) > mtime;
3039 else if (!null_or_empty(&st))
3040                 /* For masked files check if they are still masked */
3041 return true;
3042
3043 return false;
3044 }
3045
3046 bool unit_need_daemon_reload(Unit *u) {
3047 _cleanup_strv_free_ char **t = NULL;
3048 char **path;
3049
3050 assert(u);
3051
3052 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime))
3053 return true;
3054
3055 if (fragment_mtime_newer(u->source_path, u->source_mtime))
3056 return true;
3057
3058 (void) unit_find_dropin_paths(u, &t);
3059 if (!strv_equal(u->dropin_paths, t))
3060 return true;
3061
3062 STRV_FOREACH(path, u->dropin_paths)
3063 if (fragment_mtime_newer(*path, u->dropin_mtime))
3064 return true;
3065
3066 return false;
3067 }
3068
3069 void unit_reset_failed(Unit *u) {
3070 assert(u);
3071
3072 if (UNIT_VTABLE(u)->reset_failed)
3073 UNIT_VTABLE(u)->reset_failed(u);
3074
3075 RATELIMIT_RESET(u->start_limit);
3076 u->start_limit_hit = false;
3077 }
3078
3079 Unit *unit_following(Unit *u) {
3080 assert(u);
3081
3082 if (UNIT_VTABLE(u)->following)
3083 return UNIT_VTABLE(u)->following(u);
3084
3085 return NULL;
3086 }
3087
3088 bool unit_stop_pending(Unit *u) {
3089 assert(u);
3090
3091 /* This call does check the current state of the unit. It's
3092 * hence useful to be called from state change calls of the
3093 * unit itself, where the state isn't updated yet. This is
3094 * different from unit_inactive_or_pending() which checks both
3095 * the current state and for a queued job. */
3096
3097 return u->job && u->job->type == JOB_STOP;
3098 }
3099
3100 bool unit_inactive_or_pending(Unit *u) {
3101 assert(u);
3102
3103 /* Returns true if the unit is inactive or going down */
3104
3105 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3106 return true;
3107
3108 if (unit_stop_pending(u))
3109 return true;
3110
3111 return false;
3112 }
3113
3114 bool unit_active_or_pending(Unit *u) {
3115 assert(u);
3116
3117 /* Returns true if the unit is active or going up */
3118
3119 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3120 return true;
3121
3122 if (u->job &&
3123 (u->job->type == JOB_START ||
3124 u->job->type == JOB_RELOAD_OR_START ||
3125 u->job->type == JOB_RESTART))
3126 return true;
3127
3128 return false;
3129 }
3130
3131 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3132 assert(u);
3133 assert(w >= 0 && w < _KILL_WHO_MAX);
3134 assert(SIGNAL_VALID(signo));
3135
3136 if (!UNIT_VTABLE(u)->kill)
3137 return -EOPNOTSUPP;
3138
3139 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3140 }
3141
3142 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3143 Set *pid_set;
3144 int r;
3145
3146 pid_set = set_new(NULL);
3147 if (!pid_set)
3148 return NULL;
3149
3150 /* Exclude the main/control pids from being killed via the cgroup */
3151 if (main_pid > 0) {
3152 r = set_put(pid_set, PID_TO_PTR(main_pid));
3153 if (r < 0)
3154 goto fail;
3155 }
3156
3157 if (control_pid > 0) {
3158 r = set_put(pid_set, PID_TO_PTR(control_pid));
3159 if (r < 0)
3160 goto fail;
3161 }
3162
3163 return pid_set;
3164
3165 fail:
3166 set_free(pid_set);
3167 return NULL;
3168 }
3169
3170 int unit_kill_common(
3171 Unit *u,
3172 KillWho who,
3173 int signo,
3174 pid_t main_pid,
3175 pid_t control_pid,
3176 sd_bus_error *error) {
3177
3178 int r = 0;
3179 bool killed = false;
3180
3181 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3182 if (main_pid < 0)
3183 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3184 else if (main_pid == 0)
3185 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3186 }
3187
3188 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3189 if (control_pid < 0)
3190 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3191 else if (control_pid == 0)
3192 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3193 }
3194
3195 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3196 if (control_pid > 0) {
3197 if (kill(control_pid, signo) < 0)
3198 r = -errno;
3199 else
3200 killed = true;
3201 }
3202
3203 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3204 if (main_pid > 0) {
3205 if (kill(main_pid, signo) < 0)
3206 r = -errno;
3207 else
3208 killed = true;
3209 }
3210
3211 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3212 _cleanup_set_free_ Set *pid_set = NULL;
3213 int q;
3214
3215 /* Exclude the main/control pids from being killed via the cgroup */
3216 pid_set = unit_pid_set(main_pid, control_pid);
3217 if (!pid_set)
3218 return -ENOMEM;
3219
3220 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3221 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3222 r = q;
3223 else
3224 killed = true;
3225 }
3226
3227 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3228 return -ESRCH;
3229
3230 return r;
3231 }
3232
3233 int unit_following_set(Unit *u, Set **s) {
3234 assert(u);
3235 assert(s);
3236
3237 if (UNIT_VTABLE(u)->following_set)
3238 return UNIT_VTABLE(u)->following_set(u, s);
3239
3240 *s = NULL;
3241 return 0;
3242 }
3243
3244 UnitFileState unit_get_unit_file_state(Unit *u) {
3245 int r;
3246
3247 assert(u);
3248
3249 if (u->unit_file_state < 0 && u->fragment_path) {
3250 r = unit_file_get_state(
3251 u->manager->unit_file_scope,
3252 NULL,
3253 basename(u->fragment_path),
3254 &u->unit_file_state);
3255 if (r < 0)
3256 u->unit_file_state = UNIT_FILE_BAD;
3257 }
3258
3259 return u->unit_file_state;
3260 }
3261
3262 int unit_get_unit_file_preset(Unit *u) {
3263 assert(u);
3264
3265 if (u->unit_file_preset < 0 && u->fragment_path)
3266 u->unit_file_preset = unit_file_query_preset(
3267 u->manager->unit_file_scope,
3268 NULL,
3269 basename(u->fragment_path));
3270
3271 return u->unit_file_preset;
3272 }
3273
3274 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3275 assert(ref);
3276 assert(u);
3277
3278 if (ref->unit)
3279 unit_ref_unset(ref);
3280
3281 ref->unit = u;
3282 LIST_PREPEND(refs, u->refs, ref);
3283 return u;
3284 }
3285
3286 void unit_ref_unset(UnitRef *ref) {
3287 assert(ref);
3288
3289 if (!ref->unit)
3290 return;
3291
3292         /* We are about to drop a reference to the unit, so make sure the garbage collection has a look at it as it might
3293 * be unreferenced now. */
3294 unit_add_to_gc_queue(ref->unit);
3295
3296 LIST_REMOVE(refs, ref->unit->refs, ref);
3297 ref->unit = NULL;
3298 }
3299
3300 static int user_from_unit_name(Unit *u, char **ret) {
3301
3302 static const uint8_t hash_key[] = {
3303 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3304 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3305 };
3306
3307 _cleanup_free_ char *n = NULL;
3308 int r;
3309
3310 r = unit_name_to_prefix(u->id, &n);
3311 if (r < 0)
3312 return r;
3313
3314 if (valid_user_group_name(n)) {
3315 *ret = n;
3316 n = NULL;
3317 return 0;
3318 }
3319
3320 /* If we can't use the unit name as a user name, then let's hash it and use that */
3321 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3322 return -ENOMEM;
3323
3324 return 0;
3325 }
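
/* Illustrative sketch (names are made up): for a unit like "foo.service"
 * the prefix "foo" is a valid user name and is used as-is; a prefix that
 * is not usable as a user name falls back to a hashed name of the form
 * "_du0123456789abcdef" (16 hex digits derived via siphash24()). */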
3326
3327 int unit_patch_contexts(Unit *u) {
3328 CGroupContext *cc;
3329 ExecContext *ec;
3330 unsigned i;
3331 int r;
3332
3333 assert(u);
3334
3335         /* Patch the manager defaults into the exec and cgroup
3336 * contexts, _after_ the rest of the settings have been
3337 * initialized */
3338
3339 ec = unit_get_exec_context(u);
3340 if (ec) {
3341 /* This only copies in the ones that need memory */
3342 for (i = 0; i < _RLIMIT_MAX; i++)
3343 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3344 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3345 if (!ec->rlimit[i])
3346 return -ENOMEM;
3347 }
3348
3349 if (MANAGER_IS_USER(u->manager) &&
3350 !ec->working_directory) {
3351
3352 r = get_home_dir(&ec->working_directory);
3353 if (r < 0)
3354 return r;
3355
3356 /* Allow user services to run, even if the
3357 * home directory is missing */
3358 ec->working_directory_missing_ok = true;
3359 }
3360
3361 if (MANAGER_IS_USER(u->manager) &&
3362 (ec->syscall_whitelist ||
3363 !set_isempty(ec->syscall_filter) ||
3364 !set_isempty(ec->syscall_archs) ||
3365 ec->address_families_whitelist ||
3366 !set_isempty(ec->address_families)))
3367 ec->no_new_privileges = true;
3368
3369 if (ec->private_devices)
3370 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_MKNOD);
3371
3372 if (ec->dynamic_user) {
3373 if (!ec->user) {
3374 r = user_from_unit_name(u, &ec->user);
3375 if (r < 0)
3376 return r;
3377 }
3378
3379 if (!ec->group) {
3380 ec->group = strdup(ec->user);
3381 if (!ec->group)
3382 return -ENOMEM;
3383 }
3384
3385 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3386 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3387
3388 ec->private_tmp = true;
3389 ec->remove_ipc = true;
3390 ec->protect_system = PROTECT_SYSTEM_STRICT;
3391 if (ec->protect_home == PROTECT_HOME_NO)
3392 ec->protect_home = PROTECT_HOME_READ_ONLY;
3393 }
3394 }
3395
3396 cc = unit_get_cgroup_context(u);
3397 if (cc) {
3398
3399 if (ec &&
3400 ec->private_devices &&
3401 cc->device_policy == CGROUP_AUTO)
3402 cc->device_policy = CGROUP_CLOSED;
3403 }
3404
3405 return 0;
3406 }
3407
3408 ExecContext *unit_get_exec_context(Unit *u) {
3409 size_t offset;
3410 assert(u);
3411
3412 if (u->type < 0)
3413 return NULL;
3414
3415 offset = UNIT_VTABLE(u)->exec_context_offset;
3416 if (offset <= 0)
3417 return NULL;
3418
3419 return (ExecContext*) ((uint8_t*) u + offset);
3420 }
3421
3422 KillContext *unit_get_kill_context(Unit *u) {
3423 size_t offset;
3424 assert(u);
3425
3426 if (u->type < 0)
3427 return NULL;
3428
3429 offset = UNIT_VTABLE(u)->kill_context_offset;
3430 if (offset <= 0)
3431 return NULL;
3432
3433 return (KillContext*) ((uint8_t*) u + offset);
3434 }
3435
3436 CGroupContext *unit_get_cgroup_context(Unit *u) {
3437 size_t offset;
3438
3439 if (u->type < 0)
3440 return NULL;
3441
3442 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3443 if (offset <= 0)
3444 return NULL;
3445
3446 return (CGroupContext*) ((uint8_t*) u + offset);
3447 }
3448
3449 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3450 size_t offset;
3451
3452 if (u->type < 0)
3453 return NULL;
3454
3455 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3456 if (offset <= 0)
3457 return NULL;
3458
3459 return *(ExecRuntime**) ((uint8_t*) u + offset);
3460 }
3461
3462 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3463 assert(u);
3464
3465 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3466 return NULL;
3467
3468 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3469 return u->manager->lookup_paths.transient;
3470
3471 if (mode == UNIT_RUNTIME)
3472 return u->manager->lookup_paths.runtime_control;
3473
3474 if (mode == UNIT_PERSISTENT)
3475 return u->manager->lookup_paths.persistent_control;
3476
3477 return NULL;
3478 }
3479
3480 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3481 _cleanup_free_ char *p = NULL, *q = NULL;
3482 const char *dir, *wrapped;
3483 int r;
3484
3485 assert(u);
3486
3487 if (u->transient_file) {
3488                 /* When this transient unit file is still being created, let's not create a new drop-in but instead
3489 * write to the transient unit file. */
3490 fputs(data, u->transient_file);
3491 fputc('\n', u->transient_file);
3492 return 0;
3493 }
3494
3495 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3496 return 0;
3497
3498 dir = unit_drop_in_dir(u, mode);
3499 if (!dir)
3500 return -EINVAL;
3501
3502 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3503 "# or an equivalent operation. Do not edit.\n",
3504 data,
3505 "\n");
3506
3507 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3508 if (r < 0)
3509 return r;
3510
3511 (void) mkdir_p(p, 0755);
3512 r = write_string_file_atomic_label(q, wrapped);
3513 if (r < 0)
3514 return r;
3515
3516 r = strv_push(&u->dropin_paths, q);
3517 if (r < 0)
3518 return r;
3519 q = NULL;
3520
3521 strv_uniq(u->dropin_paths);
3522
3523 u->dropin_mtime = now(CLOCK_REALTIME);
3524
3525 return 0;
3526 }
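
/* Illustrative sketch (path is hypothetical and assumes drop_in_file()
 * keeps its usual "<dir>/<unit>.d/50-<name>.conf" layout): a persistent
 * property write for "foo.service" with name "CPUQuota" would land in
 * something like
 *
 *     <persistent_control>/foo.service.d/50-CPUQuota.conf
 *
 * with the "Do not edit" header wrapped in above. */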
3527
3528 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3529 _cleanup_free_ char *p = NULL;
3530 va_list ap;
3531 int r;
3532
3533 assert(u);
3534 assert(name);
3535 assert(format);
3536
3537 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3538 return 0;
3539
3540 va_start(ap, format);
3541 r = vasprintf(&p, format, ap);
3542 va_end(ap);
3543
3544 if (r < 0)
3545 return -ENOMEM;
3546
3547 return unit_write_drop_in(u, mode, name, p);
3548 }
3549
3550 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3551 const char *ndata;
3552
3553 assert(u);
3554 assert(name);
3555 assert(data);
3556
3557 if (!UNIT_VTABLE(u)->private_section)
3558 return -EINVAL;
3559
3560 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3561 return 0;
3562
3563 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3564
3565 return unit_write_drop_in(u, mode, name, ndata);
3566 }
3567
3568 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3569 _cleanup_free_ char *p = NULL;
3570 va_list ap;
3571 int r;
3572
3573 assert(u);
3574 assert(name);
3575 assert(format);
3576
3577 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3578 return 0;
3579
3580 va_start(ap, format);
3581 r = vasprintf(&p, format, ap);
3582 va_end(ap);
3583
3584 if (r < 0)
3585 return -ENOMEM;
3586
3587 return unit_write_drop_in_private(u, mode, name, p);
3588 }
3589
3590 int unit_make_transient(Unit *u) {
3591 FILE *f;
3592 char *path;
3593
3594 assert(u);
3595
3596 if (!UNIT_VTABLE(u)->can_transient)
3597 return -EOPNOTSUPP;
3598
3599 path = strjoin(u->manager->lookup_paths.transient, "/", u->id, NULL);
3600 if (!path)
3601 return -ENOMEM;
3602
3603 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3604 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3605
3606 RUN_WITH_UMASK(0022) {
3607 f = fopen(path, "we");
3608 if (!f) {
3609 free(path);
3610 return -errno;
3611 }
3612 }
3613
3614 if (u->transient_file)
3615 fclose(u->transient_file);
3616 u->transient_file = f;
3617
3618 free(u->fragment_path);
3619 u->fragment_path = path;
3620
3621 u->source_path = mfree(u->source_path);
3622 u->dropin_paths = strv_free(u->dropin_paths);
3623 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3624
3625 u->load_state = UNIT_STUB;
3626 u->load_error = 0;
3627 u->transient = true;
3628
3629 unit_add_to_dbus_queue(u);
3630 unit_add_to_gc_queue(u);
3631
3632 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3633 u->transient_file);
3634
3635 return 0;
3636 }
3637
3638 static void log_kill(pid_t pid, int sig, void *userdata) {
3639 _cleanup_free_ char *comm = NULL;
3640
3641 (void) get_process_comm(pid, &comm);
3642
3643 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3644            only, such as systemd's own PAM stub process. */
3645 if (comm && comm[0] == '(')
3646 return;
3647
3648 log_unit_notice(userdata,
3649 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3650 pid,
3651 strna(comm),
3652 signal_to_string(sig));
3653 }
3654
3655 static int operation_to_signal(KillContext *c, KillOperation k) {
3656 assert(c);
3657
3658 switch (k) {
3659
3660 case KILL_TERMINATE:
3661 case KILL_TERMINATE_AND_LOG:
3662 return c->kill_signal;
3663
3664 case KILL_KILL:
3665 return SIGKILL;
3666
3667 case KILL_ABORT:
3668 return SIGABRT;
3669
3670 default:
3671 assert_not_reached("KillOperation unknown");
3672 }
3673 }
3674
3675 int unit_kill_context(
3676 Unit *u,
3677 KillContext *c,
3678 KillOperation k,
3679 pid_t main_pid,
3680 pid_t control_pid,
3681 bool main_pid_alien) {
3682
3683 bool wait_for_exit = false, send_sighup;
3684 cg_kill_log_func_t log_func;
3685 int sig, r;
3686
3687 assert(u);
3688 assert(c);
3689
3690 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0 if we
3691 * killed something worth waiting for, 0 otherwise. */
3692
3693 if (c->kill_mode == KILL_NONE)
3694 return 0;
3695
3696 sig = operation_to_signal(c, k);
3697
3698 send_sighup =
3699 c->send_sighup &&
3700 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3701 sig != SIGHUP;
3702
3703 log_func =
3704 k != KILL_TERMINATE ||
3705 IN_SET(sig, SIGKILL, SIGABRT) ? log_kill : NULL;
3706
3707 if (main_pid > 0) {
3708 if (log_func)
3709 log_func(main_pid, sig, u);
3710
3711 r = kill_and_sigcont(main_pid, sig);
3712 if (r < 0 && r != -ESRCH) {
3713 _cleanup_free_ char *comm = NULL;
3714 (void) get_process_comm(main_pid, &comm);
3715
3716 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3717 } else {
3718 if (!main_pid_alien)
3719 wait_for_exit = true;
3720
3721 if (r != -ESRCH && send_sighup)
3722 (void) kill(main_pid, SIGHUP);
3723 }
3724 }
3725
3726 if (control_pid > 0) {
3727 if (log_func)
3728 log_func(control_pid, sig, u);
3729
3730 r = kill_and_sigcont(control_pid, sig);
3731 if (r < 0 && r != -ESRCH) {
3732 _cleanup_free_ char *comm = NULL;
3733 (void) get_process_comm(control_pid, &comm);
3734
3735 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3736 } else {
3737 wait_for_exit = true;
3738
3739 if (r != -ESRCH && send_sighup)
3740 (void) kill(control_pid, SIGHUP);
3741 }
3742 }
3743
3744 if (u->cgroup_path &&
3745 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3746 _cleanup_set_free_ Set *pid_set = NULL;
3747
3748 /* Exclude the main/control pids from being killed via the cgroup */
3749 pid_set = unit_pid_set(main_pid, control_pid);
3750 if (!pid_set)
3751 return -ENOMEM;
3752
3753 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3754 sig,
3755 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3756 pid_set,
3757 log_func, u);
3758 if (r < 0) {
3759 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3760 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3761
3762 } else if (r > 0) {
3763
3764 /* FIXME: For now, on the legacy hierarchy, we
3765 * will not wait for the cgroup members to die
3766 * if we are running in a container or if this
3767 * is a delegation unit, simply because cgroup
3768 * notification is unreliable in these
3769 * cases. It doesn't work at all in
3770 * containers, and outside of containers it
3771 * can be confused easily by left-over
3772 * directories in the cgroup — which however
3773 * should not exist in non-delegated units. On
3774 * the unified hierarchy that's different,
3775 * there we get proper events. Hence rely on
3776          * them. */
3777
3778 if (cg_unified(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3779 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3780 wait_for_exit = true;
3781
3782 if (send_sighup) {
3783 set_free(pid_set);
3784
3785 pid_set = unit_pid_set(main_pid, control_pid);
3786 if (!pid_set)
3787 return -ENOMEM;
3788
3789 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3790 SIGHUP,
3791 CGROUP_IGNORE_SELF,
3792 pid_set,
3793 NULL, NULL);
3794 }
3795 }
3796 }
3797
3798 return wait_for_exit;
3799 }
3800
3801 int unit_require_mounts_for(Unit *u, const char *path) {
3802 char prefix[strlen(path) + 1], *p;
3803 int r;
3804
3805 assert(u);
3806 assert(path);
3807
3808 /* Registers a unit for requiring a certain path and all its
3809 * prefixes. We keep a simple array of these paths in the
3810          * unit, since it's usually short. However, we build a prefix
3811 * table for all possible prefixes so that new appearing mount
3812 * units can easily determine which units to make themselves a
3813 * dependency of. */
3814
3815 if (!path_is_absolute(path))
3816 return -EINVAL;
3817
3818 p = strdup(path);
3819 if (!p)
3820 return -ENOMEM;
3821
3822 path_kill_slashes(p);
3823
3824 if (!path_is_safe(p)) {
3825 free(p);
3826 return -EPERM;
3827 }
3828
3829 if (strv_contains(u->requires_mounts_for, p)) {
3830 free(p);
3831 return 0;
3832 }
3833
3834 r = strv_consume(&u->requires_mounts_for, p);
3835 if (r < 0)
3836 return r;
3837
3838 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3839 Set *x;
3840
3841 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3842 if (!x) {
3843 char *q;
3844
3845 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3846 if (r < 0)
3847 return r;
3848
3849 q = strdup(prefix);
3850 if (!q)
3851 return -ENOMEM;
3852
3853 x = set_new(NULL);
3854 if (!x) {
3855 free(q);
3856 return -ENOMEM;
3857 }
3858
3859 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3860 if (r < 0) {
3861 free(q);
3862 set_free(x);
3863 return r;
3864 }
3865 }
3866
3867 r = set_put(x, u);
3868 if (r < 0)
3869 return r;
3870 }
3871
3872 return 0;
3873 }
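
/* Illustrative sketch (path is made up): a call like
 *
 *     unit_require_mounts_for(u, "/var/lib/foo");
 *
 * appends the path to u->requires_mounts_for and registers u in the
 * manager's units_requiring_mounts_for table under the path itself and its
 * parent prefixes, so that a newly appearing mount unit for one of those
 * paths can look up which units to make themselves a dependency of. */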
3874
3875 int unit_setup_exec_runtime(Unit *u) {
3876 ExecRuntime **rt;
3877 size_t offset;
3878 Iterator i;
3879 Unit *other;
3880
3881 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3882 assert(offset > 0);
3883
3884         /* Check if there already is an ExecRuntime for this unit */
3885 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3886 if (*rt)
3887 return 0;
3888
3889 /* Try to get it from somebody else */
3890 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3891
3892 *rt = unit_get_exec_runtime(other);
3893 if (*rt) {
3894 exec_runtime_ref(*rt);
3895 return 0;
3896 }
3897 }
3898
3899 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
3900 }
3901
3902 int unit_setup_dynamic_creds(Unit *u) {
3903 ExecContext *ec;
3904 DynamicCreds *dcreds;
3905 size_t offset;
3906
3907 assert(u);
3908
3909 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
3910 assert(offset > 0);
3911 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
3912
3913 ec = unit_get_exec_context(u);
3914 assert(ec);
3915
3916 if (!ec->dynamic_user)
3917 return 0;
3918
3919 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
3920 }
3921
3922 bool unit_type_supported(UnitType t) {
3923 if (_unlikely_(t < 0))
3924 return false;
3925 if (_unlikely_(t >= _UNIT_TYPE_MAX))
3926 return false;
3927
3928 if (!unit_vtable[t]->supported)
3929 return true;
3930
3931 return unit_vtable[t]->supported();
3932 }
3933
3934 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
3935 int r;
3936
3937 assert(u);
3938 assert(where);
3939
3940 r = dir_is_empty(where);
3941 if (r > 0)
3942 return;
3943 if (r < 0) {
3944 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
3945 return;
3946 }
3947
3948 log_struct(LOG_NOTICE,
3949 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3950 LOG_UNIT_ID(u),
3951 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
3952 "WHERE=%s", where,
3953 NULL);
3954 }
3955
3956 int unit_fail_if_symlink(Unit *u, const char* where) {
3957 int r;
3958
3959 assert(u);
3960 assert(where);
3961
3962 r = is_symlink(where);
3963 if (r < 0) {
3964 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
3965 return 0;
3966 }
3967 if (r == 0)
3968 return 0;
3969
3970 log_struct(LOG_ERR,
3971 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3972 LOG_UNIT_ID(u),
3973 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
3974 "WHERE=%s", where,
3975 NULL);
3976
3977 return -ELOOP;
3978 }
3979
3980 bool unit_is_pristine(Unit *u) {
3981 assert(u);
3982
3983         /* Check if the unit already exists or is already in use,
3984          * in any of a number of different ways. Note that to cater for unit
3985 * types such as slice, we are generally fine with units that
3986 * are marked UNIT_LOADED even though nothing was
3987 * actually loaded, as those unit types don't require a file
3988 * on disk to validly load. */
3989
3990 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
3991 u->fragment_path ||
3992 u->source_path ||
3993 !strv_isempty(u->dropin_paths) ||
3994 u->job ||
3995 u->merged_into);
3996 }
3997
3998 pid_t unit_control_pid(Unit *u) {
3999 assert(u);
4000
4001 if (UNIT_VTABLE(u)->control_pid)
4002 return UNIT_VTABLE(u)->control_pid(u);
4003
4004 return 0;
4005 }
4006
4007 pid_t unit_main_pid(Unit *u) {
4008 assert(u);
4009
4010 if (UNIT_VTABLE(u)->main_pid)
4011 return UNIT_VTABLE(u)->main_pid(u);
4012
4013 return 0;
4014 }
4015
4016 static void unit_unref_uid_internal(
4017 Unit *u,
4018 uid_t *ref_uid,
4019 bool destroy_now,
4020 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4021
4022 assert(u);
4023 assert(ref_uid);
4024 assert(_manager_unref_uid);
4025
4026 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4027 * gid_t are actually the same time, with the same validity rules.
4028          * gid_t are actually the same type, with the same validity rules.
4029 * Drops a reference to UID/GID from a unit. */
4030
4031 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4032 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4033
4034 if (!uid_is_valid(*ref_uid))
4035 return;
4036
4037 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4038 *ref_uid = UID_INVALID;
4039 }
4040
4041 void unit_unref_uid(Unit *u, bool destroy_now) {
4042 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4043 }
4044
4045 void unit_unref_gid(Unit *u, bool destroy_now) {
4046 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4047 }
4048
4049 static int unit_ref_uid_internal(
4050 Unit *u,
4051 uid_t *ref_uid,
4052 uid_t uid,
4053 bool clean_ipc,
4054 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4055
4056 int r;
4057
4058 assert(u);
4059 assert(ref_uid);
4060 assert(uid_is_valid(uid));
4061 assert(_manager_ref_uid);
4062
4063         /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4064 * are actually the same type, and have the same validity rules.
4065 *
4066 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4067 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4068 * drops to zero. */
4069
4070 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4071 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4072
4073 if (*ref_uid == uid)
4074 return 0;
4075
4076 if (uid_is_valid(*ref_uid)) /* Already set? */
4077 return -EBUSY;
4078
4079 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4080 if (r < 0)
4081 return r;
4082
4083 *ref_uid = uid;
4084 return 1;
4085 }
4086
4087 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4088 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4089 }
4090
4091 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4092 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4093 }
4094
4095 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4096 int r = 0, q = 0;
4097
4098 assert(u);
4099
4100 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4101
4102 if (uid_is_valid(uid)) {
4103 r = unit_ref_uid(u, uid, clean_ipc);
4104 if (r < 0)
4105 return r;
4106 }
4107
4108 if (gid_is_valid(gid)) {
4109 q = unit_ref_gid(u, gid, clean_ipc);
4110 if (q < 0) {
4111 if (r > 0)
4112 unit_unref_uid(u, false);
4113
4114 return q;
4115 }
4116 }
4117
4118 return r > 0 || q > 0;
4119 }
4120
4121 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4122 ExecContext *c;
4123 int r;
4124
4125 assert(u);
4126
4127 c = unit_get_exec_context(u);
4128
4129 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4130 if (r < 0)
4131 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4132
4133 return r;
4134 }
4135
4136 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4137 assert(u);
4138
4139 unit_unref_uid(u, destroy_now);
4140 unit_unref_gid(u, destroy_now);
4141 }
4142
4143 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4144 int r;
4145
4146 assert(u);
4147
4148         /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group name
4149 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4150 * objects when no service references the UID/GID anymore. */
4151
4152 r = unit_ref_uid_gid(u, uid, gid);
4153 if (r > 0)
4154 bus_unit_send_change_signal(u);
4155 }