1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fileio-label.h"
39 #include "formats-util.h"
40 #include "load-dropin.h"
41 #include "load-fragment.h"
42 #include "log.h"
43 #include "macro.h"
44 #include "missing.h"
45 #include "mkdir.h"
46 #include "parse-util.h"
47 #include "path-util.h"
48 #include "process-util.h"
49 #include "set.h"
50 #include "signal-util.h"
51 #include "special.h"
52 #include "stat-util.h"
53 #include "stdio-util.h"
54 #include "string-util.h"
55 #include "strv.h"
56 #include "umask-util.h"
57 #include "unit-name.h"
58 #include "unit.h"
59 #include "user-util.h"
60 #include "virt.h"
61
62 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
63 [UNIT_SERVICE] = &service_vtable,
64 [UNIT_SOCKET] = &socket_vtable,
65 [UNIT_BUSNAME] = &busname_vtable,
66 [UNIT_TARGET] = &target_vtable,
67 [UNIT_DEVICE] = &device_vtable,
68 [UNIT_MOUNT] = &mount_vtable,
69 [UNIT_AUTOMOUNT] = &automount_vtable,
70 [UNIT_SWAP] = &swap_vtable,
71 [UNIT_TIMER] = &timer_vtable,
72 [UNIT_PATH] = &path_vtable,
73 [UNIT_SLICE] = &slice_vtable,
74 [UNIT_SCOPE] = &scope_vtable
75 };
76
77 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
78
79 Unit *unit_new(Manager *m, size_t size) {
80 Unit *u;
81
82 assert(m);
83 assert(size >= sizeof(Unit));
84
85 u = malloc0(size);
86 if (!u)
87 return NULL;
88
89 u->names = set_new(&string_hash_ops);
90 if (!u->names) {
91 free(u);
92 return NULL;
93 }
94
95 u->manager = m;
96 u->type = _UNIT_TYPE_INVALID;
97 u->default_dependencies = true;
98 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
99 u->unit_file_preset = -1;
100 u->on_failure_job_mode = JOB_REPLACE;
101 u->cgroup_inotify_wd = -1;
102 u->job_timeout = USEC_INFINITY;
103 u->ref_uid = UID_INVALID;
104 u->ref_gid = GID_INVALID;
105 u->cpu_usage_last = NSEC_INFINITY;
106
107 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
108 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
109
110 return u;
111 }
112
113 bool unit_has_name(Unit *u, const char *name) {
114 assert(u);
115 assert(name);
116
117 return set_contains(u->names, (char*) name);
118 }
119
120 static void unit_init(Unit *u) {
121 CGroupContext *cc;
122 ExecContext *ec;
123 KillContext *kc;
124
125 assert(u);
126 assert(u->manager);
127 assert(u->type >= 0);
128
129 cc = unit_get_cgroup_context(u);
130 if (cc) {
131 cgroup_context_init(cc);
132
133                 /* Copy the manager defaults into the cgroup
134 * context, _before_ the rest of the settings have
135 * been initialized */
136
137 cc->cpu_accounting = u->manager->default_cpu_accounting;
138 cc->io_accounting = u->manager->default_io_accounting;
139 cc->blockio_accounting = u->manager->default_blockio_accounting;
140 cc->memory_accounting = u->manager->default_memory_accounting;
141 cc->tasks_accounting = u->manager->default_tasks_accounting;
142
143 if (u->type != UNIT_SLICE)
144 cc->tasks_max = u->manager->default_tasks_max;
145 }
146
147 ec = unit_get_exec_context(u);
148 if (ec)
149 exec_context_init(ec);
150
151 kc = unit_get_kill_context(u);
152 if (kc)
153 kill_context_init(kc);
154
155 if (UNIT_VTABLE(u)->init)
156 UNIT_VTABLE(u)->init(u);
157 }
158
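/* Registers an additional name for the unit: template names are resolved
 * against the unit's instance, the result is validated and added both to
 * u->names and to the manager's unit hashmap, and the first name added also
 * determines the unit's type, id and instance. */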
159 int unit_add_name(Unit *u, const char *text) {
160 _cleanup_free_ char *s = NULL, *i = NULL;
161 UnitType t;
162 int r;
163
164 assert(u);
165 assert(text);
166
167 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
168
169 if (!u->instance)
170 return -EINVAL;
171
172 r = unit_name_replace_instance(text, u->instance, &s);
173 if (r < 0)
174 return r;
175 } else {
176 s = strdup(text);
177 if (!s)
178 return -ENOMEM;
179 }
180
181 if (set_contains(u->names, s))
182 return 0;
183 if (hashmap_contains(u->manager->units, s))
184 return -EEXIST;
185
186 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
187 return -EINVAL;
188
189 t = unit_name_to_type(s);
190 if (t < 0)
191 return -EINVAL;
192
193 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
194 return -EINVAL;
195
196 r = unit_name_to_instance(s, &i);
197 if (r < 0)
198 return r;
199
200 if (i && !unit_type_may_template(t))
201 return -EINVAL;
202
203 /* Ensure that this unit is either instanced or not instanced,
204 * but not both. Note that we do allow names with different
205 * instance names however! */
206 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
207 return -EINVAL;
208
209 if (!unit_type_may_alias(t) && !set_isempty(u->names))
210 return -EEXIST;
211
212 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
213 return -E2BIG;
214
215 r = set_put(u->names, s);
216 if (r < 0)
217 return r;
218 assert(r > 0);
219
220 r = hashmap_put(u->manager->units, s, u);
221 if (r < 0) {
222 (void) set_remove(u->names, s);
223 return r;
224 }
225
226 if (u->type == _UNIT_TYPE_INVALID) {
227 u->type = t;
228 u->id = s;
229 u->instance = i;
230
231 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
232
233 unit_init(u);
234
235 i = NULL;
236 }
237
238 s = NULL;
239
240 unit_add_to_dbus_queue(u);
241 return 0;
242 }
243
244 int unit_choose_id(Unit *u, const char *name) {
245 _cleanup_free_ char *t = NULL;
246 char *s, *i;
247 int r;
248
249 assert(u);
250 assert(name);
251
252 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
253
254 if (!u->instance)
255 return -EINVAL;
256
257 r = unit_name_replace_instance(name, u->instance, &t);
258 if (r < 0)
259 return r;
260
261 name = t;
262 }
263
264 /* Selects one of the names of this unit as the id */
265 s = set_get(u->names, (char*) name);
266 if (!s)
267 return -ENOENT;
268
269 /* Determine the new instance from the new id */
270 r = unit_name_to_instance(s, &i);
271 if (r < 0)
272 return r;
273
274 u->id = s;
275
276 free(u->instance);
277 u->instance = i;
278
279 unit_add_to_dbus_queue(u);
280
281 return 0;
282 }
283
284 int unit_set_description(Unit *u, const char *description) {
285 char *s;
286
287 assert(u);
288
289 if (isempty(description))
290 s = NULL;
291 else {
292 s = strdup(description);
293 if (!s)
294 return -ENOMEM;
295 }
296
297 free(u->description);
298 u->description = s;
299
300 unit_add_to_dbus_queue(u);
301 return 0;
302 }
303
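/* Returns true if the unit object must be kept around (it has a job queued,
 * is referenced, is tracked on the bus, is configured with no_gc, is not in
 * the plain inactive state, or its type-specific check_gc() says so) and
 * false if it may be garbage-collected. As a side effect, runtime resources
 * of inactive or failed units are released here. */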
304 bool unit_check_gc(Unit *u) {
305 UnitActiveState state;
306 assert(u);
307
308 if (u->job)
309 return true;
310
311 if (u->nop_job)
312 return true;
313
314 state = unit_active_state(u);
315
316         /* If the unit is inactive or failed and no job is queued for
317 * it, then release its runtime resources */
318 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
319 UNIT_VTABLE(u)->release_resources)
320 UNIT_VTABLE(u)->release_resources(u);
321
322 /* But we keep the unit object around for longer when it is
323 * referenced or configured to not be gc'ed */
324 if (state != UNIT_INACTIVE)
325 return true;
326
327 if (u->no_gc)
328 return true;
329
330 if (u->refs)
331 return true;
332
333 if (sd_bus_track_count(u->bus_track) > 0)
334 return true;
335
336 if (UNIT_VTABLE(u)->check_gc)
337 if (UNIT_VTABLE(u)->check_gc(u))
338 return true;
339
340 return false;
341 }
342
343 void unit_add_to_load_queue(Unit *u) {
344 assert(u);
345 assert(u->type != _UNIT_TYPE_INVALID);
346
347 if (u->load_state != UNIT_STUB || u->in_load_queue)
348 return;
349
350 LIST_PREPEND(load_queue, u->manager->load_queue, u);
351 u->in_load_queue = true;
352 }
353
354 void unit_add_to_cleanup_queue(Unit *u) {
355 assert(u);
356
357 if (u->in_cleanup_queue)
358 return;
359
360 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
361 u->in_cleanup_queue = true;
362 }
363
364 void unit_add_to_gc_queue(Unit *u) {
365 assert(u);
366
367 if (u->in_gc_queue || u->in_cleanup_queue)
368 return;
369
370 if (unit_check_gc(u))
371 return;
372
373 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
374 u->in_gc_queue = true;
375
376 u->manager->n_in_gc_queue++;
377 }
378
379 void unit_add_to_dbus_queue(Unit *u) {
380 assert(u);
381 assert(u->type != _UNIT_TYPE_INVALID);
382
383 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
384 return;
385
386 /* Shortcut things if nobody cares */
387 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
388 set_isempty(u->manager->private_buses)) {
389 u->sent_dbus_new_signal = true;
390 return;
391 }
392
393 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
394 u->in_dbus_queue = true;
395 }
396
397 static void bidi_set_free(Unit *u, Set *s) {
398 Iterator i;
399 Unit *other;
400
401 assert(u);
402
403 /* Frees the set and makes sure we are dropped from the
404 * inverse pointers */
405
406 SET_FOREACH(other, s, i) {
407 UnitDependency d;
408
409 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
410 set_remove(other->dependencies[d], u);
411
412 unit_add_to_gc_queue(other);
413 }
414
415 set_free(s);
416 }
417
418 static void unit_remove_transient(Unit *u) {
419 char **i;
420
421 assert(u);
422
423 if (!u->transient)
424 return;
425
426 if (u->fragment_path)
427 (void) unlink(u->fragment_path);
428
429 STRV_FOREACH(i, u->dropin_paths) {
430 _cleanup_free_ char *p = NULL, *pp = NULL;
431
432 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
433 if (!p)
434 continue;
435
436 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
437 if (!pp)
438 continue;
439
440 /* Only drop transient drop-ins */
441 if (!path_equal(u->manager->lookup_paths.transient, pp))
442 continue;
443
444 (void) unlink(*i);
445 (void) rmdir(p);
446 }
447 }
448
449 static void unit_free_requires_mounts_for(Unit *u) {
450 char **j;
451
452 STRV_FOREACH(j, u->requires_mounts_for) {
453 char s[strlen(*j) + 1];
454
455 PATH_FOREACH_PREFIX_MORE(s, *j) {
456 char *y;
457 Set *x;
458
459 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
460 if (!x)
461 continue;
462
463 set_remove(x, u);
464
465 if (set_isempty(x)) {
466 hashmap_remove(u->manager->units_requiring_mounts_for, y);
467 free(y);
468 set_free(x);
469 }
470 }
471 }
472
473 u->requires_mounts_for = strv_free(u->requires_mounts_for);
474 }
475
476 static void unit_done(Unit *u) {
477 ExecContext *ec;
478 CGroupContext *cc;
479
480 assert(u);
481
482 if (u->type < 0)
483 return;
484
485 if (UNIT_VTABLE(u)->done)
486 UNIT_VTABLE(u)->done(u);
487
488 ec = unit_get_exec_context(u);
489 if (ec)
490 exec_context_done(ec);
491
492 cc = unit_get_cgroup_context(u);
493 if (cc)
494 cgroup_context_done(cc);
495 }
496
497 void unit_free(Unit *u) {
498 UnitDependency d;
499 Iterator i;
500 char *t;
501
502 assert(u);
503
504 if (u->transient_file)
505 fclose(u->transient_file);
506
507 if (!MANAGER_IS_RELOADING(u->manager))
508 unit_remove_transient(u);
509
510 bus_unit_send_removed_signal(u);
511
512 unit_done(u);
513
514 sd_bus_slot_unref(u->match_bus_slot);
515
516 sd_bus_track_unref(u->bus_track);
517 u->deserialized_refs = strv_free(u->deserialized_refs);
518
519 unit_free_requires_mounts_for(u);
520
521 SET_FOREACH(t, u->names, i)
522 hashmap_remove_value(u->manager->units, t, u);
523
524 if (u->job) {
525 Job *j = u->job;
526 job_uninstall(j);
527 job_free(j);
528 }
529
530 if (u->nop_job) {
531 Job *j = u->nop_job;
532 job_uninstall(j);
533 job_free(j);
534 }
535
536 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
537 bidi_set_free(u, u->dependencies[d]);
538
539 if (u->type != _UNIT_TYPE_INVALID)
540 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
541
542 if (u->in_load_queue)
543 LIST_REMOVE(load_queue, u->manager->load_queue, u);
544
545 if (u->in_dbus_queue)
546 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
547
548 if (u->in_cleanup_queue)
549 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
550
551 if (u->in_gc_queue) {
552 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
553 u->manager->n_in_gc_queue--;
554 }
555
556 if (u->in_cgroup_queue)
557 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
558
559 unit_release_cgroup(u);
560
561 unit_unref_uid_gid(u, false);
562
563 (void) manager_update_failed_units(u->manager, u, false);
564 set_remove(u->manager->startup_units, u);
565
566 free(u->description);
567 strv_free(u->documentation);
568 free(u->fragment_path);
569 free(u->source_path);
570 strv_free(u->dropin_paths);
571 free(u->instance);
572
573 free(u->job_timeout_reboot_arg);
574
575 set_free_free(u->names);
576
577 unit_unwatch_all_pids(u);
578
579 condition_free_list(u->conditions);
580 condition_free_list(u->asserts);
581
582 free(u->reboot_arg);
583
584 unit_ref_unset(&u->slice);
585
586 while (u->refs)
587 unit_ref_unset(u->refs);
588
589 free(u);
590 }
591
592 UnitActiveState unit_active_state(Unit *u) {
593 assert(u);
594
595 if (u->load_state == UNIT_MERGED)
596 return unit_active_state(unit_follow_merge(u));
597
598 /* After a reload it might happen that a unit is not correctly
599 * loaded but still has a process around. That's why we won't
600 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
601
602 return UNIT_VTABLE(u)->active_state(u);
603 }
604
605 const char* unit_sub_state_to_string(Unit *u) {
606 assert(u);
607
608 return UNIT_VTABLE(u)->sub_state_to_string(u);
609 }
610
611 static int complete_move(Set **s, Set **other) {
612 int r;
613
614 assert(s);
615 assert(other);
616
617 if (!*other)
618 return 0;
619
620 if (*s) {
621 r = set_move(*s, *other);
622 if (r < 0)
623 return r;
624 } else {
625 *s = *other;
626 *other = NULL;
627 }
628
629 return 0;
630 }
631
632 static int merge_names(Unit *u, Unit *other) {
633 char *t;
634 Iterator i;
635 int r;
636
637 assert(u);
638 assert(other);
639
640 r = complete_move(&u->names, &other->names);
641 if (r < 0)
642 return r;
643
644 set_free_free(other->names);
645 other->names = NULL;
646 other->id = NULL;
647
648 SET_FOREACH(t, u->names, i)
649 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
650
651 return 0;
652 }
653
654 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
655 unsigned n_reserve;
656
657 assert(u);
658 assert(other);
659 assert(d < _UNIT_DEPENDENCY_MAX);
660
661 /*
662 * If u does not have this dependency set allocated, there is no need
663 * to reserve anything. In that case other's set will be transferred
664 * as a whole to u by complete_move().
665 */
666 if (!u->dependencies[d])
667 return 0;
668
669 /* merge_dependencies() will skip a u-on-u dependency */
670 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
671
672 return set_reserve(u->dependencies[d], n_reserve);
673 }
674
675 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
676 Iterator i;
677 Unit *back;
678 int r;
679
680 assert(u);
681 assert(other);
682 assert(d < _UNIT_DEPENDENCY_MAX);
683
684 /* Fix backwards pointers */
685 SET_FOREACH(back, other->dependencies[d], i) {
686 UnitDependency k;
687
688 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
689 /* Do not add dependencies between u and itself */
690 if (back == u) {
691 if (set_remove(back->dependencies[k], other))
692 maybe_warn_about_dependency(u, other_id, k);
693 } else {
694 r = set_remove_and_put(back->dependencies[k], other, u);
695 if (r == -EEXIST)
696 set_remove(back->dependencies[k], other);
697 else
698 assert(r >= 0 || r == -ENOENT);
699 }
700 }
701 }
702
703 /* Also do not move dependencies on u to itself */
704 back = set_remove(other->dependencies[d], u);
705 if (back)
706 maybe_warn_about_dependency(u, other_id, d);
707
708 /* The move cannot fail. The caller must have performed a reservation. */
709 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
710
711 other->dependencies[d] = set_free(other->dependencies[d]);
712 }
713
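/* Merges 'other' into 'u': all names, references and dependencies of 'other'
 * are moved over, and 'other' is marked UNIT_MERGED and queued for cleanup.
 * Refused if 'other' is already loaded or active, has jobs pending, or if the
 * unit type does not support aliases. */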
714 int unit_merge(Unit *u, Unit *other) {
715 UnitDependency d;
716 const char *other_id = NULL;
717 int r;
718
719 assert(u);
720 assert(other);
721 assert(u->manager == other->manager);
722 assert(u->type != _UNIT_TYPE_INVALID);
723
724 other = unit_follow_merge(other);
725
726 if (other == u)
727 return 0;
728
729 if (u->type != other->type)
730 return -EINVAL;
731
732 if (!u->instance != !other->instance)
733 return -EINVAL;
734
735 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
736 return -EEXIST;
737
738 if (other->load_state != UNIT_STUB &&
739 other->load_state != UNIT_NOT_FOUND)
740 return -EEXIST;
741
742 if (other->job)
743 return -EEXIST;
744
745 if (other->nop_job)
746 return -EEXIST;
747
748 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
749 return -EEXIST;
750
751 if (other->id)
752 other_id = strdupa(other->id);
753
754 /* Make reservations to ensure merge_dependencies() won't fail */
755 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
756 r = reserve_dependencies(u, other, d);
757 /*
758                  * We don't roll back reservations if we fail. We don't have
759 * a way to undo reservations. A reservation is not a leak.
760 */
761 if (r < 0)
762 return r;
763 }
764
765 /* Merge names */
766 r = merge_names(u, other);
767 if (r < 0)
768 return r;
769
770 /* Redirect all references */
771 while (other->refs)
772 unit_ref_set(other->refs, u);
773
774 /* Merge dependencies */
775 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
776 merge_dependencies(u, other, other_id, d);
777
778 other->load_state = UNIT_MERGED;
779 other->merged_into = u;
780
781 /* If there is still some data attached to the other node, we
782 * don't need it anymore, and can free it. */
783 if (other->load_state != UNIT_STUB)
784 if (UNIT_VTABLE(other)->done)
785 UNIT_VTABLE(other)->done(other);
786
787 unit_add_to_dbus_queue(u);
788 unit_add_to_cleanup_queue(other);
789
790 return 0;
791 }
792
793 int unit_merge_by_name(Unit *u, const char *name) {
794 _cleanup_free_ char *s = NULL;
795 Unit *other;
796 int r;
797
798 assert(u);
799 assert(name);
800
801 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
802 if (!u->instance)
803 return -EINVAL;
804
805 r = unit_name_replace_instance(name, u->instance, &s);
806 if (r < 0)
807 return r;
808
809 name = s;
810 }
811
812 other = manager_get_unit(u->manager, name);
813 if (other)
814 return unit_merge(u, other);
815
816 return unit_add_name(u, name);
817 }
818
819 Unit* unit_follow_merge(Unit *u) {
820 assert(u);
821
822 while (u->load_state == UNIT_MERGED)
823 assert_se(u = u->merged_into);
824
825 return u;
826 }
827
828 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
829 int r;
830
831 assert(u);
832 assert(c);
833
834 if (c->working_directory) {
835 r = unit_require_mounts_for(u, c->working_directory);
836 if (r < 0)
837 return r;
838 }
839
840 if (c->root_directory) {
841 r = unit_require_mounts_for(u, c->root_directory);
842 if (r < 0)
843 return r;
844 }
845
846 if (!MANAGER_IS_SYSTEM(u->manager))
847 return 0;
848
849 if (c->private_tmp) {
850 r = unit_require_mounts_for(u, "/tmp");
851 if (r < 0)
852 return r;
853
854 r = unit_require_mounts_for(u, "/var/tmp");
855 if (r < 0)
856 return r;
857 }
858
859 if (c->std_output != EXEC_OUTPUT_KMSG &&
860 c->std_output != EXEC_OUTPUT_SYSLOG &&
861 c->std_output != EXEC_OUTPUT_JOURNAL &&
862 c->std_output != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
863 c->std_output != EXEC_OUTPUT_SYSLOG_AND_CONSOLE &&
864 c->std_output != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
865 c->std_error != EXEC_OUTPUT_KMSG &&
866 c->std_error != EXEC_OUTPUT_SYSLOG &&
867 c->std_error != EXEC_OUTPUT_JOURNAL &&
868 c->std_error != EXEC_OUTPUT_KMSG_AND_CONSOLE &&
869 c->std_error != EXEC_OUTPUT_JOURNAL_AND_CONSOLE &&
870 c->std_error != EXEC_OUTPUT_SYSLOG_AND_CONSOLE)
871 return 0;
872
873 /* If syslog or kernel logging is requested, make sure our own
874 * logging daemon is run first. */
875
876 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
877 if (r < 0)
878 return r;
879
880 return 0;
881 }
882
883 const char *unit_description(Unit *u) {
884 assert(u);
885
886 if (u->description)
887 return u->description;
888
889 return strna(u->id);
890 }
891
892 void unit_dump(Unit *u, FILE *f, const char *prefix) {
893 char *t, **j;
894 UnitDependency d;
895 Iterator i;
896 const char *prefix2;
897 char
898 timestamp0[FORMAT_TIMESTAMP_MAX],
899 timestamp1[FORMAT_TIMESTAMP_MAX],
900 timestamp2[FORMAT_TIMESTAMP_MAX],
901 timestamp3[FORMAT_TIMESTAMP_MAX],
902 timestamp4[FORMAT_TIMESTAMP_MAX],
903 timespan[FORMAT_TIMESPAN_MAX];
904 Unit *following;
905 _cleanup_set_free_ Set *following_set = NULL;
906 int r;
907 const char *n;
908
909 assert(u);
910 assert(u->type >= 0);
911
912 prefix = strempty(prefix);
913 prefix2 = strjoina(prefix, "\t");
914
915 fprintf(f,
916 "%s-> Unit %s:\n"
917 "%s\tDescription: %s\n"
918 "%s\tInstance: %s\n"
919 "%s\tUnit Load State: %s\n"
920 "%s\tUnit Active State: %s\n"
921 "%s\tState Change Timestamp: %s\n"
922 "%s\tInactive Exit Timestamp: %s\n"
923 "%s\tActive Enter Timestamp: %s\n"
924 "%s\tActive Exit Timestamp: %s\n"
925 "%s\tInactive Enter Timestamp: %s\n"
926 "%s\tGC Check Good: %s\n"
927 "%s\tNeed Daemon Reload: %s\n"
928 "%s\tTransient: %s\n"
929 "%s\tSlice: %s\n"
930 "%s\tCGroup: %s\n"
931 "%s\tCGroup realized: %s\n"
932 "%s\tCGroup mask: 0x%x\n"
933 "%s\tCGroup members mask: 0x%x\n",
934 prefix, u->id,
935 prefix, unit_description(u),
936 prefix, strna(u->instance),
937 prefix, unit_load_state_to_string(u->load_state),
938 prefix, unit_active_state_to_string(unit_active_state(u)),
939 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
940 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
941 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
942 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
943 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
944 prefix, yes_no(unit_check_gc(u)),
945 prefix, yes_no(unit_need_daemon_reload(u)),
946 prefix, yes_no(u->transient),
947 prefix, strna(unit_slice_name(u)),
948 prefix, strna(u->cgroup_path),
949 prefix, yes_no(u->cgroup_realized),
950 prefix, u->cgroup_realized_mask,
951 prefix, u->cgroup_members_mask);
952
953 SET_FOREACH(t, u->names, i)
954 fprintf(f, "%s\tName: %s\n", prefix, t);
955
956 STRV_FOREACH(j, u->documentation)
957 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
958
959 following = unit_following(u);
960 if (following)
961 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
962
963 r = unit_following_set(u, &following_set);
964 if (r >= 0) {
965 Unit *other;
966
967 SET_FOREACH(other, following_set, i)
968 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
969 }
970
971 if (u->fragment_path)
972 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
973
974 if (u->source_path)
975 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
976
977 STRV_FOREACH(j, u->dropin_paths)
978 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
979
980 if (u->job_timeout != USEC_INFINITY)
981 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
982
983 if (u->job_timeout_action != FAILURE_ACTION_NONE)
984 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, failure_action_to_string(u->job_timeout_action));
985
986 if (u->job_timeout_reboot_arg)
987 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
988
989 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
990 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
991
992 if (dual_timestamp_is_set(&u->condition_timestamp))
993 fprintf(f,
994 "%s\tCondition Timestamp: %s\n"
995 "%s\tCondition Result: %s\n",
996 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
997 prefix, yes_no(u->condition_result));
998
999 if (dual_timestamp_is_set(&u->assert_timestamp))
1000 fprintf(f,
1001 "%s\tAssert Timestamp: %s\n"
1002 "%s\tAssert Result: %s\n",
1003 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1004 prefix, yes_no(u->assert_result));
1005
1006 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1007 Unit *other;
1008
1009 SET_FOREACH(other, u->dependencies[d], i)
1010 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1011 }
1012
1013 if (!strv_isempty(u->requires_mounts_for)) {
1014 fprintf(f,
1015 "%s\tRequiresMountsFor:", prefix);
1016
1017 STRV_FOREACH(j, u->requires_mounts_for)
1018 fprintf(f, " %s", *j);
1019
1020 fputs("\n", f);
1021 }
1022
1023 if (u->load_state == UNIT_LOADED) {
1024
1025 fprintf(f,
1026 "%s\tStopWhenUnneeded: %s\n"
1027 "%s\tRefuseManualStart: %s\n"
1028 "%s\tRefuseManualStop: %s\n"
1029 "%s\tDefaultDependencies: %s\n"
1030 "%s\tOnFailureJobMode: %s\n"
1031 "%s\tIgnoreOnIsolate: %s\n",
1032 prefix, yes_no(u->stop_when_unneeded),
1033 prefix, yes_no(u->refuse_manual_start),
1034 prefix, yes_no(u->refuse_manual_stop),
1035 prefix, yes_no(u->default_dependencies),
1036 prefix, job_mode_to_string(u->on_failure_job_mode),
1037 prefix, yes_no(u->ignore_on_isolate));
1038
1039 if (UNIT_VTABLE(u)->dump)
1040 UNIT_VTABLE(u)->dump(u, f, prefix2);
1041
1042 } else if (u->load_state == UNIT_MERGED)
1043 fprintf(f,
1044 "%s\tMerged into: %s\n",
1045 prefix, u->merged_into->id);
1046 else if (u->load_state == UNIT_ERROR)
1047 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1048
1049 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1050 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1051
1052 if (u->job)
1053 job_dump(u->job, f, prefix2);
1054
1055 if (u->nop_job)
1056 job_dump(u->nop_job, f, prefix2);
1057
1058 }
1059
1060 /* Common implementation for multiple backends */
1061 int unit_load_fragment_and_dropin(Unit *u) {
1062 int r;
1063
1064 assert(u);
1065
1066 /* Load a .{service,socket,...} file */
1067 r = unit_load_fragment(u);
1068 if (r < 0)
1069 return r;
1070
1071 if (u->load_state == UNIT_STUB)
1072 return -ENOENT;
1073
1074 /* Load drop-in directory data */
1075 r = unit_load_dropin(unit_follow_merge(u));
1076 if (r < 0)
1077 return r;
1078
1079 return 0;
1080 }
1081
1082 /* Common implementation for multiple backends */
1083 int unit_load_fragment_and_dropin_optional(Unit *u) {
1084 int r;
1085
1086 assert(u);
1087
1088 /* Same as unit_load_fragment_and_dropin(), but whether
1089 * something can be loaded or not doesn't matter. */
1090
1091 /* Load a .service file */
1092 r = unit_load_fragment(u);
1093 if (r < 0)
1094 return r;
1095
1096 if (u->load_state == UNIT_STUB)
1097 u->load_state = UNIT_LOADED;
1098
1099 /* Load drop-in directory data */
1100 r = unit_load_dropin(unit_follow_merge(u));
1101 if (r < 0)
1102 return r;
1103
1104 return 0;
1105 }
1106
1107 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1108 assert(u);
1109 assert(target);
1110
1111 if (target->type != UNIT_TARGET)
1112 return 0;
1113
1114 /* Only add the dependency if both units are loaded, so that
1115          * the loop check below is reliable. */
1116 if (u->load_state != UNIT_LOADED ||
1117 target->load_state != UNIT_LOADED)
1118 return 0;
1119
1120 /* If either side wants no automatic dependencies, then let's
1121 * skip this */
1122 if (!u->default_dependencies ||
1123 !target->default_dependencies)
1124 return 0;
1125
1126 /* Don't create loops */
1127 if (set_get(target->dependencies[UNIT_BEFORE], u))
1128 return 0;
1129
1130 return unit_add_dependency(target, UNIT_AFTER, u, true);
1131 }
1132
1133 static int unit_add_target_dependencies(Unit *u) {
1134
1135 static const UnitDependency deps[] = {
1136 UNIT_REQUIRED_BY,
1137 UNIT_REQUISITE_OF,
1138 UNIT_WANTED_BY,
1139 UNIT_BOUND_BY
1140 };
1141
1142 Unit *target;
1143 Iterator i;
1144 unsigned k;
1145 int r = 0;
1146
1147 assert(u);
1148
1149 for (k = 0; k < ELEMENTSOF(deps); k++)
1150 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1151 r = unit_add_default_target_dependency(u, target);
1152 if (r < 0)
1153 return r;
1154 }
1155
1156 return r;
1157 }
1158
1159 static int unit_add_slice_dependencies(Unit *u) {
1160 assert(u);
1161
1162 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1163 return 0;
1164
1165 if (UNIT_ISSET(u->slice))
1166 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1167
1168 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1169 return 0;
1170
1171 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1172 }
1173
1174 static int unit_add_mount_dependencies(Unit *u) {
1175 char **i;
1176 int r;
1177
1178 assert(u);
1179
1180 STRV_FOREACH(i, u->requires_mounts_for) {
1181 char prefix[strlen(*i) + 1];
1182
1183 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1184 _cleanup_free_ char *p = NULL;
1185 Unit *m;
1186
1187 r = unit_name_from_path(prefix, ".mount", &p);
1188 if (r < 0)
1189 return r;
1190
1191 m = manager_get_unit(u->manager, p);
1192 if (!m) {
1193 /* Make sure to load the mount unit if
1194 * it exists. If so the dependencies
1195 * on this unit will be added later
1196 * during the loading of the mount
1197 * unit. */
1198 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1199 continue;
1200 }
1201 if (m == u)
1202 continue;
1203
1204 if (m->load_state != UNIT_LOADED)
1205 continue;
1206
1207 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1208 if (r < 0)
1209 return r;
1210
1211 if (m->fragment_path) {
1212 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1213 if (r < 0)
1214 return r;
1215 }
1216 }
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int unit_add_startup_units(Unit *u) {
1223 CGroupContext *c;
1224 int r;
1225
1226 c = unit_get_cgroup_context(u);
1227 if (!c)
1228 return 0;
1229
1230 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1231 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1232 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1233 return 0;
1234
1235 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1236 if (r < 0)
1237 return r;
1238
1239 return set_put(u->manager->startup_units, u);
1240 }
1241
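/* Drives the actual loading of a unit: removes it from the load queue,
 * flushes a possible transient unit file, calls the type-specific load()
 * hook, and then adds the implicit target, slice, mount and startup
 * dependencies. On failure the error is recorded in u->load_error. */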
1242 int unit_load(Unit *u) {
1243 int r;
1244
1245 assert(u);
1246
1247 if (u->in_load_queue) {
1248 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1249 u->in_load_queue = false;
1250 }
1251
1252 if (u->type == _UNIT_TYPE_INVALID)
1253 return -EINVAL;
1254
1255 if (u->load_state != UNIT_STUB)
1256 return 0;
1257
1258 if (u->transient_file) {
1259 r = fflush_and_check(u->transient_file);
1260 if (r < 0)
1261 goto fail;
1262
1263 fclose(u->transient_file);
1264 u->transient_file = NULL;
1265
1266 u->fragment_mtime = now(CLOCK_REALTIME);
1267 }
1268
1269 if (UNIT_VTABLE(u)->load) {
1270 r = UNIT_VTABLE(u)->load(u);
1271 if (r < 0)
1272 goto fail;
1273 }
1274
1275 if (u->load_state == UNIT_STUB) {
1276 r = -ENOENT;
1277 goto fail;
1278 }
1279
1280 if (u->load_state == UNIT_LOADED) {
1281
1282 r = unit_add_target_dependencies(u);
1283 if (r < 0)
1284 goto fail;
1285
1286 r = unit_add_slice_dependencies(u);
1287 if (r < 0)
1288 goto fail;
1289
1290 r = unit_add_mount_dependencies(u);
1291 if (r < 0)
1292 goto fail;
1293
1294 r = unit_add_startup_units(u);
1295 if (r < 0)
1296 goto fail;
1297
1298 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1299                 log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate is set. Refusing.");
1300 r = -EINVAL;
1301 goto fail;
1302 }
1303
1304 unit_update_cgroup_members_masks(u);
1305 }
1306
1307 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1308
1309 unit_add_to_dbus_queue(unit_follow_merge(u));
1310 unit_add_to_gc_queue(u);
1311
1312 return 0;
1313
1314 fail:
1315 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1316 u->load_error = r;
1317 unit_add_to_dbus_queue(u);
1318 unit_add_to_gc_queue(u);
1319
1320 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1321
1322 return r;
1323 }
1324
1325 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1326 Condition *c;
1327 int triggered = -1;
1328
1329 assert(u);
1330 assert(to_string);
1331
1332 /* If the condition list is empty, then it is true */
1333 if (!first)
1334 return true;
1335
1336 /* Otherwise, if all of the non-trigger conditions apply and
1337 * if any of the trigger conditions apply (unless there are
1338 * none) we return true */
1339 LIST_FOREACH(conditions, c, first) {
1340 int r;
1341
1342 r = condition_test(c);
1343 if (r < 0)
1344 log_unit_warning(u,
1345 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1346 to_string(c->type),
1347 c->trigger ? "|" : "",
1348 c->negate ? "!" : "",
1349 c->parameter);
1350 else
1351 log_unit_debug(u,
1352 "%s=%s%s%s %s.",
1353 to_string(c->type),
1354 c->trigger ? "|" : "",
1355 c->negate ? "!" : "",
1356 c->parameter,
1357 condition_result_to_string(c->result));
1358
1359 if (!c->trigger && r <= 0)
1360 return false;
1361
1362 if (c->trigger && triggered <= 0)
1363 triggered = r > 0;
1364 }
1365
1366 return triggered != 0;
1367 }
1368
1369 static bool unit_condition_test(Unit *u) {
1370 assert(u);
1371
1372 dual_timestamp_get(&u->condition_timestamp);
1373 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1374
1375 return u->condition_result;
1376 }
1377
1378 static bool unit_assert_test(Unit *u) {
1379 assert(u);
1380
1381 dual_timestamp_get(&u->assert_timestamp);
1382 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1383
1384 return u->assert_result;
1385 }
1386
1387 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1388 DISABLE_WARNING_FORMAT_NONLITERAL;
1389 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1390 REENABLE_WARNING;
1391 }
1392
1393 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1394 const char *format;
1395 const UnitStatusMessageFormats *format_table;
1396
1397 assert(u);
1398 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1399
1400 if (t != JOB_RELOAD) {
1401 format_table = &UNIT_VTABLE(u)->status_message_formats;
1402 if (format_table) {
1403 format = format_table->starting_stopping[t == JOB_STOP];
1404 if (format)
1405 return format;
1406 }
1407 }
1408
1409 /* Return generic strings */
1410 if (t == JOB_START)
1411 return "Starting %s.";
1412 else if (t == JOB_STOP)
1413 return "Stopping %s.";
1414 else
1415 return "Reloading %s.";
1416 }
1417
1418 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1419 const char *format;
1420
1421 assert(u);
1422
1423 /* Reload status messages have traditionally not been printed to console. */
1424 if (!IN_SET(t, JOB_START, JOB_STOP))
1425 return;
1426
1427 format = unit_get_status_message_format(u, t);
1428
1429 DISABLE_WARNING_FORMAT_NONLITERAL;
1430 unit_status_printf(u, "", format);
1431 REENABLE_WARNING;
1432 }
1433
1434 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1435 const char *format;
1436 char buf[LINE_MAX];
1437 sd_id128_t mid;
1438
1439 assert(u);
1440
1441 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1442 return;
1443
1444 if (log_on_console())
1445 return;
1446
1447 /* We log status messages for all units and all operations. */
1448
1449 format = unit_get_status_message_format(u, t);
1450
1451 DISABLE_WARNING_FORMAT_NONLITERAL;
1452 xsprintf(buf, format, unit_description(u));
1453 REENABLE_WARNING;
1454
1455 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1456 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1457 SD_MESSAGE_UNIT_RELOADING;
1458
1459 /* Note that we deliberately use LOG_MESSAGE() instead of
1460 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1461 * closely what is written to screen using the status output,
1462          * which is supposed to be the highest-level, friendliest output
1463 * possible, which means we should avoid the low-level unit
1464 * name. */
1465 log_struct(LOG_INFO,
1466 LOG_MESSAGE_ID(mid),
1467 LOG_UNIT_ID(u),
1468 LOG_MESSAGE("%s", buf),
1469 NULL);
1470 }
1471
1472 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1473 assert(u);
1474 assert(t >= 0);
1475 assert(t < _JOB_TYPE_MAX);
1476
1477 unit_status_log_starting_stopping_reloading(u, t);
1478 unit_status_print_starting_stopping(u, t);
1479 }
1480
1481 int unit_start_limit_test(Unit *u) {
1482 assert(u);
1483
1484 if (ratelimit_test(&u->start_limit)) {
1485 u->start_limit_hit = false;
1486 return 0;
1487 }
1488
1489 log_unit_warning(u, "Start request repeated too quickly.");
1490 u->start_limit_hit = true;
1491
1492 return failure_action(u->manager, u->start_limit_action, u->reboot_arg);
1493 }
1494
1495 /* Errors:
1496 * -EBADR: This unit type does not support starting.
1497 * -EALREADY: Unit is already started.
1498 * -EAGAIN: An operation is already in progress. Retry later.
1499 * -ECANCELED: Too many requests for now.
1500 * -EPROTO: Assert failed
1501 * -EINVAL: Unit not loaded
1502 * -EOPNOTSUPP: Unit type not supported
1503 */
1504 int unit_start(Unit *u) {
1505 UnitActiveState state;
1506 Unit *following;
1507
1508 assert(u);
1509
1510 /* If this is already started, then this will succeed. Note
1511 * that this will even succeed if this unit is not startable
1512 * by the user. This is relied on to detect when we need to
1513 * wait for units and when waiting is finished. */
1514 state = unit_active_state(u);
1515 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1516 return -EALREADY;
1517
1518 /* Units that aren't loaded cannot be started */
1519 if (u->load_state != UNIT_LOADED)
1520 return -EINVAL;
1521
1522 /* If the conditions failed, don't do anything at all. If we
1523 * already are activating this call might still be useful to
1524 * speed up activation in case there is some hold-off time,
1525 * but we don't want to recheck the condition in that case. */
1526 if (state != UNIT_ACTIVATING &&
1527 !unit_condition_test(u)) {
1528 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1529 return -EALREADY;
1530 }
1531
1532 /* If the asserts failed, fail the entire job */
1533 if (state != UNIT_ACTIVATING &&
1534 !unit_assert_test(u)) {
1535 log_unit_notice(u, "Starting requested but asserts failed.");
1536 return -EPROTO;
1537 }
1538
1539 /* Units of types that aren't supported cannot be
1540 * started. Note that we do this test only after the condition
1541 * checks, so that we rather return condition check errors
1542 * (which are usually not considered a true failure) than "not
1543 * supported" errors (which are considered a failure).
1544 */
1545 if (!unit_supported(u))
1546 return -EOPNOTSUPP;
1547
1548 /* Forward to the main object, if we aren't it. */
1549 following = unit_following(u);
1550 if (following) {
1551 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1552 return unit_start(following);
1553 }
1554
1555 /* If it is stopped, but we cannot start it, then fail */
1556 if (!UNIT_VTABLE(u)->start)
1557 return -EBADR;
1558
1559 /* We don't suppress calls to ->start() here when we are
1560 * already starting, to allow this request to be used as a
1561 * "hurry up" call, for example when the unit is in some "auto
1562 * restart" state where it waits for a holdoff timer to elapse
1563 * before it will start again. */
1564
1565 unit_add_to_dbus_queue(u);
1566
1567 return UNIT_VTABLE(u)->start(u);
1568 }
1569
1570 bool unit_can_start(Unit *u) {
1571 assert(u);
1572
1573 if (u->load_state != UNIT_LOADED)
1574 return false;
1575
1576 if (!unit_supported(u))
1577 return false;
1578
1579 return !!UNIT_VTABLE(u)->start;
1580 }
1581
1582 bool unit_can_isolate(Unit *u) {
1583 assert(u);
1584
1585 return unit_can_start(u) &&
1586 u->allow_isolate;
1587 }
1588
1589 /* Errors:
1590 * -EBADR: This unit type does not support stopping.
1591 * -EALREADY: Unit is already stopped.
1592 * -EAGAIN: An operation is already in progress. Retry later.
1593 */
1594 int unit_stop(Unit *u) {
1595 UnitActiveState state;
1596 Unit *following;
1597
1598 assert(u);
1599
1600 state = unit_active_state(u);
1601 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1602 return -EALREADY;
1603
1604 following = unit_following(u);
1605 if (following) {
1606 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1607 return unit_stop(following);
1608 }
1609
1610 if (!UNIT_VTABLE(u)->stop)
1611 return -EBADR;
1612
1613 unit_add_to_dbus_queue(u);
1614
1615 return UNIT_VTABLE(u)->stop(u);
1616 }
1617
1618 /* Errors:
1619 * -EBADR: This unit type does not support reloading.
1620 * -ENOEXEC: Unit is not started.
1621 * -EAGAIN: An operation is already in progress. Retry later.
1622 */
1623 int unit_reload(Unit *u) {
1624 UnitActiveState state;
1625 Unit *following;
1626
1627 assert(u);
1628
1629 if (u->load_state != UNIT_LOADED)
1630 return -EINVAL;
1631
1632 if (!unit_can_reload(u))
1633 return -EBADR;
1634
1635 state = unit_active_state(u);
1636 if (state == UNIT_RELOADING)
1637 return -EALREADY;
1638
1639 if (state != UNIT_ACTIVE) {
1640 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1641 return -ENOEXEC;
1642 }
1643
1644 following = unit_following(u);
1645 if (following) {
1646 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1647 return unit_reload(following);
1648 }
1649
1650 unit_add_to_dbus_queue(u);
1651
1652 return UNIT_VTABLE(u)->reload(u);
1653 }
1654
1655 bool unit_can_reload(Unit *u) {
1656 assert(u);
1657
1658 if (!UNIT_VTABLE(u)->reload)
1659 return false;
1660
1661 if (!UNIT_VTABLE(u)->can_reload)
1662 return true;
1663
1664 return UNIT_VTABLE(u)->can_reload(u);
1665 }
1666
1667 static void unit_check_unneeded(Unit *u) {
1668
1669 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1670
1671 static const UnitDependency needed_dependencies[] = {
1672 UNIT_REQUIRED_BY,
1673 UNIT_REQUISITE_OF,
1674 UNIT_WANTED_BY,
1675 UNIT_BOUND_BY,
1676 };
1677
1678 Unit *other;
1679 Iterator i;
1680 unsigned j;
1681 int r;
1682
1683 assert(u);
1684
1685 /* If this service shall be shut down when unneeded then do
1686 * so. */
1687
1688 if (!u->stop_when_unneeded)
1689 return;
1690
1691 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1692 return;
1693
1694 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1695 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1696 if (unit_active_or_pending(other))
1697 return;
1698
1699 /* If stopping a unit fails continuously we might enter a stop
1700 * loop here, hence stop acting on the service being
1701 * unnecessary after a while. */
1702 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1703 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1704 return;
1705 }
1706
1707 log_unit_info(u, "Unit not needed anymore. Stopping.");
1708
1709 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1710 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1711 if (r < 0)
1712 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1713 }
1714
1715 static void unit_check_binds_to(Unit *u) {
1716 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1717 bool stop = false;
1718 Unit *other;
1719 Iterator i;
1720 int r;
1721
1722 assert(u);
1723
1724 if (u->job)
1725 return;
1726
1727 if (unit_active_state(u) != UNIT_ACTIVE)
1728 return;
1729
1730 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1731 if (other->job)
1732 continue;
1733
1734 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1735 continue;
1736
1737 stop = true;
1738 break;
1739 }
1740
1741 if (!stop)
1742 return;
1743
1744 /* If stopping a unit fails continuously we might enter a stop
1745          * loop here, hence stop reacting to the bound-to unit being
1746          * inactive after a while. */
1747 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1748 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1749 return;
1750 }
1751
1752 assert(other);
1753 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1754
1755 /* A unit we need to run is gone. Sniff. Let's stop this. */
1756 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1757 if (r < 0)
1758 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1759 }
1760
1761 static void retroactively_start_dependencies(Unit *u) {
1762 Iterator i;
1763 Unit *other;
1764
1765 assert(u);
1766 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1767
1768 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1769 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1770 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1771 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1772
1773 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1774 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1775 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1776 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1777
1778 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1779 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1780 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1781 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1782
1783 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1784 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1785 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1786
1787 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1788 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1789 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1790 }
1791
1792 static void retroactively_stop_dependencies(Unit *u) {
1793 Iterator i;
1794 Unit *other;
1795
1796 assert(u);
1797 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1798
1799 /* Pull down units which are bound to us recursively if enabled */
1800 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1801 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1802 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1803 }
1804
1805 static void check_unneeded_dependencies(Unit *u) {
1806 Iterator i;
1807 Unit *other;
1808
1809 assert(u);
1810 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1811
1812 /* Garbage collect services that might not be needed anymore, if enabled */
1813 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1814 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1815 unit_check_unneeded(other);
1816 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1817 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1818 unit_check_unneeded(other);
1819 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1820 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1821 unit_check_unneeded(other);
1822 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1823 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1824 unit_check_unneeded(other);
1825 }
1826
1827 void unit_start_on_failure(Unit *u) {
1828 Unit *other;
1829 Iterator i;
1830
1831 assert(u);
1832
1833 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1834 return;
1835
1836 log_unit_info(u, "Triggering OnFailure= dependencies.");
1837
1838 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1839 int r;
1840
1841 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1842 if (r < 0)
1843 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1844 }
1845 }
1846
1847 void unit_trigger_notify(Unit *u) {
1848 Unit *other;
1849 Iterator i;
1850
1851 assert(u);
1852
1853 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1854 if (UNIT_VTABLE(other)->trigger_notify)
1855 UNIT_VTABLE(other)->trigger_notify(other, u);
1856 }
1857
1858 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1859 Manager *m;
1860 bool unexpected;
1861
1862 assert(u);
1863 assert(os < _UNIT_ACTIVE_STATE_MAX);
1864 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1865
1866 /* Note that this is called for all low-level state changes,
1867 * even if they might map to the same high-level
1868 * UnitActiveState! That means that ns == os is an expected
1869 * behavior here. For example: if a mount point is remounted
1870 * this function will be called too! */
1871
1872 m = u->manager;
1873
1874 /* Update timestamps for state changes */
1875 if (!MANAGER_IS_RELOADING(m)) {
1876 dual_timestamp_get(&u->state_change_timestamp);
1877
1878 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1879 u->inactive_exit_timestamp = u->state_change_timestamp;
1880 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1881 u->inactive_enter_timestamp = u->state_change_timestamp;
1882
1883 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1884 u->active_enter_timestamp = u->state_change_timestamp;
1885 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1886 u->active_exit_timestamp = u->state_change_timestamp;
1887 }
1888
1889 /* Keep track of failed units */
1890 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1891
1892 /* Make sure the cgroup is always removed when we become inactive */
1893 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1894 unit_prune_cgroup(u);
1895
1896 /* Note that this doesn't apply to RemainAfterExit services exiting
1897          * successfully, since there's no change of state in that case, which is
1898          * why it is handled in service_set_state(). */
1899 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1900 ExecContext *ec;
1901
1902 ec = unit_get_exec_context(u);
1903 if (ec && exec_context_may_touch_console(ec)) {
1904 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1905 m->n_on_console--;
1906
1907 if (m->n_on_console == 0)
1908 /* unset no_console_output flag, since the console is free */
1909 m->no_console_output = false;
1910 } else
1911 m->n_on_console++;
1912 }
1913 }
1914
1915 if (u->job) {
1916 unexpected = false;
1917
1918 if (u->job->state == JOB_WAITING)
1919
1920 /* So we reached a different state for this
1921 * job. Let's see if we can run it now if it
1922 * failed previously due to EAGAIN. */
1923 job_add_to_run_queue(u->job);
1924
1925 /* Let's check whether this state change constitutes a
1926 * finished job, or maybe contradicts a running job and
1927 * hence needs to invalidate jobs. */
1928
1929 switch (u->job->type) {
1930
1931 case JOB_START:
1932 case JOB_VERIFY_ACTIVE:
1933
1934 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1935 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
1936 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1937 unexpected = true;
1938
1939 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1940 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1941 }
1942
1943 break;
1944
1945 case JOB_RELOAD:
1946 case JOB_RELOAD_OR_START:
1947 case JOB_TRY_RELOAD:
1948
1949 if (u->job->state == JOB_RUNNING) {
1950 if (ns == UNIT_ACTIVE)
1951 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
1952 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1953 unexpected = true;
1954
1955 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1956 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1957 }
1958 }
1959
1960 break;
1961
1962 case JOB_STOP:
1963 case JOB_RESTART:
1964 case JOB_TRY_RESTART:
1965
1966 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1967 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
1968 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
1969 unexpected = true;
1970 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
1971 }
1972
1973 break;
1974
1975 default:
1976 assert_not_reached("Job type unknown");
1977 }
1978
1979 } else
1980 unexpected = true;
1981
1982 if (!MANAGER_IS_RELOADING(m)) {
1983
1984 /* If this state change happened without being
1985 * requested by a job, then let's retroactively start
1986 * or stop dependencies. We skip that step when
1987 * deserializing, since we don't want to create any
1988 * additional jobs just because something is already
1989 * activated. */
1990
1991 if (unexpected) {
1992 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
1993 retroactively_start_dependencies(u);
1994 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
1995 retroactively_stop_dependencies(u);
1996 }
1997
1998 /* stop unneeded units regardless if going down was expected or not */
1999 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2000 check_unneeded_dependencies(u);
2001
2002 if (ns != os && ns == UNIT_FAILED) {
2003 log_unit_notice(u, "Unit entered failed state.");
2004 unit_start_on_failure(u);
2005 }
2006 }
2007
2008 /* Some names are special */
2009 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2010
2011 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2012 /* The bus might have just become available,
2013 * hence try to connect to it, if we aren't
2014 * yet connected. */
2015 bus_init(m, true);
2016
2017 if (u->type == UNIT_SERVICE &&
2018 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2019 !MANAGER_IS_RELOADING(m)) {
2020 /* Write audit record if we have just finished starting up */
2021 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2022 u->in_audit = true;
2023 }
2024
2025 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2026 manager_send_unit_plymouth(m, u);
2027
2028 } else {
2029
2030 /* We don't care about D-Bus here, since we'll get an
2031 * asynchronous notification for it anyway. */
2032
2033 if (u->type == UNIT_SERVICE &&
2034 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2035 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2036 !MANAGER_IS_RELOADING(m)) {
2037
2038                         /* Hmm, if there was no start record written,
2039 * write it now, so that we always have a nice
2040 * pair */
2041 if (!u->in_audit) {
2042 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2043
2044 if (ns == UNIT_INACTIVE)
2045 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2046 } else
2047 /* Write audit record if we have just finished shutting down */
2048 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2049
2050 u->in_audit = false;
2051 }
2052 }
2053
2054 manager_recheck_journal(m);
2055 unit_trigger_notify(u);
2056
2057 if (!MANAGER_IS_RELOADING(u->manager)) {
2058 /* Maybe we finished startup and are now ready for
2059 * being stopped because unneeded? */
2060 unit_check_unneeded(u);
2061
2062 /* Maybe we finished startup, but something we needed
2063 * has vanished? Let's die then. (This happens when
2064 * something BindsTo= to a Type=oneshot unit, as these
2065 * units go directly from starting to inactive,
2066 * without ever entering started.) */
2067 unit_check_binds_to(u);
2068 }
2069
2070 unit_add_to_dbus_queue(u);
2071 unit_add_to_gc_queue(u);
2072 }
2073
2074 int unit_watch_pid(Unit *u, pid_t pid) {
2075 int q, r;
2076
2077 assert(u);
2078 assert(pid >= 1);
2079
2080 /* Watch a specific PID. We only support one or two units
2081 * watching each PID for now, not more. */
2082
2083 r = set_ensure_allocated(&u->pids, NULL);
2084 if (r < 0)
2085 return r;
2086
2087 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2088 if (r < 0)
2089 return r;
2090
2091 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2092 if (r == -EEXIST) {
2093 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2094 if (r < 0)
2095 return r;
2096
2097 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2098 }
2099
2100 q = set_put(u->pids, PID_TO_PTR(pid));
2101 if (q < 0)
2102 return q;
2103
2104 return r;
2105 }
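
/* Illustrative note: with the scheme above the first unit watching a PID lands in
 * watch_pids1 and a second one in watch_pids2, so code dispatching e.g. a SIGCHLD
 * for that PID (given its Manager *m) would consult both maps, roughly:
 *
 *     Unit *w1 = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
 *     Unit *w2 = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
 *
 * A third, different unit trying to watch the same PID gets -EEXIST back. */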
2106
2107 void unit_unwatch_pid(Unit *u, pid_t pid) {
2108 assert(u);
2109 assert(pid >= 1);
2110
2111 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2112 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2113 (void) set_remove(u->pids, PID_TO_PTR(pid));
2114 }
2115
2116 void unit_unwatch_all_pids(Unit *u) {
2117 assert(u);
2118
2119 while (!set_isempty(u->pids))
2120 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2121
2122 u->pids = set_free(u->pids);
2123 }
2124
2125 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2126 Iterator i;
2127 void *e;
2128
2129 assert(u);
2130
2131 /* Cleans dead PIDs from our list */
2132
2133 SET_FOREACH(e, u->pids, i) {
2134 pid_t pid = PTR_TO_PID(e);
2135
2136 if (pid == except1 || pid == except2)
2137 continue;
2138
2139 if (!pid_is_unwaited(pid))
2140 unit_unwatch_pid(u, pid);
2141 }
2142 }
2143
2144 bool unit_job_is_applicable(Unit *u, JobType j) {
2145 assert(u);
2146 assert(j >= 0 && j < _JOB_TYPE_MAX);
2147
2148 switch (j) {
2149
2150 case JOB_VERIFY_ACTIVE:
2151 case JOB_START:
2152 case JOB_STOP:
2153 case JOB_NOP:
2154 return true;
2155
2156 case JOB_RESTART:
2157 case JOB_TRY_RESTART:
2158 return unit_can_start(u);
2159
2160 case JOB_RELOAD:
2161 case JOB_TRY_RELOAD:
2162 return unit_can_reload(u);
2163
2164 case JOB_RELOAD_OR_START:
2165 return unit_can_reload(u) && unit_can_start(u);
2166
2167 default:
2168 assert_not_reached("Invalid job type");
2169 }
2170 }
2171
2172 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2173 assert(u);
2174
2175         /* Only warn about some dependency types */
2176 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2177 return;
2178
2179 if (streq_ptr(u->id, other))
2180 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2181 else
2182 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2183 }
2184
2185 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2186
2187 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2188 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2189 [UNIT_WANTS] = UNIT_WANTED_BY,
2190 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2191 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2192 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2193 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2194 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2195 [UNIT_WANTED_BY] = UNIT_WANTS,
2196 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2197 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2198 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2199 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2200 [UNIT_BEFORE] = UNIT_AFTER,
2201 [UNIT_AFTER] = UNIT_BEFORE,
2202 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2203 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2204 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2205 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2206 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2207 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2208 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2209 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2210 };
2211 int r, q = 0, v = 0, w = 0;
2212 Unit *orig_u = u, *orig_other = other;
2213
2214 assert(u);
2215 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2216 assert(other);
2217
2218 u = unit_follow_merge(u);
2219 other = unit_follow_merge(other);
2220
2221 /* We won't allow dependencies on ourselves. We will not
2222 * consider them an error however. */
2223 if (u == other) {
2224 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2225 return 0;
2226 }
2227
2228 r = set_ensure_allocated(&u->dependencies[d], NULL);
2229 if (r < 0)
2230 return r;
2231
2232 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2233 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2234 if (r < 0)
2235 return r;
2236 }
2237
2238 if (add_reference) {
2239 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2240 if (r < 0)
2241 return r;
2242
2243 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2244 if (r < 0)
2245 return r;
2246 }
2247
2248 q = set_put(u->dependencies[d], other);
2249 if (q < 0)
2250 return q;
2251
2252 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2253 v = set_put(other->dependencies[inverse_table[d]], u);
2254 if (v < 0) {
2255 r = v;
2256 goto fail;
2257 }
2258 }
2259
2260 if (add_reference) {
2261 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2262 if (w < 0) {
2263 r = w;
2264 goto fail;
2265 }
2266
2267 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2268 if (r < 0)
2269 goto fail;
2270 }
2271
2272 unit_add_to_dbus_queue(u);
2273 return 0;
2274
2275 fail:
2276 if (q > 0)
2277 set_remove(u->dependencies[d], other);
2278
2279 if (v > 0)
2280 set_remove(other->dependencies[inverse_table[d]], u);
2281
2282 if (w > 0)
2283 set_remove(u->dependencies[UNIT_REFERENCES], other);
2284
2285 return r;
2286 }
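
/* Example: unit_add_dependency(a, UNIT_WANTS, b, true) records UNIT_WANTS on a and,
 * via inverse_table[], UNIT_WANTED_BY on b; with add_reference set it additionally
 * records UNIT_REFERENCES on a and UNIT_REFERENCED_BY on b. UNIT_ON_FAILURE has no
 * inverse, so only the forward edge is stored for it. */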
2287
2288 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2289 int r;
2290
2291 assert(u);
2292
2293 r = unit_add_dependency(u, d, other, add_reference);
2294 if (r < 0)
2295 return r;
2296
2297 return unit_add_dependency(u, e, other, add_reference);
2298 }
2299
2300 static int resolve_template(Unit *u, const char *name, const char*path, char **buf, const char **ret) {
2301 int r;
2302
2303 assert(u);
2304 assert(name || path);
2305 assert(buf);
2306 assert(ret);
2307
2308 if (!name)
2309 name = basename(path);
2310
2311 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2312 *buf = NULL;
2313 *ret = name;
2314 return 0;
2315 }
2316
2317 if (u->instance)
2318 r = unit_name_replace_instance(name, u->instance, buf);
2319 else {
2320 _cleanup_free_ char *i = NULL;
2321
2322 r = unit_name_to_prefix(u->id, &i);
2323 if (r < 0)
2324 return r;
2325
2326 r = unit_name_replace_instance(name, i, buf);
2327 }
2328 if (r < 0)
2329 return r;
2330
2331 *ret = *buf;
2332 return 0;
2333 }
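
/* Example (unit names illustrative): for a unit with instance "tty1", a dependency
 * on the template name "serial-getty@.service" resolves to
 * "serial-getty@tty1.service", while a non-template name such as "getty.target" is
 * passed through unchanged. For units without an instance, the unit's own prefix is
 * used as the instance. */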
2334
2335 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2336 _cleanup_free_ char *buf = NULL;
2337 Unit *other;
2338 int r;
2339
2340 assert(u);
2341 assert(name || path);
2342
2343 r = resolve_template(u, name, path, &buf, &name);
2344 if (r < 0)
2345 return r;
2346
2347 r = manager_load_unit(u->manager, name, path, NULL, &other);
2348 if (r < 0)
2349 return r;
2350
2351 return unit_add_dependency(u, d, other, add_reference);
2352 }
2353
2354 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2355 _cleanup_free_ char *buf = NULL;
2356 Unit *other;
2357 int r;
2358
2359 assert(u);
2360 assert(name || path);
2361
2362 r = resolve_template(u, name, path, &buf, &name);
2363 if (r < 0)
2364 return r;
2365
2366 r = manager_load_unit(u->manager, name, path, NULL, &other);
2367 if (r < 0)
2368 return r;
2369
2370 return unit_add_two_dependencies(u, d, e, other, add_reference);
2371 }
2372
2373 int set_unit_path(const char *p) {
2374 /* This is mostly for debug purposes */
2375 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2376 return -errno;
2377
2378 return 0;
2379 }
2380
2381 char *unit_dbus_path(Unit *u) {
2382 assert(u);
2383
2384 if (!u->id)
2385 return NULL;
2386
2387 return unit_dbus_path_from_name(u->id);
2388 }
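
/* For example, "cups.service" would map to an object path like
 * "/org/freedesktop/systemd1/unit/cups_2eservice", since unit_dbus_path_from_name()
 * escapes characters that are not valid in D-Bus object path components. */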
2389
2390 int unit_set_slice(Unit *u, Unit *slice) {
2391 assert(u);
2392 assert(slice);
2393
2394 /* Sets the unit slice if it has not been set before. Is extra
2395          * careful to only allow this for units that actually have a
2396          * cgroup context. Also, we don't allow setting this for slices
2397 * (since the parent slice is derived from the name). Make
2398 * sure the unit we set is actually a slice. */
2399
2400 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2401 return -EOPNOTSUPP;
2402
2403 if (u->type == UNIT_SLICE)
2404 return -EINVAL;
2405
2406 if (unit_active_state(u) != UNIT_INACTIVE)
2407 return -EBUSY;
2408
2409 if (slice->type != UNIT_SLICE)
2410 return -EINVAL;
2411
2412 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2413 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2414 return -EPERM;
2415
2416 if (UNIT_DEREF(u->slice) == slice)
2417 return 0;
2418
2419 /* Disallow slice changes if @u is already bound to cgroups */
2420 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2421 return -EBUSY;
2422
2423 unit_ref_unset(&u->slice);
2424 unit_ref_set(&u->slice, slice);
2425 return 1;
2426 }
2427
2428 int unit_set_default_slice(Unit *u) {
2429 _cleanup_free_ char *b = NULL;
2430 const char *slice_name;
2431 Unit *slice;
2432 int r;
2433
2434 assert(u);
2435
2436 if (UNIT_ISSET(u->slice))
2437 return 0;
2438
2439 if (u->instance) {
2440 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2441
2442 /* Implicitly place all instantiated units in their
2443 * own per-template slice */
2444
2445 r = unit_name_to_prefix(u->id, &prefix);
2446 if (r < 0)
2447 return r;
2448
2449 /* The prefix is already escaped, but it might include
2450 * "-" which has a special meaning for slice units,
2451                  * hence escape it once more here. */
2452 escaped = unit_name_escape(prefix);
2453 if (!escaped)
2454 return -ENOMEM;
2455
2456 if (MANAGER_IS_SYSTEM(u->manager))
2457 b = strjoin("system-", escaped, ".slice", NULL);
2458 else
2459 b = strappend(escaped, ".slice");
2460 if (!b)
2461 return -ENOMEM;
2462
2463 slice_name = b;
2464 } else
2465 slice_name =
2466 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2467 ? SPECIAL_SYSTEM_SLICE
2468 : SPECIAL_ROOT_SLICE;
2469
2470 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2471 if (r < 0)
2472 return r;
2473
2474 return unit_set_slice(u, slice);
2475 }
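
/* Example (names illustrative): in the system manager an instantiated unit such as
 * "getty@tty1.service" ends up in "system-getty.slice", while non-instantiated units
 * fall back to system.slice (or the root slice, for init.scope and for user
 * managers). Since "-" is escaped again here, a prefix like "foo-bar" would yield
 * "system-foo\x2dbar.slice". */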
2476
2477 const char *unit_slice_name(Unit *u) {
2478 assert(u);
2479
2480 if (!UNIT_ISSET(u->slice))
2481 return NULL;
2482
2483 return UNIT_DEREF(u->slice)->id;
2484 }
2485
2486 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2487 _cleanup_free_ char *t = NULL;
2488 int r;
2489
2490 assert(u);
2491 assert(type);
2492 assert(_found);
2493
2494 r = unit_name_change_suffix(u->id, type, &t);
2495 if (r < 0)
2496 return r;
2497 if (unit_has_name(u, t))
2498 return -EINVAL;
2499
2500 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2501 assert(r < 0 || *_found != u);
2502 return r;
2503 }
2504
2505 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2506 const char *name, *old_owner, *new_owner;
2507 Unit *u = userdata;
2508 int r;
2509
2510 assert(message);
2511 assert(u);
2512
2513 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2514 if (r < 0) {
2515 bus_log_parse_error(r);
2516 return 0;
2517 }
2518
2519 if (UNIT_VTABLE(u)->bus_name_owner_change)
2520 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2521
2522 return 0;
2523 }
2524
2525 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2526 const char *match;
2527
2528 assert(u);
2529 assert(bus);
2530 assert(name);
2531
2532 if (u->match_bus_slot)
2533 return -EBUSY;
2534
2535 match = strjoina("type='signal',"
2536 "sender='org.freedesktop.DBus',"
2537 "path='/org/freedesktop/DBus',"
2538 "interface='org.freedesktop.DBus',"
2539 "member='NameOwnerChanged',"
2540 "arg0='", name, "'");
2541
2542 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2543 }
2544
2545 int unit_watch_bus_name(Unit *u, const char *name) {
2546 int r;
2547
2548 assert(u);
2549 assert(name);
2550
2551 /* Watch a specific name on the bus. We only support one unit
2552 * watching each name for now. */
2553
2554 if (u->manager->api_bus) {
2555 /* If the bus is already available, install the match directly.
2556                  * Otherwise, just put the name in the list; bus_setup_api() will take care of it later. */
2557 r = unit_install_bus_match(u, u->manager->api_bus, name);
2558 if (r < 0)
2559 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2560 }
2561
2562 r = hashmap_put(u->manager->watch_bus, name, u);
2563 if (r < 0) {
2564 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2565 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
2566 }
2567
2568 return 0;
2569 }
2570
2571 void unit_unwatch_bus_name(Unit *u, const char *name) {
2572 assert(u);
2573 assert(name);
2574
2575 hashmap_remove_value(u->manager->watch_bus, name, u);
2576 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2577 }
2578
2579 bool unit_can_serialize(Unit *u) {
2580 assert(u);
2581
2582 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2583 }
2584
2585 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2586 int r;
2587
2588 assert(u);
2589 assert(f);
2590 assert(fds);
2591
2592 if (unit_can_serialize(u)) {
2593 ExecRuntime *rt;
2594
2595 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2596 if (r < 0)
2597 return r;
2598
2599 rt = unit_get_exec_runtime(u);
2600 if (rt) {
2601 r = exec_runtime_serialize(u, rt, f, fds);
2602 if (r < 0)
2603 return r;
2604 }
2605 }
2606
2607 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2608
2609 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2610 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2611 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2612 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2613
2614 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2615 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2616
2617 if (dual_timestamp_is_set(&u->condition_timestamp))
2618 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2619
2620 if (dual_timestamp_is_set(&u->assert_timestamp))
2621 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2622
2623 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2624
2625 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2626 if (u->cpu_usage_last != NSEC_INFINITY)
2627 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2628
2629 if (u->cgroup_path)
2630 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2631 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2632
2633 if (uid_is_valid(u->ref_uid))
2634 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2635 if (gid_is_valid(u->ref_gid))
2636 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2637
2638 bus_track_serialize(u->bus_track, f, "ref");
2639
2640 if (serialize_jobs) {
2641 if (u->job) {
2642 fprintf(f, "job\n");
2643 job_serialize(u->job, f);
2644 }
2645
2646 if (u->nop_job) {
2647 fprintf(f, "job\n");
2648 job_serialize(u->nop_job, f);
2649 }
2650 }
2651
2652 /* End marker */
2653 fputc('\n', f);
2654 return 0;
2655 }
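
/* The serialized stream is a plain sequence of "key=value" lines terminated by an
 * empty line, for example (values illustrative):
 *
 *     state-change-timestamp=1474140725000000 86462000000
 *     condition-result=yes
 *     transient=no
 *     cpu-usage-base=0
 *     cgroup=/system.slice/foo.service
 *     cgroup-realized=yes
 *
 * unit_deserialize() below parses exactly this format again. */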
2656
2657 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2658 assert(u);
2659 assert(f);
2660 assert(key);
2661
2662 if (!value)
2663 return 0;
2664
2665 fputs(key, f);
2666 fputc('=', f);
2667 fputs(value, f);
2668 fputc('\n', f);
2669
2670 return 1;
2671 }
2672
2673 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2674 _cleanup_free_ char *c = NULL;
2675
2676 assert(u);
2677 assert(f);
2678 assert(key);
2679
2680 if (!value)
2681 return 0;
2682
2683 c = cescape(value);
2684 if (!c)
2685 return -ENOMEM;
2686
2687 fputs(key, f);
2688 fputc('=', f);
2689 fputs(c, f);
2690 fputc('\n', f);
2691
2692 return 1;
2693 }
2694
2695 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2696 int copy;
2697
2698 assert(u);
2699 assert(f);
2700 assert(key);
2701
2702 if (fd < 0)
2703 return 0;
2704
2705 copy = fdset_put_dup(fds, fd);
2706 if (copy < 0)
2707 return copy;
2708
2709 fprintf(f, "%s=%i\n", key, copy);
2710 return 1;
2711 }
2712
2713 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2714 va_list ap;
2715
2716 assert(u);
2717 assert(f);
2718 assert(key);
2719 assert(format);
2720
2721 fputs(key, f);
2722 fputc('=', f);
2723
2724 va_start(ap, format);
2725 vfprintf(f, format, ap);
2726 va_end(ap);
2727
2728 fputc('\n', f);
2729 }
2730
2731 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2732 ExecRuntime **rt = NULL;
2733 size_t offset;
2734 int r;
2735
2736 assert(u);
2737 assert(f);
2738 assert(fds);
2739
2740 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2741 if (offset > 0)
2742 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2743
2744 for (;;) {
2745 char line[LINE_MAX], *l, *v;
2746 size_t k;
2747
2748 if (!fgets(line, sizeof(line), f)) {
2749 if (feof(f))
2750 return 0;
2751 return -errno;
2752 }
2753
2754 char_array_0(line);
2755 l = strstrip(line);
2756
2757 /* End marker */
2758 if (isempty(l))
2759 break;
2760
2761 k = strcspn(l, "=");
2762
2763 if (l[k] == '=') {
2764 l[k] = 0;
2765 v = l+k+1;
2766 } else
2767 v = l+k;
2768
2769 if (streq(l, "job")) {
2770 if (v[0] == '\0') {
2771 /* new-style serialized job */
2772 Job *j;
2773
2774 j = job_new_raw(u);
2775 if (!j)
2776 return log_oom();
2777
2778 r = job_deserialize(j, f);
2779 if (r < 0) {
2780 job_free(j);
2781 return r;
2782 }
2783
2784 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2785 if (r < 0) {
2786 job_free(j);
2787 return r;
2788 }
2789
2790 r = job_install_deserialized(j);
2791 if (r < 0) {
2792 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2793 job_free(j);
2794 return r;
2795 }
2796 } else /* legacy for pre-44 */
2797                                 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2798 continue;
2799 } else if (streq(l, "state-change-timestamp")) {
2800 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2801 continue;
2802 } else if (streq(l, "inactive-exit-timestamp")) {
2803 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2804 continue;
2805 } else if (streq(l, "active-enter-timestamp")) {
2806 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2807 continue;
2808 } else if (streq(l, "active-exit-timestamp")) {
2809 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2810 continue;
2811 } else if (streq(l, "inactive-enter-timestamp")) {
2812 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2813 continue;
2814 } else if (streq(l, "condition-timestamp")) {
2815 dual_timestamp_deserialize(v, &u->condition_timestamp);
2816 continue;
2817 } else if (streq(l, "assert-timestamp")) {
2818 dual_timestamp_deserialize(v, &u->assert_timestamp);
2819 continue;
2820 } else if (streq(l, "condition-result")) {
2821
2822 r = parse_boolean(v);
2823 if (r < 0)
2824 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2825 else
2826 u->condition_result = r;
2827
2828 continue;
2829
2830 } else if (streq(l, "assert-result")) {
2831
2832 r = parse_boolean(v);
2833 if (r < 0)
2834 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2835 else
2836 u->assert_result = r;
2837
2838 continue;
2839
2840 } else if (streq(l, "transient")) {
2841
2842 r = parse_boolean(v);
2843 if (r < 0)
2844 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2845 else
2846 u->transient = r;
2847
2848 continue;
2849
2850 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2851
2852 r = safe_atou64(v, &u->cpu_usage_base);
2853 if (r < 0)
2854 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
2855
2856 continue;
2857
2858 } else if (streq(l, "cpu-usage-last")) {
2859
2860 r = safe_atou64(v, &u->cpu_usage_last);
2861 if (r < 0)
2862 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
2863
2864 continue;
2865
2866 } else if (streq(l, "cgroup")) {
2867
2868 r = unit_set_cgroup_path(u, v);
2869 if (r < 0)
2870 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2871
2872 (void) unit_watch_cgroup(u);
2873
2874 continue;
2875 } else if (streq(l, "cgroup-realized")) {
2876 int b;
2877
2878 b = parse_boolean(v);
2879 if (b < 0)
2880 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2881 else
2882 u->cgroup_realized = b;
2883
2884 continue;
2885
2886 } else if (streq(l, "ref-uid")) {
2887 uid_t uid;
2888
2889 r = parse_uid(v, &uid);
2890 if (r < 0)
2891 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
2892 else
2893 unit_ref_uid_gid(u, uid, GID_INVALID);
2894
2895 continue;
2896
2897 } else if (streq(l, "ref-gid")) {
2898 gid_t gid;
2899
2900 r = parse_gid(v, &gid);
2901 if (r < 0)
2902 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
2903 else
2904                                 unit_ref_uid_gid(u, UID_INVALID, gid);
2905
                        continue;
2906 } else if (streq(l, "ref")) {
2907
2908 r = strv_extend(&u->deserialized_refs, v);
2909 if (r < 0)
2910 log_oom();
2911
2912 continue;
2913 }
2914
2915 if (unit_can_serialize(u)) {
2916 if (rt) {
2917 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
2918 if (r < 0) {
2919 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
2920 continue;
2921 }
2922
2923 /* Returns positive if key was handled by the call */
2924 if (r > 0)
2925 continue;
2926 }
2927
2928 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
2929 if (r < 0)
2930 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
2931 }
2932 }
2933
2934 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
2935 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
2936 * before 228 where the base for timeouts was not persistent across reboots. */
2937
2938 if (!dual_timestamp_is_set(&u->state_change_timestamp))
2939 dual_timestamp_get(&u->state_change_timestamp);
2940
2941 return 0;
2942 }
2943
2944 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
2945 Unit *device;
2946 _cleanup_free_ char *e = NULL;
2947 int r;
2948
2949 assert(u);
2950
2951 /* Adds in links to the device node that this unit is based on */
2952 if (isempty(what))
2953 return 0;
2954
2955 if (!is_device_path(what))
2956 return 0;
2957
2958 /* When device units aren't supported (such as in a
2959 * container), don't create dependencies on them. */
2960 if (!unit_type_supported(UNIT_DEVICE))
2961 return 0;
2962
2963 r = unit_name_from_path(what, ".device", &e);
2964 if (r < 0)
2965 return r;
2966
2967 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
2968 if (r < 0)
2969 return r;
2970
2971 r = unit_add_two_dependencies(u, UNIT_AFTER,
2972 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
2973 device, true);
2974 if (r < 0)
2975 return r;
2976
2977 if (wants) {
2978 r = unit_add_dependency(device, UNIT_WANTS, u, false);
2979 if (r < 0)
2980 return r;
2981 }
2982
2983 return 0;
2984 }
2985
2986 int unit_coldplug(Unit *u) {
2987 int r = 0, q;
2988 char **i;
2989
2990 assert(u);
2991
2992         /* Make sure we don't enter a loop when coldplugging
2993 * recursively. */
2994 if (u->coldplugged)
2995 return 0;
2996
2997 u->coldplugged = true;
2998
2999 STRV_FOREACH(i, u->deserialized_refs) {
3000 q = bus_unit_track_add_name(u, *i);
3001 if (q < 0 && r >= 0)
3002 r = q;
3003 }
3004 u->deserialized_refs = strv_free(u->deserialized_refs);
3005
3006 if (UNIT_VTABLE(u)->coldplug) {
3007 q = UNIT_VTABLE(u)->coldplug(u);
3008 if (q < 0 && r >= 0)
3009 r = q;
3010 }
3011
3012 if (u->job) {
3013 q = job_coldplug(u->job);
3014 if (q < 0 && r >= 0)
3015 r = q;
3016 }
3017
3018 return r;
3019 }
3020
3021 static bool fragment_mtime_newer(const char *path, usec_t mtime) {
3022 struct stat st;
3023
3024 if (!path)
3025 return false;
3026
3027 if (stat(path, &st) < 0)
3028 /* What, cannot access this anymore? */
3029 return true;
3030
3031 if (mtime > 0)
3032 /* For non-empty files check the mtime */
3033 return timespec_load(&st.st_mtim) > mtime;
3034 else if (!null_or_empty(&st))
3035 /* For masked files check if they are still so */
3036 return true;
3037
3038 return false;
3039 }
3040
3041 bool unit_need_daemon_reload(Unit *u) {
3042 _cleanup_strv_free_ char **t = NULL;
3043 char **path;
3044
3045 assert(u);
3046
3047 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime))
3048 return true;
3049
3050 if (fragment_mtime_newer(u->source_path, u->source_mtime))
3051 return true;
3052
3053 (void) unit_find_dropin_paths(u, &t);
3054 if (!strv_equal(u->dropin_paths, t))
3055 return true;
3056
3057 STRV_FOREACH(path, u->dropin_paths)
3058 if (fragment_mtime_newer(*path, u->dropin_mtime))
3059 return true;
3060
3061 return false;
3062 }
3063
3064 void unit_reset_failed(Unit *u) {
3065 assert(u);
3066
3067 if (UNIT_VTABLE(u)->reset_failed)
3068 UNIT_VTABLE(u)->reset_failed(u);
3069
3070 RATELIMIT_RESET(u->start_limit);
3071 u->start_limit_hit = false;
3072 }
3073
3074 Unit *unit_following(Unit *u) {
3075 assert(u);
3076
3077 if (UNIT_VTABLE(u)->following)
3078 return UNIT_VTABLE(u)->following(u);
3079
3080 return NULL;
3081 }
3082
3083 bool unit_stop_pending(Unit *u) {
3084 assert(u);
3085
3086         /* This call does not check the current state of the unit. It's
3087 * hence useful to be called from state change calls of the
3088 * unit itself, where the state isn't updated yet. This is
3089 * different from unit_inactive_or_pending() which checks both
3090 * the current state and for a queued job. */
3091
3092 return u->job && u->job->type == JOB_STOP;
3093 }
3094
3095 bool unit_inactive_or_pending(Unit *u) {
3096 assert(u);
3097
3098 /* Returns true if the unit is inactive or going down */
3099
3100 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3101 return true;
3102
3103 if (unit_stop_pending(u))
3104 return true;
3105
3106 return false;
3107 }
3108
3109 bool unit_active_or_pending(Unit *u) {
3110 assert(u);
3111
3112 /* Returns true if the unit is active or going up */
3113
3114 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3115 return true;
3116
3117 if (u->job &&
3118 (u->job->type == JOB_START ||
3119 u->job->type == JOB_RELOAD_OR_START ||
3120 u->job->type == JOB_RESTART))
3121 return true;
3122
3123 return false;
3124 }
3125
3126 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3127 assert(u);
3128 assert(w >= 0 && w < _KILL_WHO_MAX);
3129 assert(SIGNAL_VALID(signo));
3130
3131 if (!UNIT_VTABLE(u)->kill)
3132 return -EOPNOTSUPP;
3133
3134 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3135 }
3136
3137 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3138 Set *pid_set;
3139 int r;
3140
3141 pid_set = set_new(NULL);
3142 if (!pid_set)
3143 return NULL;
3144
3145 /* Exclude the main/control pids from being killed via the cgroup */
3146 if (main_pid > 0) {
3147 r = set_put(pid_set, PID_TO_PTR(main_pid));
3148 if (r < 0)
3149 goto fail;
3150 }
3151
3152 if (control_pid > 0) {
3153 r = set_put(pid_set, PID_TO_PTR(control_pid));
3154 if (r < 0)
3155 goto fail;
3156 }
3157
3158 return pid_set;
3159
3160 fail:
3161 set_free(pid_set);
3162 return NULL;
3163 }
3164
3165 int unit_kill_common(
3166 Unit *u,
3167 KillWho who,
3168 int signo,
3169 pid_t main_pid,
3170 pid_t control_pid,
3171 sd_bus_error *error) {
3172
3173 int r = 0;
3174 bool killed = false;
3175
3176 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3177 if (main_pid < 0)
3178 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3179 else if (main_pid == 0)
3180 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3181 }
3182
3183 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3184 if (control_pid < 0)
3185 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3186 else if (control_pid == 0)
3187 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3188 }
3189
3190 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3191 if (control_pid > 0) {
3192 if (kill(control_pid, signo) < 0)
3193 r = -errno;
3194 else
3195 killed = true;
3196 }
3197
3198 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3199 if (main_pid > 0) {
3200 if (kill(main_pid, signo) < 0)
3201 r = -errno;
3202 else
3203 killed = true;
3204 }
3205
3206 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3207 _cleanup_set_free_ Set *pid_set = NULL;
3208 int q;
3209
3210 /* Exclude the main/control pids from being killed via the cgroup */
3211 pid_set = unit_pid_set(main_pid, control_pid);
3212 if (!pid_set)
3213 return -ENOMEM;
3214
3215 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3216 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3217 r = q;
3218 else
3219 killed = true;
3220 }
3221
3222 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3223 return -ESRCH;
3224
3225 return r;
3226 }
3227
3228 int unit_following_set(Unit *u, Set **s) {
3229 assert(u);
3230 assert(s);
3231
3232 if (UNIT_VTABLE(u)->following_set)
3233 return UNIT_VTABLE(u)->following_set(u, s);
3234
3235 *s = NULL;
3236 return 0;
3237 }
3238
3239 UnitFileState unit_get_unit_file_state(Unit *u) {
3240 int r;
3241
3242 assert(u);
3243
3244 if (u->unit_file_state < 0 && u->fragment_path) {
3245 r = unit_file_get_state(
3246 u->manager->unit_file_scope,
3247 NULL,
3248 basename(u->fragment_path),
3249 &u->unit_file_state);
3250 if (r < 0)
3251 u->unit_file_state = UNIT_FILE_BAD;
3252 }
3253
3254 return u->unit_file_state;
3255 }
3256
3257 int unit_get_unit_file_preset(Unit *u) {
3258 assert(u);
3259
3260 if (u->unit_file_preset < 0 && u->fragment_path)
3261 u->unit_file_preset = unit_file_query_preset(
3262 u->manager->unit_file_scope,
3263 NULL,
3264 basename(u->fragment_path));
3265
3266 return u->unit_file_preset;
3267 }
3268
3269 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3270 assert(ref);
3271 assert(u);
3272
3273 if (ref->unit)
3274 unit_ref_unset(ref);
3275
3276 ref->unit = u;
3277 LIST_PREPEND(refs, u->refs, ref);
3278 return u;
3279 }
3280
3281 void unit_ref_unset(UnitRef *ref) {
3282 assert(ref);
3283
3284 if (!ref->unit)
3285 return;
3286
3287 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3288 * be unreferenced now. */
3289 unit_add_to_gc_queue(ref->unit);
3290
3291 LIST_REMOVE(refs, ref->unit->refs, ref);
3292 ref->unit = NULL;
3293 }
3294
3295 static int user_from_unit_name(Unit *u, char **ret) {
3296
3297 static const uint8_t hash_key[] = {
3298 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3299 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3300 };
3301
3302 _cleanup_free_ char *n = NULL;
3303 int r;
3304
3305 r = unit_name_to_prefix(u->id, &n);
3306 if (r < 0)
3307 return r;
3308
3309 if (valid_user_group_name(n)) {
3310 *ret = n;
3311 n = NULL;
3312 return 0;
3313 }
3314
3315 /* If we can't use the unit name as a user name, then let's hash it and use that */
3316 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3317 return -ENOMEM;
3318
3319 return 0;
3320 }
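
/* Example (names illustrative): for "foobar.service" this yields the user name
 * "foobar"; if the prefix is not usable as a user name, a hashed fallback such as
 * "_du89ab12cd34ef5678" is returned instead. */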
3321
3322 int unit_patch_contexts(Unit *u) {
3323 CGroupContext *cc;
3324 ExecContext *ec;
3325 unsigned i;
3326 int r;
3327
3328 assert(u);
3329
3330 /* Patch in the manager defaults into the exec and cgroup
3331 * contexts, _after_ the rest of the settings have been
3332 * initialized */
3333
3334 ec = unit_get_exec_context(u);
3335 if (ec) {
3336 /* This only copies in the ones that need memory */
3337 for (i = 0; i < _RLIMIT_MAX; i++)
3338 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3339 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3340 if (!ec->rlimit[i])
3341 return -ENOMEM;
3342 }
3343
3344 if (MANAGER_IS_USER(u->manager) &&
3345 !ec->working_directory) {
3346
3347 r = get_home_dir(&ec->working_directory);
3348 if (r < 0)
3349 return r;
3350
3351 /* Allow user services to run, even if the
3352 * home directory is missing */
3353 ec->working_directory_missing_ok = true;
3354 }
3355
3356 if (MANAGER_IS_USER(u->manager) &&
3357 (ec->syscall_whitelist ||
3358 !set_isempty(ec->syscall_filter) ||
3359 !set_isempty(ec->syscall_archs) ||
3360 ec->address_families_whitelist ||
3361 !set_isempty(ec->address_families)))
3362 ec->no_new_privileges = true;
3363
3364 if (ec->private_devices)
3365 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_MKNOD);
3366
3367 if (ec->dynamic_user) {
3368 if (!ec->user) {
3369 r = user_from_unit_name(u, &ec->user);
3370 if (r < 0)
3371 return r;
3372 }
3373
3374 if (!ec->group) {
3375 ec->group = strdup(ec->user);
3376 if (!ec->group)
3377 return -ENOMEM;
3378 }
3379
3380 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3381 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3382
3383 ec->private_tmp = true;
3384 ec->remove_ipc = true;
3385 ec->protect_system = PROTECT_SYSTEM_STRICT;
3386 if (ec->protect_home == PROTECT_HOME_NO)
3387 ec->protect_home = PROTECT_HOME_READ_ONLY;
3388 }
3389 }
3390
3391 cc = unit_get_cgroup_context(u);
3392 if (cc) {
3393
3394 if (ec &&
3395 ec->private_devices &&
3396 cc->device_policy == CGROUP_AUTO)
3397 cc->device_policy = CGROUP_CLOSED;
3398 }
3399
3400 return 0;
3401 }
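
/* Example: a service fragment that only sets
 *
 *     [Service]
 *     DynamicUser=yes
 *
 * hence also gets PrivateTmp=, RemoveIPC= and ProtectSystem=strict turned on, and
 * ProtectHome= is bumped from "no" to "read-only", so that the transient UID/GID
 * cannot leave files or IPC objects behind. */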
3402
3403 ExecContext *unit_get_exec_context(Unit *u) {
3404 size_t offset;
3405 assert(u);
3406
3407 if (u->type < 0)
3408 return NULL;
3409
3410 offset = UNIT_VTABLE(u)->exec_context_offset;
3411 if (offset <= 0)
3412 return NULL;
3413
3414 return (ExecContext*) ((uint8_t*) u + offset);
3415 }
3416
3417 KillContext *unit_get_kill_context(Unit *u) {
3418 size_t offset;
3419 assert(u);
3420
3421 if (u->type < 0)
3422 return NULL;
3423
3424 offset = UNIT_VTABLE(u)->kill_context_offset;
3425 if (offset <= 0)
3426 return NULL;
3427
3428 return (KillContext*) ((uint8_t*) u + offset);
3429 }
3430
3431 CGroupContext *unit_get_cgroup_context(Unit *u) {
3432 size_t offset;
3433
3434 if (u->type < 0)
3435 return NULL;
3436
3437 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3438 if (offset <= 0)
3439 return NULL;
3440
3441 return (CGroupContext*) ((uint8_t*) u + offset);
3442 }
3443
3444 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3445 size_t offset;
3446
3447 if (u->type < 0)
3448 return NULL;
3449
3450 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3451 if (offset <= 0)
3452 return NULL;
3453
3454 return *(ExecRuntime**) ((uint8_t*) u + offset);
3455 }
3456
3457 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3458 assert(u);
3459
3460 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3461 return NULL;
3462
3463 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3464 return u->manager->lookup_paths.transient;
3465
3466 if (mode == UNIT_RUNTIME)
3467 return u->manager->lookup_paths.runtime_control;
3468
3469 if (mode == UNIT_PERSISTENT)
3470 return u->manager->lookup_paths.persistent_control;
3471
3472 return NULL;
3473 }
3474
3475 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3476 _cleanup_free_ char *p = NULL, *q = NULL;
3477 const char *dir, *wrapped;
3478 int r;
3479
3480 assert(u);
3481
3482 if (u->transient_file) {
3483 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3484 * write to the transient unit file. */
3485 fputs(data, u->transient_file);
3486 fputc('\n', u->transient_file);
3487 return 0;
3488 }
3489
3490 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3491 return 0;
3492
3493 dir = unit_drop_in_dir(u, mode);
3494 if (!dir)
3495 return -EINVAL;
3496
3497 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3498 "# or an equivalent operation. Do not edit.\n",
3499 data,
3500 "\n");
3501
3502 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3503 if (r < 0)
3504 return r;
3505
3506 (void) mkdir_p(p, 0755);
3507 r = write_string_file_atomic_label(q, wrapped);
3508 if (r < 0)
3509 return r;
3510
3511 r = strv_push(&u->dropin_paths, q);
3512 if (r < 0)
3513 return r;
3514 q = NULL;
3515
3516 strv_uniq(u->dropin_paths);
3517
3518 u->dropin_mtime = now(CLOCK_REALTIME);
3519
3520 return 0;
3521 }
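
/* Illustrative result (the directory depends on the manager's lookup paths): for a
 * system unit "foo.service", mode UNIT_RUNTIME and name "memory-limit" this writes a
 * drop-in along the lines of
 *
 *     <runtime control dir>/foo.service.d/50-memory-limit.conf
 *
 * prefixed with the "Do not edit" header above, and records the new path in
 * u->dropin_paths so that unit_need_daemon_reload() takes it into account. */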
3522
3523 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3524 _cleanup_free_ char *p = NULL;
3525 va_list ap;
3526 int r;
3527
3528 assert(u);
3529 assert(name);
3530 assert(format);
3531
3532 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3533 return 0;
3534
3535 va_start(ap, format);
3536 r = vasprintf(&p, format, ap);
3537 va_end(ap);
3538
3539 if (r < 0)
3540 return -ENOMEM;
3541
3542 return unit_write_drop_in(u, mode, name, p);
3543 }
3544
3545 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3546 const char *ndata;
3547
3548 assert(u);
3549 assert(name);
3550 assert(data);
3551
3552 if (!UNIT_VTABLE(u)->private_section)
3553 return -EINVAL;
3554
3555 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3556 return 0;
3557
3558 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3559
3560 return unit_write_drop_in(u, mode, name, ndata);
3561 }
3562
3563 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3564 _cleanup_free_ char *p = NULL;
3565 va_list ap;
3566 int r;
3567
3568 assert(u);
3569 assert(name);
3570 assert(format);
3571
3572 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3573 return 0;
3574
3575 va_start(ap, format);
3576 r = vasprintf(&p, format, ap);
3577 va_end(ap);
3578
3579 if (r < 0)
3580 return -ENOMEM;
3581
3582 return unit_write_drop_in_private(u, mode, name, p);
3583 }
3584
3585 int unit_make_transient(Unit *u) {
3586 FILE *f;
3587 char *path;
3588
3589 assert(u);
3590
3591 if (!UNIT_VTABLE(u)->can_transient)
3592 return -EOPNOTSUPP;
3593
3594 path = strjoin(u->manager->lookup_paths.transient, "/", u->id, NULL);
3595 if (!path)
3596 return -ENOMEM;
3597
3598 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3599 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3600
3601 RUN_WITH_UMASK(0022) {
3602 f = fopen(path, "we");
3603 if (!f) {
3604 free(path);
3605 return -errno;
3606 }
3607 }
3608
3609 if (u->transient_file)
3610 fclose(u->transient_file);
3611 u->transient_file = f;
3612
3613 free(u->fragment_path);
3614 u->fragment_path = path;
3615
3616 u->source_path = mfree(u->source_path);
3617 u->dropin_paths = strv_free(u->dropin_paths);
3618 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3619
3620 u->load_state = UNIT_STUB;
3621 u->load_error = 0;
3622 u->transient = true;
3623
3624 unit_add_to_dbus_queue(u);
3625 unit_add_to_gc_queue(u);
3626
3627 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3628 u->transient_file);
3629
3630 return 0;
3631 }
3632
3633 static void log_kill(pid_t pid, int sig, void *userdata) {
3634 _cleanup_free_ char *comm = NULL;
3635
3636 (void) get_process_comm(pid, &comm);
3637
3638 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3639 only, like for example systemd's own PAM stub process. */
3640 if (comm && comm[0] == '(')
3641 return;
3642
3643 log_unit_notice(userdata,
3644 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3645 pid,
3646 strna(comm),
3647 signal_to_string(sig));
3648 }
3649
3650 static int operation_to_signal(KillContext *c, KillOperation k) {
3651 assert(c);
3652
3653 switch (k) {
3654
3655 case KILL_TERMINATE:
3656 case KILL_TERMINATE_AND_LOG:
3657 return c->kill_signal;
3658
3659 case KILL_KILL:
3660 return SIGKILL;
3661
3662 case KILL_ABORT:
3663 return SIGABRT;
3664
3665 default:
3666 assert_not_reached("KillOperation unknown");
3667 }
3668 }
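
/* I.e. a regular stop uses the configured KillSignal= (SIGTERM by default), while
 * the forced KILL_KILL and KILL_ABORT operations always use SIGKILL resp. SIGABRT. */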
3669
3670 int unit_kill_context(
3671 Unit *u,
3672 KillContext *c,
3673 KillOperation k,
3674 pid_t main_pid,
3675 pid_t control_pid,
3676 bool main_pid_alien) {
3677
3678 bool wait_for_exit = false, send_sighup;
3679 cg_kill_log_func_t log_func;
3680 int sig, r;
3681
3682 assert(u);
3683 assert(c);
3684
3685 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0 if we
3686 * killed something worth waiting for, 0 otherwise. */
3687
3688 if (c->kill_mode == KILL_NONE)
3689 return 0;
3690
3691 sig = operation_to_signal(c, k);
3692
3693 send_sighup =
3694 c->send_sighup &&
3695 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3696 sig != SIGHUP;
3697
3698 log_func =
3699 k != KILL_TERMINATE ||
3700 IN_SET(sig, SIGKILL, SIGABRT) ? log_kill : NULL;
3701
3702 if (main_pid > 0) {
3703 if (log_func)
3704 log_func(main_pid, sig, u);
3705
3706 r = kill_and_sigcont(main_pid, sig);
3707 if (r < 0 && r != -ESRCH) {
3708 _cleanup_free_ char *comm = NULL;
3709 (void) get_process_comm(main_pid, &comm);
3710
3711 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3712 } else {
3713 if (!main_pid_alien)
3714 wait_for_exit = true;
3715
3716 if (r != -ESRCH && send_sighup)
3717 (void) kill(main_pid, SIGHUP);
3718 }
3719 }
3720
3721 if (control_pid > 0) {
3722 if (log_func)
3723 log_func(control_pid, sig, u);
3724
3725 r = kill_and_sigcont(control_pid, sig);
3726 if (r < 0 && r != -ESRCH) {
3727 _cleanup_free_ char *comm = NULL;
3728 (void) get_process_comm(control_pid, &comm);
3729
3730 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3731 } else {
3732 wait_for_exit = true;
3733
3734 if (r != -ESRCH && send_sighup)
3735 (void) kill(control_pid, SIGHUP);
3736 }
3737 }
3738
3739 if (u->cgroup_path &&
3740 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3741 _cleanup_set_free_ Set *pid_set = NULL;
3742
3743 /* Exclude the main/control pids from being killed via the cgroup */
3744 pid_set = unit_pid_set(main_pid, control_pid);
3745 if (!pid_set)
3746 return -ENOMEM;
3747
3748 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3749 sig,
3750 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3751 pid_set,
3752 log_func, u);
3753 if (r < 0) {
3754 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3755 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3756
3757 } else if (r > 0) {
3758
3759 /* FIXME: For now, on the legacy hierarchy, we
3760 * will not wait for the cgroup members to die
3761 * if we are running in a container or if this
3762 * is a delegation unit, simply because cgroup
3763 * notification is unreliable in these
3764 * cases. It doesn't work at all in
3765 * containers, and outside of containers it
3766 * can be confused easily by left-over
3767 * directories in the cgroup — which however
3768 * should not exist in non-delegated units. On
3769 * the unified hierarchy that's different,
3770 * there we get proper events. Hence rely on
3771              * them. */
3772
3773 if (cg_unified(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3774 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3775 wait_for_exit = true;
3776
3777 if (send_sighup) {
3778 set_free(pid_set);
3779
3780 pid_set = unit_pid_set(main_pid, control_pid);
3781 if (!pid_set)
3782 return -ENOMEM;
3783
3784 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3785 SIGHUP,
3786 CGROUP_IGNORE_SELF,
3787 pid_set,
3788 NULL, NULL);
3789 }
3790 }
3791 }
3792
3793 return wait_for_exit;
3794 }
3795
3796 int unit_require_mounts_for(Unit *u, const char *path) {
3797 char prefix[strlen(path) + 1], *p;
3798 int r;
3799
3800 assert(u);
3801 assert(path);
3802
3803 /* Registers a unit for requiring a certain path and all its
3804 * prefixes. We keep a simple array of these paths in the
3805          * unit, since it's usually short. However, we build a prefix
3806          * table for all possible prefixes so that newly appearing mount
3807 * units can easily determine which units to make themselves a
3808 * dependency of. */
3809
3810 if (!path_is_absolute(path))
3811 return -EINVAL;
3812
3813 p = strdup(path);
3814 if (!p)
3815 return -ENOMEM;
3816
3817 path_kill_slashes(p);
3818
3819 if (!path_is_safe(p)) {
3820 free(p);
3821 return -EPERM;
3822 }
3823
3824 if (strv_contains(u->requires_mounts_for, p)) {
3825 free(p);
3826 return 0;
3827 }
3828
3829 r = strv_consume(&u->requires_mounts_for, p);
3830 if (r < 0)
3831 return r;
3832
3833 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3834 Set *x;
3835
3836 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3837 if (!x) {
3838 char *q;
3839
3840 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3841 if (r < 0)
3842 return r;
3843
3844 q = strdup(prefix);
3845 if (!q)
3846 return -ENOMEM;
3847
3848 x = set_new(NULL);
3849 if (!x) {
3850 free(q);
3851 return -ENOMEM;
3852 }
3853
3854 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3855 if (r < 0) {
3856 free(q);
3857 set_free(x);
3858 return r;
3859 }
3860 }
3861
3862 r = set_put(x, u);
3863 if (r < 0)
3864 return r;
3865 }
3866
3867 return 0;
3868 }
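
/* Example: unit_require_mounts_for(u, "/var/lib/foo") appends "/var/lib/foo" to
 * u->requires_mounts_for and adds u to the per-prefix sets for the path itself and
 * each of its parents in u->manager->units_requiring_mounts_for, so that a mount
 * unit showing up for any of these paths can find u and order itself accordingly. */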
3869
3870 int unit_setup_exec_runtime(Unit *u) {
3871 ExecRuntime **rt;
3872 size_t offset;
3873 Iterator i;
3874 Unit *other;
3875
3876 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3877 assert(offset > 0);
3878
3879         /* Check whether there already is an ExecRuntime for this unit. */
3880 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3881 if (*rt)
3882 return 0;
3883
3884 /* Try to get it from somebody else */
3885 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3886
3887 *rt = unit_get_exec_runtime(other);
3888 if (*rt) {
3889 exec_runtime_ref(*rt);
3890 return 0;
3891 }
3892 }
3893
3894 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
3895 }
3896
3897 int unit_setup_dynamic_creds(Unit *u) {
3898 ExecContext *ec;
3899 DynamicCreds *dcreds;
3900 size_t offset;
3901
3902 assert(u);
3903
3904 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
3905 assert(offset > 0);
3906 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
3907
3908 ec = unit_get_exec_context(u);
3909 assert(ec);
3910
3911 if (!ec->dynamic_user)
3912 return 0;
3913
3914 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
3915 }
3916
3917 bool unit_type_supported(UnitType t) {
3918 if (_unlikely_(t < 0))
3919 return false;
3920 if (_unlikely_(t >= _UNIT_TYPE_MAX))
3921 return false;
3922
3923 if (!unit_vtable[t]->supported)
3924 return true;
3925
3926 return unit_vtable[t]->supported();
3927 }
3928
3929 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
3930 int r;
3931
3932 assert(u);
3933 assert(where);
3934
3935 r = dir_is_empty(where);
3936 if (r > 0)
3937 return;
3938 if (r < 0) {
3939 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
3940 return;
3941 }
3942
3943 log_struct(LOG_NOTICE,
3944 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3945 LOG_UNIT_ID(u),
3946 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
3947 "WHERE=%s", where,
3948 NULL);
3949 }
3950
3951 int unit_fail_if_symlink(Unit *u, const char* where) {
3952 int r;
3953
3954 assert(u);
3955 assert(where);
3956
3957 r = is_symlink(where);
3958 if (r < 0) {
3959 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
3960 return 0;
3961 }
3962 if (r == 0)
3963 return 0;
3964
3965 log_struct(LOG_ERR,
3966 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
3967 LOG_UNIT_ID(u),
3968 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
3969 "WHERE=%s", where,
3970 NULL);
3971
3972 return -ELOOP;
3973 }
3974
3975 bool unit_is_pristine(Unit *u) {
3976 assert(u);
3977
3978 /* Check if the unit already exists or is already around,
3979 * in a number of different ways. Note that to cater for unit
3980 * types such as slice, we are generally fine with units that
3981 * are marked UNIT_LOADED even though nothing was
3982 * actually loaded, as those unit types don't require a file
3983 * on disk to validly load. */
3984
3985 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
3986 u->fragment_path ||
3987 u->source_path ||
3988 !strv_isempty(u->dropin_paths) ||
3989 u->job ||
3990 u->merged_into);
3991 }
3992
3993 pid_t unit_control_pid(Unit *u) {
3994 assert(u);
3995
3996 if (UNIT_VTABLE(u)->control_pid)
3997 return UNIT_VTABLE(u)->control_pid(u);
3998
3999 return 0;
4000 }
4001
4002 pid_t unit_main_pid(Unit *u) {
4003 assert(u);
4004
4005 if (UNIT_VTABLE(u)->main_pid)
4006 return UNIT_VTABLE(u)->main_pid(u);
4007
4008 return 0;
4009 }
4010
4011 static void unit_unref_uid_internal(
4012 Unit *u,
4013 uid_t *ref_uid,
4014 bool destroy_now,
4015 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4016
4017 assert(u);
4018 assert(ref_uid);
4019 assert(_manager_unref_uid);
4020
4021 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4022          * gid_t are actually the same type, with the same validity rules.
4023 *
4024 * Drops a reference to UID/GID from a unit. */
4025
4026 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4027 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4028
4029 if (!uid_is_valid(*ref_uid))
4030 return;
4031
4032 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4033 *ref_uid = UID_INVALID;
4034 }
4035
4036 void unit_unref_uid(Unit *u, bool destroy_now) {
4037 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4038 }
4039
4040 void unit_unref_gid(Unit *u, bool destroy_now) {
4041 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4042 }
4043
4044 static int unit_ref_uid_internal(
4045 Unit *u,
4046 uid_t *ref_uid,
4047 uid_t uid,
4048 bool clean_ipc,
4049 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4050
4051 int r;
4052
4053 assert(u);
4054 assert(ref_uid);
4055 assert(uid_is_valid(uid));
4056 assert(_manager_ref_uid);
4057
4058         /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4059 * are actually the same type, and have the same validity rules.
4060 *
4061 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4062 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4063 * drops to zero. */
4064
4065 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4066 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4067
4068 if (*ref_uid == uid)
4069 return 0;
4070
4071 if (uid_is_valid(*ref_uid)) /* Already set? */
4072 return -EBUSY;
4073
4074 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4075 if (r < 0)
4076 return r;
4077
4078 *ref_uid = uid;
4079 return 1;
4080 }
4081
4082 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4083 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4084 }
4085
4086 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4087 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4088 }
4089
4090 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4091 int r = 0, q = 0;
4092
4093 assert(u);
4094
4095 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4096
4097 if (uid_is_valid(uid)) {
4098 r = unit_ref_uid(u, uid, clean_ipc);
4099 if (r < 0)
4100 return r;
4101 }
4102
4103 if (gid_is_valid(gid)) {
4104 q = unit_ref_gid(u, gid, clean_ipc);
4105 if (q < 0) {
4106 if (r > 0)
4107 unit_unref_uid(u, false);
4108
4109 return q;
4110 }
4111 }
4112
4113 return r > 0 || q > 0;
4114 }
4115
4116 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4117 ExecContext *c;
4118 int r;
4119
4120 assert(u);
4121
4122 c = unit_get_exec_context(u);
4123
4124 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4125 if (r < 0)
4126 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4127
4128 return r;
4129 }
4130
4131 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4132 assert(u);
4133
4134 unit_unref_uid(u, destroy_now);
4135 unit_unref_gid(u, destroy_now);
4136 }
4137
4138 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4139 int r;
4140
4141 assert(u);
4142
4143         /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group name
4144 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4145 * objects when no service references the UID/GID anymore. */
4146
4147 r = unit_ref_uid_gid(u, uid, gid);
4148 if (r > 0)
4149 bus_unit_send_change_signal(u);
4150 }