src/core/unit.c
1 /***
2 This file is part of systemd.
3
4 Copyright 2010 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <sys/stat.h>
24 #include <unistd.h>
25
26 #include "sd-id128.h"
27 #include "sd-messages.h"
28
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
31 #include "bus-util.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
34 #include "dbus.h"
35 #include "dropin.h"
36 #include "escape.h"
37 #include "execute.h"
38 #include "fileio-label.h"
39 #include "format-util.h"
40 #include "id128-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
43 #include "log.h"
44 #include "macro.h"
45 #include "missing.h"
46 #include "mkdir.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
50 #include "set.h"
51 #include "signal-util.h"
52 #include "special.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-util.h"
56 #include "strv.h"
57 #include "umask-util.h"
58 #include "unit-name.h"
59 #include "unit.h"
60 #include "user-util.h"
61 #include "virt.h"
62
63 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
64 [UNIT_SERVICE] = &service_vtable,
65 [UNIT_SOCKET] = &socket_vtable,
66 [UNIT_BUSNAME] = &busname_vtable,
67 [UNIT_TARGET] = &target_vtable,
68 [UNIT_DEVICE] = &device_vtable,
69 [UNIT_MOUNT] = &mount_vtable,
70 [UNIT_AUTOMOUNT] = &automount_vtable,
71 [UNIT_SWAP] = &swap_vtable,
72 [UNIT_TIMER] = &timer_vtable,
73 [UNIT_PATH] = &path_vtable,
74 [UNIT_SLICE] = &slice_vtable,
75 [UNIT_SCOPE] = &scope_vtable
76 };
77
78 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
79
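/* Allocate a new Unit object of at least sizeof(Unit) bytes and set the
 * generic fields to their defaults (no type yet, default dependencies on,
 * infinite job timeout, start rate limit taken from the manager defaults). */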
80 Unit *unit_new(Manager *m, size_t size) {
81 Unit *u;
82
83 assert(m);
84 assert(size >= sizeof(Unit));
85
86 u = malloc0(size);
87 if (!u)
88 return NULL;
89
90 u->names = set_new(&string_hash_ops);
91 if (!u->names)
92 return mfree(u);
93
94 u->manager = m;
95 u->type = _UNIT_TYPE_INVALID;
96 u->default_dependencies = true;
97 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
98 u->unit_file_preset = -1;
99 u->on_failure_job_mode = JOB_REPLACE;
100 u->cgroup_inotify_wd = -1;
101 u->job_timeout = USEC_INFINITY;
102 u->ref_uid = UID_INVALID;
103 u->ref_gid = GID_INVALID;
104 u->cpu_usage_last = NSEC_INFINITY;
105
106 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
107 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
108
109 return u;
110 }
111
112 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
113 Unit *u;
114 int r;
115
116 u = unit_new(m, size);
117 if (!u)
118 return -ENOMEM;
119
120 r = unit_add_name(u, name);
121 if (r < 0) {
122 unit_free(u);
123 return r;
124 }
125
126 *ret = u;
127 return r;
128 }
129
130 bool unit_has_name(Unit *u, const char *name) {
131 assert(u);
132 assert(name);
133
134 return set_contains(u->names, (char*) name);
135 }
136
137 static void unit_init(Unit *u) {
138 CGroupContext *cc;
139 ExecContext *ec;
140 KillContext *kc;
141
142 assert(u);
143 assert(u->manager);
144 assert(u->type >= 0);
145
146 cc = unit_get_cgroup_context(u);
147 if (cc) {
148 cgroup_context_init(cc);
149
150                 /* Copy the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
153
154 cc->cpu_accounting = u->manager->default_cpu_accounting;
155 cc->io_accounting = u->manager->default_io_accounting;
156 cc->blockio_accounting = u->manager->default_blockio_accounting;
157 cc->memory_accounting = u->manager->default_memory_accounting;
158 cc->tasks_accounting = u->manager->default_tasks_accounting;
159
160 if (u->type != UNIT_SLICE)
161 cc->tasks_max = u->manager->default_tasks_max;
162 }
163
164 ec = unit_get_exec_context(u);
165 if (ec)
166 exec_context_init(ec);
167
168 kc = unit_get_kill_context(u);
169 if (kc)
170 kill_context_init(kc);
171
172 if (UNIT_VTABLE(u)->init)
173 UNIT_VTABLE(u)->init(u);
174 }
175
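/* Add another name to this unit: template names are first resolved against
 * the unit's instance, then the result is validated and registered in the
 * manager's unit table. The first name added also fixes the unit's type,
 * id and instance and runs unit_init(). */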
176 int unit_add_name(Unit *u, const char *text) {
177 _cleanup_free_ char *s = NULL, *i = NULL;
178 UnitType t;
179 int r;
180
181 assert(u);
182 assert(text);
183
184 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
185
186 if (!u->instance)
187 return -EINVAL;
188
189 r = unit_name_replace_instance(text, u->instance, &s);
190 if (r < 0)
191 return r;
192 } else {
193 s = strdup(text);
194 if (!s)
195 return -ENOMEM;
196 }
197
198 if (set_contains(u->names, s))
199 return 0;
200 if (hashmap_contains(u->manager->units, s))
201 return -EEXIST;
202
203 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
204 return -EINVAL;
205
206 t = unit_name_to_type(s);
207 if (t < 0)
208 return -EINVAL;
209
210 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
211 return -EINVAL;
212
213 r = unit_name_to_instance(s, &i);
214 if (r < 0)
215 return r;
216
217 if (i && !unit_type_may_template(t))
218 return -EINVAL;
219
220 /* Ensure that this unit is either instanced or not instanced,
221 * but not both. Note that we do allow names with different
222          * instance names, however! */
223 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
224 return -EINVAL;
225
226 if (!unit_type_may_alias(t) && !set_isempty(u->names))
227 return -EEXIST;
228
229 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
230 return -E2BIG;
231
232 r = set_put(u->names, s);
233 if (r < 0)
234 return r;
235 assert(r > 0);
236
237 r = hashmap_put(u->manager->units, s, u);
238 if (r < 0) {
239 (void) set_remove(u->names, s);
240 return r;
241 }
242
243 if (u->type == _UNIT_TYPE_INVALID) {
244 u->type = t;
245 u->id = s;
246 u->instance = i;
247
248 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
249
250 unit_init(u);
251
252 i = NULL;
253 }
254
255 s = NULL;
256
257 unit_add_to_dbus_queue(u);
258 return 0;
259 }
260
261 int unit_choose_id(Unit *u, const char *name) {
262 _cleanup_free_ char *t = NULL;
263 char *s, *i;
264 int r;
265
266 assert(u);
267 assert(name);
268
269 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
270
271 if (!u->instance)
272 return -EINVAL;
273
274 r = unit_name_replace_instance(name, u->instance, &t);
275 if (r < 0)
276 return r;
277
278 name = t;
279 }
280
281 /* Selects one of the names of this unit as the id */
282 s = set_get(u->names, (char*) name);
283 if (!s)
284 return -ENOENT;
285
286 /* Determine the new instance from the new id */
287 r = unit_name_to_instance(s, &i);
288 if (r < 0)
289 return r;
290
291 u->id = s;
292
293 free(u->instance);
294 u->instance = i;
295
296 unit_add_to_dbus_queue(u);
297
298 return 0;
299 }
300
301 int unit_set_description(Unit *u, const char *description) {
302 char *s;
303
304 assert(u);
305
306 if (isempty(description))
307 s = NULL;
308 else {
309 s = strdup(description);
310 if (!s)
311 return -ENOMEM;
312 }
313
314 free(u->description);
315 u->description = s;
316
317 unit_add_to_dbus_queue(u);
318 return 0;
319 }
320
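/* Returns true if the unit must be kept around (it has a job queued, is not
 * inactive, is perpetual, is still referenced, or its type says so), false
 * if it may be garbage-collected. */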
321 bool unit_check_gc(Unit *u) {
322 UnitActiveState state;
323 bool inactive;
324 assert(u);
325
326 if (u->job)
327 return true;
328
329 if (u->nop_job)
330 return true;
331
332 state = unit_active_state(u);
333 inactive = state == UNIT_INACTIVE;
334
335         /* If the unit is inactive or failed and no job is queued for
336 * it, then release its runtime resources */
337 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
338 UNIT_VTABLE(u)->release_resources)
339 UNIT_VTABLE(u)->release_resources(u, inactive);
340
341 /* But we keep the unit object around for longer when it is
342 * referenced or configured to not be gc'ed */
343 if (!inactive)
344 return true;
345
346 if (u->perpetual)
347 return true;
348
349 if (u->refs)
350 return true;
351
352 if (sd_bus_track_count(u->bus_track) > 0)
353 return true;
354
355 if (UNIT_VTABLE(u)->check_gc)
356 if (UNIT_VTABLE(u)->check_gc(u))
357 return true;
358
359 return false;
360 }
361
362 void unit_add_to_load_queue(Unit *u) {
363 assert(u);
364 assert(u->type != _UNIT_TYPE_INVALID);
365
366 if (u->load_state != UNIT_STUB || u->in_load_queue)
367 return;
368
369 LIST_PREPEND(load_queue, u->manager->load_queue, u);
370 u->in_load_queue = true;
371 }
372
373 void unit_add_to_cleanup_queue(Unit *u) {
374 assert(u);
375
376 if (u->in_cleanup_queue)
377 return;
378
379 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
380 u->in_cleanup_queue = true;
381 }
382
383 void unit_add_to_gc_queue(Unit *u) {
384 assert(u);
385
386 if (u->in_gc_queue || u->in_cleanup_queue)
387 return;
388
389 if (unit_check_gc(u))
390 return;
391
392 LIST_PREPEND(gc_queue, u->manager->gc_queue, u);
393 u->in_gc_queue = true;
394
395 u->manager->n_in_gc_queue++;
396 }
397
398 void unit_add_to_dbus_queue(Unit *u) {
399 assert(u);
400 assert(u->type != _UNIT_TYPE_INVALID);
401
402 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
403 return;
404
405 /* Shortcut things if nobody cares */
406 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
407 set_isempty(u->manager->private_buses)) {
408 u->sent_dbus_new_signal = true;
409 return;
410 }
411
412 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
413 u->in_dbus_queue = true;
414 }
415
416 static void bidi_set_free(Unit *u, Set *s) {
417 Iterator i;
418 Unit *other;
419
420 assert(u);
421
422 /* Frees the set and makes sure we are dropped from the
423 * inverse pointers */
424
425 SET_FOREACH(other, s, i) {
426 UnitDependency d;
427
428 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
429 set_remove(other->dependencies[d], u);
430
431 unit_add_to_gc_queue(other);
432 }
433
434 set_free(s);
435 }
436
437 static void unit_remove_transient(Unit *u) {
438 char **i;
439
440 assert(u);
441
442 if (!u->transient)
443 return;
444
445 if (u->fragment_path)
446 (void) unlink(u->fragment_path);
447
448 STRV_FOREACH(i, u->dropin_paths) {
449 _cleanup_free_ char *p = NULL, *pp = NULL;
450
451 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
452 if (!p)
453 continue;
454
455 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
456 if (!pp)
457 continue;
458
459 /* Only drop transient drop-ins */
460 if (!path_equal(u->manager->lookup_paths.transient, pp))
461 continue;
462
463 (void) unlink(*i);
464 (void) rmdir(p);
465 }
466 }
467
468 static void unit_free_requires_mounts_for(Unit *u) {
469 char **j;
470
471 STRV_FOREACH(j, u->requires_mounts_for) {
472 char s[strlen(*j) + 1];
473
474 PATH_FOREACH_PREFIX_MORE(s, *j) {
475 char *y;
476 Set *x;
477
478 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
479 if (!x)
480 continue;
481
482 set_remove(x, u);
483
484 if (set_isempty(x)) {
485 hashmap_remove(u->manager->units_requiring_mounts_for, y);
486 free(y);
487 set_free(x);
488 }
489 }
490 }
491
492 u->requires_mounts_for = strv_free(u->requires_mounts_for);
493 }
494
495 static void unit_done(Unit *u) {
496 ExecContext *ec;
497 CGroupContext *cc;
498
499 assert(u);
500
501 if (u->type < 0)
502 return;
503
504 if (UNIT_VTABLE(u)->done)
505 UNIT_VTABLE(u)->done(u);
506
507 ec = unit_get_exec_context(u);
508 if (ec)
509 exec_context_done(ec);
510
511 cc = unit_get_cgroup_context(u);
512 if (cc)
513 cgroup_context_done(cc);
514 }
515
516 void unit_free(Unit *u) {
517 UnitDependency d;
518 Iterator i;
519 char *t;
520
521 assert(u);
522
523 if (u->transient_file)
524 fclose(u->transient_file);
525
526 if (!MANAGER_IS_RELOADING(u->manager))
527 unit_remove_transient(u);
528
529 bus_unit_send_removed_signal(u);
530
531 unit_done(u);
532
533 sd_bus_slot_unref(u->match_bus_slot);
534
535 sd_bus_track_unref(u->bus_track);
536 u->deserialized_refs = strv_free(u->deserialized_refs);
537
538 unit_free_requires_mounts_for(u);
539
540 SET_FOREACH(t, u->names, i)
541 hashmap_remove_value(u->manager->units, t, u);
542
543 if (!sd_id128_is_null(u->invocation_id))
544 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
545
546 if (u->job) {
547 Job *j = u->job;
548 job_uninstall(j);
549 job_free(j);
550 }
551
552 if (u->nop_job) {
553 Job *j = u->nop_job;
554 job_uninstall(j);
555 job_free(j);
556 }
557
558 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
559 bidi_set_free(u, u->dependencies[d]);
560
561 if (u->type != _UNIT_TYPE_INVALID)
562 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
563
564 if (u->in_load_queue)
565 LIST_REMOVE(load_queue, u->manager->load_queue, u);
566
567 if (u->in_dbus_queue)
568 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
569
570 if (u->in_cleanup_queue)
571 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
572
573 if (u->in_gc_queue) {
574 LIST_REMOVE(gc_queue, u->manager->gc_queue, u);
575 u->manager->n_in_gc_queue--;
576 }
577
578 if (u->in_cgroup_queue)
579 LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
580
581 unit_release_cgroup(u);
582
583 unit_unref_uid_gid(u, false);
584
585 (void) manager_update_failed_units(u->manager, u, false);
586 set_remove(u->manager->startup_units, u);
587
588 free(u->description);
589 strv_free(u->documentation);
590 free(u->fragment_path);
591 free(u->source_path);
592 strv_free(u->dropin_paths);
593 free(u->instance);
594
595 free(u->job_timeout_reboot_arg);
596
597 set_free_free(u->names);
598
599 unit_unwatch_all_pids(u);
600
601 condition_free_list(u->conditions);
602 condition_free_list(u->asserts);
603
604 free(u->reboot_arg);
605
606 unit_ref_unset(&u->slice);
607
608 while (u->refs)
609 unit_ref_unset(u->refs);
610
611 free(u);
612 }
613
614 UnitActiveState unit_active_state(Unit *u) {
615 assert(u);
616
617 if (u->load_state == UNIT_MERGED)
618 return unit_active_state(unit_follow_merge(u));
619
620 /* After a reload it might happen that a unit is not correctly
621 * loaded but still has a process around. That's why we won't
622 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
623
624 return UNIT_VTABLE(u)->active_state(u);
625 }
626
627 const char* unit_sub_state_to_string(Unit *u) {
628 assert(u);
629
630 return UNIT_VTABLE(u)->sub_state_to_string(u);
631 }
632
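/* Move all entries of *other into *s; if *s is not allocated yet, simply
 * take over the other set wholesale. */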
633 static int complete_move(Set **s, Set **other) {
634 int r;
635
636 assert(s);
637 assert(other);
638
639 if (!*other)
640 return 0;
641
642 if (*s) {
643 r = set_move(*s, *other);
644 if (r < 0)
645 return r;
646 } else {
647 *s = *other;
648 *other = NULL;
649 }
650
651 return 0;
652 }
653
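/* Transfer all of other's names to u and point the manager's unit table
 * entries for those names at u. */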
654 static int merge_names(Unit *u, Unit *other) {
655 char *t;
656 Iterator i;
657 int r;
658
659 assert(u);
660 assert(other);
661
662 r = complete_move(&u->names, &other->names);
663 if (r < 0)
664 return r;
665
666 set_free_free(other->names);
667 other->names = NULL;
668 other->id = NULL;
669
670 SET_FOREACH(t, u->names, i)
671 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
672
673 return 0;
674 }
675
676 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
677 unsigned n_reserve;
678
679 assert(u);
680 assert(other);
681 assert(d < _UNIT_DEPENDENCY_MAX);
682
683 /*
684 * If u does not have this dependency set allocated, there is no need
685 * to reserve anything. In that case other's set will be transferred
686 * as a whole to u by complete_move().
687 */
688 if (!u->dependencies[d])
689 return 0;
690
691 /* merge_dependencies() will skip a u-on-u dependency */
692 n_reserve = set_size(other->dependencies[d]) - !!set_get(other->dependencies[d], u);
693
694 return set_reserve(u->dependencies[d], n_reserve);
695 }
696
697 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
698 Iterator i;
699 Unit *back;
700 int r;
701
702 assert(u);
703 assert(other);
704 assert(d < _UNIT_DEPENDENCY_MAX);
705
706 /* Fix backwards pointers */
707 SET_FOREACH(back, other->dependencies[d], i) {
708 UnitDependency k;
709
710 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
711 /* Do not add dependencies between u and itself */
712 if (back == u) {
713 if (set_remove(back->dependencies[k], other))
714 maybe_warn_about_dependency(u, other_id, k);
715 } else {
716 r = set_remove_and_put(back->dependencies[k], other, u);
717 if (r == -EEXIST)
718 set_remove(back->dependencies[k], other);
719 else
720 assert(r >= 0 || r == -ENOENT);
721 }
722 }
723 }
724
725 /* Also do not move dependencies on u to itself */
726 back = set_remove(other->dependencies[d], u);
727 if (back)
728 maybe_warn_about_dependency(u, other_id, d);
729
730 /* The move cannot fail. The caller must have performed a reservation. */
731 assert_se(complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
732
733 other->dependencies[d] = set_free(other->dependencies[d]);
734 }
735
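/* Merge the unit 'other' into 'u': names, references and dependencies are
 * transferred to 'u', and 'other' is marked UNIT_MERGED and queued for
 * cleanup. Only units of the same type that support aliases and are neither
 * active nor have jobs queued can be merged. */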
736 int unit_merge(Unit *u, Unit *other) {
737 UnitDependency d;
738 const char *other_id = NULL;
739 int r;
740
741 assert(u);
742 assert(other);
743 assert(u->manager == other->manager);
744 assert(u->type != _UNIT_TYPE_INVALID);
745
746 other = unit_follow_merge(other);
747
748 if (other == u)
749 return 0;
750
751 if (u->type != other->type)
752 return -EINVAL;
753
754 if (!u->instance != !other->instance)
755 return -EINVAL;
756
757 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
758 return -EEXIST;
759
760 if (other->load_state != UNIT_STUB &&
761 other->load_state != UNIT_NOT_FOUND)
762 return -EEXIST;
763
764 if (other->job)
765 return -EEXIST;
766
767 if (other->nop_job)
768 return -EEXIST;
769
770 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
771 return -EEXIST;
772
773 if (other->id)
774 other_id = strdupa(other->id);
775
776 /* Make reservations to ensure merge_dependencies() won't fail */
777 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
778 r = reserve_dependencies(u, other, d);
779 /*
780                  * We don't roll back reservations if we fail. We don't have
781 * a way to undo reservations. A reservation is not a leak.
782 */
783 if (r < 0)
784 return r;
785 }
786
787 /* Merge names */
788 r = merge_names(u, other);
789 if (r < 0)
790 return r;
791
792 /* Redirect all references */
793 while (other->refs)
794 unit_ref_set(other->refs, u);
795
796 /* Merge dependencies */
797 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
798 merge_dependencies(u, other, other_id, d);
799
800 other->load_state = UNIT_MERGED;
801 other->merged_into = u;
802
803 /* If there is still some data attached to the other node, we
804 * don't need it anymore, and can free it. */
805 if (other->load_state != UNIT_STUB)
806 if (UNIT_VTABLE(other)->done)
807 UNIT_VTABLE(other)->done(other);
808
809 unit_add_to_dbus_queue(u);
810 unit_add_to_cleanup_queue(other);
811
812 return 0;
813 }
814
815 int unit_merge_by_name(Unit *u, const char *name) {
816 _cleanup_free_ char *s = NULL;
817 Unit *other;
818 int r;
819
820 assert(u);
821 assert(name);
822
823 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
824 if (!u->instance)
825 return -EINVAL;
826
827 r = unit_name_replace_instance(name, u->instance, &s);
828 if (r < 0)
829 return r;
830
831 name = s;
832 }
833
834 other = manager_get_unit(u->manager, name);
835 if (other)
836 return unit_merge(u, other);
837
838 return unit_add_name(u, name);
839 }
840
841 Unit* unit_follow_merge(Unit *u) {
842 assert(u);
843
844 while (u->load_state == UNIT_MERGED)
845 assert_se(u = u->merged_into);
846
847 return u;
848 }
849
850 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
851 int r;
852
853 assert(u);
854 assert(c);
855
856 if (c->working_directory) {
857 r = unit_require_mounts_for(u, c->working_directory);
858 if (r < 0)
859 return r;
860 }
861
862 if (c->root_directory) {
863 r = unit_require_mounts_for(u, c->root_directory);
864 if (r < 0)
865 return r;
866 }
867
868 if (!MANAGER_IS_SYSTEM(u->manager))
869 return 0;
870
871 if (c->private_tmp) {
872 r = unit_require_mounts_for(u, "/tmp");
873 if (r < 0)
874 return r;
875
876 r = unit_require_mounts_for(u, "/var/tmp");
877 if (r < 0)
878 return r;
879 }
880
881 if (!IN_SET(c->std_output,
882 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
883 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
884 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
885 !IN_SET(c->std_error,
886 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
887 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
888 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
889 return 0;
890
891 /* If syslog or kernel logging is requested, make sure our own
892 * logging daemon is run first. */
893
894 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, NULL, true);
895 if (r < 0)
896 return r;
897
898 return 0;
899 }
900
901 const char *unit_description(Unit *u) {
902 assert(u);
903
904 if (u->description)
905 return u->description;
906
907 return strna(u->id);
908 }
909
910 void unit_dump(Unit *u, FILE *f, const char *prefix) {
911 char *t, **j;
912 UnitDependency d;
913 Iterator i;
914 const char *prefix2;
915 char
916 timestamp0[FORMAT_TIMESTAMP_MAX],
917 timestamp1[FORMAT_TIMESTAMP_MAX],
918 timestamp2[FORMAT_TIMESTAMP_MAX],
919 timestamp3[FORMAT_TIMESTAMP_MAX],
920 timestamp4[FORMAT_TIMESTAMP_MAX],
921 timespan[FORMAT_TIMESPAN_MAX];
922 Unit *following;
923 _cleanup_set_free_ Set *following_set = NULL;
924 int r;
925 const char *n;
926
927 assert(u);
928 assert(u->type >= 0);
929
930 prefix = strempty(prefix);
931 prefix2 = strjoina(prefix, "\t");
932
933 fprintf(f,
934 "%s-> Unit %s:\n"
935 "%s\tDescription: %s\n"
936 "%s\tInstance: %s\n"
937 "%s\tUnit Load State: %s\n"
938 "%s\tUnit Active State: %s\n"
939 "%s\tState Change Timestamp: %s\n"
940 "%s\tInactive Exit Timestamp: %s\n"
941 "%s\tActive Enter Timestamp: %s\n"
942 "%s\tActive Exit Timestamp: %s\n"
943 "%s\tInactive Enter Timestamp: %s\n"
944 "%s\tGC Check Good: %s\n"
945 "%s\tNeed Daemon Reload: %s\n"
946 "%s\tTransient: %s\n"
947 "%s\tPerpetual: %s\n"
948 "%s\tSlice: %s\n"
949 "%s\tCGroup: %s\n"
950 "%s\tCGroup realized: %s\n"
951 "%s\tCGroup mask: 0x%x\n"
952 "%s\tCGroup members mask: 0x%x\n",
953 prefix, u->id,
954 prefix, unit_description(u),
955 prefix, strna(u->instance),
956 prefix, unit_load_state_to_string(u->load_state),
957 prefix, unit_active_state_to_string(unit_active_state(u)),
958 prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
959 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
960 prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
961 prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
962 prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
963 prefix, yes_no(unit_check_gc(u)),
964 prefix, yes_no(unit_need_daemon_reload(u)),
965 prefix, yes_no(u->transient),
966 prefix, yes_no(u->perpetual),
967 prefix, strna(unit_slice_name(u)),
968 prefix, strna(u->cgroup_path),
969 prefix, yes_no(u->cgroup_realized),
970 prefix, u->cgroup_realized_mask,
971 prefix, u->cgroup_members_mask);
972
973 SET_FOREACH(t, u->names, i)
974 fprintf(f, "%s\tName: %s\n", prefix, t);
975
976 if (!sd_id128_is_null(u->invocation_id))
977 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
978 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
979
980 STRV_FOREACH(j, u->documentation)
981 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
982
983 following = unit_following(u);
984 if (following)
985 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
986
987 r = unit_following_set(u, &following_set);
988 if (r >= 0) {
989 Unit *other;
990
991 SET_FOREACH(other, following_set, i)
992 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
993 }
994
995 if (u->fragment_path)
996 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
997
998 if (u->source_path)
999 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1000
1001 STRV_FOREACH(j, u->dropin_paths)
1002 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1003
1004 if (u->job_timeout != USEC_INFINITY)
1005 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1006
1007 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1008 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1009
1010 if (u->job_timeout_reboot_arg)
1011 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1012
1013 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1014 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1015
1016 if (dual_timestamp_is_set(&u->condition_timestamp))
1017 fprintf(f,
1018 "%s\tCondition Timestamp: %s\n"
1019 "%s\tCondition Result: %s\n",
1020 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
1021 prefix, yes_no(u->condition_result));
1022
1023 if (dual_timestamp_is_set(&u->assert_timestamp))
1024 fprintf(f,
1025 "%s\tAssert Timestamp: %s\n"
1026 "%s\tAssert Result: %s\n",
1027 prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
1028 prefix, yes_no(u->assert_result));
1029
1030 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1031 Unit *other;
1032
1033 SET_FOREACH(other, u->dependencies[d], i)
1034 fprintf(f, "%s\t%s: %s\n", prefix, unit_dependency_to_string(d), other->id);
1035 }
1036
1037 if (!strv_isempty(u->requires_mounts_for)) {
1038 fprintf(f,
1039 "%s\tRequiresMountsFor:", prefix);
1040
1041 STRV_FOREACH(j, u->requires_mounts_for)
1042 fprintf(f, " %s", *j);
1043
1044 fputs("\n", f);
1045 }
1046
1047 if (u->load_state == UNIT_LOADED) {
1048
1049 fprintf(f,
1050 "%s\tStopWhenUnneeded: %s\n"
1051 "%s\tRefuseManualStart: %s\n"
1052 "%s\tRefuseManualStop: %s\n"
1053 "%s\tDefaultDependencies: %s\n"
1054 "%s\tOnFailureJobMode: %s\n"
1055 "%s\tIgnoreOnIsolate: %s\n",
1056 prefix, yes_no(u->stop_when_unneeded),
1057 prefix, yes_no(u->refuse_manual_start),
1058 prefix, yes_no(u->refuse_manual_stop),
1059 prefix, yes_no(u->default_dependencies),
1060 prefix, job_mode_to_string(u->on_failure_job_mode),
1061 prefix, yes_no(u->ignore_on_isolate));
1062
1063 if (UNIT_VTABLE(u)->dump)
1064 UNIT_VTABLE(u)->dump(u, f, prefix2);
1065
1066 } else if (u->load_state == UNIT_MERGED)
1067 fprintf(f,
1068 "%s\tMerged into: %s\n",
1069 prefix, u->merged_into->id);
1070 else if (u->load_state == UNIT_ERROR)
1071 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));
1072
1073 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1074 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1075
1076 if (u->job)
1077 job_dump(u->job, f, prefix2);
1078
1079 if (u->nop_job)
1080 job_dump(u->nop_job, f, prefix2);
1081 }
1082
1083 /* Common implementation for multiple backends */
1084 int unit_load_fragment_and_dropin(Unit *u) {
1085 int r;
1086
1087 assert(u);
1088
1089 /* Load a .{service,socket,...} file */
1090 r = unit_load_fragment(u);
1091 if (r < 0)
1092 return r;
1093
1094 if (u->load_state == UNIT_STUB)
1095 return -ENOENT;
1096
1097 /* Load drop-in directory data */
1098 r = unit_load_dropin(unit_follow_merge(u));
1099 if (r < 0)
1100 return r;
1101
1102 return 0;
1103 }
1104
1105 /* Common implementation for multiple backends */
1106 int unit_load_fragment_and_dropin_optional(Unit *u) {
1107 int r;
1108
1109 assert(u);
1110
1111 /* Same as unit_load_fragment_and_dropin(), but whether
1112 * something can be loaded or not doesn't matter. */
1113
1114 /* Load a .service file */
1115 r = unit_load_fragment(u);
1116 if (r < 0)
1117 return r;
1118
1119 if (u->load_state == UNIT_STUB)
1120 u->load_state = UNIT_LOADED;
1121
1122 /* Load drop-in directory data */
1123 r = unit_load_dropin(unit_follow_merge(u));
1124 if (r < 0)
1125 return r;
1126
1127 return 0;
1128 }
1129
1130 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1131 assert(u);
1132 assert(target);
1133
1134 if (target->type != UNIT_TARGET)
1135 return 0;
1136
1137         /* Only add the dependency if both units are loaded, so that
1138          * the loop check below is reliable */
1139 if (u->load_state != UNIT_LOADED ||
1140 target->load_state != UNIT_LOADED)
1141 return 0;
1142
1143 /* If either side wants no automatic dependencies, then let's
1144 * skip this */
1145 if (!u->default_dependencies ||
1146 !target->default_dependencies)
1147 return 0;
1148
1149 /* Don't create loops */
1150 if (set_get(target->dependencies[UNIT_BEFORE], u))
1151 return 0;
1152
1153 return unit_add_dependency(target, UNIT_AFTER, u, true);
1154 }
1155
1156 static int unit_add_target_dependencies(Unit *u) {
1157
1158 static const UnitDependency deps[] = {
1159 UNIT_REQUIRED_BY,
1160 UNIT_REQUISITE_OF,
1161 UNIT_WANTED_BY,
1162 UNIT_BOUND_BY
1163 };
1164
1165 Unit *target;
1166 Iterator i;
1167 unsigned k;
1168 int r = 0;
1169
1170 assert(u);
1171
1172 for (k = 0; k < ELEMENTSOF(deps); k++)
1173 SET_FOREACH(target, u->dependencies[deps[k]], i) {
1174 r = unit_add_default_target_dependency(u, target);
1175 if (r < 0)
1176 return r;
1177 }
1178
1179 return r;
1180 }
1181
1182 static int unit_add_slice_dependencies(Unit *u) {
1183 assert(u);
1184
1185 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1186 return 0;
1187
1188 if (UNIT_ISSET(u->slice))
1189 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true);
1190
1191 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1192 return 0;
1193
1194 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, NULL, true);
1195 }
1196
1197 static int unit_add_mount_dependencies(Unit *u) {
1198 char **i;
1199 int r;
1200
1201 assert(u);
1202
1203 STRV_FOREACH(i, u->requires_mounts_for) {
1204 char prefix[strlen(*i) + 1];
1205
1206 PATH_FOREACH_PREFIX_MORE(prefix, *i) {
1207 _cleanup_free_ char *p = NULL;
1208 Unit *m;
1209
1210 r = unit_name_from_path(prefix, ".mount", &p);
1211 if (r < 0)
1212 return r;
1213
1214 m = manager_get_unit(u->manager, p);
1215 if (!m) {
1216 /* Make sure to load the mount unit if
1217                                  * it exists. If so, the dependencies
1218 * on this unit will be added later
1219 * during the loading of the mount
1220 * unit. */
1221 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1222 continue;
1223 }
1224 if (m == u)
1225 continue;
1226
1227 if (m->load_state != UNIT_LOADED)
1228 continue;
1229
1230 r = unit_add_dependency(u, UNIT_AFTER, m, true);
1231 if (r < 0)
1232 return r;
1233
1234 if (m->fragment_path) {
1235 r = unit_add_dependency(u, UNIT_REQUIRES, m, true);
1236 if (r < 0)
1237 return r;
1238 }
1239 }
1240 }
1241
1242 return 0;
1243 }
1244
1245 static int unit_add_startup_units(Unit *u) {
1246 CGroupContext *c;
1247 int r;
1248
1249 c = unit_get_cgroup_context(u);
1250 if (!c)
1251 return 0;
1252
1253 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1254 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1255 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1256 return 0;
1257
1258 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1259 if (r < 0)
1260 return r;
1261
1262 return set_put(u->manager->startup_units, u);
1263 }
1264
1265 int unit_load(Unit *u) {
1266 int r;
1267
1268 assert(u);
1269
1270 if (u->in_load_queue) {
1271 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1272 u->in_load_queue = false;
1273 }
1274
1275 if (u->type == _UNIT_TYPE_INVALID)
1276 return -EINVAL;
1277
1278 if (u->load_state != UNIT_STUB)
1279 return 0;
1280
1281 if (u->transient_file) {
1282 r = fflush_and_check(u->transient_file);
1283 if (r < 0)
1284 goto fail;
1285
1286 fclose(u->transient_file);
1287 u->transient_file = NULL;
1288
1289 u->fragment_mtime = now(CLOCK_REALTIME);
1290 }
1291
1292 if (UNIT_VTABLE(u)->load) {
1293 r = UNIT_VTABLE(u)->load(u);
1294 if (r < 0)
1295 goto fail;
1296 }
1297
1298 if (u->load_state == UNIT_STUB) {
1299 r = -ENOENT;
1300 goto fail;
1301 }
1302
1303 if (u->load_state == UNIT_LOADED) {
1304
1305 r = unit_add_target_dependencies(u);
1306 if (r < 0)
1307 goto fail;
1308
1309 r = unit_add_slice_dependencies(u);
1310 if (r < 0)
1311 goto fail;
1312
1313 r = unit_add_mount_dependencies(u);
1314 if (r < 0)
1315 goto fail;
1316
1317 r = unit_add_startup_units(u);
1318 if (r < 0)
1319 goto fail;
1320
1321 if (u->on_failure_job_mode == JOB_ISOLATE && set_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1322                         log_unit_error(u, "More than one OnFailure= dependency specified but OnFailureJobMode=isolate set. Refusing.");
1323 r = -EINVAL;
1324 goto fail;
1325 }
1326
1327 unit_update_cgroup_members_masks(u);
1328 }
1329
1330 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1331
1332 unit_add_to_dbus_queue(unit_follow_merge(u));
1333 unit_add_to_gc_queue(u);
1334
1335 return 0;
1336
1337 fail:
1338 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND : UNIT_ERROR;
1339 u->load_error = r;
1340 unit_add_to_dbus_queue(u);
1341 unit_add_to_gc_queue(u);
1342
1343 log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1344
1345 return r;
1346 }
1347
1348 static bool unit_condition_test_list(Unit *u, Condition *first, const char *(*to_string)(ConditionType t)) {
1349 Condition *c;
1350 int triggered = -1;
1351
1352 assert(u);
1353 assert(to_string);
1354
1355 /* If the condition list is empty, then it is true */
1356 if (!first)
1357 return true;
1358
1359 /* Otherwise, if all of the non-trigger conditions apply and
1360 * if any of the trigger conditions apply (unless there are
1361 * none) we return true */
1362 LIST_FOREACH(conditions, c, first) {
1363 int r;
1364
1365 r = condition_test(c);
1366 if (r < 0)
1367 log_unit_warning(u,
1368 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1369 to_string(c->type),
1370 c->trigger ? "|" : "",
1371 c->negate ? "!" : "",
1372 c->parameter);
1373 else
1374 log_unit_debug(u,
1375 "%s=%s%s%s %s.",
1376 to_string(c->type),
1377 c->trigger ? "|" : "",
1378 c->negate ? "!" : "",
1379 c->parameter,
1380 condition_result_to_string(c->result));
1381
1382 if (!c->trigger && r <= 0)
1383 return false;
1384
1385 if (c->trigger && triggered <= 0)
1386 triggered = r > 0;
1387 }
1388
1389 return triggered != 0;
1390 }
1391
1392 static bool unit_condition_test(Unit *u) {
1393 assert(u);
1394
1395 dual_timestamp_get(&u->condition_timestamp);
1396 u->condition_result = unit_condition_test_list(u, u->conditions, condition_type_to_string);
1397
1398 return u->condition_result;
1399 }
1400
1401 static bool unit_assert_test(Unit *u) {
1402 assert(u);
1403
1404 dual_timestamp_get(&u->assert_timestamp);
1405 u->assert_result = unit_condition_test_list(u, u->asserts, assert_type_to_string);
1406
1407 return u->assert_result;
1408 }
1409
1410 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1411 DISABLE_WARNING_FORMAT_NONLITERAL;
1412 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, unit_description(u));
1413 REENABLE_WARNING;
1414 }
1415
1416 _pure_ static const char* unit_get_status_message_format(Unit *u, JobType t) {
1417 const char *format;
1418 const UnitStatusMessageFormats *format_table;
1419
1420 assert(u);
1421 assert(IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD));
1422
1423 if (t != JOB_RELOAD) {
1424 format_table = &UNIT_VTABLE(u)->status_message_formats;
1425 if (format_table) {
1426 format = format_table->starting_stopping[t == JOB_STOP];
1427 if (format)
1428 return format;
1429 }
1430 }
1431
1432 /* Return generic strings */
1433 if (t == JOB_START)
1434 return "Starting %s.";
1435 else if (t == JOB_STOP)
1436 return "Stopping %s.";
1437 else
1438 return "Reloading %s.";
1439 }
1440
1441 static void unit_status_print_starting_stopping(Unit *u, JobType t) {
1442 const char *format;
1443
1444 assert(u);
1445
1446 /* Reload status messages have traditionally not been printed to console. */
1447 if (!IN_SET(t, JOB_START, JOB_STOP))
1448 return;
1449
1450 format = unit_get_status_message_format(u, t);
1451
1452 DISABLE_WARNING_FORMAT_NONLITERAL;
1453 unit_status_printf(u, "", format);
1454 REENABLE_WARNING;
1455 }
1456
1457 static void unit_status_log_starting_stopping_reloading(Unit *u, JobType t) {
1458 const char *format;
1459 char buf[LINE_MAX];
1460 sd_id128_t mid;
1461
1462 assert(u);
1463
1464 if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
1465 return;
1466
1467 if (log_on_console())
1468 return;
1469
1470 /* We log status messages for all units and all operations. */
1471
1472 format = unit_get_status_message_format(u, t);
1473
1474 DISABLE_WARNING_FORMAT_NONLITERAL;
1475 snprintf(buf, sizeof buf, format, unit_description(u));
1476 REENABLE_WARNING;
1477
1478 mid = t == JOB_START ? SD_MESSAGE_UNIT_STARTING :
1479 t == JOB_STOP ? SD_MESSAGE_UNIT_STOPPING :
1480 SD_MESSAGE_UNIT_RELOADING;
1481
1482 /* Note that we deliberately use LOG_MESSAGE() instead of
1483 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1484 * closely what is written to screen using the status output,
1485          * which is supposed to be the highest-level, friendliest output
1486 * possible, which means we should avoid the low-level unit
1487 * name. */
1488 log_struct(LOG_INFO,
1489 LOG_MESSAGE_ID(mid),
1490 LOG_UNIT_ID(u),
1491 LOG_MESSAGE("%s", buf),
1492 NULL);
1493 }
1494
1495 void unit_status_emit_starting_stopping_reloading(Unit *u, JobType t) {
1496 assert(u);
1497 assert(t >= 0);
1498 assert(t < _JOB_TYPE_MAX);
1499
1500 unit_status_log_starting_stopping_reloading(u, t);
1501 unit_status_print_starting_stopping(u, t);
1502 }
1503
1504 int unit_start_limit_test(Unit *u) {
1505 assert(u);
1506
1507 if (ratelimit_test(&u->start_limit)) {
1508 u->start_limit_hit = false;
1509 return 0;
1510 }
1511
1512 log_unit_warning(u, "Start request repeated too quickly.");
1513 u->start_limit_hit = true;
1514
1515 return emergency_action(u->manager, u->start_limit_action, u->reboot_arg, "unit failed");
1516 }
1517
1518 /* Errors:
1519 * -EBADR: This unit type does not support starting.
1520 * -EALREADY: Unit is already started.
1521 * -EAGAIN: An operation is already in progress. Retry later.
1522 * -ECANCELED: Too many requests for now.
1523 * -EPROTO: Assert failed
1524 * -EINVAL: Unit not loaded
1525 * -EOPNOTSUPP: Unit type not supported
1526 */
1527 int unit_start(Unit *u) {
1528 UnitActiveState state;
1529 Unit *following;
1530
1531 assert(u);
1532
1533 /* If this is already started, then this will succeed. Note
1534 * that this will even succeed if this unit is not startable
1535 * by the user. This is relied on to detect when we need to
1536 * wait for units and when waiting is finished. */
1537 state = unit_active_state(u);
1538 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1539 return -EALREADY;
1540
1541 /* Units that aren't loaded cannot be started */
1542 if (u->load_state != UNIT_LOADED)
1543 return -EINVAL;
1544
1545         /* If the conditions failed, don't do anything at all. If we
1546          * are already activating, this call might still be useful to
1547 * speed up activation in case there is some hold-off time,
1548 * but we don't want to recheck the condition in that case. */
1549 if (state != UNIT_ACTIVATING &&
1550 !unit_condition_test(u)) {
1551 log_unit_debug(u, "Starting requested but condition failed. Not starting unit.");
1552 return -EALREADY;
1553 }
1554
1555 /* If the asserts failed, fail the entire job */
1556 if (state != UNIT_ACTIVATING &&
1557 !unit_assert_test(u)) {
1558 log_unit_notice(u, "Starting requested but asserts failed.");
1559 return -EPROTO;
1560 }
1561
1562 /* Units of types that aren't supported cannot be
1563 * started. Note that we do this test only after the condition
1564 * checks, so that we rather return condition check errors
1565 * (which are usually not considered a true failure) than "not
1566 * supported" errors (which are considered a failure).
1567 */
1568 if (!unit_supported(u))
1569 return -EOPNOTSUPP;
1570
1571 /* Forward to the main object, if we aren't it. */
1572 following = unit_following(u);
1573 if (following) {
1574 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1575 return unit_start(following);
1576 }
1577
1578 /* If it is stopped, but we cannot start it, then fail */
1579 if (!UNIT_VTABLE(u)->start)
1580 return -EBADR;
1581
1582 /* We don't suppress calls to ->start() here when we are
1583 * already starting, to allow this request to be used as a
1584 * "hurry up" call, for example when the unit is in some "auto
1585 * restart" state where it waits for a holdoff timer to elapse
1586 * before it will start again. */
1587
1588 unit_add_to_dbus_queue(u);
1589
1590 return UNIT_VTABLE(u)->start(u);
1591 }
1592
1593 bool unit_can_start(Unit *u) {
1594 assert(u);
1595
1596 if (u->load_state != UNIT_LOADED)
1597 return false;
1598
1599 if (!unit_supported(u))
1600 return false;
1601
1602 return !!UNIT_VTABLE(u)->start;
1603 }
1604
1605 bool unit_can_isolate(Unit *u) {
1606 assert(u);
1607
1608 return unit_can_start(u) &&
1609 u->allow_isolate;
1610 }
1611
1612 /* Errors:
1613 * -EBADR: This unit type does not support stopping.
1614 * -EALREADY: Unit is already stopped.
1615 * -EAGAIN: An operation is already in progress. Retry later.
1616 */
1617 int unit_stop(Unit *u) {
1618 UnitActiveState state;
1619 Unit *following;
1620
1621 assert(u);
1622
1623 state = unit_active_state(u);
1624 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1625 return -EALREADY;
1626
1627 following = unit_following(u);
1628 if (following) {
1629 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1630 return unit_stop(following);
1631 }
1632
1633 if (!UNIT_VTABLE(u)->stop)
1634 return -EBADR;
1635
1636 unit_add_to_dbus_queue(u);
1637
1638 return UNIT_VTABLE(u)->stop(u);
1639 }
1640
1641 bool unit_can_stop(Unit *u) {
1642 assert(u);
1643
1644 if (!unit_supported(u))
1645 return false;
1646
1647 if (u->perpetual)
1648 return false;
1649
1650 return !!UNIT_VTABLE(u)->stop;
1651 }
1652
1653 /* Errors:
1654 * -EBADR: This unit type does not support reloading.
1655 * -ENOEXEC: Unit is not started.
1656 * -EAGAIN: An operation is already in progress. Retry later.
1657 */
1658 int unit_reload(Unit *u) {
1659 UnitActiveState state;
1660 Unit *following;
1661
1662 assert(u);
1663
1664 if (u->load_state != UNIT_LOADED)
1665 return -EINVAL;
1666
1667 if (!unit_can_reload(u))
1668 return -EBADR;
1669
1670 state = unit_active_state(u);
1671 if (state == UNIT_RELOADING)
1672 return -EALREADY;
1673
1674 if (state != UNIT_ACTIVE) {
1675 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1676 return -ENOEXEC;
1677 }
1678
1679 following = unit_following(u);
1680 if (following) {
1681 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1682 return unit_reload(following);
1683 }
1684
1685 unit_add_to_dbus_queue(u);
1686
1687 return UNIT_VTABLE(u)->reload(u);
1688 }
1689
1690 bool unit_can_reload(Unit *u) {
1691 assert(u);
1692
1693 if (!UNIT_VTABLE(u)->reload)
1694 return false;
1695
1696 if (!UNIT_VTABLE(u)->can_reload)
1697 return true;
1698
1699 return UNIT_VTABLE(u)->can_reload(u);
1700 }
1701
1702 static void unit_check_unneeded(Unit *u) {
1703
1704 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1705
1706 static const UnitDependency needed_dependencies[] = {
1707 UNIT_REQUIRED_BY,
1708 UNIT_REQUISITE_OF,
1709 UNIT_WANTED_BY,
1710 UNIT_BOUND_BY,
1711 };
1712
1713 Unit *other;
1714 Iterator i;
1715 unsigned j;
1716 int r;
1717
1718 assert(u);
1719
1720 /* If this service shall be shut down when unneeded then do
1721 * so. */
1722
1723 if (!u->stop_when_unneeded)
1724 return;
1725
1726 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
1727 return;
1728
1729 for (j = 0; j < ELEMENTSOF(needed_dependencies); j++)
1730 SET_FOREACH(other, u->dependencies[needed_dependencies[j]], i)
1731 if (unit_active_or_pending(other))
1732 return;
1733
1734         /* If stopping a unit fails continuously we might enter a stop
1735          * loop here, hence, after a while, stop acting on the service
1736          * being unneeded. */
1737 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1738 log_unit_warning(u, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1739 return;
1740 }
1741
1742 log_unit_info(u, "Unit not needed anymore. Stopping.");
1743
1744 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1745 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1746 if (r < 0)
1747 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1748 }
1749
1750 static void unit_check_binds_to(Unit *u) {
1751 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1752 bool stop = false;
1753 Unit *other;
1754 Iterator i;
1755 int r;
1756
1757 assert(u);
1758
1759 if (u->job)
1760 return;
1761
1762 if (unit_active_state(u) != UNIT_ACTIVE)
1763 return;
1764
1765 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i) {
1766 if (other->job)
1767 continue;
1768
1769 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1770 continue;
1771
1772 stop = true;
1773 break;
1774 }
1775
1776 if (!stop)
1777 return;
1778
1779         /* If stopping a unit fails continuously we might enter a stop
1780          * loop here, hence, after a while, stop acting on the
1781          * unsatisfied BindsTo= dependency. */
1782 if (!ratelimit_test(&u->auto_stop_ratelimit)) {
1783 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
1784 return;
1785 }
1786
1787 assert(other);
1788 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
1789
1790 /* A unit we need to run is gone. Sniff. Let's stop this. */
1791 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, &error, NULL);
1792 if (r < 0)
1793 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
1794 }
1795
1796 static void retroactively_start_dependencies(Unit *u) {
1797 Iterator i;
1798 Unit *other;
1799
1800 assert(u);
1801 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
1802
1803 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1804 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1805 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1806 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1807
1808 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1809 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1810 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1811 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL);
1812
1813 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1814 if (!set_get(u->dependencies[UNIT_AFTER], other) &&
1815 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
1816 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL);
1817
1818 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTS], i)
1819 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1820 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1821
1822 SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
1823 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1824 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1825 }
1826
1827 static void retroactively_stop_dependencies(Unit *u) {
1828 Iterator i;
1829 Unit *other;
1830
1831 assert(u);
1832 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1833
1834 /* Pull down units which are bound to us recursively if enabled */
1835 SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
1836 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1837 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL);
1838 }
1839
1840 static void check_unneeded_dependencies(Unit *u) {
1841 Iterator i;
1842 Unit *other;
1843
1844 assert(u);
1845 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
1846
1847 /* Garbage collect services that might not be needed anymore, if enabled */
1848 SET_FOREACH(other, u->dependencies[UNIT_REQUIRES], i)
1849 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1850 unit_check_unneeded(other);
1851 SET_FOREACH(other, u->dependencies[UNIT_WANTS], i)
1852 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1853 unit_check_unneeded(other);
1854 SET_FOREACH(other, u->dependencies[UNIT_REQUISITE], i)
1855 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1856 unit_check_unneeded(other);
1857 SET_FOREACH(other, u->dependencies[UNIT_BINDS_TO], i)
1858 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
1859 unit_check_unneeded(other);
1860 }
1861
1862 void unit_start_on_failure(Unit *u) {
1863 Unit *other;
1864 Iterator i;
1865
1866 assert(u);
1867
1868 if (set_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
1869 return;
1870
1871 log_unit_info(u, "Triggering OnFailure= dependencies.");
1872
1873 SET_FOREACH(other, u->dependencies[UNIT_ON_FAILURE], i) {
1874 int r;
1875
1876 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, NULL);
1877 if (r < 0)
1878 log_unit_error_errno(u, r, "Failed to enqueue OnFailure= job: %m");
1879 }
1880 }
1881
1882 void unit_trigger_notify(Unit *u) {
1883 Unit *other;
1884 Iterator i;
1885
1886 assert(u);
1887
1888 SET_FOREACH(other, u->dependencies[UNIT_TRIGGERED_BY], i)
1889 if (UNIT_VTABLE(other)->trigger_notify)
1890 UNIT_VTABLE(other)->trigger_notify(other, u);
1891 }
1892
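/* Called by the unit type implementations whenever the low-level unit state
 * changes: updates the state-change timestamps, completes or invalidates any
 * pending job, retroactively starts/stops dependencies for unexpected
 * changes, and triggers OnFailure=, audit and GC handling. */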
1893 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
1894 Manager *m;
1895 bool unexpected;
1896
1897 assert(u);
1898 assert(os < _UNIT_ACTIVE_STATE_MAX);
1899 assert(ns < _UNIT_ACTIVE_STATE_MAX);
1900
1901 /* Note that this is called for all low-level state changes,
1902 * even if they might map to the same high-level
1903 * UnitActiveState! That means that ns == os is an expected
1904 * behavior here. For example: if a mount point is remounted
1905 * this function will be called too! */
1906
1907 m = u->manager;
1908
1909 /* Update timestamps for state changes */
1910 if (!MANAGER_IS_RELOADING(m)) {
1911 dual_timestamp_get(&u->state_change_timestamp);
1912
1913 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
1914 u->inactive_exit_timestamp = u->state_change_timestamp;
1915 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
1916 u->inactive_enter_timestamp = u->state_change_timestamp;
1917
1918 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
1919 u->active_enter_timestamp = u->state_change_timestamp;
1920 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
1921 u->active_exit_timestamp = u->state_change_timestamp;
1922 }
1923
1924 /* Keep track of failed units */
1925 (void) manager_update_failed_units(u->manager, u, ns == UNIT_FAILED);
1926
1927 /* Make sure the cgroup is always removed when we become inactive */
1928 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1929 unit_prune_cgroup(u);
1930
1931 /* Note that this doesn't apply to RemainAfterExit services exiting
1932 * successfully, since there's no change of state in that case. Which is
1933 * why it is handled in service_set_state() */
1934 if (UNIT_IS_INACTIVE_OR_FAILED(os) != UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1935 ExecContext *ec;
1936
1937 ec = unit_get_exec_context(u);
1938 if (ec && exec_context_may_touch_console(ec)) {
1939 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
1940 m->n_on_console--;
1941
1942 if (m->n_on_console == 0)
1943 /* unset no_console_output flag, since the console is free */
1944 m->no_console_output = false;
1945 } else
1946 m->n_on_console++;
1947 }
1948 }
1949
1950 if (u->job) {
1951 unexpected = false;
1952
1953 if (u->job->state == JOB_WAITING)
1954
1955 /* So we reached a different state for this
1956 * job. Let's see if we can run it now if it
1957 * failed previously due to EAGAIN. */
1958 job_add_to_run_queue(u->job);
1959
1960 /* Let's check whether this state change constitutes a
1961 * finished job, or maybe contradicts a running job and
1962 * hence needs to invalidate jobs. */
1963
1964 switch (u->job->type) {
1965
1966 case JOB_START:
1967 case JOB_VERIFY_ACTIVE:
1968
1969 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
1970 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
1971 else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
1972 unexpected = true;
1973
1974 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1975 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1976 }
1977
1978 break;
1979
1980 case JOB_RELOAD:
1981 case JOB_RELOAD_OR_START:
1982 case JOB_TRY_RELOAD:
1983
1984 if (u->job->state == JOB_RUNNING) {
1985 if (ns == UNIT_ACTIVE)
1986 job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true, false);
1987 else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
1988 unexpected = true;
1989
1990 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
1991 job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
1992 }
1993 }
1994
1995 break;
1996
1997 case JOB_STOP:
1998 case JOB_RESTART:
1999 case JOB_TRY_RESTART:
2000
2001 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2002 job_finish_and_invalidate(u->job, JOB_DONE, true, false);
2003 else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2004 unexpected = true;
2005 job_finish_and_invalidate(u->job, JOB_FAILED, true, false);
2006 }
2007
2008 break;
2009
2010 default:
2011 assert_not_reached("Job type unknown");
2012 }
2013
2014 } else
2015 unexpected = true;
2016
2017 if (!MANAGER_IS_RELOADING(m)) {
2018
2019 /* If this state change happened without being
2020 * requested by a job, then let's retroactively start
2021 * or stop dependencies. We skip that step when
2022 * deserializing, since we don't want to create any
2023 * additional jobs just because something is already
2024 * activated. */
2025
2026 if (unexpected) {
2027 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2028 retroactively_start_dependencies(u);
2029 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2030 retroactively_stop_dependencies(u);
2031 }
2032
2033                 /* Stop unneeded units regardless of whether going down was expected or not */
2034 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2035 check_unneeded_dependencies(u);
2036
2037 if (ns != os && ns == UNIT_FAILED) {
2038 log_unit_notice(u, "Unit entered failed state.");
2039 unit_start_on_failure(u);
2040 }
2041 }
2042
2043 /* Some names are special */
2044 if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
2045
2046 if (unit_has_name(u, SPECIAL_DBUS_SERVICE))
2047 /* The bus might have just become available,
2048 * hence try to connect to it, if we aren't
2049 * yet connected. */
2050 bus_init(m, true);
2051
2052 if (u->type == UNIT_SERVICE &&
2053 !UNIT_IS_ACTIVE_OR_RELOADING(os) &&
2054 !MANAGER_IS_RELOADING(m)) {
2055 /* Write audit record if we have just finished starting up */
2056 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, true);
2057 u->in_audit = true;
2058 }
2059
2060 if (!UNIT_IS_ACTIVE_OR_RELOADING(os))
2061 manager_send_unit_plymouth(m, u);
2062
2063 } else {
2064
2065 /* We don't care about D-Bus here, since we'll get an
2066 * asynchronous notification for it anyway. */
2067
2068 if (u->type == UNIT_SERVICE &&
2069 UNIT_IS_INACTIVE_OR_FAILED(ns) &&
2070 !UNIT_IS_INACTIVE_OR_FAILED(os) &&
2071 !MANAGER_IS_RELOADING(m)) {
2072
2073                         /* Hmm, if there was no start record written,
2074 * write it now, so that we always have a nice
2075 * pair */
2076 if (!u->in_audit) {
2077 manager_send_unit_audit(m, u, AUDIT_SERVICE_START, ns == UNIT_INACTIVE);
2078
2079 if (ns == UNIT_INACTIVE)
2080 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, true);
2081 } else
2082 /* Write audit record if we have just finished shutting down */
2083 manager_send_unit_audit(m, u, AUDIT_SERVICE_STOP, ns == UNIT_INACTIVE);
2084
2085 u->in_audit = false;
2086 }
2087 }
2088
2089 manager_recheck_journal(m);
2090 unit_trigger_notify(u);
2091
2092 if (!MANAGER_IS_RELOADING(u->manager)) {
2093 /* Maybe we finished startup and are now ready for
2094 * being stopped because unneeded? */
2095 unit_check_unneeded(u);
2096
2097 /* Maybe we finished startup, but something we needed
2098 * has vanished? Let's die then. (This happens when
2099 * something BindsTo= to a Type=oneshot unit, as these
2100 * units go directly from starting to inactive,
2101 * without ever entering started.) */
2102 unit_check_binds_to(u);
2103 }
2104
2105 unit_add_to_dbus_queue(u);
2106 unit_add_to_gc_queue(u);
2107 }
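
/* Illustrative walk-through of the "unexpected" path above (not part of the original source;
 * unit names are hypothetical): when a device is plugged in, its .device unit goes from
 * inactive to active without any job having been enqueued, i.e.
 *
 *         os == UNIT_INACTIVE, ns == UNIT_ACTIVE, u->job == NULL
 *                 => unexpected == true
 *                 => retroactively_start_dependencies(u)
 *
 * which enqueues start jobs for whatever the device unit Wants= (for example dependencies
 * added via udev's SYSTEMD_WANTS). Conversely, a service that dies while no stop job is
 * pending takes the retroactively_stop_dependencies() branch, stopping units bound to it
 * via BindsTo=. */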
2108
2109 int unit_watch_pid(Unit *u, pid_t pid) {
2110 int q, r;
2111
2112 assert(u);
2113 assert(pid >= 1);
2114
2115 /* Watch a specific PID. We only support one or two units
2116 * watching each PID for now, not more. */
2117
2118 r = set_ensure_allocated(&u->pids, NULL);
2119 if (r < 0)
2120 return r;
2121
2122 r = hashmap_ensure_allocated(&u->manager->watch_pids1, NULL);
2123 if (r < 0)
2124 return r;
2125
2126 r = hashmap_put(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2127 if (r == -EEXIST) {
2128 r = hashmap_ensure_allocated(&u->manager->watch_pids2, NULL);
2129 if (r < 0)
2130 return r;
2131
2132 r = hashmap_put(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2133 }
2134
2135 q = set_put(u->pids, PID_TO_PTR(pid));
2136 if (q < 0)
2137 return q;
2138
2139 return r;
2140 }
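
/* Illustrative usage sketch (the Service pointer s is hypothetical, not from this file):
 * unit type code that has just learnt about a main or control PID registers it like this:
 *
 *         r = unit_watch_pid(UNIT(s), pid);
 *         if (r < 0)
 *                 log_unit_warning_errno(UNIT(s), r, "Failed to watch PID " PID_FMT ": %m", pid);
 *
 * The first unit watching a PID lands in manager->watch_pids1; a second, different unit
 * watching the same PID gets -EEXIST from hashmap_put() and is stored in
 * manager->watch_pids2 instead; a third distinct watcher gets -EEXIST returned. */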
2141
2142 void unit_unwatch_pid(Unit *u, pid_t pid) {
2143 assert(u);
2144 assert(pid >= 1);
2145
2146 (void) hashmap_remove_value(u->manager->watch_pids1, PID_TO_PTR(pid), u);
2147 (void) hashmap_remove_value(u->manager->watch_pids2, PID_TO_PTR(pid), u);
2148 (void) set_remove(u->pids, PID_TO_PTR(pid));
2149 }
2150
2151 void unit_unwatch_all_pids(Unit *u) {
2152 assert(u);
2153
2154 while (!set_isempty(u->pids))
2155 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2156
2157 u->pids = set_free(u->pids);
2158 }
2159
2160 void unit_tidy_watch_pids(Unit *u, pid_t except1, pid_t except2) {
2161 Iterator i;
2162 void *e;
2163
2164 assert(u);
2165
2166 /* Cleans dead PIDs from our list */
2167
2168 SET_FOREACH(e, u->pids, i) {
2169 pid_t pid = PTR_TO_PID(e);
2170
2171 if (pid == except1 || pid == except2)
2172 continue;
2173
2174 if (!pid_is_unwaited(pid))
2175 unit_unwatch_pid(u, pid);
2176 }
2177 }
2178
2179 bool unit_job_is_applicable(Unit *u, JobType j) {
2180 assert(u);
2181 assert(j >= 0 && j < _JOB_TYPE_MAX);
2182
2183 switch (j) {
2184
2185 case JOB_VERIFY_ACTIVE:
2186 case JOB_START:
2187 case JOB_NOP:
2188 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2189 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2190 * jobs for them. */
2191 return true;
2192
2193 case JOB_STOP:
2194 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2195 * external events), hence it makes no sense to permit enqueuing such a request either. */
2196 return !u->perpetual;
2197
2198 case JOB_RESTART:
2199 case JOB_TRY_RESTART:
2200 return unit_can_stop(u) && unit_can_start(u);
2201
2202 case JOB_RELOAD:
2203 case JOB_TRY_RELOAD:
2204 return unit_can_reload(u);
2205
2206 case JOB_RELOAD_OR_START:
2207 return unit_can_reload(u) && unit_can_start(u);
2208
2209 default:
2210 assert_not_reached("Invalid job type");
2211 }
2212 }
2213
2214 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2215 assert(u);
2216
2217 /* Only warn about some unit types */
2218 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2219 return;
2220
2221 if (streq_ptr(u->id, other))
2222 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2223 else
2224 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2225 }
2226
2227 int unit_add_dependency(Unit *u, UnitDependency d, Unit *other, bool add_reference) {
2228
2229 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2230 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2231 [UNIT_WANTS] = UNIT_WANTED_BY,
2232 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2233 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2234 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2235 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2236 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2237 [UNIT_WANTED_BY] = UNIT_WANTS,
2238 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2239 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2240 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2241 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2242 [UNIT_BEFORE] = UNIT_AFTER,
2243 [UNIT_AFTER] = UNIT_BEFORE,
2244 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2245 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2246 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2247 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2248 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2249 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2250 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2251 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2252 };
2253 int r, q = 0, v = 0, w = 0;
2254 Unit *orig_u = u, *orig_other = other;
2255
2256 assert(u);
2257 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2258 assert(other);
2259
2260 u = unit_follow_merge(u);
2261 other = unit_follow_merge(other);
2262
2263 /* We won't allow dependencies on ourselves. We will not
2264 * consider them an error, however. */
2265 if (u == other) {
2266 maybe_warn_about_dependency(orig_u, orig_other->id, d);
2267 return 0;
2268 }
2269
2270 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2271 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2272 return 0;
2273 }
2274
2275 r = set_ensure_allocated(&u->dependencies[d], NULL);
2276 if (r < 0)
2277 return r;
2278
2279 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID) {
2280 r = set_ensure_allocated(&other->dependencies[inverse_table[d]], NULL);
2281 if (r < 0)
2282 return r;
2283 }
2284
2285 if (add_reference) {
2286 r = set_ensure_allocated(&u->dependencies[UNIT_REFERENCES], NULL);
2287 if (r < 0)
2288 return r;
2289
2290 r = set_ensure_allocated(&other->dependencies[UNIT_REFERENCED_BY], NULL);
2291 if (r < 0)
2292 return r;
2293 }
2294
2295 q = set_put(u->dependencies[d], other);
2296 if (q < 0)
2297 return q;
2298
2299 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2300 v = set_put(other->dependencies[inverse_table[d]], u);
2301 if (v < 0) {
2302 r = v;
2303 goto fail;
2304 }
2305 }
2306
2307 if (add_reference) {
2308 w = set_put(u->dependencies[UNIT_REFERENCES], other);
2309 if (w < 0) {
2310 r = w;
2311 goto fail;
2312 }
2313
2314 r = set_put(other->dependencies[UNIT_REFERENCED_BY], u);
2315 if (r < 0)
2316 goto fail;
2317 }
2318
2319 unit_add_to_dbus_queue(u);
2320 return 0;
2321
2322 fail:
2323 if (q > 0)
2324 set_remove(u->dependencies[d], other);
2325
2326 if (v > 0)
2327 set_remove(other->dependencies[inverse_table[d]], u);
2328
2329 if (w > 0)
2330 set_remove(u->dependencies[UNIT_REFERENCES], other);
2331
2332 return r;
2333 }
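
/* Illustrative sketch (unit pointers a and b are hypothetical): recording that a.service
 * wants b.service
 *
 *         r = unit_add_dependency(a, UNIT_WANTS, b, true);
 *
 * puts b into a->dependencies[UNIT_WANTS] and, via inverse_table, a into
 * b->dependencies[UNIT_WANTED_BY]. Because add_reference is true, the pair is additionally
 * cross-linked through UNIT_REFERENCES/UNIT_REFERENCED_BY, recording that a holds a
 * reference on b. If any of the set insertions fails, the fail: path above undoes the
 * partial edits. */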
2334
2335 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference) {
2336 int r;
2337
2338 assert(u);
2339
2340 r = unit_add_dependency(u, d, other, add_reference);
2341 if (r < 0)
2342 return r;
2343
2344 return unit_add_dependency(u, e, other, add_reference);
2345 }
2346
2347 static int resolve_template(Unit *u, const char *name, const char *path, char **buf, const char **ret) {
2348 int r;
2349
2350 assert(u);
2351 assert(name || path);
2352 assert(buf);
2353 assert(ret);
2354
2355 if (!name)
2356 name = basename(path);
2357
2358 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2359 *buf = NULL;
2360 *ret = name;
2361 return 0;
2362 }
2363
2364 if (u->instance)
2365 r = unit_name_replace_instance(name, u->instance, buf);
2366 else {
2367 _cleanup_free_ char *i = NULL;
2368
2369 r = unit_name_to_prefix(u->id, &i);
2370 if (r < 0)
2371 return r;
2372
2373 r = unit_name_replace_instance(name, i, buf);
2374 }
2375 if (r < 0)
2376 return r;
2377
2378 *ret = *buf;
2379 return 0;
2380 }
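
/* Illustrative sketch: for an instantiated unit with u->instance == "ttyS0", resolving the
 * template name "serial-getty@.service"
 *
 *         _cleanup_free_ char *buf = NULL;
 *         const char *resolved;
 *
 *         r = resolve_template(u, "serial-getty@.service", NULL, &buf, &resolved);
 *
 * sets resolved to "serial-getty@ttyS0.service" (storage owned by buf), while a
 * non-template name such as "dbus.service" is passed through unchanged with *buf left
 * NULL. */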
2381
2382 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, const char *path, bool add_reference) {
2383 _cleanup_free_ char *buf = NULL;
2384 Unit *other;
2385 int r;
2386
2387 assert(u);
2388 assert(name || path);
2389
2390 r = resolve_template(u, name, path, &buf, &name);
2391 if (r < 0)
2392 return r;
2393
2394 r = manager_load_unit(u->manager, name, path, NULL, &other);
2395 if (r < 0)
2396 return r;
2397
2398 return unit_add_dependency(u, d, other, add_reference);
2399 }
2400
2401 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, const char *path, bool add_reference) {
2402 _cleanup_free_ char *buf = NULL;
2403 Unit *other;
2404 int r;
2405
2406 assert(u);
2407 assert(name || path);
2408
2409 r = resolve_template(u, name, path, &buf, &name);
2410 if (r < 0)
2411 return r;
2412
2413 r = manager_load_unit(u->manager, name, path, NULL, &other);
2414 if (r < 0)
2415 return r;
2416
2417 return unit_add_two_dependencies(u, d, e, other, add_reference);
2418 }
2419
2420 int set_unit_path(const char *p) {
2421 /* This is mostly for debug purposes */
2422 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
2423 return -errno;
2424
2425 return 0;
2426 }
2427
2428 char *unit_dbus_path(Unit *u) {
2429 assert(u);
2430
2431 if (!u->id)
2432 return NULL;
2433
2434 return unit_dbus_path_from_name(u->id);
2435 }
2436
2437 char *unit_dbus_path_invocation_id(Unit *u) {
2438 assert(u);
2439
2440 if (sd_id128_is_null(u->invocation_id))
2441 return NULL;
2442
2443 return unit_dbus_path_from_name(u->invocation_id_string);
2444 }
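
/* Illustrative sketch: for u->id == "dbus.service" unit_dbus_path() returns the escaped
 * object path "/org/freedesktop/systemd1/unit/dbus_2eservice" (the "." becomes "_2e");
 * unit_dbus_path_invocation_id() uses the 32-character lowercase hex invocation ID instead,
 * which needs no escaping. */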
2445
2446 int unit_set_slice(Unit *u, Unit *slice) {
2447 assert(u);
2448 assert(slice);
2449
2450 /* Sets the unit slice if it has not been set before. Is extra
2451 * careful to only allow this for units that actually have a
2452 * cgroup context. Also, we don't allow setting this for slices
2453 * (since the parent slice is derived from the name). Make
2454 * sure the unit we set is actually a slice. */
2455
2456 if (!UNIT_HAS_CGROUP_CONTEXT(u))
2457 return -EOPNOTSUPP;
2458
2459 if (u->type == UNIT_SLICE)
2460 return -EINVAL;
2461
2462 if (unit_active_state(u) != UNIT_INACTIVE)
2463 return -EBUSY;
2464
2465 if (slice->type != UNIT_SLICE)
2466 return -EINVAL;
2467
2468 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
2469 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
2470 return -EPERM;
2471
2472 if (UNIT_DEREF(u->slice) == slice)
2473 return 0;
2474
2475 /* Disallow slice changes if @u is already bound to cgroups */
2476 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
2477 return -EBUSY;
2478
2479 unit_ref_unset(&u->slice);
2480 unit_ref_set(&u->slice, slice);
2481 return 1;
2482 }
2483
2484 int unit_set_default_slice(Unit *u) {
2485 _cleanup_free_ char *b = NULL;
2486 const char *slice_name;
2487 Unit *slice;
2488 int r;
2489
2490 assert(u);
2491
2492 if (UNIT_ISSET(u->slice))
2493 return 0;
2494
2495 if (u->instance) {
2496 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
2497
2498 /* Implicitly place all instantiated units in their
2499 * own per-template slice */
2500
2501 r = unit_name_to_prefix(u->id, &prefix);
2502 if (r < 0)
2503 return r;
2504
2505 /* The prefix is already escaped, but it might include
2506 * "-" which has a special meaning for slice units,
2507 * hence escape it once more here. */
2508 escaped = unit_name_escape(prefix);
2509 if (!escaped)
2510 return -ENOMEM;
2511
2512 if (MANAGER_IS_SYSTEM(u->manager))
2513 b = strjoin("system-", escaped, ".slice");
2514 else
2515 b = strappend(escaped, ".slice");
2516 if (!b)
2517 return -ENOMEM;
2518
2519 slice_name = b;
2520 } else
2521 slice_name =
2522 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
2523 ? SPECIAL_SYSTEM_SLICE
2524 : SPECIAL_ROOT_SLICE;
2525
2526 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
2527 if (r < 0)
2528 return r;
2529
2530 return unit_set_slice(u, slice);
2531 }
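
/* Illustrative sketch: in the system manager an instantiated unit such as
 * "getty@tty1.service" has the prefix "getty", which is escaped and wrapped into
 * "system-getty.slice", so all getty instances share one per-template slice. Non-instantiated
 * units fall back to "system.slice" (SPECIAL_SYSTEM_SLICE) in the system manager, except for
 * init.scope which gets "-.slice" (SPECIAL_ROOT_SLICE), as do units of user managers. */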
2532
2533 const char *unit_slice_name(Unit *u) {
2534 assert(u);
2535
2536 if (!UNIT_ISSET(u->slice))
2537 return NULL;
2538
2539 return UNIT_DEREF(u->slice)->id;
2540 }
2541
2542 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
2543 _cleanup_free_ char *t = NULL;
2544 int r;
2545
2546 assert(u);
2547 assert(type);
2548 assert(_found);
2549
2550 r = unit_name_change_suffix(u->id, type, &t);
2551 if (r < 0)
2552 return r;
2553 if (unit_has_name(u, t))
2554 return -EINVAL;
2555
2556 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
2557 assert(r < 0 || *_found != u);
2558 return r;
2559 }
2560
2561 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
2562 const char *name, *old_owner, *new_owner;
2563 Unit *u = userdata;
2564 int r;
2565
2566 assert(message);
2567 assert(u);
2568
2569 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
2570 if (r < 0) {
2571 bus_log_parse_error(r);
2572 return 0;
2573 }
2574
2575 if (UNIT_VTABLE(u)->bus_name_owner_change)
2576 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
2577
2578 return 0;
2579 }
2580
2581 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
2582 const char *match;
2583
2584 assert(u);
2585 assert(bus);
2586 assert(name);
2587
2588 if (u->match_bus_slot)
2589 return -EBUSY;
2590
2591 match = strjoina("type='signal',"
2592 "sender='org.freedesktop.DBus',"
2593 "path='/org/freedesktop/DBus',"
2594 "interface='org.freedesktop.DBus',"
2595 "member='NameOwnerChanged',"
2596 "arg0='", name, "'");
2597
2598 return sd_bus_add_match(bus, &u->match_bus_slot, match, signal_name_owner_changed, u);
2599 }
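
/* Illustrative sketch: for name == "org.freedesktop.hostname1" the strjoina() above yields
 * the single match string
 *
 *         type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *                 interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.freedesktop.hostname1'
 *
 * (wrapped here for readability), so the slot fires exactly when ownership of that one
 * well-known name changes and signal_name_owner_changed() can forward the old/new owner to
 * the unit type's bus_name_owner_change() hook. */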
2600
2601 int unit_watch_bus_name(Unit *u, const char *name) {
2602 int r;
2603
2604 assert(u);
2605 assert(name);
2606
2607 /* Watch a specific name on the bus. We only support one unit
2608 * watching each name for now. */
2609
2610 if (u->manager->api_bus) {
2611 /* If the bus is already available, install the match directly.
2612 * Otherwise, just put the name in the list. bus_setup_api() will take care of it later. */
2613 r = unit_install_bus_match(u, u->manager->api_bus, name);
2614 if (r < 0)
2615 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
2616 }
2617
2618 r = hashmap_put(u->manager->watch_bus, name, u);
2619 if (r < 0) {
2620 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2621 return log_warning_errno(r, "Failed to put bus name into hashmap: %m");
2622 }
2623
2624 return 0;
2625 }
2626
2627 void unit_unwatch_bus_name(Unit *u, const char *name) {
2628 assert(u);
2629 assert(name);
2630
2631 hashmap_remove_value(u->manager->watch_bus, name, u);
2632 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
2633 }
2634
2635 bool unit_can_serialize(Unit *u) {
2636 assert(u);
2637
2638 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
2639 }
2640
2641 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
2642 int r;
2643
2644 assert(u);
2645 assert(f);
2646 assert(fds);
2647
2648 if (unit_can_serialize(u)) {
2649 ExecRuntime *rt;
2650
2651 r = UNIT_VTABLE(u)->serialize(u, f, fds);
2652 if (r < 0)
2653 return r;
2654
2655 rt = unit_get_exec_runtime(u);
2656 if (rt) {
2657 r = exec_runtime_serialize(u, rt, f, fds);
2658 if (r < 0)
2659 return r;
2660 }
2661 }
2662
2663 dual_timestamp_serialize(f, "state-change-timestamp", &u->state_change_timestamp);
2664
2665 dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
2666 dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
2667 dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
2668 dual_timestamp_serialize(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
2669
2670 dual_timestamp_serialize(f, "condition-timestamp", &u->condition_timestamp);
2671 dual_timestamp_serialize(f, "assert-timestamp", &u->assert_timestamp);
2672
2673 if (dual_timestamp_is_set(&u->condition_timestamp))
2674 unit_serialize_item(u, f, "condition-result", yes_no(u->condition_result));
2675
2676 if (dual_timestamp_is_set(&u->assert_timestamp))
2677 unit_serialize_item(u, f, "assert-result", yes_no(u->assert_result));
2678
2679 unit_serialize_item(u, f, "transient", yes_no(u->transient));
2680
2681 unit_serialize_item_format(u, f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
2682 if (u->cpu_usage_last != NSEC_INFINITY)
2683 unit_serialize_item_format(u, f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
2684
2685 if (u->cgroup_path)
2686 unit_serialize_item(u, f, "cgroup", u->cgroup_path);
2687 unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
2688
2689 if (uid_is_valid(u->ref_uid))
2690 unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
2691 if (gid_is_valid(u->ref_gid))
2692 unit_serialize_item_format(u, f, "ref-gid", GID_FMT, u->ref_gid);
2693
2694 if (!sd_id128_is_null(u->invocation_id))
2695 unit_serialize_item_format(u, f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
2696
2697 bus_track_serialize(u->bus_track, f, "ref");
2698
2699 if (serialize_jobs) {
2700 if (u->job) {
2701 fprintf(f, "job\n");
2702 job_serialize(u->job, f);
2703 }
2704
2705 if (u->nop_job) {
2706 fprintf(f, "job\n");
2707 job_serialize(u->nop_job, f);
2708 }
2709 }
2710
2711 /* End marker */
2712 fputc('\n', f);
2713 return 0;
2714 }
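
/* Illustrative sketch of the stream this produces across a daemon reload or reexec (all
 * values invented, unit name hypothetical):
 *
 *         state-change-timestamp=1499716623478651 407466
 *         inactive-exit-timestamp=1499716623478651 407466
 *         condition-result=yes
 *         assert-result=yes
 *         transient=no
 *         cpu-usage-base=0
 *         cgroup=/system.slice/foo.service
 *         cgroup-realized=yes
 *
 * plus type-specific entries, terminated by the single empty line written above, which
 * unit_deserialize() below treats as the end marker. */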
2715
2716 int unit_serialize_item(Unit *u, FILE *f, const char *key, const char *value) {
2717 assert(u);
2718 assert(f);
2719 assert(key);
2720
2721 if (!value)
2722 return 0;
2723
2724 fputs(key, f);
2725 fputc('=', f);
2726 fputs(value, f);
2727 fputc('\n', f);
2728
2729 return 1;
2730 }
2731
2732 int unit_serialize_item_escaped(Unit *u, FILE *f, const char *key, const char *value) {
2733 _cleanup_free_ char *c = NULL;
2734
2735 assert(u);
2736 assert(f);
2737 assert(key);
2738
2739 if (!value)
2740 return 0;
2741
2742 c = cescape(value);
2743 if (!c)
2744 return -ENOMEM;
2745
2746 fputs(key, f);
2747 fputc('=', f);
2748 fputs(c, f);
2749 fputc('\n', f);
2750
2751 return 1;
2752 }
2753
2754 int unit_serialize_item_fd(Unit *u, FILE *f, FDSet *fds, const char *key, int fd) {
2755 int copy;
2756
2757 assert(u);
2758 assert(f);
2759 assert(key);
2760
2761 if (fd < 0)
2762 return 0;
2763
2764 copy = fdset_put_dup(fds, fd);
2765 if (copy < 0)
2766 return copy;
2767
2768 fprintf(f, "%s=%i\n", key, copy);
2769 return 1;
2770 }
2771
2772 void unit_serialize_item_format(Unit *u, FILE *f, const char *key, const char *format, ...) {
2773 va_list ap;
2774
2775 assert(u);
2776 assert(f);
2777 assert(key);
2778 assert(format);
2779
2780 fputs(key, f);
2781 fputc('=', f);
2782
2783 va_start(ap, format);
2784 vfprintf(f, format, ap);
2785 va_end(ap);
2786
2787 fputc('\n', f);
2788 }
2789
2790 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
2791 ExecRuntime **rt = NULL;
2792 size_t offset;
2793 int r;
2794
2795 assert(u);
2796 assert(f);
2797 assert(fds);
2798
2799 offset = UNIT_VTABLE(u)->exec_runtime_offset;
2800 if (offset > 0)
2801 rt = (ExecRuntime**) ((uint8_t*) u + offset);
2802
2803 for (;;) {
2804 char line[LINE_MAX], *l, *v;
2805 size_t k;
2806
2807 if (!fgets(line, sizeof(line), f)) {
2808 if (feof(f))
2809 return 0;
2810 return -errno;
2811 }
2812
2813 char_array_0(line);
2814 l = strstrip(line);
2815
2816 /* End marker */
2817 if (isempty(l))
2818 break;
2819
2820 k = strcspn(l, "=");
2821
2822 if (l[k] == '=') {
2823 l[k] = 0;
2824 v = l+k+1;
2825 } else
2826 v = l+k;
2827
2828 if (streq(l, "job")) {
2829 if (v[0] == '\0') {
2830 /* new-style serialized job */
2831 Job *j;
2832
2833 j = job_new_raw(u);
2834 if (!j)
2835 return log_oom();
2836
2837 r = job_deserialize(j, f);
2838 if (r < 0) {
2839 job_free(j);
2840 return r;
2841 }
2842
2843 r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
2844 if (r < 0) {
2845 job_free(j);
2846 return r;
2847 }
2848
2849 r = job_install_deserialized(j);
2850 if (r < 0) {
2851 hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
2852 job_free(j);
2853 return r;
2854 }
2855 } else /* legacy for pre-44 */
2856 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
2857 continue;
2858 } else if (streq(l, "state-change-timestamp")) {
2859 dual_timestamp_deserialize(v, &u->state_change_timestamp);
2860 continue;
2861 } else if (streq(l, "inactive-exit-timestamp")) {
2862 dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
2863 continue;
2864 } else if (streq(l, "active-enter-timestamp")) {
2865 dual_timestamp_deserialize(v, &u->active_enter_timestamp);
2866 continue;
2867 } else if (streq(l, "active-exit-timestamp")) {
2868 dual_timestamp_deserialize(v, &u->active_exit_timestamp);
2869 continue;
2870 } else if (streq(l, "inactive-enter-timestamp")) {
2871 dual_timestamp_deserialize(v, &u->inactive_enter_timestamp);
2872 continue;
2873 } else if (streq(l, "condition-timestamp")) {
2874 dual_timestamp_deserialize(v, &u->condition_timestamp);
2875 continue;
2876 } else if (streq(l, "assert-timestamp")) {
2877 dual_timestamp_deserialize(v, &u->assert_timestamp);
2878 continue;
2879 } else if (streq(l, "condition-result")) {
2880
2881 r = parse_boolean(v);
2882 if (r < 0)
2883 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
2884 else
2885 u->condition_result = r;
2886
2887 continue;
2888
2889 } else if (streq(l, "assert-result")) {
2890
2891 r = parse_boolean(v);
2892 if (r < 0)
2893 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
2894 else
2895 u->assert_result = r;
2896
2897 continue;
2898
2899 } else if (streq(l, "transient")) {
2900
2901 r = parse_boolean(v);
2902 if (r < 0)
2903 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
2904 else
2905 u->transient = r;
2906
2907 continue;
2908
2909 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
2910
2911 r = safe_atou64(v, &u->cpu_usage_base);
2912 if (r < 0)
2913 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
2914
2915 continue;
2916
2917 } else if (streq(l, "cpu-usage-last")) {
2918
2919 r = safe_atou64(v, &u->cpu_usage_last);
2920 if (r < 0)
2921 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
2922
2923 continue;
2924
2925 } else if (streq(l, "cgroup")) {
2926
2927 r = unit_set_cgroup_path(u, v);
2928 if (r < 0)
2929 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
2930
2931 (void) unit_watch_cgroup(u);
2932
2933 continue;
2934 } else if (streq(l, "cgroup-realized")) {
2935 int b;
2936
2937 b = parse_boolean(v);
2938 if (b < 0)
2939 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
2940 else
2941 u->cgroup_realized = b;
2942
2943 continue;
2944
2945 } else if (streq(l, "ref-uid")) {
2946 uid_t uid;
2947
2948 r = parse_uid(v, &uid);
2949 if (r < 0)
2950 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
2951 else
2952 unit_ref_uid_gid(u, uid, GID_INVALID);
2953
2954 continue;
2955
2956 } else if (streq(l, "ref-gid")) {
2957 gid_t gid;
2958
2959 r = parse_gid(v, &gid);
2960 if (r < 0)
2961 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
2962 else
2963 unit_ref_uid_gid(u, UID_INVALID, gid);
2964 continue;
2965 } else if (streq(l, "ref")) {
2966
2967 r = strv_extend(&u->deserialized_refs, v);
2968 if (r < 0)
2969 log_oom();
2970
2971 continue;
2972 } else if (streq(l, "invocation-id")) {
2973 sd_id128_t id;
2974
2975 r = sd_id128_from_string(v, &id);
2976 if (r < 0)
2977 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
2978 else {
2979 r = unit_set_invocation_id(u, id);
2980 if (r < 0)
2981 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
2982 }
2983
2984 continue;
2985 }
2986
2987 if (unit_can_serialize(u)) {
2988 if (rt) {
2989 r = exec_runtime_deserialize_item(u, rt, l, v, fds);
2990 if (r < 0) {
2991 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
2992 continue;
2993 }
2994
2995 /* Returns positive if key was handled by the call */
2996 if (r > 0)
2997 continue;
2998 }
2999
3000 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3001 if (r < 0)
3002 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3003 }
3004 }
3005
3006 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3007 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3008 * before 228 where the base for timeouts was not persistent across reboots. */
3009
3010 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3011 dual_timestamp_get(&u->state_change_timestamp);
3012
3013 return 0;
3014 }
3015
3016 int unit_add_node_link(Unit *u, const char *what, bool wants, UnitDependency dep) {
3017 Unit *device;
3018 _cleanup_free_ char *e = NULL;
3019 int r;
3020
3021 assert(u);
3022
3023 /* Adds in links to the device node that this unit is based on */
3024 if (isempty(what))
3025 return 0;
3026
3027 if (!is_device_path(what))
3028 return 0;
3029
3030 /* When device units aren't supported (such as in a
3031 * container), don't create dependencies on them. */
3032 if (!unit_type_supported(UNIT_DEVICE))
3033 return 0;
3034
3035 r = unit_name_from_path(what, ".device", &e);
3036 if (r < 0)
3037 return r;
3038
3039 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3040 if (r < 0)
3041 return r;
3042
3043 r = unit_add_two_dependencies(u, UNIT_AFTER,
3044 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3045 device, true);
3046 if (r < 0)
3047 return r;
3048
3049 if (wants) {
3050 r = unit_add_dependency(device, UNIT_WANTS, u, false);
3051 if (r < 0)
3052 return r;
3053 }
3054
3055 return 0;
3056 }
3057
3058 int unit_coldplug(Unit *u) {
3059 int r = 0, q;
3060 char **i;
3061
3062 assert(u);
3063
3064 /* Make sure we don't enter a loop when coldplugging
3065 * recursively. */
3066 if (u->coldplugged)
3067 return 0;
3068
3069 u->coldplugged = true;
3070
3071 STRV_FOREACH(i, u->deserialized_refs) {
3072 q = bus_unit_track_add_name(u, *i);
3073 if (q < 0 && r >= 0)
3074 r = q;
3075 }
3076 u->deserialized_refs = strv_free(u->deserialized_refs);
3077
3078 if (UNIT_VTABLE(u)->coldplug) {
3079 q = UNIT_VTABLE(u)->coldplug(u);
3080 if (q < 0 && r >= 0)
3081 r = q;
3082 }
3083
3084 if (u->job) {
3085 q = job_coldplug(u->job);
3086 if (q < 0 && r >= 0)
3087 r = q;
3088 }
3089
3090 return r;
3091 }
3092
3093 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3094 struct stat st;
3095
3096 if (!path)
3097 return false;
3098
3099 if (stat(path, &st) < 0)
3100 /* What, cannot access this anymore? */
3101 return true;
3102
3103 if (path_masked)
3104 /* For masked files check if they are still so */
3105 return !null_or_empty(&st);
3106 else
3107 /* For non-empty files check the mtime */
3108 return timespec_load(&st.st_mtim) > mtime;
3109
3111 }
3112
3113 bool unit_need_daemon_reload(Unit *u) {
3114 _cleanup_strv_free_ char **t = NULL;
3115 char **path;
3116
3117 assert(u);
3118
3119 /* For unit files, we allow masking… */
3120 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3121 u->load_state == UNIT_MASKED))
3122 return true;
3123
3124 /* Source paths should not be masked… */
3125 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3126 return true;
3127
3128 (void) unit_find_dropin_paths(u, &t);
3129 if (!strv_equal(u->dropin_paths, t))
3130 return true;
3131
3132 /* … any drop-ins that are masked are simply omitted from the list. */
3133 STRV_FOREACH(path, u->dropin_paths)
3134 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3135 return true;
3136
3137 return false;
3138 }
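
/* Illustrative sketch: this check backs the NeedDaemonReload D-Bus property and the
 * "changed on disk" hint printed by systemctl. A hypothetical caller (not from this file)
 * might use it as:
 *
 *         if (unit_need_daemon_reload(u))
 *                 log_unit_info(u, "Unit file changed on disk, 'systemctl daemon-reload' recommended.");
 *
 * It triggers when the fragment, the source file or any non-masked drop-in is newer than
 * what was loaded, or when the set of drop-in paths itself changed. */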
3139
3140 void unit_reset_failed(Unit *u) {
3141 assert(u);
3142
3143 if (UNIT_VTABLE(u)->reset_failed)
3144 UNIT_VTABLE(u)->reset_failed(u);
3145
3146 RATELIMIT_RESET(u->start_limit);
3147 u->start_limit_hit = false;
3148 }
3149
3150 Unit *unit_following(Unit *u) {
3151 assert(u);
3152
3153 if (UNIT_VTABLE(u)->following)
3154 return UNIT_VTABLE(u)->following(u);
3155
3156 return NULL;
3157 }
3158
3159 bool unit_stop_pending(Unit *u) {
3160 assert(u);
3161
3162 /* This call does not check the current state of the unit. It's
3163 * hence useful to be called from state change calls of the
3164 * unit itself, where the state isn't updated yet. This is
3165 * different from unit_inactive_or_pending() which checks both
3166 * the current state and for a queued job. */
3167
3168 return u->job && u->job->type == JOB_STOP;
3169 }
3170
3171 bool unit_inactive_or_pending(Unit *u) {
3172 assert(u);
3173
3174 /* Returns true if the unit is inactive or going down */
3175
3176 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3177 return true;
3178
3179 if (unit_stop_pending(u))
3180 return true;
3181
3182 return false;
3183 }
3184
3185 bool unit_active_or_pending(Unit *u) {
3186 assert(u);
3187
3188 /* Returns true if the unit is active or going up */
3189
3190 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3191 return true;
3192
3193 if (u->job &&
3194 (u->job->type == JOB_START ||
3195 u->job->type == JOB_RELOAD_OR_START ||
3196 u->job->type == JOB_RESTART))
3197 return true;
3198
3199 return false;
3200 }
3201
3202 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
3203 assert(u);
3204 assert(w >= 0 && w < _KILL_WHO_MAX);
3205 assert(SIGNAL_VALID(signo));
3206
3207 if (!UNIT_VTABLE(u)->kill)
3208 return -EOPNOTSUPP;
3209
3210 return UNIT_VTABLE(u)->kill(u, w, signo, error);
3211 }
3212
3213 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3214 Set *pid_set;
3215 int r;
3216
3217 pid_set = set_new(NULL);
3218 if (!pid_set)
3219 return NULL;
3220
3221 /* Exclude the main/control pids from being killed via the cgroup */
3222 if (main_pid > 0) {
3223 r = set_put(pid_set, PID_TO_PTR(main_pid));
3224 if (r < 0)
3225 goto fail;
3226 }
3227
3228 if (control_pid > 0) {
3229 r = set_put(pid_set, PID_TO_PTR(control_pid));
3230 if (r < 0)
3231 goto fail;
3232 }
3233
3234 return pid_set;
3235
3236 fail:
3237 set_free(pid_set);
3238 return NULL;
3239 }
3240
3241 int unit_kill_common(
3242 Unit *u,
3243 KillWho who,
3244 int signo,
3245 pid_t main_pid,
3246 pid_t control_pid,
3247 sd_bus_error *error) {
3248
3249 int r = 0;
3250 bool killed = false;
3251
3252 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
3253 if (main_pid < 0)
3254 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
3255 else if (main_pid == 0)
3256 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
3257 }
3258
3259 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
3260 if (control_pid < 0)
3261 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
3262 else if (control_pid == 0)
3263 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
3264 }
3265
3266 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
3267 if (control_pid > 0) {
3268 if (kill(control_pid, signo) < 0)
3269 r = -errno;
3270 else
3271 killed = true;
3272 }
3273
3274 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
3275 if (main_pid > 0) {
3276 if (kill(main_pid, signo) < 0)
3277 r = -errno;
3278 else
3279 killed = true;
3280 }
3281
3282 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
3283 _cleanup_set_free_ Set *pid_set = NULL;
3284 int q;
3285
3286 /* Exclude the main/control pids from being killed via the cgroup */
3287 pid_set = unit_pid_set(main_pid, control_pid);
3288 if (!pid_set)
3289 return -ENOMEM;
3290
3291 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
3292 if (q < 0 && q != -EAGAIN && q != -ESRCH && q != -ENOENT)
3293 r = q;
3294 else
3295 killed = true;
3296 }
3297
3298 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
3299 return -ESRCH;
3300
3301 return r;
3302 }
3303
3304 int unit_following_set(Unit *u, Set **s) {
3305 assert(u);
3306 assert(s);
3307
3308 if (UNIT_VTABLE(u)->following_set)
3309 return UNIT_VTABLE(u)->following_set(u, s);
3310
3311 *s = NULL;
3312 return 0;
3313 }
3314
3315 UnitFileState unit_get_unit_file_state(Unit *u) {
3316 int r;
3317
3318 assert(u);
3319
3320 if (u->unit_file_state < 0 && u->fragment_path) {
3321 r = unit_file_get_state(
3322 u->manager->unit_file_scope,
3323 NULL,
3324 basename(u->fragment_path),
3325 &u->unit_file_state);
3326 if (r < 0)
3327 u->unit_file_state = UNIT_FILE_BAD;
3328 }
3329
3330 return u->unit_file_state;
3331 }
3332
3333 int unit_get_unit_file_preset(Unit *u) {
3334 assert(u);
3335
3336 if (u->unit_file_preset < 0 && u->fragment_path)
3337 u->unit_file_preset = unit_file_query_preset(
3338 u->manager->unit_file_scope,
3339 NULL,
3340 basename(u->fragment_path));
3341
3342 return u->unit_file_preset;
3343 }
3344
3345 Unit* unit_ref_set(UnitRef *ref, Unit *u) {
3346 assert(ref);
3347 assert(u);
3348
3349 if (ref->unit)
3350 unit_ref_unset(ref);
3351
3352 ref->unit = u;
3353 LIST_PREPEND(refs, u->refs, ref);
3354 return u;
3355 }
3356
3357 void unit_ref_unset(UnitRef *ref) {
3358 assert(ref);
3359
3360 if (!ref->unit)
3361 return;
3362
3363 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3364 * be unreferenced now. */
3365 unit_add_to_gc_queue(ref->unit);
3366
3367 LIST_REMOVE(refs, ref->unit->refs, ref);
3368 ref->unit = NULL;
3369 }
3370
3371 static int user_from_unit_name(Unit *u, char **ret) {
3372
3373 static const uint8_t hash_key[] = {
3374 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3375 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3376 };
3377
3378 _cleanup_free_ char *n = NULL;
3379 int r;
3380
3381 r = unit_name_to_prefix(u->id, &n);
3382 if (r < 0)
3383 return r;
3384
3385 if (valid_user_group_name(n)) {
3386 *ret = n;
3387 n = NULL;
3388 return 0;
3389 }
3390
3391 /* If we can't use the unit name as a user name, then let's hash it and use that */
3392 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3393 return -ENOMEM;
3394
3395 return 0;
3396 }
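
/* Illustrative sketch: a unit named "foobar.service" yields the dynamic user name "foobar";
 * a prefix that is not a valid user name (too long, leading digit, odd characters) is hashed
 * instead, producing something of the form "_du0123456789abcdef" ("_du" plus 16 hex digits),
 * so DynamicUser= always has a deterministic name to hand to the dynamic UID allocation. */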
3397
3398 int unit_patch_contexts(Unit *u) {
3399 CGroupContext *cc;
3400 ExecContext *ec;
3401 unsigned i;
3402 int r;
3403
3404 assert(u);
3405
3406 /* Patch in the manager defaults into the exec and cgroup
3407 * contexts, _after_ the rest of the settings have been
3408 * initialized */
3409
3410 ec = unit_get_exec_context(u);
3411 if (ec) {
3412 /* This only copies in the ones that need memory */
3413 for (i = 0; i < _RLIMIT_MAX; i++)
3414 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
3415 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
3416 if (!ec->rlimit[i])
3417 return -ENOMEM;
3418 }
3419
3420 if (MANAGER_IS_USER(u->manager) &&
3421 !ec->working_directory) {
3422
3423 r = get_home_dir(&ec->working_directory);
3424 if (r < 0)
3425 return r;
3426
3427 /* Allow user services to run, even if the
3428 * home directory is missing */
3429 ec->working_directory_missing_ok = true;
3430 }
3431
3432 if (MANAGER_IS_USER(u->manager) &&
3433 (ec->syscall_whitelist ||
3434 !set_isempty(ec->syscall_filter) ||
3435 !set_isempty(ec->syscall_archs) ||
3436 ec->address_families_whitelist ||
3437 !set_isempty(ec->address_families)))
3438 ec->no_new_privileges = true;
3439
3440 if (ec->private_devices)
3441 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
3442
3443 if (ec->protect_kernel_modules)
3444 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
3445
3446 if (ec->dynamic_user) {
3447 if (!ec->user) {
3448 r = user_from_unit_name(u, &ec->user);
3449 if (r < 0)
3450 return r;
3451 }
3452
3453 if (!ec->group) {
3454 ec->group = strdup(ec->user);
3455 if (!ec->group)
3456 return -ENOMEM;
3457 }
3458
3459 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3460 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3461
3462 ec->private_tmp = true;
3463 ec->remove_ipc = true;
3464 ec->protect_system = PROTECT_SYSTEM_STRICT;
3465 if (ec->protect_home == PROTECT_HOME_NO)
3466 ec->protect_home = PROTECT_HOME_READ_ONLY;
3467 }
3468 }
3469
3470 cc = unit_get_cgroup_context(u);
3471 if (cc) {
3472
3473 if (ec &&
3474 ec->private_devices &&
3475 cc->device_policy == CGROUP_AUTO)
3476 cc->device_policy = CGROUP_CLOSED;
3477 }
3478
3479 return 0;
3480 }
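
/* Illustrative summary: a unit with DynamicUser=yes and no explicit User=/Group= therefore
 * ends up with a user/group name derived via user_from_unit_name() above plus the hard-coded
 * sandbox: PrivateTmp=yes, RemoveIPC=yes, ProtectSystem=strict and at least
 * ProtectHome=read-only, so nothing owned by the transient UID can linger in the file system
 * after the unit stops. */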
3481
3482 ExecContext *unit_get_exec_context(Unit *u) {
3483 size_t offset;
3484 assert(u);
3485
3486 if (u->type < 0)
3487 return NULL;
3488
3489 offset = UNIT_VTABLE(u)->exec_context_offset;
3490 if (offset <= 0)
3491 return NULL;
3492
3493 return (ExecContext*) ((uint8_t*) u + offset);
3494 }
3495
3496 KillContext *unit_get_kill_context(Unit *u) {
3497 size_t offset;
3498 assert(u);
3499
3500 if (u->type < 0)
3501 return NULL;
3502
3503 offset = UNIT_VTABLE(u)->kill_context_offset;
3504 if (offset <= 0)
3505 return NULL;
3506
3507 return (KillContext*) ((uint8_t*) u + offset);
3508 }
3509
3510 CGroupContext *unit_get_cgroup_context(Unit *u) {
3511 size_t offset;
3512
3513 if (u->type < 0)
3514 return NULL;
3515
3516 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3517 if (offset <= 0)
3518 return NULL;
3519
3520 return (CGroupContext*) ((uint8_t*) u + offset);
3521 }
3522
3523 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3524 size_t offset;
3525
3526 if (u->type < 0)
3527 return NULL;
3528
3529 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3530 if (offset <= 0)
3531 return NULL;
3532
3533 return *(ExecRuntime**) ((uint8_t*) u + offset);
3534 }
3535
3536 static const char* unit_drop_in_dir(Unit *u, UnitSetPropertiesMode mode) {
3537 assert(u);
3538
3539 if (!IN_SET(mode, UNIT_RUNTIME, UNIT_PERSISTENT))
3540 return NULL;
3541
3542 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3543 return u->manager->lookup_paths.transient;
3544
3545 if (mode == UNIT_RUNTIME)
3546 return u->manager->lookup_paths.runtime_control;
3547
3548 if (mode == UNIT_PERSISTENT)
3549 return u->manager->lookup_paths.persistent_control;
3550
3551 return NULL;
3552 }
3553
3554 int unit_write_drop_in(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3555 _cleanup_free_ char *p = NULL, *q = NULL;
3556 const char *dir, *wrapped;
3557 int r;
3558
3559 assert(u);
3560
3561 if (u->transient_file) {
3562 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3563 * write to the transient unit file. */
3564 fputs(data, u->transient_file);
3565 fputc('\n', u->transient_file);
3566 return 0;
3567 }
3568
3569 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3570 return 0;
3571
3572 dir = unit_drop_in_dir(u, mode);
3573 if (!dir)
3574 return -EINVAL;
3575
3576 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3577 "# or an equivalent operation. Do not edit.\n",
3578 data,
3579 "\n");
3580
3581 r = drop_in_file(dir, u->id, 50, name, &p, &q);
3582 if (r < 0)
3583 return r;
3584
3585 (void) mkdir_p(p, 0755);
3586 r = write_string_file_atomic_label(q, wrapped);
3587 if (r < 0)
3588 return r;
3589
3590 r = strv_push(&u->dropin_paths, q);
3591 if (r < 0)
3592 return r;
3593 q = NULL;
3594
3595 strv_uniq(u->dropin_paths);
3596
3597 u->dropin_mtime = now(CLOCK_REALTIME);
3598
3599 return 0;
3600 }
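
/* Illustrative usage sketch (the property name and snippet are hypothetical): a runtime
 * "systemctl set-property"-style change might be persisted as
 *
 *         r = unit_write_drop_in(u, UNIT_RUNTIME, "MemoryLimit",
 *                                "[Service]\nMemoryLimit=1G\n");
 *
 * which creates a 50-MemoryLimit.conf drop-in under <runtime control dir>/<unit>.d/, records
 * its path in u->dropin_paths and bumps u->dropin_mtime. For a transient unit that is still
 * being created, the snippet is instead appended to the open transient_file, as handled at
 * the top of this function. */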
3601
3602 int unit_write_drop_in_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3603 _cleanup_free_ char *p = NULL;
3604 va_list ap;
3605 int r;
3606
3607 assert(u);
3608 assert(name);
3609 assert(format);
3610
3611 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3612 return 0;
3613
3614 va_start(ap, format);
3615 r = vasprintf(&p, format, ap);
3616 va_end(ap);
3617
3618 if (r < 0)
3619 return -ENOMEM;
3620
3621 return unit_write_drop_in(u, mode, name, p);
3622 }
3623
3624 int unit_write_drop_in_private(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *data) {
3625 const char *ndata;
3626
3627 assert(u);
3628 assert(name);
3629 assert(data);
3630
3631 if (!UNIT_VTABLE(u)->private_section)
3632 return -EINVAL;
3633
3634 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3635 return 0;
3636
3637 ndata = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
3638
3639 return unit_write_drop_in(u, mode, name, ndata);
3640 }
3641
3642 int unit_write_drop_in_private_format(Unit *u, UnitSetPropertiesMode mode, const char *name, const char *format, ...) {
3643 _cleanup_free_ char *p = NULL;
3644 va_list ap;
3645 int r;
3646
3647 assert(u);
3648 assert(name);
3649 assert(format);
3650
3651 if (!IN_SET(mode, UNIT_PERSISTENT, UNIT_RUNTIME))
3652 return 0;
3653
3654 va_start(ap, format);
3655 r = vasprintf(&p, format, ap);
3656 va_end(ap);
3657
3658 if (r < 0)
3659 return -ENOMEM;
3660
3661 return unit_write_drop_in_private(u, mode, name, p);
3662 }
3663
3664 int unit_make_transient(Unit *u) {
3665 FILE *f;
3666 char *path;
3667
3668 assert(u);
3669
3670 if (!UNIT_VTABLE(u)->can_transient)
3671 return -EOPNOTSUPP;
3672
3673 path = strjoin(u->manager->lookup_paths.transient, "/", u->id);
3674 if (!path)
3675 return -ENOMEM;
3676
3677 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3678 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3679
3680 RUN_WITH_UMASK(0022) {
3681 f = fopen(path, "we");
3682 if (!f) {
3683 free(path);
3684 return -errno;
3685 }
3686 }
3687
3688 if (u->transient_file)
3689 fclose(u->transient_file);
3690 u->transient_file = f;
3691
3692 free(u->fragment_path);
3693 u->fragment_path = path;
3694
3695 u->source_path = mfree(u->source_path);
3696 u->dropin_paths = strv_free(u->dropin_paths);
3697 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
3698
3699 u->load_state = UNIT_STUB;
3700 u->load_error = 0;
3701 u->transient = true;
3702
3703 unit_add_to_dbus_queue(u);
3704 unit_add_to_gc_queue(u);
3705
3706 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3707 u->transient_file);
3708
3709 return 0;
3710 }
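
/* Illustrative sketch: for a transient system unit named "run-u42.service" (name is
 * hypothetical) the file opened above typically lives at
 * /run/systemd/transient/run-u42.service, i.e. lookup_paths.transient joined with the unit
 * id; it stays open while properties are streamed into it and is closed by unit_load(). */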
3711
3712 static void log_kill(pid_t pid, int sig, void *userdata) {
3713 _cleanup_free_ char *comm = NULL;
3714
3715 (void) get_process_comm(pid, &comm);
3716
3717 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3718 only, like for example systemd's own PAM stub process. */
3719 if (comm && comm[0] == '(')
3720 return;
3721
3722 log_unit_notice(userdata,
3723 "Killing process " PID_FMT " (%s) with signal SIG%s.",
3724 pid,
3725 strna(comm),
3726 signal_to_string(sig));
3727 }
3728
3729 static int operation_to_signal(KillContext *c, KillOperation k) {
3730 assert(c);
3731
3732 switch (k) {
3733
3734 case KILL_TERMINATE:
3735 case KILL_TERMINATE_AND_LOG:
3736 return c->kill_signal;
3737
3738 case KILL_KILL:
3739 return SIGKILL;
3740
3741 case KILL_ABORT:
3742 return SIGABRT;
3743
3744 default:
3745 assert_not_reached("KillOperation unknown");
3746 }
3747 }
3748
3749 int unit_kill_context(
3750 Unit *u,
3751 KillContext *c,
3752 KillOperation k,
3753 pid_t main_pid,
3754 pid_t control_pid,
3755 bool main_pid_alien) {
3756
3757 bool wait_for_exit = false, send_sighup;
3758 cg_kill_log_func_t log_func;
3759 int sig, r;
3760
3761 assert(u);
3762 assert(c);
3763
3764 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0 if we
3765 * killed something worth waiting for, 0 otherwise. */
3766
3767 if (c->kill_mode == KILL_NONE)
3768 return 0;
3769
3770 sig = operation_to_signal(c, k);
3771
3772 send_sighup =
3773 c->send_sighup &&
3774 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
3775 sig != SIGHUP;
3776
3777 log_func =
3778 k != KILL_TERMINATE ||
3779 IN_SET(sig, SIGKILL, SIGABRT) ? log_kill : NULL;
3780
3781 if (main_pid > 0) {
3782 if (log_func)
3783 log_func(main_pid, sig, u);
3784
3785 r = kill_and_sigcont(main_pid, sig);
3786 if (r < 0 && r != -ESRCH) {
3787 _cleanup_free_ char *comm = NULL;
3788 (void) get_process_comm(main_pid, &comm);
3789
3790 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
3791 } else {
3792 if (!main_pid_alien)
3793 wait_for_exit = true;
3794
3795 if (r != -ESRCH && send_sighup)
3796 (void) kill(main_pid, SIGHUP);
3797 }
3798 }
3799
3800 if (control_pid > 0) {
3801 if (log_func)
3802 log_func(control_pid, sig, u);
3803
3804 r = kill_and_sigcont(control_pid, sig);
3805 if (r < 0 && r != -ESRCH) {
3806 _cleanup_free_ char *comm = NULL;
3807 (void) get_process_comm(control_pid, &comm);
3808
3809 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
3810 } else {
3811 wait_for_exit = true;
3812
3813 if (r != -ESRCH && send_sighup)
3814 (void) kill(control_pid, SIGHUP);
3815 }
3816 }
3817
3818 if (u->cgroup_path &&
3819 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
3820 _cleanup_set_free_ Set *pid_set = NULL;
3821
3822 /* Exclude the main/control pids from being killed via the cgroup */
3823 pid_set = unit_pid_set(main_pid, control_pid);
3824 if (!pid_set)
3825 return -ENOMEM;
3826
3827 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3828 sig,
3829 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
3830 pid_set,
3831 log_func, u);
3832 if (r < 0) {
3833 if (r != -EAGAIN && r != -ESRCH && r != -ENOENT)
3834 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
3835
3836 } else if (r > 0) {
3837
3838 /* FIXME: For now, on the legacy hierarchy, we
3839 * will not wait for the cgroup members to die
3840 * if we are running in a container or if this
3841 * is a delegation unit, simply because cgroup
3842 * notification is unreliable in these
3843 * cases. It doesn't work at all in
3844 * containers, and outside of containers it
3845 * can be confused easily by left-over
3846 * directories in the cgroup — which however
3847 * should not exist in non-delegated units. On
3848 * the unified hierarchy that's different,
3849 * there we get proper events. Hence rely on
3850 * them. */
3851
3852 if (cg_unified(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
3853 (detect_container() == 0 && !unit_cgroup_delegate(u)))
3854 wait_for_exit = true;
3855
3856 if (send_sighup) {
3857 set_free(pid_set);
3858
3859 pid_set = unit_pid_set(main_pid, control_pid);
3860 if (!pid_set)
3861 return -ENOMEM;
3862
3863 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
3864 SIGHUP,
3865 CGROUP_IGNORE_SELF,
3866 pid_set,
3867 NULL, NULL);
3868 }
3869 }
3870 }
3871
3872 return wait_for_exit;
3873 }
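
/* Illustrative sketch: with the defaults KillMode=control-group and KillSignal=SIGTERM, a
 * KILL_TERMINATE operation sends SIGTERM (plus SIGCONT, and SIGHUP when SendSIGHUP=yes) to
 * the main and control process and then sweeps the rest of the cgroup; the later KILL_KILL
 * escalation repeats the walk with SIGKILL and without the SIGHUP. With KillMode=mixed the
 * cgroup-wide sweep only happens for the final SIGKILL, as the condition above shows. */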
3874
3875 int unit_require_mounts_for(Unit *u, const char *path) {
3876 char prefix[strlen(path) + 1], *p;
3877 int r;
3878
3879 assert(u);
3880 assert(path);
3881
3882 /* Registers a unit for requiring a certain path and all its
3883 * prefixes. We keep a simple array of these paths in the
3884 * unit, since it's usually short. However, we build a prefix
3885 * table for all possible prefixes so that newly appearing mount
3886 * units can easily determine which units to make themselves a
3887 * dependency of. */
3888
3889 if (!path_is_absolute(path))
3890 return -EINVAL;
3891
3892 p = strdup(path);
3893 if (!p)
3894 return -ENOMEM;
3895
3896 path_kill_slashes(p);
3897
3898 if (!path_is_safe(p)) {
3899 free(p);
3900 return -EPERM;
3901 }
3902
3903 if (strv_contains(u->requires_mounts_for, p)) {
3904 free(p);
3905 return 0;
3906 }
3907
3908 r = strv_consume(&u->requires_mounts_for, p);
3909 if (r < 0)
3910 return r;
3911
3912 PATH_FOREACH_PREFIX_MORE(prefix, p) {
3913 Set *x;
3914
3915 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
3916 if (!x) {
3917 char *q;
3918
3919 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &string_hash_ops);
3920 if (r < 0)
3921 return r;
3922
3923 q = strdup(prefix);
3924 if (!q)
3925 return -ENOMEM;
3926
3927 x = set_new(NULL);
3928 if (!x) {
3929 free(q);
3930 return -ENOMEM;
3931 }
3932
3933 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
3934 if (r < 0) {
3935 free(q);
3936 set_free(x);
3937 return r;
3938 }
3939 }
3940
3941 r = set_put(x, u);
3942 if (r < 0)
3943 return r;
3944 }
3945
3946 return 0;
3947 }
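
/* Illustrative sketch: for path "/var/lib/foo" the loop above registers u in
 * manager->units_requiring_mounts_for under the keys "/var/lib/foo", "/var/lib", "/var" and
 * the root prefix, so that a mount unit appearing later for any of these paths can look up
 * which units declared RequiresMountsFor= on them and add the appropriate dependencies. */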
3948
3949 int unit_setup_exec_runtime(Unit *u) {
3950 ExecRuntime **rt;
3951 size_t offset;
3952 Iterator i;
3953 Unit *other;
3954
3955 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3956 assert(offset > 0);
3957
3958 /* Is there already an ExecRuntime for this unit? */
3959 rt = (ExecRuntime**) ((uint8_t*) u + offset);
3960 if (*rt)
3961 return 0;
3962
3963 /* Try to get it from somebody else */
3964 SET_FOREACH(other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
3965
3966 *rt = unit_get_exec_runtime(other);
3967 if (*rt) {
3968 exec_runtime_ref(*rt);
3969 return 0;
3970 }
3971 }
3972
3973 return exec_runtime_make(rt, unit_get_exec_context(u), u->id);
3974 }
3975
3976 int unit_setup_dynamic_creds(Unit *u) {
3977 ExecContext *ec;
3978 DynamicCreds *dcreds;
3979 size_t offset;
3980
3981 assert(u);
3982
3983 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
3984 assert(offset > 0);
3985 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
3986
3987 ec = unit_get_exec_context(u);
3988 assert(ec);
3989
3990 if (!ec->dynamic_user)
3991 return 0;
3992
3993 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
3994 }
3995
3996 bool unit_type_supported(UnitType t) {
3997 if (_unlikely_(t < 0))
3998 return false;
3999 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4000 return false;
4001
4002 if (!unit_vtable[t]->supported)
4003 return true;
4004
4005 return unit_vtable[t]->supported();
4006 }
4007
4008 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4009 int r;
4010
4011 assert(u);
4012 assert(where);
4013
4014 r = dir_is_empty(where);
4015 if (r > 0)
4016 return;
4017 if (r < 0) {
4018 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4019 return;
4020 }
4021
4022 log_struct(LOG_NOTICE,
4023 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
4024 LOG_UNIT_ID(u),
4025 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4026 "WHERE=%s", where,
4027 NULL);
4028 }
4029
4030 int unit_fail_if_symlink(Unit *u, const char* where) {
4031 int r;
4032
4033 assert(u);
4034 assert(where);
4035
4036 r = is_symlink(where);
4037 if (r < 0) {
4038 log_unit_debug_errno(u, r, "Failed to check symlink %s, ignoring: %m", where);
4039 return 0;
4040 }
4041 if (r == 0)
4042 return 0;
4043
4044 log_struct(LOG_ERR,
4045 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING),
4046 LOG_UNIT_ID(u),
4047 LOG_UNIT_MESSAGE(u, "Mount on symlink %s not allowed.", where),
4048 "WHERE=%s", where,
4049 NULL);
4050
4051 return -ELOOP;
4052 }
4053
4054 bool unit_is_pristine(Unit *u) {
4055 assert(u);
4056
4057 /* Check if the unit already exists or is already around,
4058 * in a number of different ways. Note that to cater for unit
4059 * types such as slice, we are generally fine with units that
4060 * are marked UNIT_LOADED even though nothing was
4061 * actually loaded, as those unit types don't require a file
4062 * on disk to validly load. */
4063
4064 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4065 u->fragment_path ||
4066 u->source_path ||
4067 !strv_isempty(u->dropin_paths) ||
4068 u->job ||
4069 u->merged_into);
4070 }
4071
4072 pid_t unit_control_pid(Unit *u) {
4073 assert(u);
4074
4075 if (UNIT_VTABLE(u)->control_pid)
4076 return UNIT_VTABLE(u)->control_pid(u);
4077
4078 return 0;
4079 }
4080
4081 pid_t unit_main_pid(Unit *u) {
4082 assert(u);
4083
4084 if (UNIT_VTABLE(u)->main_pid)
4085 return UNIT_VTABLE(u)->main_pid(u);
4086
4087 return 0;
4088 }
4089
4090 static void unit_unref_uid_internal(
4091 Unit *u,
4092 uid_t *ref_uid,
4093 bool destroy_now,
4094 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4095
4096 assert(u);
4097 assert(ref_uid);
4098 assert(_manager_unref_uid);
4099
4100 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4101 * gid_t are actually the same type, with the same validity rules.
4102 *
4103 * Drops a reference to UID/GID from a unit. */
4104
4105 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4106 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4107
4108 if (!uid_is_valid(*ref_uid))
4109 return;
4110
4111 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
4112 *ref_uid = UID_INVALID;
4113 }
4114
4115 void unit_unref_uid(Unit *u, bool destroy_now) {
4116 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
4117 }
4118
4119 void unit_unref_gid(Unit *u, bool destroy_now) {
4120 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
4121 }
4122
4123 static int unit_ref_uid_internal(
4124 Unit *u,
4125 uid_t *ref_uid,
4126 uid_t uid,
4127 bool clean_ipc,
4128 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
4129
4130 int r;
4131
4132 assert(u);
4133 assert(ref_uid);
4134 assert(uid_is_valid(uid));
4135 assert(_manager_ref_uid);
4136
4137 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4138 * are actually the same type, and have the same validity rules.
4139 *
4140 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4141 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4142 * drops to zero. */
4143
4144 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4145 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4146
4147 if (*ref_uid == uid)
4148 return 0;
4149
4150 if (uid_is_valid(*ref_uid)) /* Already set? */
4151 return -EBUSY;
4152
4153 r = _manager_ref_uid(u->manager, uid, clean_ipc);
4154 if (r < 0)
4155 return r;
4156
4157 *ref_uid = uid;
4158 return 1;
4159 }
4160
4161 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
4162 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
4163 }
4164
4165 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
4166 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
4167 }
4168
4169 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
4170 int r = 0, q = 0;
4171
4172 assert(u);
4173
4174 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4175
4176 if (uid_is_valid(uid)) {
4177 r = unit_ref_uid(u, uid, clean_ipc);
4178 if (r < 0)
4179 return r;
4180 }
4181
4182 if (gid_is_valid(gid)) {
4183 q = unit_ref_gid(u, gid, clean_ipc);
4184 if (q < 0) {
4185 if (r > 0)
4186 unit_unref_uid(u, false);
4187
4188 return q;
4189 }
4190 }
4191
4192 return r > 0 || q > 0;
4193 }
4194
4195 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4196 ExecContext *c;
4197 int r;
4198
4199 assert(u);
4200
4201 c = unit_get_exec_context(u);
4202
4203 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4204 if (r < 0)
4205 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4206
4207 return r;
4208 }
4209
4210 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
4211 assert(u);
4212
4213 unit_unref_uid(u, destroy_now);
4214 unit_unref_gid(u, destroy_now);
4215 }
4216
4217 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4218 int r;
4219
4220 assert(u);
4221
4222 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user/group name
4223 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4224 * objects when no service references the UID/GID anymore. */
4225
4226 r = unit_ref_uid_gid(u, uid, gid);
4227 if (r > 0)
4228 bus_unit_send_change_signal(u);
4229 }
4230
4231 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
4232 int r;
4233
4234 assert(u);
4235
4236 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4237
4238 if (sd_id128_equal(u->invocation_id, id))
4239 return 0;
4240
4241 if (!sd_id128_is_null(u->invocation_id))
4242 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
4243
4244 if (sd_id128_is_null(id)) {
4245 r = 0;
4246 goto reset;
4247 }
4248
4249 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
4250 if (r < 0)
4251 goto reset;
4252
4253 u->invocation_id = id;
4254 sd_id128_to_string(id, u->invocation_id_string);
4255
4256 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
4257 if (r < 0)
4258 goto reset;
4259
4260 return 0;
4261
4262 reset:
4263 u->invocation_id = SD_ID128_NULL;
4264 u->invocation_id_string[0] = 0;
4265 return r;
4266 }
4267
4268 int unit_acquire_invocation_id(Unit *u) {
4269 sd_id128_t id;
4270 int r;
4271
4272 assert(u);
4273
4274 r = sd_id128_randomize(&id);
4275 if (r < 0)
4276 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4277
4278 r = unit_set_invocation_id(u, id);
4279 if (r < 0)
4280 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4281
4282 return 0;
4283 }
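
/* Illustrative note: the invocation ID acquired here is a random 128-bit identifier generated
 * anew for every activation cycle; formatted via sd_id128_to_string() it becomes the
 * 32-character hex string that is exported to the unit's processes as $INVOCATION_ID and, as
 * an assumption about the journal side, shows up on their log entries as
 * _SYSTEMD_INVOCATION_ID for per-run filtering. */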