/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "io-util.h"
#include "install.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
#define MENTIONWORTHY_IP_BYTES (0ULL)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
#define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL)  /* 10 MB */
#define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);

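/* Allocates and zero-initializes a new unit object of 'size' bytes (the type-specific object size, which
 * must be at least sizeof(Unit)), pre-setting all fields that use sentinel values (invalid fds and IDs,
 * infinite timeouts) before the type-specific unit_init() runs. Returns NULL on allocation failure.
 *
 * Sketch of a typical (hypothetical) caller:
 *
 *         Unit *u = unit_new(m, unit_vtable[UNIT_SERVICE]->object_size);
 *         if (!u)
 *                 return -ENOMEM;
 */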
Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}

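/* Runs once the unit type is known: seeds the cgroup, exec and kill contexts with the manager-wide
 * defaults and then invokes the per-type init() hook from the vtable. */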
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

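/* Registers an additional name (alias) for this unit. Template names are first instantiated with the
 * unit's instance string. The first name added also fixes the unit's type, id and instance, and triggers
 * unit_init(). Returns 0 on success (or if the name was already present), negative errno on failure. */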
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}

static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

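/* Drops the unit from the manager's units_requiring_mounts_for index for every path (and path prefix) it
 * had registered, then frees the unit's own requires_mounts_for hashmap. */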
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

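/* Tears a unit down completely: drops it from all manager queues and indexes, cancels its jobs, detaches
 * it from both sides of every dependency, releases cgroup and BPF resources and finally frees the memory.
 * Safe to call with NULL. */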
void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        set_free(u->ip_bpf_custom_ingress);
        set_free(u->ip_bpf_custom_egress);
        set_free(u->ip_bpf_custom_ingress_installed);
        set_free(u->ip_bpf_custom_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

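/* Helpers that move all entries of *other into *s, or hand over the whole container if *s doesn't exist
 * yet. On return *other is empty (or NULL). */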
static int set_complete_move(Set **s, Set **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return set_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return hashmap_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

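/* Moves all names of 'other' over to 'u' and repoints the manager's name index at 'u' for each of them.
 * Afterwards 'other' carries no names at all. */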
static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}

static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}

static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependent unit, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}

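/* Merges unit 'other' into unit 'u': all names and dependencies of 'other' are moved over to 'u', and
 * 'other' is marked UNIT_MERGED and queued for cleanup. Only permitted while 'other' is still a stub (or
 * not found), carries no jobs and is inactive. */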
int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

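/* Adds the implicit dependencies an ExecContext brings in: RequiresMountsFor= for the working directory,
 * root directory/image and exec directories, and, in the system instance, mounts of /tmp and /var/tmp plus
 * ordering after tmpfiles setup for PrivateTmp=, and ordering after the journald socket when stdout/stderr
 * go to the journal, syslog or kmsg. */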
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = path_join(u->manager->prefix[dt], *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}

const char *unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char *unit_status_string(Unit *u) {
        assert(u);

        if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
                return u->id;

        return unit_description(u);
}

static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (mask == 0)
                        break;

                if (FLAGS_SET(mask, table[i].mask)) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}

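/* Writes a human-readable, multi-line description of the unit's complete state to 'f', each line prefixed
 * with 'prefix'. This backs the manager's debug state dump. */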
void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

        fprintf(f,
                "%s-> Unit %s:\n",
                prefix, u->id);

        SET_FOREACH(t, u->names, i)
                if (!streq(t, u->id))
                        fprintf(f, "%s\tAlias: %s\n", prefix, t);

        fprintf(f,
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tMay GC: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }

        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_delegate_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
        }

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->failure_action_exit_status >= 0)
                fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
        if (u->success_action_exit_status >= 0)
                fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                return -ENOENT;

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin_optional(Unit *u) {
        int r;

        assert(u);

        /* Same as unit_load_fragment_and_dropin(), but whether
         * something can be loaded or not doesn't matter. */

        /* Load a .service/.socket/.slice/… file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB)
                u->load_state = UNIT_LOADED;

        /* Load drop-in directory data */
        return unit_load_dropin(unit_follow_merge(u));
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

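/* For every path in RequiresMountsFor=, adds After= dependencies on the .mount units covering that path
 * and all of its prefixes, plus Requires= when the mount unit is backed by a fragment file. Mount units
 * not loaded yet are scheduled for loading, so the dependency can be added from their side later. */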
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}

static int unit_add_startup_units(Unit *u) {
        CGroupContext *c;
        int r;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
            c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
            c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
                return 0;

        r = set_ensure_allocated(&u->manager->startup_units, NULL);
        if (r < 0)
                return r;

        return set_put(u->manager->startup_units, u);
}

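/* Pops the unit off the load queue (if queued) and runs the type-specific load routine, followed by the
 * generic post-load steps: target, slice and mount dependencies, startup unit registration and sanity
 * checks. On failure the load state is set to UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR as
 * appropriate. */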
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        if (UNIT_VTABLE(u)->load) {
                r = UNIT_VTABLE(u)->load(u);
                if (r < 0)
                        goto fail;
        }

        if (u->load_state == UNIT_STUB) {
                r = -ENOENT;
                goto fail;
        }

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->assert_result;
}

void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_status_string(u);
        if (log_get_show_color())
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}

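/* Checks the unit's start rate limit. Returns 0 if starting is allowed, and -ECANCELED (after triggering
 * the configured StartLimitAction=) if the limit was hit. */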
int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_limit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 * -EALREADY: Unit is already started.
 * -ECOMM: Condition failed
 * -EAGAIN: An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 * -EBADR: This unit type does not support starting.
 * -ECANCELED: Start limit hit, too many requests for now
 * -EPROTO: Assert failed
 * -EINVAL: Unit not loaded
 * -EOPNOTSUPP: Unit type not supported
 * -ENOLINK: The necessary dependencies are not fulfilled.
 * -ESTALE: This unit has been started before and can't be started a second time
 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u)) {

                /* Let's also check the start limit here. Normally, the start limit is only checked by the
                 * .start() method of the unit type after it did some additional checks verifying everything
                 * is in order (so that those other checks can propagate errors properly). However, if a
                 * condition check doesn't hold we don't get that far but we should still ensure we are not
                 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
                 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
                 * hence the chance is big that any triggering unit for us will trigger us again. Note this
                 * condition check is a bit different from the condition check inside the per-unit .start()
                 * function, as this one will not change the unit's state in any way (and we shouldn't here,
                 * after all the condition failed). */

                r = unit_test_start_limit(u);
                if (r < 0)
                        return r;

                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
        }

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}

bool unit_can_start(Unit *u) {
        assert(u);

        if (u->load_state != UNIT_LOADED)
                return false;

        if (!unit_type_supported(u->type))
                return false;

        /* Scope units may be started only once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
                return false;

        return !!UNIT_VTABLE(u)->start;
}

bool unit_can_isolate(Unit *u) {
        assert(u);

        return unit_can_start(u) &&
                u->allow_isolate;
}

/* Errors:
 * -EBADR: This unit type does not support stopping.
 * -EALREADY: Unit is already stopped.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}

bool unit_can_stop(Unit *u) {
        assert(u);

        if (!unit_type_supported(u->type))
                return false;

        if (u->perpetual)
                return false;

        return !!UNIT_VTABLE(u)->stop;
}

/* Errors:
 * -EBADR: This unit type does not support reloading.
 * -ENOEXEC: Unit is not started.
 * -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}

bool unit_can_reload(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_reload)
                return UNIT_VTABLE(u)->can_reload(u);

        if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
                return true;

        return UNIT_VTABLE(u)->reload;
}

bool unit_is_unneeded(Unit *u) {
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}

static void check_unneeded_dependencies(Unit *u) {

        static const UnitDependency deps[] = {
                UNIT_REQUIRES,
                UNIT_REQUISITE,
                UNIT_WANTS,
                UNIT_BINDS_TO,
        };
        size_t j;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
                        unit_submit_to_stop_when_unneeded_queue(other);
        }
}

2023 static void unit_check_binds_to(Unit *u) {
2024 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2025 bool stop = false;
2026 Unit *other;
2027 Iterator i;
2028 void *v;
2029 int r;
2030
2031 assert(u);
2032
2033 if (u->job)
2034 return;
2035
2036 if (unit_active_state(u) != UNIT_ACTIVE)
2037 return;
2038
2039 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2040 if (other->job)
2041 continue;
2042
2043 if (!other->coldplugged)
2044 /* We might yet create a job for the other unit… */
2045 continue;
2046
2047 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2048 continue;
2049
2050 stop = true;
2051 break;
2052 }
2053
2054 if (!stop)
2055 return;
2056
2057 assert(other);
2058
2059 /* If stopping a unit fails continuously we might enter a stop
2060 * loop here, hence stop acting on the service being
2061 * unnecessary after a while. */
2062 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2063 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2064 return;
2065 }
2066 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2067
2068 /* A unit we need to run is gone. Sniff. Let's stop this. */
2069 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2070 if (r < 0)
2071 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2072 }
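
/* Example: if foo.service (hypothetical) has BindsTo=bar.device and
 * bar.device goes inactive while foo.service is active and has no job
 * pending, the loop above notices this and a JOB_STOP job for foo.service
 * is enqueued, subject to the rate limit. */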
2073
2074 static void retroactively_start_dependencies(Unit *u) {
2075 Iterator i;
2076 Unit *other;
2077 void *v;
2078
2079 assert(u);
2080 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2081
2082 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2083 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2084 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2085 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2086
2087 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2088 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2089 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2090 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2091
2092 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2093 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2094 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2095 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2096
2097 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2098 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2099 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2100
2101 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2102 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2104 }
2105
2106 static void retroactively_stop_dependencies(Unit *u) {
2107 Unit *other;
2108 Iterator i;
2109 void *v;
2110
2111 assert(u);
2112 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2113
2114 /* Pull down units which are bound to us recursively if enabled */
2115 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2116 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2117 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2118 }
2119
2120 void unit_start_on_failure(Unit *u) {
2121 Unit *other;
2122 Iterator i;
2123 void *v;
2124 int r;
2125
2126 assert(u);
2127
2128 if (hashmap_isempty(u->dependencies[UNIT_ON_FAILURE]))
2129 return;
2130
2131 log_unit_info(u, "Triggering OnFailure= dependencies.");
2132
2133 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2134 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2135
2136 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2137 if (r < 0)
2138 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2139 }
2140 }
2141
2142 void unit_trigger_notify(Unit *u) {
2143 Unit *other;
2144 Iterator i;
2145 void *v;
2146
2147 assert(u);
2148
2149 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2150 if (UNIT_VTABLE(other)->trigger_notify)
2151 UNIT_VTABLE(other)->trigger_notify(other, u);
2152 }
2153
2154 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2155 if (condition_notice && log_level > LOG_NOTICE)
2156 return LOG_NOTICE;
2157 if (condition_info && log_level > LOG_INFO)
2158 return LOG_INFO;
2159 return log_level;
2160 }
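
/* Worked example for the thresholds used below: a unit that consumed 2s of
 * CPU time exceeds MENTIONWORTHY_CPU_NSEC (1s) but not NOTICEWORTHY_CPU_NSEC
 * (10min), hence an initial log_level of LOG_DEBUG is raised to LOG_INFO;
 * only beyond the 10min mark would it be raised further to LOG_NOTICE. */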
2161
2162 static int unit_log_resources(Unit *u) {
2163 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2164 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2165 _cleanup_free_ char *ingress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2166 int log_level = LOG_DEBUG; /* May be raised if consumed resources exceed a threshold */
2167 size_t n_message_parts = 0, n_iovec = 0;
2168 char* message_parts[1 + 2 + 2 + 1], *t;
2169 nsec_t nsec = NSEC_INFINITY;
2170 CGroupIPAccountingMetric m;
2171 size_t i;
2172 int r;
2173 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2174 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2175 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2176 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2177 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2178 };
2179 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2180 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2181 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2182 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2183 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2184 };
2185
2186 assert(u);
2187
2188 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2189 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2190 * information and the complete data in structured fields. */
2191
2192 (void) unit_get_cpu_usage(u, &nsec);
2193 if (nsec != NSEC_INFINITY) {
2194 char buf[FORMAT_TIMESPAN_MAX] = "";
2195
2196 /* Format the CPU time for inclusion in the structured log message */
2197 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2198 r = log_oom();
2199 goto finish;
2200 }
2201 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2202
2203 /* Format the CPU time for inclusion in the human language message string */
2204 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2205 t = strjoin("consumed ", buf, " CPU time");
2206 if (!t) {
2207 r = log_oom();
2208 goto finish;
2209 }
2210
2211 message_parts[n_message_parts++] = t;
2212
2213 log_level = raise_level(log_level,
2214 nsec > MENTIONWORTHY_CPU_NSEC,
2215 nsec > NOTICEWORTHY_CPU_NSEC);
2216 }
2217
2218 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2219 char buf[FORMAT_BYTES_MAX] = "";
2220 uint64_t value = UINT64_MAX;
2221
2222 assert(io_fields[k]);
2223
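/* Only the first metric needs to bypass the cache and refresh the accounting
 * data; later iterations may then use the just-refreshed cached values (hence
 * the "k > 0" argument). */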
2224 (void) unit_get_io_accounting(u, k, k > 0, &value);
2225 if (value == UINT64_MAX)
2226 continue;
2227
2228 have_io_accounting = true;
2229 if (value > 0)
2230 any_io = true;
2231
2232 /* Format IO accounting data for inclusion in the structured log message */
2233 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2234 r = log_oom();
2235 goto finish;
2236 }
2237 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2238
2239 /* Format the IO accounting data for inclusion in the human language message string, but only
2240 * for the bytes counters (and not for the operations counters) */
2241 if (k == CGROUP_IO_READ_BYTES) {
2242 assert(!rr);
2243 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2244 if (!rr) {
2245 r = log_oom();
2246 goto finish;
2247 }
2248 } else if (k == CGROUP_IO_WRITE_BYTES) {
2249 assert(!wr);
2250 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2251 if (!wr) {
2252 r = log_oom();
2253 goto finish;
2254 }
2255 }
2256
2257 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2258 log_level = raise_level(log_level,
2259 value > MENTIONWORTHY_IO_BYTES,
2260 value > NOTICEWORTHY_IO_BYTES);
2261 }
2262
2263 if (have_io_accounting) {
2264 if (any_io) {
2265 if (rr)
2266 message_parts[n_message_parts++] = TAKE_PTR(rr);
2267 if (wr)
2268 message_parts[n_message_parts++] = TAKE_PTR(wr);
2269
2270 } else {
2271 char *k;
2272
2273 k = strdup("no IO");
2274 if (!k) {
2275 r = log_oom();
2276 goto finish;
2277 }
2278
2279 message_parts[n_message_parts++] = k;
2280 }
2281 }
2282
2283 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2284 char buf[FORMAT_BYTES_MAX] = "";
2285 uint64_t value = UINT64_MAX;
2286
2287 assert(ip_fields[m]);
2288
2289 (void) unit_get_ip_accounting(u, m, &value);
2290 if (value == UINT64_MAX)
2291 continue;
2292
2293 have_ip_accounting = true;
2294 if (value > 0)
2295 any_traffic = true;
2296
2297 /* Format IP accounting data for inclusion in the structured log message */
2298 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2299 r = log_oom();
2300 goto finish;
2301 }
2302 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2303
2304 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2305 * bytes counters (and not for the packets counters) */
2306 if (m == CGROUP_IP_INGRESS_BYTES) {
2307 assert(!ingress);
2308 ingress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2309 if (!ingress) {
2310 r = log_oom();
2311 goto finish;
2312 }
2313 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2314 assert(!egress);
2315 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2316 if (!egress) {
2317 r = log_oom();
2318 goto finish;
2319 }
2320 }
2321
2322 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2323 log_level = raise_level(log_level,
2324 value > MENTIONWORTHY_IP_BYTES,
2325 value > NOTICEWORTHY_IP_BYTES);
2326 }
2327
2328 if (have_ip_accounting) {
2329 if (any_traffic) {
2330 if (ingress)
2331 message_parts[n_message_parts++] = TAKE_PTR(ingress);
2332 if (egress)
2333 message_parts[n_message_parts++] = TAKE_PTR(egress);
2334
2335 } else {
2336 char *k;
2337
2338 k = strdup("no IP traffic");
2339 if (!k) {
2340 r = log_oom();
2341 goto finish;
2342 }
2343
2344 message_parts[n_message_parts++] = k;
2345 }
2346 }
2347
2348 /* Is there any accounting data available at all? */
2349 if (n_iovec == 0) {
2350 r = 0;
2351 goto finish;
2352 }
2353
2354 if (n_message_parts == 0)
2355 t = strjoina("MESSAGE=", u->id, ": Completed.");
2356 else {
2357 _cleanup_free_ char *joined = NULL;
2358
2359 message_parts[n_message_parts] = NULL;
2360
2361 joined = strv_join(message_parts, ", ");
2362 if (!joined) {
2363 r = log_oom();
2364 goto finish;
2365 }
2366
2367 joined[0] = ascii_toupper(joined[0]);
2368 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2369 }
2370
2371 /* The following four fields are allocated on the stack or are static strings; hence we don't want to
2372 * free them, and don't increase n_iovec for them */
2373 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2374 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2375
2376 t = strjoina(u->manager->unit_log_field, u->id);
2377 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2378
2379 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2380 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2381
2382 log_struct_iovec(log_level, iovec, n_iovec + 4);
2383 r = 0;
2384
2385 finish:
2386 for (i = 0; i < n_message_parts; i++)
2387 free(message_parts[i]);
2388
2389 for (i = 0; i < n_iovec; i++)
2390 free(iovec[i].iov_base);
2391
2392 return r;
2394 }
2395
2396 static void unit_update_on_console(Unit *u) {
2397 bool b;
2398
2399 assert(u);
2400
2401 b = unit_needs_console(u);
2402 if (u->on_console == b)
2403 return;
2404
2405 u->on_console = b;
2406 if (b)
2407 manager_ref_console(u->manager);
2408 else
2409 manager_unref_console(u->manager);
2410 }
2411
2412 static void unit_emit_audit_start(Unit *u) {
2413 assert(u);
2414
2415 if (u->type != UNIT_SERVICE)
2416 return;
2417
2418 /* Write audit record if we have just finished starting up */
2419 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2420 u->in_audit = true;
2421 }
2422
2423 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2424 assert(u);
2425
2426 if (u->type != UNIT_SERVICE)
2427 return;
2428
2429 if (u->in_audit) {
2430 /* Write audit record if we have just finished shutting down */
2431 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2432 u->in_audit = false;
2433 } else {
2434 /* Hmm, if there was no start record written, write it now so that we always have a nice pair */
2435 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2436
2437 if (state == UNIT_INACTIVE)
2438 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2439 }
2440 }
2441
2442 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2443 bool unexpected = false;
2444 JobResult result;
2445
2446 assert(j);
2447
2448 if (j->state == JOB_WAITING)
2449
2450 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2451 * due to EAGAIN. */
2452 job_add_to_run_queue(j);
2453
2454 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2455 * hence needs to invalidate jobs. */
2456
2457 switch (j->type) {
2458
2459 case JOB_START:
2460 case JOB_VERIFY_ACTIVE:
2461
2462 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2463 job_finish_and_invalidate(j, JOB_DONE, true, false);
2464 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2465 unexpected = true;
2466
2467 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2468 if (ns == UNIT_FAILED)
2469 result = JOB_FAILED;
2470 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2471 result = JOB_SKIPPED;
2472 else
2473 result = JOB_DONE;
2474
2475 job_finish_and_invalidate(j, result, true, false);
2476 }
2477 }
2478
2479 break;
2480
2481 case JOB_RELOAD:
2482 case JOB_RELOAD_OR_START:
2483 case JOB_TRY_RELOAD:
2484
2485 if (j->state == JOB_RUNNING) {
2486 if (ns == UNIT_ACTIVE)
2487 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2488 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2489 unexpected = true;
2490
2491 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2492 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2493 }
2494 }
2495
2496 break;
2497
2498 case JOB_STOP:
2499 case JOB_RESTART:
2500 case JOB_TRY_RESTART:
2501
2502 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2503 job_finish_and_invalidate(j, JOB_DONE, true, false);
2504 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2505 unexpected = true;
2506 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2507 }
2508
2509 break;
2510
2511 default:
2512 assert_not_reached("Job type unknown");
2513 }
2514
2515 return unexpected;
2516 }
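
/* Example: a JOB_START job in JOB_RUNNING state is finished as JOB_DONE when
 * the unit reaches an active or reloading state; if the unit instead drops
 * to UNIT_FAILED the change is flagged as unexpected and the job is finished
 * as JOB_FAILED. */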
2517
2518 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2519 const char *reason;
2520 Manager *m;
2521
2522 assert(u);
2523 assert(os < _UNIT_ACTIVE_STATE_MAX);
2524 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2525
2526 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2527 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2528 * remounted this function will be called too! */
2529
2530 m = u->manager;
2531
2532 /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be
2533 * in the bus queue, so that any queued job change signal will force out the unit change signal first. */
2534 unit_add_to_dbus_queue(u);
2535
2536 /* Update timestamps for state changes */
2537 if (!MANAGER_IS_RELOADING(m)) {
2538 dual_timestamp_get(&u->state_change_timestamp);
2539
2540 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2541 u->inactive_exit_timestamp = u->state_change_timestamp;
2542 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2543 u->inactive_enter_timestamp = u->state_change_timestamp;
2544
2545 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2546 u->active_enter_timestamp = u->state_change_timestamp;
2547 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2548 u->active_exit_timestamp = u->state_change_timestamp;
2549 }
2550
2551 /* Keep track of failed units */
2552 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2553
2554 /* Make sure the cgroup and state files are always removed when we become inactive */
2555 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2556 unit_prune_cgroup(u);
2557 unit_unlink_state_files(u);
2558 }
2559
2560 unit_update_on_console(u);
2561
2562 if (!MANAGER_IS_RELOADING(m)) {
2563 bool unexpected;
2564
2565 /* Let's propagate state changes to the job */
2566 if (u->job)
2567 unexpected = unit_process_job(u->job, ns, flags);
2568 else
2569 unexpected = true;
2570
2571 /* If this state change happened without being requested by a job, then let's retroactively start or
2572 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2573 * additional jobs just because something is already activated. */
2574
2575 if (unexpected) {
2576 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2577 retroactively_start_dependencies(u);
2578 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2579 retroactively_stop_dependencies(u);
2580 }
2581
2582 /* Stop unneeded units regardless of whether going down was expected or not */
2583 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2584 check_unneeded_dependencies(u);
2585
2586 if (ns != os && ns == UNIT_FAILED) {
2587 log_unit_debug(u, "Unit entered failed state.");
2588
2589 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2590 unit_start_on_failure(u);
2591 }
2592
2593 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2594 /* This unit just finished starting up */
2595
2596 unit_emit_audit_start(u);
2597 manager_send_unit_plymouth(m, u);
2598 }
2599
2600 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2601 /* This unit just stopped/failed. */
2602
2603 unit_emit_audit_stop(u, ns);
2604 unit_log_resources(u);
2605 }
2606 }
2607
2608 manager_recheck_journal(m);
2609 manager_recheck_dbus(m);
2610
2611 unit_trigger_notify(u);
2612
2613 if (!MANAGER_IS_RELOADING(m)) {
2614 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2615 unit_submit_to_stop_when_unneeded_queue(u);
2616
2617 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2618 * something BindsTo= a Type=oneshot unit, as these units go directly from starting to inactive,
2619 * without ever entering started.) */
2620 unit_check_binds_to(u);
2621
2622 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2623 reason = strjoina("unit ", u->id, " failed");
2624 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2625 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2626 reason = strjoina("unit ", u->id, " succeeded");
2627 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2628 }
2629 }
2630
2631 unit_add_to_gc_queue(u);
2632 }
2633
2634 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2635 int r;
2636
2637 assert(u);
2638 assert(pid_is_valid(pid));
2639
2640 /* Watch a specific PID */
2641
2642 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2643 * opportunity to remove any stale references to this PID as they can be created
2644 * easily (when watching a process which is not our direct child). */
2645 if (exclusive)
2646 manager_unwatch_pid(u->manager, pid);
2647
2648 r = set_ensure_allocated(&u->pids, NULL);
2649 if (r < 0)
2650 return r;
2651
2652 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2653 if (r < 0)
2654 return r;
2655
2656 /* First try, let's add the unit keyed by "pid". */
2657 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2658 if (r == -EEXIST) {
2659 Unit **array;
2660 bool found = false;
2661 size_t n = 0;
2662
2663 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2664 * to an array of Units rather than just a Unit) lists us already. */
2665
2666 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2667 if (array)
2668 for (; array[n]; n++)
2669 if (array[n] == u)
2670 found = true;
2671
2672 if (found) /* Found it already? If so, do nothing. */
2673 r = 0;
2674 else {
2675 Unit **new_array;
2676
2677 /* Allocate a new array */
2678 new_array = new(Unit*, n + 2);
2679 if (!new_array)
2680 return -ENOMEM;
2681
2682 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2683 new_array[n] = u;
2684 new_array[n+1] = NULL;
2685
2686 /* Add or replace the old array */
2687 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2688 if (r < 0) {
2689 free(new_array);
2690 return r;
2691 }
2692
2693 free(array);
2694 }
2695 } else if (r < 0)
2696 return r;
2697
2698 r = set_put(u->pids, PID_TO_PTR(pid));
2699 if (r < 0)
2700 return r;
2701
2702 return 0;
2703 }
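
/* Illustrative shape of the watch_pids map after the above, with a
 * hypothetical PID 4711 watched by three units:
 *
 *     watch_pids[PID_TO_PTR(4711)]  -> u1                  (first watcher, keyed by "pid")
 *     watch_pids[PID_TO_PTR(-4711)] -> { u2, u3, NULL }    (all further watchers, in a
 *                                                           NULL-terminated array keyed by "-pid")
 */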
2704
2705 void unit_unwatch_pid(Unit *u, pid_t pid) {
2706 Unit **array;
2707
2708 assert(u);
2709 assert(pid_is_valid(pid));
2710
2711 /* First let's drop the unit in case it's keyed as "pid". */
2712 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2713
2714 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2715 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2716 if (array) {
2717 size_t n, m = 0;
2718
2719 /* Let's iterate through the array, dropping our own entry */
2720 for (n = 0; array[n]; n++)
2721 if (array[n] != u)
2722 array[m++] = array[n];
2723 array[m] = NULL;
2724
2725 if (m == 0) {
2726 /* The array is now empty, remove the entire entry */
2727 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2728 free(array);
2729 }
2730 }
2731
2732 (void) set_remove(u->pids, PID_TO_PTR(pid));
2733 }
2734
2735 void unit_unwatch_all_pids(Unit *u) {
2736 assert(u);
2737
2738 while (!set_isempty(u->pids))
2739 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2740
2741 u->pids = set_free(u->pids);
2742 }
2743
2744 static void unit_tidy_watch_pids(Unit *u) {
2745 pid_t except1, except2;
2746 Iterator i;
2747 void *e;
2748
2749 assert(u);
2750
2751 /* Cleans dead PIDs from our list */
2752
2753 except1 = unit_main_pid(u);
2754 except2 = unit_control_pid(u);
2755
2756 SET_FOREACH(e, u->pids, i) {
2757 pid_t pid = PTR_TO_PID(e);
2758
2759 if (pid == except1 || pid == except2)
2760 continue;
2761
2762 if (!pid_is_unwaited(pid))
2763 unit_unwatch_pid(u, pid);
2764 }
2765 }
2766
2767 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2768 Unit *u = userdata;
2769
2770 assert(s);
2771 assert(u);
2772
2773 unit_tidy_watch_pids(u);
2774 unit_watch_all_pids(u);
2775
2776 /* If the PID set is empty now, then let's finish this off. */
2777 unit_synthesize_cgroup_empty_event(u);
2778
2779 return 0;
2780 }
2781
2782 int unit_enqueue_rewatch_pids(Unit *u) {
2783 int r;
2784
2785 assert(u);
2786
2787 if (!u->cgroup_path)
2788 return -ENOENT;
2789
2790 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2791 if (r < 0)
2792 return r;
2793 if (r > 0) /* On unified we can use proper notifications */
2794 return 0;
2795
2796 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2797 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2798 * involves issuing kill(pid, 0) on all processes we watch. */
2799
2800 if (!u->rewatch_pids_event_source) {
2801 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2802
2803 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2804 if (r < 0)
2805 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2806
2807 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2808 if (r < 0)
2809 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2810
2811 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2812
2813 u->rewatch_pids_event_source = TAKE_PTR(s);
2814 }
2815
2816 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2817 if (r < 0)
2818 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2819
2820 return 0;
2821 }
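
/* Illustrative use (an assumption, not a quote of an actual call site): on
 * cgroup v1 a unit type's SIGCHLD handling would call
 * unit_enqueue_rewatch_pids(u), so that once the event loop is idle dead
 * PIDs are pruned, newly appeared processes in the cgroup are picked up, and
 * an empty PID set is turned into a synthesized cgroup-empty event by
 * on_rewatch_pids_event() above. */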
2822
2823 void unit_dequeue_rewatch_pids(Unit *u) {
2824 int r;
2825 assert(u);
2826
2827 if (!u->rewatch_pids_event_source)
2828 return;
2829
2830 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2831 if (r < 0)
2832 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2833
2834 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2835 }
2836
2837 bool unit_job_is_applicable(Unit *u, JobType j) {
2838 assert(u);
2839 assert(j >= 0 && j < _JOB_TYPE_MAX);
2840
2841 switch (j) {
2842
2843 case JOB_VERIFY_ACTIVE:
2844 case JOB_START:
2845 case JOB_NOP:
2846 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2847 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2848 * jobs for them. */
2849 return true;
2850
2851 case JOB_STOP:
2852 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2853 * external events), hence it makes no sense to permit enqueuing such a request either. */
2854 return !u->perpetual;
2855
2856 case JOB_RESTART:
2857 case JOB_TRY_RESTART:
2858 return unit_can_stop(u) && unit_can_start(u);
2859
2860 case JOB_RELOAD:
2861 case JOB_TRY_RELOAD:
2862 return unit_can_reload(u);
2863
2864 case JOB_RELOAD_OR_START:
2865 return unit_can_reload(u) && unit_can_start(u);
2866
2867 default:
2868 assert_not_reached("Invalid job type");
2869 }
2870 }
2871
2872 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2873 assert(u);
2874
2875 /* Only warn about some unit types */
2876 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2877 return;
2878
2879 if (streq_ptr(u->id, other))
2880 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2881 else
2882 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2883 }
2884
2885 static int unit_add_dependency_hashmap(
2886 Hashmap **h,
2887 Unit *other,
2888 UnitDependencyMask origin_mask,
2889 UnitDependencyMask destination_mask) {
2890
2891 UnitDependencyInfo info;
2892 int r;
2893
2894 assert(h);
2895 assert(other);
2896 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2897 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2898 assert(origin_mask > 0 || destination_mask > 0);
2899
2900 r = hashmap_ensure_allocated(h, NULL);
2901 if (r < 0)
2902 return r;
2903
2904 assert_cc(sizeof(void*) == sizeof(info));
2905
2906 info.data = hashmap_get(*h, other);
2907 if (info.data) {
2908 /* Entry already exists. Add in our mask. */
2909
2910 if (FLAGS_SET(info.origin_mask, origin_mask) &&
2911 FLAGS_SET(info.destination_mask, destination_mask))
2912 return 0; /* NOP */
2913
2914 info.origin_mask |= origin_mask;
2915 info.destination_mask |= destination_mask;
2916
2917 r = hashmap_update(*h, other, info.data);
2918 } else {
2919 info = (UnitDependencyInfo) {
2920 .origin_mask = origin_mask,
2921 .destination_mask = destination_mask,
2922 };
2923
2924 r = hashmap_put(*h, other, info.data);
2925 }
2926 if (r < 0)
2927 return r;
2928
2929 return 1;
2930 }
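
/* The assert_cc() above documents the trick this function relies on: a
 * UnitDependencyInfo packs both bitmasks into one pointer-sized field
 * (info.data), so the masks can be stored directly as the hashmap value,
 * without a separate allocation. Sketch of the round-trip, with a
 * hypothetical mask:
 *
 *     info = (UnitDependencyInfo) { .origin_mask = UNIT_DEPENDENCY_FILE };
 *     r = hashmap_put(*h, other, info.data);   // masks stored as the value
 *     ...
 *     info.data = hashmap_get(*h, other);      // and unpacked again later
 */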
2931
2932 int unit_add_dependency(
2933 Unit *u,
2934 UnitDependency d,
2935 Unit *other,
2936 bool add_reference,
2937 UnitDependencyMask mask) {
2938
2939 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2940 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2941 [UNIT_WANTS] = UNIT_WANTED_BY,
2942 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2943 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2944 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2945 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2946 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2947 [UNIT_WANTED_BY] = UNIT_WANTS,
2948 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2949 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2950 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2951 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2952 [UNIT_BEFORE] = UNIT_AFTER,
2953 [UNIT_AFTER] = UNIT_BEFORE,
2954 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2955 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2956 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2957 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2958 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2959 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2960 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2961 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2962 };
2963 Unit *original_u = u, *original_other = other;
2964 int r;
2965
2966 assert(u);
2967 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2968 assert(other);
2969
2970 u = unit_follow_merge(u);
2971 other = unit_follow_merge(other);
2972
2973 /* We won't allow dependencies on ourselves. We will not
2974 * consider them an error, however. */
2975 if (u == other) {
2976 maybe_warn_about_dependency(original_u, original_other->id, d);
2977 return 0;
2978 }
2979
2980 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2981 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2982 log_unit_warning(u, "Dependency %s=%s ignored (.device units cannot be delayed)", unit_dependency_to_string(d), other->id);
2983 return 0;
2984 }
2985
2986 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2987 if (r < 0)
2988 return r;
2989
2990 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2991 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2992 if (r < 0)
2993 return r;
2994 }
2995
2996 if (add_reference) {
2997 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2998 if (r < 0)
2999 return r;
3000
3001 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
3002 if (r < 0)
3003 return r;
3004 }
3005
3006 unit_add_to_dbus_queue(u);
3007 return 0;
3008 }
3009
3010 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3011 int r;
3012
3013 assert(u);
3014
3015 r = unit_add_dependency(u, d, other, add_reference, mask);
3016 if (r < 0)
3017 return r;
3018
3019 return unit_add_dependency(u, e, other, add_reference, mask);
3020 }
3021
3022 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3023 int r;
3024
3025 assert(u);
3026 assert(name);
3027 assert(buf);
3028 assert(ret);
3029
3030 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3031 *buf = NULL;
3032 *ret = name;
3033 return 0;
3034 }
3035
3036 if (u->instance)
3037 r = unit_name_replace_instance(name, u->instance, buf);
3038 else {
3039 _cleanup_free_ char *i = NULL;
3040
3041 r = unit_name_to_prefix(u->id, &i);
3042 if (r < 0)
3043 return r;
3044
3045 r = unit_name_replace_instance(name, i, buf);
3046 }
3047 if (r < 0)
3048 return r;
3049
3050 *ret = *buf;
3051 return 0;
3052 }
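
/* Example: for a template name "foo@.service" (hypothetical) on a unit with
 * instance "bar", this resolves to "foo@bar.service"; for a unit without an
 * instance the prefix of its own name is used instead. Non-template names
 * are passed through unchanged, with *buf left NULL. */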
3053
3054 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3055 _cleanup_free_ char *buf = NULL;
3056 Unit *other;
3057 int r;
3058
3059 assert(u);
3060 assert(name);
3061
3062 r = resolve_template(u, name, &buf, &name);
3063 if (r < 0)
3064 return r;
3065
3066 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3067 if (r < 0)
3068 return r;
3069
3070 return unit_add_dependency(u, d, other, add_reference, mask);
3071 }
3072
3073 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3074 _cleanup_free_ char *buf = NULL;
3075 Unit *other;
3076 int r;
3077
3078 assert(u);
3079 assert(name);
3080
3081 r = resolve_template(u, name, &buf, &name);
3082 if (r < 0)
3083 return r;
3084
3085 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3086 if (r < 0)
3087 return r;
3088
3089 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3090 }
3091
3092 int set_unit_path(const char *p) {
3093 /* This is mostly for debug purposes */
3094 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3095 return -errno;
3096
3097 return 0;
3098 }
3099
3100 char *unit_dbus_path(Unit *u) {
3101 assert(u);
3102
3103 if (!u->id)
3104 return NULL;
3105
3106 return unit_dbus_path_from_name(u->id);
3107 }
3108
3109 char *unit_dbus_path_invocation_id(Unit *u) {
3110 assert(u);
3111
3112 if (sd_id128_is_null(u->invocation_id))
3113 return NULL;
3114
3115 return unit_dbus_path_from_name(u->invocation_id_string);
3116 }
3117
3118 int unit_set_slice(Unit *u, Unit *slice) {
3119 assert(u);
3120 assert(slice);
3121
3122 /* Sets the unit slice if it has not been set before. Is extra
3123 * careful to only allow this for units that actually have a
3124 * cgroup context. Also, we don't allow setting this for slices
3125 * (since the parent slice is derived from the name). Make
3126 * sure the unit we set is actually a slice. */
3127
3128 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3129 return -EOPNOTSUPP;
3130
3131 if (u->type == UNIT_SLICE)
3132 return -EINVAL;
3133
3134 if (unit_active_state(u) != UNIT_INACTIVE)
3135 return -EBUSY;
3136
3137 if (slice->type != UNIT_SLICE)
3138 return -EINVAL;
3139
3140 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3141 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3142 return -EPERM;
3143
3144 if (UNIT_DEREF(u->slice) == slice)
3145 return 0;
3146
3147 /* Disallow slice changes if @u is already bound to cgroups */
3148 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3149 return -EBUSY;
3150
3151 unit_ref_set(&u->slice, u, slice);
3152 return 1;
3153 }
3154
3155 int unit_set_default_slice(Unit *u) {
3156 const char *slice_name;
3157 Unit *slice;
3158 int r;
3159
3160 assert(u);
3161
3162 if (UNIT_ISSET(u->slice))
3163 return 0;
3164
3165 if (u->instance) {
3166 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3167
3168 /* Implicitly place all instantiated units in their
3169 * own per-template slice */
3170
3171 r = unit_name_to_prefix(u->id, &prefix);
3172 if (r < 0)
3173 return r;
3174
3175 /* The prefix is already escaped, but it might include
3176 * "-" which has a special meaning for slice units,
3177 * hence escape it once more here. */
3178 escaped = unit_name_escape(prefix);
3179 if (!escaped)
3180 return -ENOMEM;
3181
3182 if (MANAGER_IS_SYSTEM(u->manager))
3183 slice_name = strjoina("system-", escaped, ".slice");
3184 else
3185 slice_name = strjoina(escaped, ".slice");
3186 } else
3187 slice_name =
3188 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3189 ? SPECIAL_SYSTEM_SLICE
3190 : SPECIAL_ROOT_SLICE;
3191
3192 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3193 if (r < 0)
3194 return r;
3195
3196 return unit_set_slice(u, slice);
3197 }
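
/* Example: getty@tty1.service has the prefix "getty", hence the logic above
 * places it in system-getty.slice; a non-instantiated service in the system
 * manager defaults to system.slice, and in the user manager to the root
 * slice (-.slice). */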
3198
3199 const char *unit_slice_name(Unit *u) {
3200 assert(u);
3201
3202 if (!UNIT_ISSET(u->slice))
3203 return NULL;
3204
3205 return UNIT_DEREF(u->slice)->id;
3206 }
3207
3208 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3209 _cleanup_free_ char *t = NULL;
3210 int r;
3211
3212 assert(u);
3213 assert(type);
3214 assert(_found);
3215
3216 r = unit_name_change_suffix(u->id, type, &t);
3217 if (r < 0)
3218 return r;
3219 if (unit_has_name(u, t))
3220 return -EINVAL;
3221
3222 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3223 assert(r < 0 || *_found != u);
3224 return r;
3225 }
3226
3227 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3228 const char *name, *old_owner, *new_owner;
3229 Unit *u = userdata;
3230 int r;
3231
3232 assert(message);
3233 assert(u);
3234
3235 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3236 if (r < 0) {
3237 bus_log_parse_error(r);
3238 return 0;
3239 }
3240
3241 old_owner = empty_to_null(old_owner);
3242 new_owner = empty_to_null(new_owner);
3243
3244 if (UNIT_VTABLE(u)->bus_name_owner_change)
3245 UNIT_VTABLE(u)->bus_name_owner_change(u, old_owner, new_owner);
3246
3247 return 0;
3248 }
3249
3250 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3251 const sd_bus_error *e;
3252 const char *new_owner;
3253 Unit *u = userdata;
3254 int r;
3255
3256 assert(message);
3257 assert(u);
3258
3259 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3260
3261 if (sd_bus_error_is_set(error)) {
3262 log_error("Failed to get name owner from bus: %s", error->message);
3263 return 0;
3264 }
3265
3266 e = sd_bus_message_get_error(message);
3267 if (sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3268 return 0;
3269
3270 if (e) {
3271 log_error("Unexpected error response from GetNameOwner: %s", e->message);
3272 return 0;
3273 }
3274
3275 r = sd_bus_message_read(message, "s", &new_owner);
3276 if (r < 0) {
3277 bus_log_parse_error(r);
3278 return 0;
3279 }
3280
3281 new_owner = empty_to_null(new_owner);
3282
3283 if (UNIT_VTABLE(u)->bus_name_owner_change)
3284 UNIT_VTABLE(u)->bus_name_owner_change(u, NULL, new_owner);
3285
3286 return 0;
3287 }
3288
3289 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3290 const char *match;
3291
3292 assert(u);
3293 assert(bus);
3294 assert(name);
3295
3296 if (u->match_bus_slot)
3297 return -EBUSY;
3298
3299 match = strjoina("type='signal',"
3300 "sender='org.freedesktop.DBus',"
3301 "path='/org/freedesktop/DBus',"
3302 "interface='org.freedesktop.DBus',"
3303 "member='NameOwnerChanged',"
3304 "arg0='", name, "'");
3305
3306 int r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3307 if (r < 0)
3308 return r;
3309
3310 return sd_bus_call_method_async(bus,
3311 &u->get_name_owner_slot,
3312 "org.freedesktop.DBus",
3313 "/org/freedesktop/DBus",
3314 "org.freedesktop.DBus",
3315 "GetNameOwner",
3316 get_name_owner_handler,
3317 u,
3318 "s", name);
3319 }
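
/* For example, for name="org.freedesktop.hostname1" (any well-known name
 * works the same way) the installed match is:
 *
 *     type='signal',sender='org.freedesktop.DBus',path='/org/freedesktop/DBus',
 *     interface='org.freedesktop.DBus',member='NameOwnerChanged',arg0='org.freedesktop.hostname1'
 *
 * and the additional GetNameOwner() call reports the current owner (if any),
 * so an owner change that happened before the match was installed is not
 * missed. */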
3320
3321 int unit_watch_bus_name(Unit *u, const char *name) {
3322 int r;
3323
3324 assert(u);
3325 assert(name);
3326
3327 /* Watch a specific name on the bus. We only support one unit
3328 * watching each name for now. */
3329
3330 if (u->manager->api_bus) {
3331 /* If the bus is already available, install the match directly.
3332 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3333 r = unit_install_bus_match(u, u->manager->api_bus, name);
3334 if (r < 0)
3335 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3336 }
3337
3338 r = hashmap_put(u->manager->watch_bus, name, u);
3339 if (r < 0) {
3340 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3341 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3342 }
3343
3344 return 0;
3345 }
3346
3347 void unit_unwatch_bus_name(Unit *u, const char *name) {
3348 assert(u);
3349 assert(name);
3350
3351 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3352 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3353 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3354 }
3355
3356 bool unit_can_serialize(Unit *u) {
3357 assert(u);
3358
3359 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3360 }
3361
3362 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3363 _cleanup_free_ char *s = NULL;
3364 int r;
3365
3366 assert(f);
3367 assert(key);
3368
3369 if (mask == 0)
3370 return 0;
3371
3372 r = cg_mask_to_string(mask, &s);
3373 if (r < 0)
3374 return log_error_errno(r, "Failed to format cgroup mask: %m");
3375
3376 return serialize_item(f, key, s);
3377 }
3378
3379 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3380 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3381 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3382 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3383 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3384 };
3385
3386 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3387 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3388 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3389 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3390 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3391 };
3392
3393 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3394 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3395 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3396 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3397 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3398 };
3399
3400 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3401 CGroupIPAccountingMetric m;
3402 int r;
3403
3404 assert(u);
3405 assert(f);
3406 assert(fds);
3407
3408 if (unit_can_serialize(u)) {
3409 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3410 if (r < 0)
3411 return r;
3412 }
3413
3414 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3415
3416 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3417 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3418 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3419 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3420
3421 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3422 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3423
3424 if (dual_timestamp_is_set(&u->condition_timestamp))
3425 (void) serialize_bool(f, "condition-result", u->condition_result);
3426
3427 if (dual_timestamp_is_set(&u->assert_timestamp))
3428 (void) serialize_bool(f, "assert-result", u->assert_result);
3429
3430 (void) serialize_bool(f, "transient", u->transient);
3431 (void) serialize_bool(f, "in-audit", u->in_audit);
3432
3433 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3434 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3435 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3436 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3437 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3438
3439 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3440 if (u->cpu_usage_last != NSEC_INFINITY)
3441 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3442
3443 if (u->oom_kill_last > 0)
3444 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3445
3446 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3447 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3448
3449 if (u->io_accounting_last[im] != UINT64_MAX)
3450 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3451 }
3452
3453 if (u->cgroup_path)
3454 (void) serialize_item(f, "cgroup", u->cgroup_path);
3455
3456 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3457 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3458 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3459 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3460
3461 if (uid_is_valid(u->ref_uid))
3462 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3463 if (gid_is_valid(u->ref_gid))
3464 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3465
3466 if (!sd_id128_is_null(u->invocation_id))
3467 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3468
3469 bus_track_serialize(u->bus_track, f, "ref");
3470
3471 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3472 uint64_t v;
3473
3474 r = unit_get_ip_accounting(u, m, &v);
3475 if (r >= 0)
3476 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3477 }
3478
3479 if (serialize_jobs) {
3480 if (u->job) {
3481 fputs("job\n", f);
3482 job_serialize(u->job, f);
3483 }
3484
3485 if (u->nop_job) {
3486 fputs("job\n", f);
3487 job_serialize(u->nop_job, f);
3488 }
3489 }
3490
3491 /* End marker */
3492 fputc('\n', f);
3493 return 0;
3494 }
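
/* The serialized form produced above is a simple key=value stream terminated
 * by an empty line (the end marker). A hypothetical excerpt for some
 * foo.service might look like:
 *
 *     state-change-timestamp=...
 *     transient=no
 *     cgroup=/system.slice/foo.service
 *     cgroup-realized=yes
 *     <empty line>
 *
 * unit_deserialize() below consumes exactly this shape, splitting each line
 * at the first '='. */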
3495
3496 static int unit_deserialize_job(Unit *u, FILE *f) {
3497 _cleanup_(job_freep) Job *j = NULL;
3498 int r;
3499
3500 assert(u);
3501 assert(f);
3502
3503 j = job_new_raw(u);
3504 if (!j)
3505 return log_oom();
3506
3507 r = job_deserialize(j, f);
3508 if (r < 0)
3509 return r;
3510
3511 r = job_install_deserialized(j);
3512 if (r < 0)
3513 return r;
3514
3515 TAKE_PTR(j);
3516 return 0;
3517 }
3518
3519 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3520 int r;
3521
3522 assert(u);
3523 assert(f);
3524 assert(fds);
3525
3526 for (;;) {
3527 _cleanup_free_ char *line = NULL;
3528 char *l, *v;
3529 ssize_t m;
3530 size_t k;
3531
3532 r = read_line(f, LONG_LINE_MAX, &line);
3533 if (r < 0)
3534 return log_error_errno(r, "Failed to read serialization line: %m");
3535 if (r == 0) /* eof */
3536 break;
3537
3538 l = strstrip(line);
3539 if (isempty(l)) /* End marker */
3540 break;
3541
3542 k = strcspn(l, "=");
3543
3544 if (l[k] == '=') {
3545 l[k] = 0;
3546 v = l+k+1;
3547 } else
3548 v = l+k;
3549
3550 if (streq(l, "job")) {
3551 if (v[0] == '\0') {
3552 /* New-style serialized job */
3553 r = unit_deserialize_job(u, f);
3554 if (r < 0)
3555 return r;
3556 } else /* Legacy for pre-44 */
3557 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3558 continue;
3559 } else if (streq(l, "state-change-timestamp")) {
3560 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3561 continue;
3562 } else if (streq(l, "inactive-exit-timestamp")) {
3563 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3564 continue;
3565 } else if (streq(l, "active-enter-timestamp")) {
3566 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3567 continue;
3568 } else if (streq(l, "active-exit-timestamp")) {
3569 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3570 continue;
3571 } else if (streq(l, "inactive-enter-timestamp")) {
3572 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3573 continue;
3574 } else if (streq(l, "condition-timestamp")) {
3575 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3576 continue;
3577 } else if (streq(l, "assert-timestamp")) {
3578 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3579 continue;
3580 } else if (streq(l, "condition-result")) {
3581
3582 r = parse_boolean(v);
3583 if (r < 0)
3584 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3585 else
3586 u->condition_result = r;
3587
3588 continue;
3589
3590 } else if (streq(l, "assert-result")) {
3591
3592 r = parse_boolean(v);
3593 if (r < 0)
3594 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3595 else
3596 u->assert_result = r;
3597
3598 continue;
3599
3600 } else if (streq(l, "transient")) {
3601
3602 r = parse_boolean(v);
3603 if (r < 0)
3604 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3605 else
3606 u->transient = r;
3607
3608 continue;
3609
3610 } else if (streq(l, "in-audit")) {
3611
3612 r = parse_boolean(v);
3613 if (r < 0)
3614 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3615 else
3616 u->in_audit = r;
3617
3618 continue;
3619
3620 } else if (streq(l, "exported-invocation-id")) {
3621
3622 r = parse_boolean(v);
3623 if (r < 0)
3624 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3625 else
3626 u->exported_invocation_id = r;
3627
3628 continue;
3629
3630 } else if (streq(l, "exported-log-level-max")) {
3631
3632 r = parse_boolean(v);
3633 if (r < 0)
3634 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3635 else
3636 u->exported_log_level_max = r;
3637
3638 continue;
3639
3640 } else if (streq(l, "exported-log-extra-fields")) {
3641
3642 r = parse_boolean(v);
3643 if (r < 0)
3644 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3645 else
3646 u->exported_log_extra_fields = r;
3647
3648 continue;
3649
3650 } else if (streq(l, "exported-log-rate-limit-interval")) {
3651
3652 r = parse_boolean(v);
3653 if (r < 0)
3654 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3655 else
3656 u->exported_log_rate_limit_interval = r;
3657
3658 continue;
3659
3660 } else if (streq(l, "exported-log-rate-limit-burst")) {
3661
3662 r = parse_boolean(v);
3663 if (r < 0)
3664 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3665 else
3666 u->exported_log_rate_limit_burst = r;
3667
3668 continue;
3669
3670 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3671
3672 r = safe_atou64(v, &u->cpu_usage_base);
3673 if (r < 0)
3674 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3675
3676 continue;
3677
3678 } else if (streq(l, "cpu-usage-last")) {
3679
3680 r = safe_atou64(v, &u->cpu_usage_last);
3681 if (r < 0)
3682 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3683
3684 continue;
3685
3686 } else if (streq(l, "oom-kill-last")) {
3687
3688 r = safe_atou64(v, &u->oom_kill_last);
3689 if (r < 0)
3690 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3691
3692 continue;
3693
3694 } else if (streq(l, "cgroup")) {
3695
3696 r = unit_set_cgroup_path(u, v);
3697 if (r < 0)
3698 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3699
3700 (void) unit_watch_cgroup(u);
3701 (void) unit_watch_cgroup_memory(u);
3702
3703 continue;
3704 } else if (streq(l, "cgroup-realized")) {
3705 int b;
3706
3707 b = parse_boolean(v);
3708 if (b < 0)
3709 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3710 else
3711 u->cgroup_realized = b;
3712
3713 continue;
3714
3715 } else if (streq(l, "cgroup-realized-mask")) {
3716
3717 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3718 if (r < 0)
3719 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3720 continue;
3721
3722 } else if (streq(l, "cgroup-enabled-mask")) {
3723
3724 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3725 if (r < 0)
3726 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3727 continue;
3728
3729 } else if (streq(l, "cgroup-invalidated-mask")) {
3730
3731 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3732 if (r < 0)
3733 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3734 continue;
3735
3736 } else if (streq(l, "ref-uid")) {
3737 uid_t uid;
3738
3739 r = parse_uid(v, &uid);
3740 if (r < 0)
3741 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3742 else
3743 unit_ref_uid_gid(u, uid, GID_INVALID);
3744
3745 continue;
3746
3747 } else if (streq(l, "ref-gid")) {
3748 gid_t gid;
3749
3750 r = parse_gid(v, &gid);
3751 if (r < 0)
3752 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3753 else
3754 unit_ref_uid_gid(u, UID_INVALID, gid);
3755
3756 continue;
3757
3758 } else if (streq(l, "ref")) {
3759
3760 r = strv_extend(&u->deserialized_refs, v);
3761 if (r < 0)
3762 return log_oom();
3763
3764 continue;
3765 } else if (streq(l, "invocation-id")) {
3766 sd_id128_t id;
3767
3768 r = sd_id128_from_string(v, &id);
3769 if (r < 0)
3770 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3771 else {
3772 r = unit_set_invocation_id(u, id);
3773 if (r < 0)
3774 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3775 }
3776
3777 continue;
3778 }
3779
3780 /* Check if this is an IP accounting metric serialization field */
3781 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3782 if (m >= 0) {
3783 uint64_t c;
3784
3785 r = safe_atou64(v, &c);
3786 if (r < 0)
3787 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3788 else
3789 u->ip_accounting_extra[m] = c;
3790 continue;
3791 }
3792
3793 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3794 if (m >= 0) {
3795 uint64_t c;
3796
3797 r = safe_atou64(v, &c);
3798 if (r < 0)
3799 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3800 else
3801 u->io_accounting_base[m] = c;
3802 continue;
3803 }
3804
3805 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3806 if (m >= 0) {
3807 uint64_t c;
3808
3809 r = safe_atou64(v, &c);
3810 if (r < 0)
3811 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3812 else
3813 u->io_accounting_last[m] = c;
3814 continue;
3815 }
3816
3817 if (unit_can_serialize(u)) {
3818 r = exec_runtime_deserialize_compat(u, l, v, fds);
3819 if (r < 0) {
3820 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3821 continue;
3822 }
3823
3824 /* Returns positive if key was handled by the call */
3825 if (r > 0)
3826 continue;
3827
3828 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3829 if (r < 0)
3830 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3831 }
3832 }
3833
3834 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3835 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3836 * before 228 where the base for timeouts was not persistent across reboots. */
3837
3838 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3839 dual_timestamp_get(&u->state_change_timestamp);
3840
3841 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3842 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3843 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3844 unit_invalidate_cgroup_bpf(u);
3845
3846 return 0;
3847 }
3848
3849 int unit_deserialize_skip(FILE *f) {
3850 int r;
3851 assert(f);
3852
3853 /* Skip serialized data for this unit. We don't know what it is. */
3854
3855 for (;;) {
3856 _cleanup_free_ char *line = NULL;
3857 char *l;
3858
3859 r = read_line(f, LONG_LINE_MAX, &line);
3860 if (r < 0)
3861 return log_error_errno(r, "Failed to read serialization line: %m");
3862 if (r == 0)
3863 return 0;
3864
3865 l = strstrip(line);
3866
3867 /* End marker */
3868 if (isempty(l))
3869 return 1;
3870 }
3871 }
3872
3873 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3874 Unit *device;
3875 _cleanup_free_ char *e = NULL;
3876 int r;
3877
3878 assert(u);
3879
3880 /* Adds in links to the device node that this unit is based on */
3881 if (isempty(what))
3882 return 0;
3883
3884 if (!is_device_path(what))
3885 return 0;
3886
3887 /* When device units aren't supported (such as in a
3888 * container), don't create dependencies on them. */
3889 if (!unit_type_supported(UNIT_DEVICE))
3890 return 0;
3891
3892 r = unit_name_from_path(what, ".device", &e);
3893 if (r < 0)
3894 return r;
3895
3896 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3897 if (r < 0)
3898 return r;
3899
3900 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3901 dep = UNIT_BINDS_TO;
3902
3903 r = unit_add_two_dependencies(u, UNIT_AFTER,
3904 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3905 device, true, mask);
3906 if (r < 0)
3907 return r;
3908
3909 if (wants) {
3910 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3911 if (r < 0)
3912 return r;
3913 }
3914
3915 return 0;
3916 }
3917
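/* Coldplugging: after deserialization, re-establish the unit's runtime state: replay the bus name
 * references recorded in deserialized_refs, then let the unit type and any queued job coldplug
 * themselves. */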
3918 int unit_coldplug(Unit *u) {
3919 int r = 0, q;
3920 char **i;
3921
3922 assert(u);
3923
3924 /* Make sure we don't enter a loop, when coldplugging recursively. */
3925 if (u->coldplugged)
3926 return 0;
3927
3928 u->coldplugged = true;
3929
3930 STRV_FOREACH(i, u->deserialized_refs) {
3931 q = bus_unit_track_add_name(u, *i);
3932 if (q < 0 && r >= 0)
3933 r = q;
3934 }
3935 u->deserialized_refs = strv_free(u->deserialized_refs);
3936
3937 if (UNIT_VTABLE(u)->coldplug) {
3938 q = UNIT_VTABLE(u)->coldplug(u);
3939 if (q < 0 && r >= 0)
3940 r = q;
3941 }
3942
3943 if (u->job) {
3944 q = job_coldplug(u->job);
3945 if (q < 0 && r >= 0)
3946 r = q;
3947 }
3948
3949 return r;
3950 }
3951
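/* Gives the unit type a chance to catch up with external state that might have changed while we
 * weren't watching, typically right after coldplugging. */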
3952 void unit_catchup(Unit *u) {
3953 assert(u);
3954
3955 if (UNIT_VTABLE(u)->catchup)
3956 UNIT_VTABLE(u)->catchup(u);
3957 }
3958
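/* Returns true if the file at 'path' was modified after 'mtime', i.e. the configuration loaded
 * from it is out-of-date. For masked units this checks instead whether the path stopped being
 * masked. */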
3959 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3960 struct stat st;
3961
3962 if (!path)
3963 return false;
3964
3965 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3966 * are never out-of-date. */
3967 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3968 return false;
3969
3970 if (stat(path, &st) < 0)
3971 /* What, cannot access this anymore? */
3972 return true;
3973
3974 if (path_masked)
3975 /* For masked files check if they are still so */
3976 return !null_or_empty(&st);
3977
3978 /* For non-empty files check the mtime */
3979 return timespec_load(&st.st_mtim) > mtime;
3982 }
3983
3984 bool unit_need_daemon_reload(Unit *u) {
3985 _cleanup_strv_free_ char **t = NULL;
3986 char **path;
3987
3988 assert(u);
3989
3990 /* For unit files, we allow masking… */
3991 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3992 u->load_state == UNIT_MASKED))
3993 return true;
3994
3995 /* Source paths should not be masked… */
3996 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3997 return true;
3998
3999 if (u->load_state == UNIT_LOADED)
4000 (void) unit_find_dropin_paths(u, &t);
4001 if (!strv_equal(u->dropin_paths, t))
4002 return true;
4003
4004 /* … any drop-ins that are masked are simply omitted from the list. */
4005 STRV_FOREACH(path, u->dropin_paths)
4006 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4007 return true;
4008
4009 return false;
4010 }
4011
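/* Clears the unit's failure state and resets its start rate limit, so that the next start attempt
 * begins with a clean slate. */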
4012 void unit_reset_failed(Unit *u) {
4013 assert(u);
4014
4015 if (UNIT_VTABLE(u)->reset_failed)
4016 UNIT_VTABLE(u)->reset_failed(u);
4017
4018 RATELIMIT_RESET(u->start_limit);
4019 u->start_limit_hit = false;
4020 }
4021
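/* Returns the unit whose state this unit follows (i.e. mirrors), if the unit type implements the
 * concept, or NULL otherwise. */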
4022 Unit *unit_following(Unit *u) {
4023 assert(u);
4024
4025 if (UNIT_VTABLE(u)->following)
4026 return UNIT_VTABLE(u)->following(u);
4027
4028 return NULL;
4029 }
4030
4031 bool unit_stop_pending(Unit *u) {
4032 assert(u);
4033
4034 /* This call does not check the current state of the unit. It's
4035 * hence useful to be called from state change calls of the
4036 * unit itself, where the state isn't updated yet. This is
4037 * different from unit_inactive_or_pending() which checks both
4038 * the current state and for a queued job. */
4039
4040 return u->job && u->job->type == JOB_STOP;
4041 }
4042
4043 bool unit_inactive_or_pending(Unit *u) {
4044 assert(u);
4045
4046 /* Returns true if the unit is inactive or going down */
4047
4048 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4049 return true;
4050
4051 if (unit_stop_pending(u))
4052 return true;
4053
4054 return false;
4055 }
4056
4057 bool unit_active_or_pending(Unit *u) {
4058 assert(u);
4059
4060 /* Returns true if the unit is active or going up */
4061
4062 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4063 return true;
4064
4065 if (u->job &&
4066 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4067 return true;
4068
4069 return false;
4070 }
4071
4072 bool unit_will_restart(Unit *u) {
4073 assert(u);
4074
4075 if (!UNIT_VTABLE(u)->will_restart)
4076 return false;
4077
4078 return UNIT_VTABLE(u)->will_restart(u);
4079 }
4080
4081 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4082 assert(u);
4083 assert(w >= 0 && w < _KILL_WHO_MAX);
4084 assert(SIGNAL_VALID(signo));
4085
4086 if (!UNIT_VTABLE(u)->kill)
4087 return -EOPNOTSUPP;
4088
4089 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4090 }
4091
4092 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4093 _cleanup_set_free_ Set *pid_set = NULL;
4094 int r;
4095
4096 pid_set = set_new(NULL);
4097 if (!pid_set)
4098 return NULL;
4099
4100 /* Exclude the main/control pids from being killed via the cgroup */
4101 if (main_pid > 0) {
4102 r = set_put(pid_set, PID_TO_PTR(main_pid));
4103 if (r < 0)
4104 return NULL;
4105 }
4106
4107 if (control_pid > 0) {
4108 r = set_put(pid_set, PID_TO_PTR(control_pid));
4109 if (r < 0)
4110 return NULL;
4111 }
4112
4113 return TAKE_PTR(pid_set);
4114 }
4115
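/* Shared implementation backing the Kill() bus call: delivers 'signo' to the main and/or control
 * process, and for KILL_ALL* also to the remaining cgroup members. The KILL_ALL_FAIL and
 * KILL_CONTROL_FAIL variants additionally return -ESRCH if nothing was actually killed. */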
4116 int unit_kill_common(
4117 Unit *u,
4118 KillWho who,
4119 int signo,
4120 pid_t main_pid,
4121 pid_t control_pid,
4122 sd_bus_error *error) {
4123
4124 int r = 0;
4125 bool killed = false;
4126
4127 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4128 if (main_pid < 0)
4129 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4130 else if (main_pid == 0)
4131 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4132 }
4133
4134 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4135 if (control_pid < 0)
4136 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4137 else if (control_pid == 0)
4138 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4139 }
4140
4141 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4142 if (control_pid > 0) {
4143 if (kill(control_pid, signo) < 0)
4144 r = -errno;
4145 else
4146 killed = true;
4147 }
4148
4149 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4150 if (main_pid > 0) {
4151 if (kill(main_pid, signo) < 0)
4152 r = -errno;
4153 else
4154 killed = true;
4155 }
4156
4157 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4158 _cleanup_set_free_ Set *pid_set = NULL;
4159 int q;
4160
4161 /* Exclude the main/control pids from being killed via the cgroup */
4162 pid_set = unit_pid_set(main_pid, control_pid);
4163 if (!pid_set)
4164 return -ENOMEM;
4165
4166 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4167 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4168 r = q;
4169 else
4170 killed = true;
4171 }
4172
4173 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4174 return -ESRCH;
4175
4176 return r;
4177 }
4178
4179 int unit_following_set(Unit *u, Set **s) {
4180 assert(u);
4181 assert(s);
4182
4183 if (UNIT_VTABLE(u)->following_set)
4184 return UNIT_VTABLE(u)->following_set(u, s);
4185
4186 *s = NULL;
4187 return 0;
4188 }
4189
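/* Looks up (and caches) the enablement state of the unit file backing this unit; if the lookup
 * fails the state is recorded as UNIT_FILE_BAD. */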
4190 UnitFileState unit_get_unit_file_state(Unit *u) {
4191 int r;
4192
4193 assert(u);
4194
4195 if (u->unit_file_state < 0 && u->fragment_path) {
4196 r = unit_file_get_state(
4197 u->manager->unit_file_scope,
4198 NULL,
4199 u->id,
4200 &u->unit_file_state);
4201 if (r < 0)
4202 u->unit_file_state = UNIT_FILE_BAD;
4203 }
4204
4205 return u->unit_file_state;
4206 }
4207
4208 int unit_get_unit_file_preset(Unit *u) {
4209 assert(u);
4210
4211 if (u->unit_file_preset < 0 && u->fragment_path)
4212 u->unit_file_preset = unit_file_query_preset(
4213 u->manager->unit_file_scope,
4214 NULL,
4215 basename(u->fragment_path));
4216
4217 return u->unit_file_preset;
4218 }
4219
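/* Points a UnitRef at 'target' on behalf of 'source', dropping any reference it held before, and
 * links it into the target's refs_by_target list so the target knows it is pinned. */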
4220 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4221 assert(ref);
4222 assert(source);
4223 assert(target);
4224
4225 if (ref->target)
4226 unit_ref_unset(ref);
4227
4228 ref->source = source;
4229 ref->target = target;
4230 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4231 return target;
4232 }
4233
4234 void unit_ref_unset(UnitRef *ref) {
4235 assert(ref);
4236
4237 if (!ref->target)
4238 return;
4239
4240 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4241 * be unreferenced now. */
4242 unit_add_to_gc_queue(ref->target);
4243
4244 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4245 ref->source = ref->target = NULL;
4246 }
4247
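/* Derives a dynamic user name from the unit name: the unit's prefix is used directly if it is a
 * valid user/group name, otherwise we fall back to "_du" followed by a hash of the prefix. */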
4248 static int user_from_unit_name(Unit *u, char **ret) {
4249
4250 static const uint8_t hash_key[] = {
4251 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4252 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4253 };
4254
4255 _cleanup_free_ char *n = NULL;
4256 int r;
4257
4258 r = unit_name_to_prefix(u->id, &n);
4259 if (r < 0)
4260 return r;
4261
4262 if (valid_user_group_name(n)) {
4263 *ret = TAKE_PTR(n);
4264 return 0;
4265 }
4266
4267 /* If we can't use the unit name as a user name, then let's hash it and use that */
4268 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4269 return -ENOMEM;
4270
4271 return 0;
4272 }
4273
4274 int unit_patch_contexts(Unit *u) {
4275 CGroupContext *cc;
4276 ExecContext *ec;
4277 unsigned i;
4278 int r;
4279
4280 assert(u);
4281
4282 /* Patch in the manager defaults into the exec and cgroup
4283 * contexts, _after_ the rest of the settings have been
4284 * initialized */
4285
4286 ec = unit_get_exec_context(u);
4287 if (ec) {
4288 /* This only copies in the ones that need memory */
4289 for (i = 0; i < _RLIMIT_MAX; i++)
4290 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4291 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4292 if (!ec->rlimit[i])
4293 return -ENOMEM;
4294 }
4295
4296 if (MANAGER_IS_USER(u->manager) &&
4297 !ec->working_directory) {
4298
4299 r = get_home_dir(&ec->working_directory);
4300 if (r < 0)
4301 return r;
4302
4303 /* Allow user services to run, even if the
4304 * home directory is missing */
4305 ec->working_directory_missing_ok = true;
4306 }
4307
4308 if (ec->private_devices)
4309 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4310
4311 if (ec->protect_kernel_modules)
4312 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4313
4314 if (ec->dynamic_user) {
4315 if (!ec->user) {
4316 r = user_from_unit_name(u, &ec->user);
4317 if (r < 0)
4318 return r;
4319 }
4320
4321 if (!ec->group) {
4322 ec->group = strdup(ec->user);
4323 if (!ec->group)
4324 return -ENOMEM;
4325 }
4326
4327 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4328 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4329 * sandbox. */
4330
4331 ec->private_tmp = true;
4332 ec->remove_ipc = true;
4333 ec->protect_system = PROTECT_SYSTEM_STRICT;
4334 if (ec->protect_home == PROTECT_HOME_NO)
4335 ec->protect_home = PROTECT_HOME_READ_ONLY;
4336
4337 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4338 * them. */
4339 ec->no_new_privileges = true;
4340 ec->restrict_suid_sgid = true;
4341 }
4342 }
4343
4344 cc = unit_get_cgroup_context(u);
4345 if (cc && ec) {
4346
4347 if (ec->private_devices &&
4348 cc->device_policy == CGROUP_AUTO)
4349 cc->device_policy = CGROUP_CLOSED;
4350
4351 if (ec->root_image &&
4352 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4353
4354 /* When RootImage= is specified, the following devices are touched. */
4355 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4356 if (r < 0)
4357 return r;
4358
4359 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4360 if (r < 0)
4361 return r;
4362
4363 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4364 if (r < 0)
4365 return r;
4366 }
4367 }
4368
4369 return 0;
4370 }
4371
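/* The accessors below resolve the per-type context/runtime structures through the offsets
 * declared in the unit vtable; they return NULL for unit types that don't carry the respective
 * structure. */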
4372 ExecContext *unit_get_exec_context(Unit *u) {
4373 size_t offset;
4374 assert(u);
4375
4376 if (u->type < 0)
4377 return NULL;
4378
4379 offset = UNIT_VTABLE(u)->exec_context_offset;
4380 if (offset <= 0)
4381 return NULL;
4382
4383 return (ExecContext*) ((uint8_t*) u + offset);
4384 }
4385
4386 KillContext *unit_get_kill_context(Unit *u) {
4387 size_t offset;
4388 assert(u);
4389
4390 if (u->type < 0)
4391 return NULL;
4392
4393 offset = UNIT_VTABLE(u)->kill_context_offset;
4394 if (offset <= 0)
4395 return NULL;
4396
4397 return (KillContext*) ((uint8_t*) u + offset);
4398 }
4399
4400 CGroupContext *unit_get_cgroup_context(Unit *u) {
4401 size_t offset;
4402
4403 if (u->type < 0)
4404 return NULL;
4405
4406 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4407 if (offset <= 0)
4408 return NULL;
4409
4410 return (CGroupContext*) ((uint8_t*) u + offset);
4411 }
4412
4413 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4414 size_t offset;
4415
4416 if (u->type < 0)
4417 return NULL;
4418
4419 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4420 if (offset <= 0)
4421 return NULL;
4422
4423 return *(ExecRuntime**) ((uint8_t*) u + offset);
4424 }
4425
4426 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4427 assert(u);
4428
4429 if (UNIT_WRITE_FLAGS_NOOP(flags))
4430 return NULL;
4431
4432 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4433 return u->manager->lookup_paths.transient;
4434
4435 if (flags & UNIT_PERSISTENT)
4436 return u->manager->lookup_paths.persistent_control;
4437
4438 if (flags & UNIT_RUNTIME)
4439 return u->manager->lookup_paths.runtime_control;
4440
4441 return NULL;
4442 }
4443
4444 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4445 char *ret = NULL;
4446
4447 if (!s)
4448 return NULL;
4449
4450 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4451 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4452 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4453 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4454 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4455 * allocations. */
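/* Hypothetical usage sketch (names here are illustrative only):
 *
 *     _cleanup_free_ char *t = NULL;
 *     const char *p = unit_escape_setting(s, UNIT_ESCAPE_SPECIFIERS|UNIT_ESCAPE_C, &t);
 *     if (!p)
 *             return -ENOMEM;
 *     (use p here; t owns the allocation, if one was needed, and is freed automatically)
 */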
4456
4457 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4458 ret = specifier_escape(s);
4459 if (!ret)
4460 return NULL;
4461
4462 s = ret;
4463 }
4464
4465 if (flags & UNIT_ESCAPE_C) {
4466 char *a;
4467
4468 a = cescape(s);
4469 free(ret);
4470 if (!a)
4471 return NULL;
4472
4473 ret = a;
4474 }
4475
4476 if (buf) {
4477 *buf = ret;
4478 return ret ?: (char*) s;
4479 }
4480
4481 return ret ?: strdup(s);
4482 }
4483
4484 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4485 _cleanup_free_ char *result = NULL;
4486 size_t n = 0, allocated = 0;
4487 char **i;
4488
4489 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4490 * way suitable for ExecStart= stanzas */
4491
4492 STRV_FOREACH(i, l) {
4493 _cleanup_free_ char *buf = NULL;
4494 const char *p;
4495 size_t a;
4496 char *q;
4497
4498 p = unit_escape_setting(*i, flags, &buf);
4499 if (!p)
4500 return NULL;
4501
4502 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4503 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4504 return NULL;
4505
4506 q = result + n;
4507 if (n > 0)
4508 *(q++) = ' ';
4509
4510 *(q++) = '"';
4511 q = stpcpy(q, p);
4512 *(q++) = '"';
4513
4514 n += a;
4515 }
4516
4517 if (!GREEDY_REALLOC(result, allocated, n + 1))
4518 return NULL;
4519
4520 result[n] = 0;
4521
4522 return TAKE_PTR(result);
4523 }
4524
4525 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4526 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4527 const char *dir, *wrapped;
4528 int r;
4529
4530 assert(u);
4531 assert(name);
4532 assert(data);
4533
4534 if (UNIT_WRITE_FLAGS_NOOP(flags))
4535 return 0;
4536
4537 data = unit_escape_setting(data, flags, &escaped);
4538 if (!data)
4539 return -ENOMEM;
4540
4541 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress the
4542 * header if the previous section header is the same */
4543
4544 if (flags & UNIT_PRIVATE) {
4545 if (!UNIT_VTABLE(u)->private_section)
4546 return -EINVAL;
4547
4548 if (!u->transient_file || u->last_section_private < 0)
4549 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4550 else if (u->last_section_private == 0)
4551 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4552 } else {
4553 if (!u->transient_file || u->last_section_private < 0)
4554 data = strjoina("[Unit]\n", data);
4555 else if (u->last_section_private > 0)
4556 data = strjoina("\n[Unit]\n", data);
4557 }
4558
4559 if (u->transient_file) {
4560 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4561 * write to the transient unit file. */
4562 fputs(data, u->transient_file);
4563
4564 if (!endswith(data, "\n"))
4565 fputc('\n', u->transient_file);
4566
4567 /* Remember which section we wrote this entry to */
4568 u->last_section_private = !!(flags & UNIT_PRIVATE);
4569 return 0;
4570 }
4571
4572 dir = unit_drop_in_dir(u, flags);
4573 if (!dir)
4574 return -EINVAL;
4575
4576 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4577 "# or an equivalent operation. Do not edit.\n",
4578 data,
4579 "\n");
4580
4581 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4582 if (r < 0)
4583 return r;
4584
4585 (void) mkdir_p_label(p, 0755);
4586 r = write_string_file_atomic_label(q, wrapped);
4587 if (r < 0)
4588 return r;
4589
4590 r = strv_push(&u->dropin_paths, q);
4591 if (r < 0)
4592 return r;
4593 q = NULL;
4594
4595 strv_uniq(u->dropin_paths);
4596
4597 u->dropin_mtime = now(CLOCK_REALTIME);
4598
4599 return 0;
4600 }
4601
4602 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4603 _cleanup_free_ char *p = NULL;
4604 va_list ap;
4605 int r;
4606
4607 assert(u);
4608 assert(name);
4609 assert(format);
4610
4611 if (UNIT_WRITE_FLAGS_NOOP(flags))
4612 return 0;
4613
4614 va_start(ap, format);
4615 r = vasprintf(&p, format, ap);
4616 va_end(ap);
4617
4618 if (r < 0)
4619 return -ENOMEM;
4620
4621 return unit_write_setting(u, flags, name, p);
4622 }
4623
4624 int unit_make_transient(Unit *u) {
4625 _cleanup_free_ char *path = NULL;
4626 FILE *f;
4627
4628 assert(u);
4629
4630 if (!UNIT_VTABLE(u)->can_transient)
4631 return -EOPNOTSUPP;
4632
4633 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4634
4635 path = path_join(u->manager->lookup_paths.transient, u->id);
4636 if (!path)
4637 return -ENOMEM;
4638
4639 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4640 * creating the transient unit, and is closed in unit_load() as soon as we start loading the file. */
4641
4642 RUN_WITH_UMASK(0022) {
4643 f = fopen(path, "we");
4644 if (!f)
4645 return -errno;
4646 }
4647
4648 safe_fclose(u->transient_file);
4649 u->transient_file = f;
4650
4651 free_and_replace(u->fragment_path, path);
4652
4653 u->source_path = mfree(u->source_path);
4654 u->dropin_paths = strv_free(u->dropin_paths);
4655 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4656
4657 u->load_state = UNIT_STUB;
4658 u->load_error = 0;
4659 u->transient = true;
4660
4661 unit_add_to_dbus_queue(u);
4662 unit_add_to_gc_queue(u);
4663
4664 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4665 u->transient_file);
4666
4667 return 0;
4668 }
4669
4670 static int log_kill(pid_t pid, int sig, void *userdata) {
4671 _cleanup_free_ char *comm = NULL;
4672
4673 (void) get_process_comm(pid, &comm);
4674
4675 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4676 only, like for example systemd's own PAM stub process. */
4677 if (comm && comm[0] == '(')
4678 return 0;
4679
4680 log_unit_notice(userdata,
4681 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4682 pid,
4683 strna(comm),
4684 signal_to_string(sig));
4685
4686 return 1;
4687 }
4688
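/* Maps a kill operation to the signal configured for it in the kill context, i.e. KillSignal=,
 * FinalKillSignal= or WatchdogSignal=. */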
4689 static int operation_to_signal(KillContext *c, KillOperation k) {
4690 assert(c);
4691
4692 switch (k) {
4693
4694 case KILL_TERMINATE:
4695 case KILL_TERMINATE_AND_LOG:
4696 return c->kill_signal;
4697
4698 case KILL_KILL:
4699 return c->final_kill_signal;
4700
4701 case KILL_WATCHDOG:
4702 return c->watchdog_signal;
4703
4704 default:
4705 assert_not_reached("KillOperation unknown");
4706 }
4707 }
4708
4709 int unit_kill_context(
4710 Unit *u,
4711 KillContext *c,
4712 KillOperation k,
4713 pid_t main_pid,
4714 pid_t control_pid,
4715 bool main_pid_alien) {
4716
4717 bool wait_for_exit = false, send_sighup;
4718 cg_kill_log_func_t log_func = NULL;
4719 int sig, r;
4720
4721 assert(u);
4722 assert(c);
4723
4724 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4725 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4726
4727 if (c->kill_mode == KILL_NONE)
4728 return 0;
4729
4730 sig = operation_to_signal(c, k);
4731
4732 send_sighup =
4733 c->send_sighup &&
4734 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4735 sig != SIGHUP;
4736
4737 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4738 log_func = log_kill;
4739
4740 if (main_pid > 0) {
4741 if (log_func)
4742 log_func(main_pid, sig, u);
4743
4744 r = kill_and_sigcont(main_pid, sig);
4745 if (r < 0 && r != -ESRCH) {
4746 _cleanup_free_ char *comm = NULL;
4747 (void) get_process_comm(main_pid, &comm);
4748
4749 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4750 } else {
4751 if (!main_pid_alien)
4752 wait_for_exit = true;
4753
4754 if (r != -ESRCH && send_sighup)
4755 (void) kill(main_pid, SIGHUP);
4756 }
4757 }
4758
4759 if (control_pid > 0) {
4760 if (log_func)
4761 log_func(control_pid, sig, u);
4762
4763 r = kill_and_sigcont(control_pid, sig);
4764 if (r < 0 && r != -ESRCH) {
4765 _cleanup_free_ char *comm = NULL;
4766 (void) get_process_comm(control_pid, &comm);
4767
4768 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4769 } else {
4770 wait_for_exit = true;
4771
4772 if (r != -ESRCH && send_sighup)
4773 (void) kill(control_pid, SIGHUP);
4774 }
4775 }
4776
4777 if (u->cgroup_path &&
4778 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4779 _cleanup_set_free_ Set *pid_set = NULL;
4780
4781 /* Exclude the main/control pids from being killed via the cgroup */
4782 pid_set = unit_pid_set(main_pid, control_pid);
4783 if (!pid_set)
4784 return -ENOMEM;
4785
4786 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4787 sig,
4788 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4789 pid_set,
4790 log_func, u);
4791 if (r < 0) {
4792 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4793 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4794
4795 } else if (r > 0) {
4796
4797 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4798 * we are running in a container or if this is a delegation unit, simply because cgroup
4799 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4800 * of containers it can be confused easily by left-over directories in the cgroup — which
4801 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4802 * there we get proper events. Hence rely on them. */
4803
4804 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4805 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4806 wait_for_exit = true;
4807
4808 if (send_sighup) {
4809 set_free(pid_set);
4810
4811 pid_set = unit_pid_set(main_pid, control_pid);
4812 if (!pid_set)
4813 return -ENOMEM;
4814
4815 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4816 SIGHUP,
4817 CGROUP_IGNORE_SELF,
4818 pid_set,
4819 NULL, NULL);
4820 }
4821 }
4822 }
4823
4824 return wait_for_exit;
4825 }
4826
4827 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4828 _cleanup_free_ char *p = NULL;
4829 UnitDependencyInfo di;
4830 int r;
4831
4832 assert(u);
4833 assert(path);
4834
4835 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4836 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4837 * be). However, we build a prefix table for all possible prefixes so that newly appearing mount units can
4838 * easily determine which units to make themselves a dependency of. */
4839
4840 if (!path_is_absolute(path))
4841 return -EINVAL;
4842
4843 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4844 if (r < 0)
4845 return r;
4846
4847 p = strdup(path);
4848 if (!p)
4849 return -ENOMEM;
4850
4851 path = path_simplify(p, true);
4852
4853 if (!path_is_normalized(path))
4854 return -EPERM;
4855
4856 if (hashmap_contains(u->requires_mounts_for, path))
4857 return 0;
4858
4859 di = (UnitDependencyInfo) {
4860 .origin_mask = mask
4861 };
4862
4863 r = hashmap_put(u->requires_mounts_for, path, di.data);
4864 if (r < 0)
4865 return r;
4866 p = NULL;
4867
4868 char prefix[strlen(path) + 1];
4869 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4870 Set *x;
4871
4872 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4873 if (!x) {
4874 _cleanup_free_ char *q = NULL;
4875
4876 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4877 if (r < 0)
4878 return r;
4879
4880 q = strdup(prefix);
4881 if (!q)
4882 return -ENOMEM;
4883
4884 x = set_new(NULL);
4885 if (!x)
4886 return -ENOMEM;
4887
4888 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4889 if (r < 0) {
4890 set_free(x);
4891 return r;
4892 }
4893 q = NULL;
4894 }
4895
4896 r = set_put(x, u);
4897 if (r < 0)
4898 return r;
4899 }
4900
4901 return 0;
4902 }
4903
4904 int unit_setup_exec_runtime(Unit *u) {
4905 ExecRuntime **rt;
4906 size_t offset;
4907 Unit *other;
4908 Iterator i;
4909 void *v;
4910 int r;
4911
4912 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4913 assert(offset > 0);
4914
4915 /* Check whether there already is an ExecRuntime for this unit */
4916 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4917 if (*rt)
4918 return 0;
4919
4920 /* Try to get it from somebody else */
4921 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4922 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4923 if (r == 1)
4924 return 1;
4925 }
4926
4927 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4928 }
4929
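/* If DynamicUser= is enabled, acquires (or references) the UID/GID allocation for the configured
 * user and group before any process is forked off. */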
4930 int unit_setup_dynamic_creds(Unit *u) {
4931 ExecContext *ec;
4932 DynamicCreds *dcreds;
4933 size_t offset;
4934
4935 assert(u);
4936
4937 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4938 assert(offset > 0);
4939 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4940
4941 ec = unit_get_exec_context(u);
4942 assert(ec);
4943
4944 if (!ec->dynamic_user)
4945 return 0;
4946
4947 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4948 }
4949
4950 bool unit_type_supported(UnitType t) {
4951 if (_unlikely_(t < 0))
4952 return false;
4953 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4954 return false;
4955
4956 if (!unit_vtable[t]->supported)
4957 return true;
4958
4959 return unit_vtable[t]->supported();
4960 }
4961
4962 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4963 int r;
4964
4965 assert(u);
4966 assert(where);
4967
4968 r = dir_is_empty(where);
4969 if (r > 0 || r == -ENOTDIR)
4970 return;
4971 if (r < 0) {
4972 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4973 return;
4974 }
4975
4976 log_struct(LOG_NOTICE,
4977 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4978 LOG_UNIT_ID(u),
4979 LOG_UNIT_INVOCATION_ID(u),
4980 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4981 "WHERE=%s", where);
4982 }
4983
4984 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4985 _cleanup_free_ char *canonical_where = NULL;
4986 int r;
4987
4988 assert(u);
4989 assert(where);
4990
4991 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4992 if (r < 0) {
4993 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4994 return 0;
4995 }
4996
4997 /* We will happily ignore a trailing slash (or any redundant slashes) */
4998 if (path_equal(where, canonical_where))
4999 return 0;
5000
5001 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5002 log_struct(LOG_ERR,
5003 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5004 LOG_UNIT_ID(u),
5005 LOG_UNIT_INVOCATION_ID(u),
5006 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5007 "WHERE=%s", where);
5008
5009 return -ELOOP;
5010 }
5011
5012 bool unit_is_pristine(Unit *u) {
5013 assert(u);
5014
5015 /* Checks whether the unit is already in use in one way or another: it has a fragment or
5016 * source path, drop-ins, a queued job, or was merged into another unit. Note that to cater
5017 * for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5018 * even though nothing was actually loaded, as those unit types don't require a file on
5019 * disk. */
5020
5021 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
5022 u->fragment_path ||
5023 u->source_path ||
5024 !strv_isempty(u->dropin_paths) ||
5025 u->job ||
5026 u->merged_into);
5027 }
5028
5029 pid_t unit_control_pid(Unit *u) {
5030 assert(u);
5031
5032 if (UNIT_VTABLE(u)->control_pid)
5033 return UNIT_VTABLE(u)->control_pid(u);
5034
5035 return 0;
5036 }
5037
5038 pid_t unit_main_pid(Unit *u) {
5039 assert(u);
5040
5041 if (UNIT_VTABLE(u)->main_pid)
5042 return UNIT_VTABLE(u)->main_pid(u);
5043
5044 return 0;
5045 }
5046
5047 static void unit_unref_uid_internal(
5048 Unit *u,
5049 uid_t *ref_uid,
5050 bool destroy_now,
5051 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5052
5053 assert(u);
5054 assert(ref_uid);
5055 assert(_manager_unref_uid);
5056
5057 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5058 * gid_t are actually the same type, with the same validity rules.
5059 *
5060 * Drops a reference to UID/GID from a unit. */
5061
5062 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5063 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5064
5065 if (!uid_is_valid(*ref_uid))
5066 return;
5067
5068 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5069 *ref_uid = UID_INVALID;
5070 }
5071
5072 void unit_unref_uid(Unit *u, bool destroy_now) {
5073 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5074 }
5075
5076 void unit_unref_gid(Unit *u, bool destroy_now) {
5077 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5078 }
5079
5080 static int unit_ref_uid_internal(
5081 Unit *u,
5082 uid_t *ref_uid,
5083 uid_t uid,
5084 bool clean_ipc,
5085 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5086
5087 int r;
5088
5089 assert(u);
5090 assert(ref_uid);
5091 assert(uid_is_valid(uid));
5092 assert(_manager_ref_uid);
5093
5094 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5095 * are actually the same type, and have the same validity rules.
5096 *
5097 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5098 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5099 * drops to zero. */
5100
5101 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5102 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5103
5104 if (*ref_uid == uid)
5105 return 0;
5106
5107 if (uid_is_valid(*ref_uid)) /* Already set? */
5108 return -EBUSY;
5109
5110 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5111 if (r < 0)
5112 return r;
5113
5114 *ref_uid = uid;
5115 return 1;
5116 }
5117
5118 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5119 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5120 }
5121
5122 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5123 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5124 }
5125
5126 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5127 int r = 0, q = 0;
5128
5129 assert(u);
5130
5131 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5132
5133 if (uid_is_valid(uid)) {
5134 r = unit_ref_uid(u, uid, clean_ipc);
5135 if (r < 0)
5136 return r;
5137 }
5138
5139 if (gid_is_valid(gid)) {
5140 q = unit_ref_gid(u, gid, clean_ipc);
5141 if (q < 0) {
5142 if (r > 0)
5143 unit_unref_uid(u, false);
5144
5145 return q;
5146 }
5147 }
5148
5149 return r > 0 || q > 0;
5150 }
5151
5152 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5153 ExecContext *c;
5154 int r;
5155
5156 assert(u);
5157
5158 c = unit_get_exec_context(u);
5159
5160 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5161 if (r < 0)
5162 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5163
5164 return r;
5165 }
5166
5167 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5168 assert(u);
5169
5170 unit_unref_uid(u, destroy_now);
5171 unit_unref_gid(u, destroy_now);
5172 }
5173
5174 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5175 int r;
5176
5177 assert(u);
5178
5179 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group name
5180 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5181 * objects when no service references the UID/GID anymore. */
5182
5183 r = unit_ref_uid_gid(u, uid, gid);
5184 if (r > 0)
5185 unit_add_to_dbus_queue(u);
5186 }
5187
5188 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5189 int r;
5190
5191 assert(u);
5192
5193 /* Set the invocation ID for this unit. If this fails we don't roll back to the previous ID, but reset it altogether. */
5194
5195 if (sd_id128_equal(u->invocation_id, id))
5196 return 0;
5197
5198 if (!sd_id128_is_null(u->invocation_id))
5199 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5200
5201 if (sd_id128_is_null(id)) {
5202 r = 0;
5203 goto reset;
5204 }
5205
5206 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5207 if (r < 0)
5208 goto reset;
5209
5210 u->invocation_id = id;
5211 sd_id128_to_string(id, u->invocation_id_string);
5212
5213 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5214 if (r < 0)
5215 goto reset;
5216
5217 return 0;
5218
5219 reset:
5220 u->invocation_id = SD_ID128_NULL;
5221 u->invocation_id_string[0] = 0;
5222 return r;
5223 }
5224
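/* Generates a fresh random invocation ID and installs it as the unit's current one. */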
5225 int unit_acquire_invocation_id(Unit *u) {
5226 sd_id128_t id;
5227 int r;
5228
5229 assert(u);
5230
5231 r = sd_id128_randomize(&id);
5232 if (r < 0)
5233 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5234
5235 r = unit_set_invocation_id(u, id);
5236 if (r < 0)
5237 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5238
5239 unit_add_to_dbus_queue(u);
5240 return 0;
5241 }
5242
5243 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5244 int r;
5245
5246 assert(u);
5247 assert(p);
5248
5249 /* Copy parameters from manager */
5250 r = manager_get_effective_environment(u->manager, &p->environment);
5251 if (r < 0)
5252 return r;
5253
5254 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5255 p->cgroup_supported = u->manager->cgroup_supported;
5256 p->prefix = u->manager->prefix;
5257 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5258
5259 /* Copy parameters from unit */
5260 p->cgroup_path = u->cgroup_path;
5261 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5262
5263 return 0;
5264 }
5265
5266 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5267 int r;
5268
5269 assert(u);
5270 assert(ret);
5271
5272 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5273 * and > 0 in the parent. The child's PID is always returned in *ret. */
5274
5275 (void) unit_realize_cgroup(u);
5276
5277 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5278 if (r != 0)
5279 return r;
5280
5281 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5282 (void) ignore_signals(SIGPIPE, -1);
5283
5284 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5285
5286 if (u->cgroup_path) {
5287 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5288 if (r < 0) {
5289 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5290 _exit(EXIT_CGROUP);
5291 }
5292 }
5293
5294 return 0;
5295 }
5296
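/* Writes back an updated dependency bitmask for 'other' in u's dependency table of type 'd',
 * dropping the entry entirely once neither origin nor destination bits remain. */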
5297 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5298 assert(u);
5299 assert(d >= 0);
5300 assert(d < _UNIT_DEPENDENCY_MAX);
5301 assert(other);
5302
5303 if (di.origin_mask == 0 && di.destination_mask == 0) {
5304 /* No bit set anymore, let's drop the whole entry */
5305 assert_se(hashmap_remove(u->dependencies[d], other));
5306 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5307 } else
5308 /* Mask was reduced, let's update the entry */
5309 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5310 }
5311
5312 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5313 UnitDependency d;
5314
5315 assert(u);
5316
5317 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5318
5319 if (mask == 0)
5320 return;
5321
5322 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5323 bool done;
5324
5325 do {
5326 UnitDependencyInfo di;
5327 Unit *other;
5328 Iterator i;
5329
5330 done = true;
5331
5332 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5333 UnitDependency q;
5334
5335 if ((di.origin_mask & ~mask) == di.origin_mask)
5336 continue;
5337 di.origin_mask &= ~mask;
5338 unit_update_dependency_mask(u, d, other, di);
5339
5340 /* We updated the dependency from our unit to the other unit now. But most dependencies
5341 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5342 * all dependency types on the other unit and delete all those which point to us and
5343 * have the right mask set. */
5344
5345 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5346 UnitDependencyInfo dj;
5347
5348 dj.data = hashmap_get(other->dependencies[q], u);
5349 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5350 continue;
5351 dj.destination_mask &= ~mask;
5352
5353 unit_update_dependency_mask(other, q, u, dj);
5354 }
5355
5356 unit_add_to_gc_queue(other);
5357
5358 done = false;
5359 break;
5360 }
5361
5362 } while (!done);
5363 }
5364 }
5365
5366 static int unit_export_invocation_id(Unit *u) {
5367 const char *p;
5368 int r;
5369
5370 assert(u);
5371
5372 if (u->exported_invocation_id)
5373 return 0;
5374
5375 if (sd_id128_is_null(u->invocation_id))
5376 return 0;
5377
5378 p = strjoina("/run/systemd/units/invocation:", u->id);
5379 r = symlink_atomic(u->invocation_id_string, p);
5380 if (r < 0)
5381 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5382
5383 u->exported_invocation_id = true;
5384 return 0;
5385 }
5386
5387 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5388 const char *p;
5389 char buf[2];
5390 int r;
5391
5392 assert(u);
5393 assert(c);
5394
5395 if (u->exported_log_level_max)
5396 return 0;
5397
5398 if (c->log_level_max < 0)
5399 return 0;
5400
5401 assert(c->log_level_max <= 7);
5402
5403 buf[0] = '0' + c->log_level_max;
5404 buf[1] = 0;
5405
5406 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5407 r = symlink_atomic(buf, p);
5408 if (r < 0)
5409 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5410
5411 u->exported_log_level_max = true;
5412 return 0;
5413 }
5414
5415 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5416 _cleanup_close_ int fd = -1;
5417 struct iovec *iovec;
5418 const char *p;
5419 char *pattern;
5420 le64_t *sizes;
5421 ssize_t n;
5422 size_t i;
5423 int r;
5424
5425 if (u->exported_log_extra_fields)
5426 return 0;
5427
5428 if (c->n_log_extra_fields <= 0)
5429 return 0;
5430
5431 sizes = newa(le64_t, c->n_log_extra_fields);
5432 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5433
5434 for (i = 0; i < c->n_log_extra_fields; i++) {
5435 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5436
5437 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5438 iovec[i*2+1] = c->log_extra_fields[i];
5439 }
5440
5441 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5442 pattern = strjoina(p, ".XXXXXX");
5443
5444 fd = mkostemp_safe(pattern);
5445 if (fd < 0)
5446 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5447
5448 n = writev(fd, iovec, c->n_log_extra_fields*2);
5449 if (n < 0) {
5450 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5451 goto fail;
5452 }
5453
5454 (void) fchmod(fd, 0644);
5455
5456 if (rename(pattern, p) < 0) {
5457 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5458 goto fail;
5459 }
5460
5461 u->exported_log_extra_fields = true;
5462 return 0;
5463
5464 fail:
5465 (void) unlink(pattern);
5466 return r;
5467 }
5468
5469 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5470 _cleanup_free_ char *buf = NULL;
5471 const char *p;
5472 int r;
5473
5474 assert(u);
5475 assert(c);
5476
5477 if (u->exported_log_rate_limit_interval)
5478 return 0;
5479
5480 if (c->log_rate_limit_interval_usec == 0)
5481 return 0;
5482
5483 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5484
5485 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5486 return log_oom();
5487
5488 r = symlink_atomic(buf, p);
5489 if (r < 0)
5490 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5491
5492 u->exported_log_rate_limit_interval = true;
5493 return 0;
5494 }
5495
5496 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5497 _cleanup_free_ char *buf = NULL;
5498 const char *p;
5499 int r;
5500
5501 assert(u);
5502 assert(c);
5503
5504 if (u->exported_log_rate_limit_burst)
5505 return 0;
5506
5507 if (c->log_rate_limit_burst == 0)
5508 return 0;
5509
5510 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5511
5512 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5513 return log_oom();
5514
5515 r = symlink_atomic(buf, p);
5516 if (r < 0)
5517 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5518
5519 u->exported_log_rate_limit_burst = true;
5520 return 0;
5521 }
5522
5523 void unit_export_state_files(Unit *u) {
5524 const ExecContext *c;
5525
5526 assert(u);
5527
5528 if (!u->id)
5529 return;
5530
5531 if (!MANAGER_IS_SYSTEM(u->manager))
5532 return;
5533
5534 if (MANAGER_IS_TEST_RUN(u->manager))
5535 return;
5536
5537 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5538 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5539 * the IPC system itself and PID 1 also log to the journal.
5540 *
5541 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system
5542 * as an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5543 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5544 * namespace at least.
5545 *
5546 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5547 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5548 * them with one. */
5549
5550 (void) unit_export_invocation_id(u);
5551
5552 c = unit_get_exec_context(u);
5553 if (c) {
5554 (void) unit_export_log_level_max(u, c);
5555 (void) unit_export_log_extra_fields(u, c);
5556 (void) unit_export_log_rate_limit_interval(u, c);
5557 (void) unit_export_log_rate_limit_burst(u, c);
5558 }
5559 }
5560
5561 void unit_unlink_state_files(Unit *u) {
5562 const char *p;
5563
5564 assert(u);
5565
5566 if (!u->id)
5567 return;
5568
5569 if (!MANAGER_IS_SYSTEM(u->manager))
5570 return;
5571
5572 /* Undoes the effect of unit_export_state_files() */
5573
5574 if (u->exported_invocation_id) {
5575 p = strjoina("/run/systemd/units/invocation:", u->id);
5576 (void) unlink(p);
5577
5578 u->exported_invocation_id = false;
5579 }
5580
5581 if (u->exported_log_level_max) {
5582 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5583 (void) unlink(p);
5584
5585 u->exported_log_level_max = false;
5586 }
5587
5588 if (u->exported_log_extra_fields) {
5589 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5590 (void) unlink(p);
5591
5592 u->exported_log_extra_fields = false;
5593 }
5594
5595 if (u->exported_log_rate_limit_interval) {
5596 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5597 (void) unlink(p);
5598
5599 u->exported_log_rate_limit_interval = false;
5600 }
5601
5602 if (u->exported_log_rate_limit_burst) {
5603 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5604 (void) unlink(p);
5605
5606 u->exported_log_rate_limit_burst = false;
5607 }
5608 }
5609
5610 int unit_prepare_exec(Unit *u) {
5611 int r;
5612
5613 assert(u);
5614
5615 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5616 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5617 r = bpf_firewall_load_custom(u);
5618 if (r < 0)
5619 return r;
5620
5621 /* Prepares everything so that we can fork off a process for this unit */
5622
5623 (void) unit_realize_cgroup(u);
5624
5625 if (u->reset_accounting) {
5626 (void) unit_reset_accounting(u);
5627 u->reset_accounting = false;
5628 }
5629
5630 unit_export_state_files(u);
5631
5632 r = unit_setup_exec_runtime(u);
5633 if (r < 0)
5634 return r;
5635
5636 r = unit_setup_dynamic_creds(u);
5637 if (r < 0)
5638 return r;
5639
5640 return 0;
5641 }
5642
5643 static int log_leftover(pid_t pid, int sig, void *userdata) {
5644 _cleanup_free_ char *comm = NULL;
5645
5646 (void) get_process_comm(pid, &comm);
5647
5648 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5649 return 0;
5650
5651 log_unit_warning(userdata,
5652 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5653 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5654 pid, strna(comm));
5655
5656 return 1;
5657 }
5658
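/* Enumerates processes still left in the unit's cgroup and logs them via log_leftover(); as the
 * signal is 0, nothing is actually killed here. */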
5659 int unit_warn_leftover_processes(Unit *u) {
5660 assert(u);
5661
5662 (void) unit_pick_cgroup_path(u);
5663
5664 if (!u->cgroup_path)
5665 return 0;
5666
5667 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5668 }
5669
5670 bool unit_needs_console(Unit *u) {
5671 ExecContext *ec;
5672 UnitActiveState state;
5673
5674 assert(u);
5675
5676 state = unit_active_state(u);
5677
5678 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5679 return false;
5680
5681 if (UNIT_VTABLE(u)->needs_console)
5682 return UNIT_VTABLE(u)->needs_console(u);
5683
5684 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5685 ec = unit_get_exec_context(u);
5686 if (!ec)
5687 return false;
5688
5689 return exec_context_may_touch_console(ec);
5690 }
5691
5692 const char *unit_label_path(Unit *u) {
5693 const char *p;
5694
5695 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5696 * when validating access checks. */
5697
5698 p = u->source_path ?: u->fragment_path;
5699 if (!p)
5700 return NULL;
5701
5702 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5703 if (path_equal(p, "/dev/null"))
5704 return NULL;
5705
5706 return p;
5707 }
5708
5709 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5710 int r;
5711
5712 assert(u);
5713
5714 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5715 * and not a kernel thread either */
5716
5717 /* First, a simple range check */
5718 if (!pid_is_valid(pid))
5719 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5720
5721 /* Some extra safety check */
5722 if (pid == 1 || pid == getpid_cached())
5723 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5724
5725 /* Don't even begin to bother with kernel threads */
5726 r = is_kernel_thread(pid);
5727 if (r == -ESRCH)
5728 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5729 if (r < 0)
5730 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5731 if (r > 0)
5732 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5733
5734 return 0;
5735 }
5736
5737 void unit_log_success(Unit *u) {
5738 assert(u);
5739
5740 log_struct(LOG_INFO,
5741 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5742 LOG_UNIT_ID(u),
5743 LOG_UNIT_INVOCATION_ID(u),
5744 LOG_UNIT_MESSAGE(u, "Succeeded."));
5745 }
5746
5747 void unit_log_failure(Unit *u, const char *result) {
5748 assert(u);
5749 assert(result);
5750
5751 log_struct(LOG_WARNING,
5752 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5753 LOG_UNIT_ID(u),
5754 LOG_UNIT_INVOCATION_ID(u),
5755 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5756 "UNIT_RESULT=%s", result);
5757 }
5758
5759 void unit_log_skip(Unit *u, const char *result) {
5760 assert(u);
5761 assert(result);
5762
5763 log_struct(LOG_INFO,
5764 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5765 LOG_UNIT_ID(u),
5766 LOG_UNIT_INVOCATION_ID(u),
5767 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5768 "UNIT_RESULT=%s", result);
5769 }
5770
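/* Logs the SIGCHLD code/status of one of the unit's processes in structured form. Anything other
 * than a clean exit (code != CLD_EXITED) is forced to LOG_WARNING. */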
5771 void unit_log_process_exit(
5772 Unit *u,
5773 int level,
5774 const char *kind,
5775 const char *command,
5776 int code,
5777 int status) {
5778
5779 assert(u);
5780 assert(kind);
5781
5782 if (code != CLD_EXITED)
5783 level = LOG_WARNING;
5784
5785 log_struct(level,
5786 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5787 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5788 kind,
5789 sigchld_code_to_string(code), status,
5790 strna(code == CLD_EXITED
5791 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5792 : signal_to_string(status))),
5793 "EXIT_CODE=%s", sigchld_code_to_string(code),
5794 "EXIT_STATUS=%i", status,
5795 "COMMAND=%s", strna(command),
5796 LOG_UNIT_ID(u),
5797 LOG_UNIT_INVOCATION_ID(u));
5798 }
5799
5800 int unit_exit_status(Unit *u) {
5801 assert(u);
5802
5803 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5804 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5805 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5806 * service process has exited abnormally (signal/coredump). */
5807
5808 if (!UNIT_VTABLE(u)->exit_status)
5809 return -EOPNOTSUPP;
5810
5811 return UNIT_VTABLE(u)->exit_status(u);
5812 }
5813
5814 int unit_failure_action_exit_status(Unit *u) {
5815 int r;
5816
5817 assert(u);
5818
5819 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5820
5821 if (u->failure_action_exit_status >= 0)
5822 return u->failure_action_exit_status;
5823
5824 r = unit_exit_status(u);
5825 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5826 return 255;
5827
5828 return r;
5829 }
5830
5831 int unit_success_action_exit_status(Unit *u) {
5832 int r;
5833
5834 assert(u);
5835
5836 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5837
5838 if (u->success_action_exit_status >= 0)
5839 return u->success_action_exit_status;
5840
5841 r = unit_exit_status(u);
5842 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5843 return 255;
5844
5845 return r;
5846 }
5847
5848 int unit_test_trigger_loaded(Unit *u) {
5849 Unit *trigger;
5850
5851 /* Tests whether the unit to trigger is loaded */
5852
5853 trigger = UNIT_TRIGGER(u);
5854 if (!trigger)
5855 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5856 "Refusing to start, no unit to trigger.");
5857 if (trigger->load_state != UNIT_LOADED)
5858 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5859 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5860
5861 return 0;
5862 }
5863
5864 int unit_clean(Unit *u, ExecCleanMask mask) {
5865 UnitActiveState state;
5866
5867 assert(u);
5868
5869 /* Special return values:
5870 *
5871 * -EOPNOTSUPP → cleaning not supported for this unit type
5872 * -EUNATCH → cleaning not defined for this resource type
5873 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5874 * a job queued or similar
5875 */
5876
5877 if (!UNIT_VTABLE(u)->clean)
5878 return -EOPNOTSUPP;
5879
5880 if (mask == 0)
5881 return -EUNATCH;
5882
5883 if (u->load_state != UNIT_LOADED)
5884 return -EBUSY;
5885
5886 if (u->job)
5887 return -EBUSY;
5888
5889 state = unit_active_state(u);
5890 if (!IN_SET(state, UNIT_INACTIVE))
5891 return -EBUSY;
5892
5893 return UNIT_VTABLE(u)->clean(u, mask);
5894 }
5895
5896 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5897 assert(u);
5898
5899 if (!UNIT_VTABLE(u)->clean ||
5900 u->load_state != UNIT_LOADED) {
5901 *ret = 0;
5902 return 0;
5903 }
5904
5905 /* When the clean() method is set, can_clean() really should be set too */
5906 assert(UNIT_VTABLE(u)->can_clean);
5907
5908 return UNIT_VTABLE(u)->can_clean(u, ret);
5909 }
5910
5911 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5912 [COLLECT_INACTIVE] = "inactive",
5913 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5914 };
5915
5916 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);