1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bpf-firewall.h"
16 #include "bus-common-errors.h"
17 #include "bus-util.h"
18 #include "cgroup-util.h"
19 #include "dbus-unit.h"
20 #include "dbus.h"
21 #include "dropin.h"
22 #include "escape.h"
23 #include "execute.h"
24 #include "fd-util.h"
25 #include "fileio-label.h"
26 #include "fileio.h"
27 #include "format-util.h"
28 #include "fs-util.h"
29 #include "id128-util.h"
30 #include "io-util.h"
31 #include "install.h"
32 #include "load-dropin.h"
33 #include "load-fragment.h"
34 #include "log.h"
35 #include "macro.h"
36 #include "missing.h"
37 #include "mkdir.h"
38 #include "parse-util.h"
39 #include "path-util.h"
40 #include "process-util.h"
41 #include "rm-rf.h"
42 #include "serialize.h"
43 #include "set.h"
44 #include "signal-util.h"
45 #include "sparse-endian.h"
46 #include "special.h"
47 #include "specifier.h"
48 #include "stat-util.h"
49 #include "stdio-util.h"
50 #include "string-table.h"
51 #include "string-util.h"
52 #include "strv.h"
53 #include "terminal-util.h"
54 #include "tmpfile-util.h"
55 #include "umask-util.h"
56 #include "unit-name.h"
57 #include "unit.h"
58 #include "user-util.h"
59 #include "virt.h"
60
61 /* Thresholds for logging at INFO level about resource consumption */
62 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
63 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
64 #define MENTIONWORTHY_IP_BYTES (0ULL)
65
66 /* Thresholds for logging at NOTICE level about resource consumption */
67 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
68 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
69 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
70
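/* Every unit type registers its implementation here. Generic code never calls
 * type-specific functions directly; it dispatches through this table via the
 * UNIT_VTABLE() macro. Roughly, a start request travels like this (see
 * unit_start() below):
 *
 *         if (!UNIT_VTABLE(u)->start)
 *                 return -EBADR;
 *         return UNIT_VTABLE(u)->start(u);
 */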
71 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
72 [UNIT_SERVICE] = &service_vtable,
73 [UNIT_SOCKET] = &socket_vtable,
74 [UNIT_TARGET] = &target_vtable,
75 [UNIT_DEVICE] = &device_vtable,
76 [UNIT_MOUNT] = &mount_vtable,
77 [UNIT_AUTOMOUNT] = &automount_vtable,
78 [UNIT_SWAP] = &swap_vtable,
79 [UNIT_TIMER] = &timer_vtable,
80 [UNIT_PATH] = &path_vtable,
81 [UNIT_SLICE] = &slice_vtable,
82 [UNIT_SCOPE] = &scope_vtable,
83 };
84
85 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
86
87 Unit *unit_new(Manager *m, size_t size) {
88 Unit *u;
89
90 assert(m);
91 assert(size >= sizeof(Unit));
92
93 u = malloc0(size);
94 if (!u)
95 return NULL;
96
97 u->names = set_new(&string_hash_ops);
98 if (!u->names)
99 return mfree(u);
100
101 u->manager = m;
102 u->type = _UNIT_TYPE_INVALID;
103 u->default_dependencies = true;
104 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
105 u->unit_file_preset = -1;
106 u->on_failure_job_mode = JOB_REPLACE;
107 u->cgroup_control_inotify_wd = -1;
108 u->cgroup_memory_inotify_wd = -1;
109 u->job_timeout = USEC_INFINITY;
110 u->job_running_timeout = USEC_INFINITY;
111 u->ref_uid = UID_INVALID;
112 u->ref_gid = GID_INVALID;
113 u->cpu_usage_last = NSEC_INFINITY;
114 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
115 u->failure_action_exit_status = u->success_action_exit_status = -1;
116
117 u->ip_accounting_ingress_map_fd = -1;
118 u->ip_accounting_egress_map_fd = -1;
119 u->ipv4_allow_map_fd = -1;
120 u->ipv6_allow_map_fd = -1;
121 u->ipv4_deny_map_fd = -1;
122 u->ipv6_deny_map_fd = -1;
123
124 u->last_section_private = -1;
125
126 u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
127 u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };
128
129 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
130 u->io_accounting_last[i] = UINT64_MAX;
131
132 return u;
133 }
134
135 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
136 _cleanup_(unit_freep) Unit *u = NULL;
137 int r;
138
139 u = unit_new(m, size);
140 if (!u)
141 return -ENOMEM;
142
143 r = unit_add_name(u, name);
144 if (r < 0)
145 return r;
146
147 *ret = TAKE_PTR(u);
148
149 return r;
150 }
151
152 bool unit_has_name(const Unit *u, const char *name) {
153 assert(u);
154 assert(name);
155
156 return set_contains(u->names, (char*) name);
157 }
158
159 static void unit_init(Unit *u) {
160 CGroupContext *cc;
161 ExecContext *ec;
162 KillContext *kc;
163
164 assert(u);
165 assert(u->manager);
166 assert(u->type >= 0);
167
168 cc = unit_get_cgroup_context(u);
169 if (cc) {
170 cgroup_context_init(cc);
171
172                 /* Copy the manager defaults into the cgroup
173                  * context, _before_ the rest of the settings have
174                  * been initialized */
175
176 cc->cpu_accounting = u->manager->default_cpu_accounting;
177 cc->io_accounting = u->manager->default_io_accounting;
178 cc->blockio_accounting = u->manager->default_blockio_accounting;
179 cc->memory_accounting = u->manager->default_memory_accounting;
180 cc->tasks_accounting = u->manager->default_tasks_accounting;
181 cc->ip_accounting = u->manager->default_ip_accounting;
182
183 if (u->type != UNIT_SLICE)
184 cc->tasks_max = u->manager->default_tasks_max;
185 }
186
187 ec = unit_get_exec_context(u);
188 if (ec) {
189 exec_context_init(ec);
190
191 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
192 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
193 }
194
195 kc = unit_get_kill_context(u);
196 if (kc)
197 kill_context_init(kc);
198
199 if (UNIT_VTABLE(u)->init)
200 UNIT_VTABLE(u)->init(u);
201 }
202
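/* Adds a name to the unit. If the name is a template, e.g. "getty@.service",
 * it is first instantiated with the unit's instance string, so that for an
 * instance "tty1" the name actually registered is "getty@tty1.service". */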
203 int unit_add_name(Unit *u, const char *text) {
204 _cleanup_free_ char *s = NULL, *i = NULL;
205 UnitType t;
206 int r;
207
208 assert(u);
209 assert(text);
210
211 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
212
213 if (!u->instance)
214 return -EINVAL;
215
216 r = unit_name_replace_instance(text, u->instance, &s);
217 if (r < 0)
218 return r;
219 } else {
220 s = strdup(text);
221 if (!s)
222 return -ENOMEM;
223 }
224
225 if (set_contains(u->names, s))
226 return 0;
227 if (hashmap_contains(u->manager->units, s))
228 return -EEXIST;
229
230 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
231 return -EINVAL;
232
233 t = unit_name_to_type(s);
234 if (t < 0)
235 return -EINVAL;
236
237 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
238 return -EINVAL;
239
240 r = unit_name_to_instance(s, &i);
241 if (r < 0)
242 return r;
243
244 if (i && !unit_type_may_template(t))
245 return -EINVAL;
246
247         /* Ensure that this unit is either instanced or not instanced,
248          * but not both. Note, however, that we do allow names with
249          * different instance strings! */
250 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
251 return -EINVAL;
252
253 if (!unit_type_may_alias(t) && !set_isempty(u->names))
254 return -EEXIST;
255
256 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
257 return -E2BIG;
258
259 r = set_put(u->names, s);
260 if (r < 0)
261 return r;
262 assert(r > 0);
263
264 r = hashmap_put(u->manager->units, s, u);
265 if (r < 0) {
266 (void) set_remove(u->names, s);
267 return r;
268 }
269
270 if (u->type == _UNIT_TYPE_INVALID) {
271 u->type = t;
272 u->id = s;
273 u->instance = TAKE_PTR(i);
274
275 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
276
277 unit_init(u);
278 }
279
280 s = NULL;
281
282 unit_add_to_dbus_queue(u);
283 return 0;
284 }
285
286 int unit_choose_id(Unit *u, const char *name) {
287 _cleanup_free_ char *t = NULL;
288 char *s, *i;
289 int r;
290
291 assert(u);
292 assert(name);
293
294 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
295
296 if (!u->instance)
297 return -EINVAL;
298
299 r = unit_name_replace_instance(name, u->instance, &t);
300 if (r < 0)
301 return r;
302
303 name = t;
304 }
305
306 /* Selects one of the names of this unit as the id */
307 s = set_get(u->names, (char*) name);
308 if (!s)
309 return -ENOENT;
310
311 /* Determine the new instance from the new id */
312 r = unit_name_to_instance(s, &i);
313 if (r < 0)
314 return r;
315
316 u->id = s;
317
318 free(u->instance);
319 u->instance = i;
320
321 unit_add_to_dbus_queue(u);
322
323 return 0;
324 }
325
326 int unit_set_description(Unit *u, const char *description) {
327 int r;
328
329 assert(u);
330
331 r = free_and_strdup(&u->description, empty_to_null(description));
332 if (r < 0)
333 return r;
334 if (r > 0)
335 unit_add_to_dbus_queue(u);
336
337 return 0;
338 }
339
340 bool unit_may_gc(Unit *u) {
341 UnitActiveState state;
342 int r;
343
344 assert(u);
345
346 /* Checks whether the unit is ready to be unloaded for garbage collection.
347 * Returns true when the unit may be collected, and false if there's some
348 * reason to keep it loaded.
349 *
350 * References from other units are *not* checked here. Instead, this is done
351 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
352 */
353
354 if (u->job)
355 return false;
356
357 if (u->nop_job)
358 return false;
359
360 state = unit_active_state(u);
361
362         /* If the unit is inactive or failed, and no job is queued for it, then release its runtime resources */
363 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
364 UNIT_VTABLE(u)->release_resources)
365 UNIT_VTABLE(u)->release_resources(u);
366
367 if (u->perpetual)
368 return false;
369
370 if (sd_bus_track_count(u->bus_track) > 0)
371 return false;
372
373 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
374 switch (u->collect_mode) {
375
376 case COLLECT_INACTIVE:
377 if (state != UNIT_INACTIVE)
378 return false;
379
380 break;
381
382 case COLLECT_INACTIVE_OR_FAILED:
383 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
384 return false;
385
386 break;
387
388 default:
389 assert_not_reached("Unknown garbage collection mode");
390 }
391
392 if (u->cgroup_path) {
393 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
394 * around. Units with active processes should never be collected. */
395
396 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
397 if (r < 0)
398 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
399 if (r <= 0)
400 return false;
401 }
402
403 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
404 return false;
405
406 return true;
407 }
408
409 void unit_add_to_load_queue(Unit *u) {
410 assert(u);
411 assert(u->type != _UNIT_TYPE_INVALID);
412
413 if (u->load_state != UNIT_STUB || u->in_load_queue)
414 return;
415
416 LIST_PREPEND(load_queue, u->manager->load_queue, u);
417 u->in_load_queue = true;
418 }
419
420 void unit_add_to_cleanup_queue(Unit *u) {
421 assert(u);
422
423 if (u->in_cleanup_queue)
424 return;
425
426 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
427 u->in_cleanup_queue = true;
428 }
429
430 void unit_add_to_gc_queue(Unit *u) {
431 assert(u);
432
433 if (u->in_gc_queue || u->in_cleanup_queue)
434 return;
435
436 if (!unit_may_gc(u))
437 return;
438
439 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
440 u->in_gc_queue = true;
441 }
442
443 void unit_add_to_dbus_queue(Unit *u) {
444 assert(u);
445 assert(u->type != _UNIT_TYPE_INVALID);
446
447 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
448 return;
449
450 /* Shortcut things if nobody cares */
451 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
452 sd_bus_track_count(u->bus_track) <= 0 &&
453 set_isempty(u->manager->private_buses)) {
454 u->sent_dbus_new_signal = true;
455 return;
456 }
457
458 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
459 u->in_dbus_queue = true;
460 }
461
462 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
463 assert(u);
464
465 if (u->in_stop_when_unneeded_queue)
466 return;
467
468 if (!u->stop_when_unneeded)
469 return;
470
471 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
472 return;
473
474 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
475 u->in_stop_when_unneeded_queue = true;
476 }
477
478 static void bidi_set_free(Unit *u, Hashmap *h) {
479 Unit *other;
480 Iterator i;
481 void *v;
482
483 assert(u);
484
485 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
486
487 HASHMAP_FOREACH_KEY(v, other, h, i) {
488 UnitDependency d;
489
490 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
491 hashmap_remove(other->dependencies[d], u);
492
493 unit_add_to_gc_queue(other);
494 }
495
496 hashmap_free(h);
497 }
498
499 static void unit_remove_transient(Unit *u) {
500 char **i;
501
502 assert(u);
503
504 if (!u->transient)
505 return;
506
507 if (u->fragment_path)
508 (void) unlink(u->fragment_path);
509
510 STRV_FOREACH(i, u->dropin_paths) {
511 _cleanup_free_ char *p = NULL, *pp = NULL;
512
513 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
514 if (!p)
515 continue;
516
517 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
518 if (!pp)
519 continue;
520
521 /* Only drop transient drop-ins */
522 if (!path_equal(u->manager->lookup_paths.transient, pp))
523 continue;
524
525 (void) unlink(*i);
526 (void) rmdir(p);
527 }
528 }
529
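/* Drops the unit from the manager's units_requiring_mounts_for map. A path is
 * registered there under each of its prefixes, e.g. "/var/lib/foo" under "/",
 * "/var", "/var/lib" and "/var/lib/foo", so we walk the same prefixes here and
 * remove ourselves from each per-prefix set, freeing sets that become empty. */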
530 static void unit_free_requires_mounts_for(Unit *u) {
531 assert(u);
532
533 for (;;) {
534 _cleanup_free_ char *path;
535
536 path = hashmap_steal_first_key(u->requires_mounts_for);
537 if (!path)
538 break;
539 else {
540 char s[strlen(path) + 1];
541
542 PATH_FOREACH_PREFIX_MORE(s, path) {
543 char *y;
544 Set *x;
545
546 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
547 if (!x)
548 continue;
549
550 (void) set_remove(x, u);
551
552 if (set_isempty(x)) {
553 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
554 free(y);
555 set_free(x);
556 }
557 }
558 }
559 }
560
561 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
562 }
563
564 static void unit_done(Unit *u) {
565 ExecContext *ec;
566 CGroupContext *cc;
567
568 assert(u);
569
570 if (u->type < 0)
571 return;
572
573 if (UNIT_VTABLE(u)->done)
574 UNIT_VTABLE(u)->done(u);
575
576 ec = unit_get_exec_context(u);
577 if (ec)
578 exec_context_done(ec);
579
580 cc = unit_get_cgroup_context(u);
581 if (cc)
582 cgroup_context_done(cc);
583 }
584
585 void unit_free(Unit *u) {
586 UnitDependency d;
587 Iterator i;
588 char *t;
589
590 if (!u)
591 return;
592
593 if (UNIT_ISSET(u->slice)) {
594 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
595 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
596
597 /* And make sure the parent is realized again, updating cgroup memberships */
598 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
599 }
600
601 u->transient_file = safe_fclose(u->transient_file);
602
603 if (!MANAGER_IS_RELOADING(u->manager))
604 unit_remove_transient(u);
605
606 bus_unit_send_removed_signal(u);
607
608 unit_done(u);
609
610 unit_dequeue_rewatch_pids(u);
611
612 sd_bus_slot_unref(u->match_bus_slot);
613 sd_bus_track_unref(u->bus_track);
614 u->deserialized_refs = strv_free(u->deserialized_refs);
615
616 unit_free_requires_mounts_for(u);
617
618 SET_FOREACH(t, u->names, i)
619 hashmap_remove_value(u->manager->units, t, u);
620
621 if (!sd_id128_is_null(u->invocation_id))
622 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
623
624 if (u->job) {
625 Job *j = u->job;
626 job_uninstall(j);
627 job_free(j);
628 }
629
630 if (u->nop_job) {
631 Job *j = u->nop_job;
632 job_uninstall(j);
633 job_free(j);
634 }
635
636 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
637 bidi_set_free(u, u->dependencies[d]);
638
639 if (u->on_console)
640 manager_unref_console(u->manager);
641
642 unit_release_cgroup(u);
643
644 if (!MANAGER_IS_RELOADING(u->manager))
645 unit_unlink_state_files(u);
646
647 unit_unref_uid_gid(u, false);
648
649 (void) manager_update_failed_units(u->manager, u, false);
650 set_remove(u->manager->startup_units, u);
651
652 unit_unwatch_all_pids(u);
653
654 unit_ref_unset(&u->slice);
655 while (u->refs_by_target)
656 unit_ref_unset(u->refs_by_target);
657
658 if (u->type != _UNIT_TYPE_INVALID)
659 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
660
661 if (u->in_load_queue)
662 LIST_REMOVE(load_queue, u->manager->load_queue, u);
663
664 if (u->in_dbus_queue)
665 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
666
667 if (u->in_gc_queue)
668 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
669
670 if (u->in_cgroup_realize_queue)
671 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
672
673 if (u->in_cgroup_empty_queue)
674 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
675
676 if (u->in_cleanup_queue)
677 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
678
679 if (u->in_target_deps_queue)
680 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
681
682 if (u->in_stop_when_unneeded_queue)
683 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
684
685 safe_close(u->ip_accounting_ingress_map_fd);
686 safe_close(u->ip_accounting_egress_map_fd);
687
688 safe_close(u->ipv4_allow_map_fd);
689 safe_close(u->ipv6_allow_map_fd);
690 safe_close(u->ipv4_deny_map_fd);
691 safe_close(u->ipv6_deny_map_fd);
692
693 bpf_program_unref(u->ip_bpf_ingress);
694 bpf_program_unref(u->ip_bpf_ingress_installed);
695 bpf_program_unref(u->ip_bpf_egress);
696 bpf_program_unref(u->ip_bpf_egress_installed);
697
698 set_free(u->ip_bpf_custom_ingress);
699 set_free(u->ip_bpf_custom_egress);
700 set_free(u->ip_bpf_custom_ingress_installed);
701 set_free(u->ip_bpf_custom_egress_installed);
702
703 bpf_program_unref(u->bpf_device_control_installed);
704
705 condition_free_list(u->conditions);
706 condition_free_list(u->asserts);
707
708 free(u->description);
709 strv_free(u->documentation);
710 free(u->fragment_path);
711 free(u->source_path);
712 strv_free(u->dropin_paths);
713 free(u->instance);
714
715 free(u->job_timeout_reboot_arg);
716
717 set_free_free(u->names);
718
719 free(u->reboot_arg);
720
721 free(u);
722 }
723
724 UnitActiveState unit_active_state(Unit *u) {
725 assert(u);
726
727 if (u->load_state == UNIT_MERGED)
728 return unit_active_state(unit_follow_merge(u));
729
730 /* After a reload it might happen that a unit is not correctly
731 * loaded but still has a process around. That's why we won't
732 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
733
734 return UNIT_VTABLE(u)->active_state(u);
735 }
736
737 const char* unit_sub_state_to_string(Unit *u) {
738 assert(u);
739
740 return UNIT_VTABLE(u)->sub_state_to_string(u);
741 }
742
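/* Helpers for merging: move all entries of *other into *s; if *s is not
 * allocated yet, simply steal the whole set (or hashmap) instead of moving
 * entry by entry. */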
743 static int set_complete_move(Set **s, Set **other) {
744 assert(s);
745 assert(other);
746
747         if (!*other)
748 return 0;
749
750 if (*s)
751 return set_move(*s, *other);
752 else
753 *s = TAKE_PTR(*other);
754
755 return 0;
756 }
757
758 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
759 assert(s);
760 assert(other);
761
762 if (!*other)
763 return 0;
764
765 if (*s)
766 return hashmap_move(*s, *other);
767 else
768 *s = TAKE_PTR(*other);
769
770 return 0;
771 }
772
773 static int merge_names(Unit *u, Unit *other) {
774 char *t;
775 Iterator i;
776 int r;
777
778 assert(u);
779 assert(other);
780
781 r = set_complete_move(&u->names, &other->names);
782 if (r < 0)
783 return r;
784
785 set_free_free(other->names);
786 other->names = NULL;
787 other->id = NULL;
788
789 SET_FOREACH(t, u->names, i)
790 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
791
792 return 0;
793 }
794
795 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
796 unsigned n_reserve;
797
798 assert(u);
799 assert(other);
800 assert(d < _UNIT_DEPENDENCY_MAX);
801
802 /*
803 * If u does not have this dependency set allocated, there is no need
804 * to reserve anything. In that case other's set will be transferred
805 * as a whole to u by complete_move().
806 */
807 if (!u->dependencies[d])
808 return 0;
809
810 /* merge_dependencies() will skip a u-on-u dependency */
811 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
812
813 return hashmap_reserve(u->dependencies[d], n_reserve);
814 }
815
816 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
817 Iterator i;
818 Unit *back;
819 void *v;
820 int r;
821
822 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
823
824 assert(u);
825 assert(other);
826 assert(d < _UNIT_DEPENDENCY_MAX);
827
828 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
829 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
830 UnitDependency k;
831
832                 /* Let's now iterate through the dependencies of that dependent unit, looking for
833                  * pointers back, and let's fix them up, to instead point to 'u'. */
834
835 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
836 if (back == u) {
837 /* Do not add dependencies between u and itself. */
838 if (hashmap_remove(back->dependencies[k], other))
839 maybe_warn_about_dependency(u, other_id, k);
840 } else {
841 UnitDependencyInfo di_u, di_other, di_merged;
842
843 /* Let's drop this dependency between "back" and "other", and let's create it between
844 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
845 * and any such dependency which might already exist */
846
847 di_other.data = hashmap_get(back->dependencies[k], other);
848 if (!di_other.data)
849 continue; /* dependency isn't set, let's try the next one */
850
851 di_u.data = hashmap_get(back->dependencies[k], u);
852
853 di_merged = (UnitDependencyInfo) {
854 .origin_mask = di_u.origin_mask | di_other.origin_mask,
855 .destination_mask = di_u.destination_mask | di_other.destination_mask,
856 };
857
858 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
859 if (r < 0)
860 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
861 assert(r >= 0);
862
863 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
864 }
865 }
866
867 }
868
869 /* Also do not move dependencies on u to itself */
870 back = hashmap_remove(other->dependencies[d], u);
871 if (back)
872 maybe_warn_about_dependency(u, other_id, d);
873
874 /* The move cannot fail. The caller must have performed a reservation. */
875 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
876
877 other->dependencies[d] = hashmap_free(other->dependencies[d]);
878 }
879
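/* Merges the unit 'other' into 'u': all names, references and dependencies of
 * 'other' are transferred to 'u', and 'other' is left behind as a UNIT_MERGED
 * stub whose merged_into pointer unit_follow_merge() chases. */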
880 int unit_merge(Unit *u, Unit *other) {
881 UnitDependency d;
882 const char *other_id = NULL;
883 int r;
884
885 assert(u);
886 assert(other);
887 assert(u->manager == other->manager);
888 assert(u->type != _UNIT_TYPE_INVALID);
889
890 other = unit_follow_merge(other);
891
892 if (other == u)
893 return 0;
894
895 if (u->type != other->type)
896 return -EINVAL;
897
898 if (!u->instance != !other->instance)
899 return -EINVAL;
900
901 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
902 return -EEXIST;
903
904 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
905 return -EEXIST;
906
907 if (other->job)
908 return -EEXIST;
909
910 if (other->nop_job)
911 return -EEXIST;
912
913 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
914 return -EEXIST;
915
916 if (other->id)
917 other_id = strdupa(other->id);
918
919 /* Make reservations to ensure merge_dependencies() won't fail */
920 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
921 r = reserve_dependencies(u, other, d);
922 /*
923                  * We don't roll back reservations if we fail. We don't have
924 * a way to undo reservations. A reservation is not a leak.
925 */
926 if (r < 0)
927 return r;
928 }
929
930 /* Merge names */
931 r = merge_names(u, other);
932 if (r < 0)
933 return r;
934
935 /* Redirect all references */
936 while (other->refs_by_target)
937 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
938
939 /* Merge dependencies */
940 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
941 merge_dependencies(u, other, other_id, d);
942
943 other->load_state = UNIT_MERGED;
944 other->merged_into = u;
945
946 /* If there is still some data attached to the other node, we
947 * don't need it anymore, and can free it. */
948 if (other->load_state != UNIT_STUB)
949 if (UNIT_VTABLE(other)->done)
950 UNIT_VTABLE(other)->done(other);
951
952 unit_add_to_dbus_queue(u);
953 unit_add_to_cleanup_queue(other);
954
955 return 0;
956 }
957
958 int unit_merge_by_name(Unit *u, const char *name) {
959 _cleanup_free_ char *s = NULL;
960 Unit *other;
961 int r;
962
963 /* Either add name to u, or if a unit with name already exists, merge it with u.
964 * If name is a template, do the same for name@instance, where instance is u's instance. */
965
966 assert(u);
967 assert(name);
968
969 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
970 if (!u->instance)
971 return -EINVAL;
972
973 r = unit_name_replace_instance(name, u->instance, &s);
974 if (r < 0)
975 return r;
976
977 name = s;
978 }
979
980 other = manager_get_unit(u->manager, name);
981 if (other)
982 return unit_merge(u, other);
983
984 return unit_add_name(u, name);
985 }
986
987 Unit* unit_follow_merge(Unit *u) {
988 assert(u);
989
990 while (u->load_state == UNIT_MERGED)
991 assert_se(u = u->merged_into);
992
993 return u;
994 }
995
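/* Adds the implicit dependencies that follow from an ExecContext: mount
 * dependencies on the configured working/root directories and on all
 * RuntimeDirectory=/StateDirectory=-style paths, plus (in the system manager)
 * ordering after tmpfiles-setup for PrivateTmp= and after journald's socket
 * when output is connected to the journal, syslog or kmsg. */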
996 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
997 ExecDirectoryType dt;
998 char **dp;
999 int r;
1000
1001 assert(u);
1002 assert(c);
1003
1004 if (c->working_directory && !c->working_directory_missing_ok) {
1005 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1006 if (r < 0)
1007 return r;
1008 }
1009
1010 if (c->root_directory) {
1011 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1012 if (r < 0)
1013 return r;
1014 }
1015
1016 if (c->root_image) {
1017 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1018 if (r < 0)
1019 return r;
1020 }
1021
1022 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1023 if (!u->manager->prefix[dt])
1024 continue;
1025
1026 STRV_FOREACH(dp, c->directories[dt].paths) {
1027 _cleanup_free_ char *p;
1028
1029 p = path_join(u->manager->prefix[dt], *dp);
1030 if (!p)
1031 return -ENOMEM;
1032
1033 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1034 if (r < 0)
1035 return r;
1036 }
1037 }
1038
1039 if (!MANAGER_IS_SYSTEM(u->manager))
1040 return 0;
1041
1042 if (c->private_tmp) {
1043 const char *p;
1044
1045 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1046 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1047 if (r < 0)
1048 return r;
1049 }
1050
1051 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1052 if (r < 0)
1053 return r;
1054 }
1055
1056 if (!IN_SET(c->std_output,
1057 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1058 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1059 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1060 !IN_SET(c->std_error,
1061 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1062 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1063 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1064 return 0;
1065
1066 /* If syslog or kernel logging is requested, make sure our own
1067 * logging daemon is run first. */
1068
1069 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1070 if (r < 0)
1071 return r;
1072
1073 return 0;
1074 }
1075
1076 const char *unit_description(Unit *u) {
1077 assert(u);
1078
1079 if (u->description)
1080 return u->description;
1081
1082 return strna(u->id);
1083 }
1084
1085 const char *unit_status_string(Unit *u) {
1086 assert(u);
1087
1088 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1089 return u->id;
1090
1091 return unit_description(u);
1092 }
1093
1094 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1095 const struct {
1096 UnitDependencyMask mask;
1097 const char *name;
1098 } table[] = {
1099 { UNIT_DEPENDENCY_FILE, "file" },
1100 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1101 { UNIT_DEPENDENCY_DEFAULT, "default" },
1102 { UNIT_DEPENDENCY_UDEV, "udev" },
1103 { UNIT_DEPENDENCY_PATH, "path" },
1104 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1105 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1106 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1107 };
1108 size_t i;
1109
1110 assert(f);
1111 assert(kind);
1112 assert(space);
1113
1114 for (i = 0; i < ELEMENTSOF(table); i++) {
1115
1116 if (mask == 0)
1117 break;
1118
1119 if (FLAGS_SET(mask, table[i].mask)) {
1120 if (*space)
1121 fputc(' ', f);
1122 else
1123 *space = true;
1124
1125 fputs(kind, f);
1126 fputs("-", f);
1127 fputs(table[i].name, f);
1128
1129 mask &= ~table[i].mask;
1130 }
1131 }
1132
1133 assert(mask == 0);
1134 }
1135
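/* Dumps the complete internal state of the unit to 'f' in human-readable
 * form, for debugging. */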
1136 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1137 char *t, **j;
1138 UnitDependency d;
1139 Iterator i;
1140 const char *prefix2;
1141 char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1142 Unit *following;
1143 _cleanup_set_free_ Set *following_set = NULL;
1144 const char *n;
1145 CGroupMask m;
1146 int r;
1147
1148 assert(u);
1149 assert(u->type >= 0);
1150
1151 prefix = strempty(prefix);
1152 prefix2 = strjoina(prefix, "\t");
1153
1154 fprintf(f,
1155 "%s-> Unit %s:\n",
1156 prefix, u->id);
1157
1158 SET_FOREACH(t, u->names, i)
1159 if (!streq(t, u->id))
1160 fprintf(f, "%s\tAlias: %s\n", prefix, t);
1161
1162 fprintf(f,
1163 "%s\tDescription: %s\n"
1164 "%s\tInstance: %s\n"
1165 "%s\tUnit Load State: %s\n"
1166 "%s\tUnit Active State: %s\n"
1167 "%s\tState Change Timestamp: %s\n"
1168 "%s\tInactive Exit Timestamp: %s\n"
1169 "%s\tActive Enter Timestamp: %s\n"
1170 "%s\tActive Exit Timestamp: %s\n"
1171 "%s\tInactive Enter Timestamp: %s\n"
1172 "%s\tMay GC: %s\n"
1173 "%s\tNeed Daemon Reload: %s\n"
1174 "%s\tTransient: %s\n"
1175 "%s\tPerpetual: %s\n"
1176 "%s\tGarbage Collection Mode: %s\n"
1177 "%s\tSlice: %s\n"
1178 "%s\tCGroup: %s\n"
1179 "%s\tCGroup realized: %s\n",
1180 prefix, unit_description(u),
1181 prefix, strna(u->instance),
1182 prefix, unit_load_state_to_string(u->load_state),
1183 prefix, unit_active_state_to_string(unit_active_state(u)),
1184 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1185 prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1186 prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1187 prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1188 prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1189 prefix, yes_no(unit_may_gc(u)),
1190 prefix, yes_no(unit_need_daemon_reload(u)),
1191 prefix, yes_no(u->transient),
1192 prefix, yes_no(u->perpetual),
1193 prefix, collect_mode_to_string(u->collect_mode),
1194 prefix, strna(unit_slice_name(u)),
1195 prefix, strna(u->cgroup_path),
1196 prefix, yes_no(u->cgroup_realized));
1197
1198 if (u->cgroup_realized_mask != 0) {
1199 _cleanup_free_ char *s = NULL;
1200 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1201 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1202 }
1203
1204 if (u->cgroup_enabled_mask != 0) {
1205 _cleanup_free_ char *s = NULL;
1206 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1207 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1208 }
1209
1210 m = unit_get_own_mask(u);
1211 if (m != 0) {
1212 _cleanup_free_ char *s = NULL;
1213 (void) cg_mask_to_string(m, &s);
1214 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1215 }
1216
1217 m = unit_get_members_mask(u);
1218 if (m != 0) {
1219 _cleanup_free_ char *s = NULL;
1220 (void) cg_mask_to_string(m, &s);
1221 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1222 }
1223
1224 m = unit_get_delegate_mask(u);
1225 if (m != 0) {
1226 _cleanup_free_ char *s = NULL;
1227 (void) cg_mask_to_string(m, &s);
1228 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1229 }
1230
1231 if (!sd_id128_is_null(u->invocation_id))
1232 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1233 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1234
1235 STRV_FOREACH(j, u->documentation)
1236 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1237
1238 following = unit_following(u);
1239 if (following)
1240 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1241
1242 r = unit_following_set(u, &following_set);
1243 if (r >= 0) {
1244 Unit *other;
1245
1246 SET_FOREACH(other, following_set, i)
1247 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1248 }
1249
1250 if (u->fragment_path)
1251 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1252
1253 if (u->source_path)
1254 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1255
1256 STRV_FOREACH(j, u->dropin_paths)
1257 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1258
1259 if (u->failure_action != EMERGENCY_ACTION_NONE)
1260 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1261 if (u->failure_action_exit_status >= 0)
1262 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1263 if (u->success_action != EMERGENCY_ACTION_NONE)
1264 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1265 if (u->success_action_exit_status >= 0)
1266 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1267
1268 if (u->job_timeout != USEC_INFINITY)
1269 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1270
1271 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1272 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1273
1274 if (u->job_timeout_reboot_arg)
1275 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1276
1277 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1278 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1279
1280 if (dual_timestamp_is_set(&u->condition_timestamp))
1281 fprintf(f,
1282 "%s\tCondition Timestamp: %s\n"
1283 "%s\tCondition Result: %s\n",
1284 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1285 prefix, yes_no(u->condition_result));
1286
1287 if (dual_timestamp_is_set(&u->assert_timestamp))
1288 fprintf(f,
1289 "%s\tAssert Timestamp: %s\n"
1290 "%s\tAssert Result: %s\n",
1291 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1292 prefix, yes_no(u->assert_result));
1293
1294 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1295 UnitDependencyInfo di;
1296 Unit *other;
1297
1298 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1299 bool space = false;
1300
1301 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1302
1303 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1304 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1305
1306 fputs(")\n", f);
1307 }
1308 }
1309
1310 if (!hashmap_isempty(u->requires_mounts_for)) {
1311 UnitDependencyInfo di;
1312 const char *path;
1313
1314 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1315 bool space = false;
1316
1317 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1318
1319 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1320 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1321
1322 fputs(")\n", f);
1323 }
1324 }
1325
1326 if (u->load_state == UNIT_LOADED) {
1327
1328 fprintf(f,
1329 "%s\tStopWhenUnneeded: %s\n"
1330 "%s\tRefuseManualStart: %s\n"
1331 "%s\tRefuseManualStop: %s\n"
1332 "%s\tDefaultDependencies: %s\n"
1333 "%s\tOnFailureJobMode: %s\n"
1334 "%s\tIgnoreOnIsolate: %s\n",
1335 prefix, yes_no(u->stop_when_unneeded),
1336 prefix, yes_no(u->refuse_manual_start),
1337 prefix, yes_no(u->refuse_manual_stop),
1338 prefix, yes_no(u->default_dependencies),
1339 prefix, job_mode_to_string(u->on_failure_job_mode),
1340 prefix, yes_no(u->ignore_on_isolate));
1341
1342 if (UNIT_VTABLE(u)->dump)
1343 UNIT_VTABLE(u)->dump(u, f, prefix2);
1344
1345 } else if (u->load_state == UNIT_MERGED)
1346 fprintf(f,
1347 "%s\tMerged into: %s\n",
1348 prefix, u->merged_into->id);
1349 else if (u->load_state == UNIT_ERROR)
1350 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1351
1352 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1353 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1354
1355 if (u->job)
1356 job_dump(u->job, f, prefix2);
1357
1358 if (u->nop_job)
1359 job_dump(u->nop_job, f, prefix2);
1360 }
1361
1362 /* Common implementation for multiple backends */
1363 int unit_load_fragment_and_dropin(Unit *u) {
1364 int r;
1365
1366 assert(u);
1367
1368 /* Load a .{service,socket,...} file */
1369 r = unit_load_fragment(u);
1370 if (r < 0)
1371 return r;
1372
1373 if (u->load_state == UNIT_STUB)
1374 return -ENOENT;
1375
1376 /* Load drop-in directory data. If u is an alias, we might be reloading the
1377          * target unit needlessly. But we cannot be sure which drop-ins have already
1378 * been loaded and which not, at least without doing complicated book-keeping,
1379 * so let's always reread all drop-ins. */
1380 return unit_load_dropin(unit_follow_merge(u));
1381 }
1382
1383 /* Common implementation for multiple backends */
1384 int unit_load_fragment_and_dropin_optional(Unit *u) {
1385 int r;
1386
1387 assert(u);
1388
1389 /* Same as unit_load_fragment_and_dropin(), but whether
1390 * something can be loaded or not doesn't matter. */
1391
1392 /* Load a .service/.socket/.slice/… file */
1393 r = unit_load_fragment(u);
1394 if (r < 0)
1395 return r;
1396
1397 if (u->load_state == UNIT_STUB)
1398 u->load_state = UNIT_LOADED;
1399
1400 /* Load drop-in directory data */
1401 return unit_load_dropin(unit_follow_merge(u));
1402 }
1403
1404 void unit_add_to_target_deps_queue(Unit *u) {
1405 Manager *m = u->manager;
1406
1407 assert(u);
1408
1409 if (u->in_target_deps_queue)
1410 return;
1411
1412 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1413 u->in_target_deps_queue = true;
1414 }
1415
1416 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1417 assert(u);
1418 assert(target);
1419
1420 if (target->type != UNIT_TARGET)
1421 return 0;
1422
1423         /* Only add the dependency if both units are loaded, so that
1424          * the loop check below is reliable */
1425 if (u->load_state != UNIT_LOADED ||
1426 target->load_state != UNIT_LOADED)
1427 return 0;
1428
1429 /* If either side wants no automatic dependencies, then let's
1430 * skip this */
1431 if (!u->default_dependencies ||
1432 !target->default_dependencies)
1433 return 0;
1434
1435 /* Don't create loops */
1436 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1437 return 0;
1438
1439 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1440 }
1441
1442 static int unit_add_slice_dependencies(Unit *u) {
1443 UnitDependencyMask mask;
1444 assert(u);
1445
1446 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1447 return 0;
1448
1449 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1450 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1451 relationship). */
1452 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1453
1454 if (UNIT_ISSET(u->slice))
1455 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1456
1457 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1458 return 0;
1459
1460 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1461 }
1462
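/* For every path in RequiresMountsFor=, pulls in the .mount units covering
 * the path and each of its parent directories; e.g. "/var/lib/foo" is matched
 * against var.mount, var-lib.mount and var-lib-foo.mount. Ordering (After=) is
 * always added, a hard requirement (Requires=) only if the mount unit has a
 * fragment file of its own. */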
1463 static int unit_add_mount_dependencies(Unit *u) {
1464 UnitDependencyInfo di;
1465 const char *path;
1466 Iterator i;
1467 int r;
1468
1469 assert(u);
1470
1471 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1472 char prefix[strlen(path) + 1];
1473
1474 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1475 _cleanup_free_ char *p = NULL;
1476 Unit *m;
1477
1478 r = unit_name_from_path(prefix, ".mount", &p);
1479 if (r < 0)
1480 return r;
1481
1482 m = manager_get_unit(u->manager, p);
1483 if (!m) {
1484 /* Make sure to load the mount unit if
1485                                  * it exists. If so, the dependencies
1486 * on this unit will be added later
1487 * during the loading of the mount
1488 * unit. */
1489 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1490 continue;
1491 }
1492 if (m == u)
1493 continue;
1494
1495 if (m->load_state != UNIT_LOADED)
1496 continue;
1497
1498 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1499 if (r < 0)
1500 return r;
1501
1502 if (m->fragment_path) {
1503 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1504 if (r < 0)
1505 return r;
1506 }
1507 }
1508 }
1509
1510 return 0;
1511 }
1512
1513 static int unit_add_startup_units(Unit *u) {
1514 CGroupContext *c;
1515 int r;
1516
1517 c = unit_get_cgroup_context(u);
1518 if (!c)
1519 return 0;
1520
1521 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1522 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1523 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1524 return 0;
1525
1526 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1527 if (r < 0)
1528 return r;
1529
1530 return set_put(u->manager->startup_units, u);
1531 }
1532
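/* Loads the unit from disk (or finalizes a transient unit) and sets up the
 * implicit slice, mount and target dependencies. On failure the unit ends up
 * in UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR state, depending on what
 * went wrong. */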
1533 int unit_load(Unit *u) {
1534 int r;
1535
1536 assert(u);
1537
1538 if (u->in_load_queue) {
1539 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1540 u->in_load_queue = false;
1541 }
1542
1543 if (u->type == _UNIT_TYPE_INVALID)
1544 return -EINVAL;
1545
1546 if (u->load_state != UNIT_STUB)
1547 return 0;
1548
1549 if (u->transient_file) {
1550 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1551 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1552
1553 r = fflush_and_check(u->transient_file);
1554 if (r < 0)
1555 goto fail;
1556
1557 u->transient_file = safe_fclose(u->transient_file);
1558 u->fragment_mtime = now(CLOCK_REALTIME);
1559 }
1560
1561 if (UNIT_VTABLE(u)->load) {
1562 r = UNIT_VTABLE(u)->load(u);
1563 if (r < 0)
1564 goto fail;
1565 }
1566
1567 if (u->load_state == UNIT_STUB) {
1568 r = -ENOENT;
1569 goto fail;
1570 }
1571
1572 if (u->load_state == UNIT_LOADED) {
1573 unit_add_to_target_deps_queue(u);
1574
1575 r = unit_add_slice_dependencies(u);
1576 if (r < 0)
1577 goto fail;
1578
1579 r = unit_add_mount_dependencies(u);
1580 if (r < 0)
1581 goto fail;
1582
1583 r = unit_add_startup_units(u);
1584 if (r < 0)
1585 goto fail;
1586
1587 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1588                         log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
1589 r = -ENOEXEC;
1590 goto fail;
1591 }
1592
1593 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1594                         log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, so it has no effect.");
1595
1596 /* We finished loading, let's ensure our parents recalculate the members mask */
1597 unit_invalidate_cgroup_members_masks(u);
1598 }
1599
1600 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1601
1602 unit_add_to_dbus_queue(unit_follow_merge(u));
1603 unit_add_to_gc_queue(u);
1604
1605 return 0;
1606
1607 fail:
1608 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1609 * return ENOEXEC to ensure units are placed in this state after loading */
1610
1611 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1612 r == -ENOEXEC ? UNIT_BAD_SETTING :
1613 UNIT_ERROR;
1614 u->load_error = r;
1615
1616 unit_add_to_dbus_queue(u);
1617 unit_add_to_gc_queue(u);
1618
1619 return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1620 }
1621
1622 _printf_(7, 8)
1623 static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1624 Unit *u = userdata;
1625 va_list ap;
1626 int r;
1627
1628 va_start(ap, format);
1629 if (u)
1630 r = log_object_internalv(level, error, file, line, func,
1631 u->manager->unit_log_field,
1632 u->id,
1633 u->manager->invocation_log_field,
1634 u->invocation_id_string,
1635 format, ap);
1636 else
1637 r = log_internalv(level, error, file, line, func, format, ap);
1638 va_end(ap);
1639
1640 return r;
1641 }
1642
1643 static bool unit_test_condition(Unit *u) {
1644 assert(u);
1645
1646 dual_timestamp_get(&u->condition_timestamp);
1647 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1648
1649 unit_add_to_dbus_queue(u);
1650
1651 return u->condition_result;
1652 }
1653
1654 static bool unit_test_assert(Unit *u) {
1655 assert(u);
1656
1657 dual_timestamp_get(&u->assert_timestamp);
1658 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1659
1660 unit_add_to_dbus_queue(u);
1661
1662 return u->assert_result;
1663 }
1664
1665 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1666 const char *d;
1667
1668 d = unit_status_string(u);
1669 if (log_get_show_color())
1670 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1671
1672 DISABLE_WARNING_FORMAT_NONLITERAL;
1673 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1674 REENABLE_WARNING;
1675 }
1676
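/* Checks the unit's start rate limit. Returns 0 if starting may proceed, and
 * -ECANCELED if the limit was hit, in which case the configured
 * StartLimitAction= is triggered as a side effect. */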
1677 int unit_test_start_limit(Unit *u) {
1678 const char *reason;
1679
1680 assert(u);
1681
1682 if (ratelimit_below(&u->start_ratelimit)) {
1683 u->start_limit_hit = false;
1684 return 0;
1685 }
1686
1687 log_unit_warning(u, "Start request repeated too quickly.");
1688 u->start_limit_hit = true;
1689
1690 reason = strjoina("unit ", u->id, " failed");
1691
1692 emergency_action(u->manager, u->start_limit_action,
1693 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1694 u->reboot_arg, -1, reason);
1695
1696 return -ECANCELED;
1697 }
1698
1699 bool unit_shall_confirm_spawn(Unit *u) {
1700 assert(u);
1701
1702 if (manager_is_confirm_spawn_disabled(u->manager))
1703 return false;
1704
1705         /* For some reason, units remaining in the same process group
1706 * as PID 1 fail to acquire the console even if it's not used
1707 * by any process. So skip the confirmation question for them. */
1708 return !unit_get_exec_context(u)->same_pgrp;
1709 }
1710
1711 static bool unit_verify_deps(Unit *u) {
1712 Unit *other;
1713 Iterator j;
1714 void *v;
1715
1716 assert(u);
1717
1718         /* Checks whether all BindsTo= dependencies of this unit that are combined with After= are fulfilled. We do
1719          * not check Requires= or Requisite= here, as they only should have an effect on the job processing, but do
1720          * not have any effect afterwards. Nor do we check BindsTo= dependencies that are not used in conjunction
1721          * with After=, as for them any such check would make things entirely racy. */
1722
1723 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1724
1725 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1726 continue;
1727
1728 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1729 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1730 return false;
1731 }
1732 }
1733
1734 return true;
1735 }
1736
1737 /* Errors that aren't really errors:
1738 * -EALREADY: Unit is already started.
1739  * -ECOMM:      Condition failed.
1740  * -EAGAIN:     An operation is already in progress. Retry later.
1741  *
1742  * Errors that are real errors:
1743  * -EBADR:      This unit type does not support starting.
1744  * -ECANCELED:  Start limit hit, too many requests for now.
1745  * -EPROTO:     Assert failed.
1746  * -EINVAL:     Unit not loaded.
1747  * -EOPNOTSUPP: Unit type not supported.
1748  * -ENOLINK:    The necessary dependencies are not fulfilled.
1749  * -ESTALE:     This unit has been started before and can't be started a second time.
1750  * -ENOENT:     This is a triggering unit and the unit to trigger is not loaded.
1751 */
1752 int unit_start(Unit *u) {
1753 UnitActiveState state;
1754 Unit *following;
1755
1756 assert(u);
1757
1758 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1759 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1760 * waiting is finished. */
1761 state = unit_active_state(u);
1762 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1763 return -EALREADY;
1764 if (state == UNIT_MAINTENANCE)
1765 return -EAGAIN;
1766
1767 /* Units that aren't loaded cannot be started */
1768 if (u->load_state != UNIT_LOADED)
1769 return -EINVAL;
1770
1771 /* Refuse starting scope units more than once */
1772 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1773 return -ESTALE;
1774
1775 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1776 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1777 * recheck the condition in that case. */
1778 if (state != UNIT_ACTIVATING &&
1779 !unit_test_condition(u))
1780 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1781
1782 /* If the asserts failed, fail the entire job */
1783 if (state != UNIT_ACTIVATING &&
1784 !unit_test_assert(u))
1785 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1786
1787 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1788 * condition checks, so that we rather return condition check errors (which are usually not
1789 * considered a true failure) than "not supported" errors (which are considered a failure).
1790 */
1791 if (!unit_type_supported(u->type))
1792 return -EOPNOTSUPP;
1793
1794 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1795 * should have taken care of this already, but let's check this here again. After all, our
1796 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1797 if (!unit_verify_deps(u))
1798 return -ENOLINK;
1799
1800 /* Forward to the main object, if we aren't it. */
1801 following = unit_following(u);
1802 if (following) {
1803 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1804 return unit_start(following);
1805 }
1806
1807 /* If it is stopped, but we cannot start it, then fail */
1808 if (!UNIT_VTABLE(u)->start)
1809 return -EBADR;
1810
1811 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1812 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1813 * waits for a holdoff timer to elapse before it will start again. */
1814
1815 unit_add_to_dbus_queue(u);
1816
1817 return UNIT_VTABLE(u)->start(u);
1818 }
1819
1820 bool unit_can_start(Unit *u) {
1821 assert(u);
1822
1823 if (u->load_state != UNIT_LOADED)
1824 return false;
1825
1826 if (!unit_type_supported(u->type))
1827 return false;
1828
1829 /* Scope units may be started only once */
1830 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1831 return false;
1832
1833 return !!UNIT_VTABLE(u)->start;
1834 }
1835
1836 bool unit_can_isolate(Unit *u) {
1837 assert(u);
1838
1839 return unit_can_start(u) &&
1840 u->allow_isolate;
1841 }
1842
1843 /* Errors:
1844 * -EBADR: This unit type does not support stopping.
1845 * -EALREADY: Unit is already stopped.
1846 * -EAGAIN: An operation is already in progress. Retry later.
1847 */
1848 int unit_stop(Unit *u) {
1849 UnitActiveState state;
1850 Unit *following;
1851
1852 assert(u);
1853
1854 state = unit_active_state(u);
1855 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1856 return -EALREADY;
1857
1858 following = unit_following(u);
1859 if (following) {
1860 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1861 return unit_stop(following);
1862 }
1863
1864 if (!UNIT_VTABLE(u)->stop)
1865 return -EBADR;
1866
1867 unit_add_to_dbus_queue(u);
1868
1869 return UNIT_VTABLE(u)->stop(u);
1870 }
1871
1872 bool unit_can_stop(Unit *u) {
1873 assert(u);
1874
1875 if (!unit_type_supported(u->type))
1876 return false;
1877
1878 if (u->perpetual)
1879 return false;
1880
1881 return !!UNIT_VTABLE(u)->stop;
1882 }
1883
1884 /* Errors:
1885 * -EBADR: This unit type does not support reloading.
1886 * -ENOEXEC: Unit is not started.
1887 * -EAGAIN: An operation is already in progress. Retry later.
1888 */
1889 int unit_reload(Unit *u) {
1890 UnitActiveState state;
1891 Unit *following;
1892
1893 assert(u);
1894
1895 if (u->load_state != UNIT_LOADED)
1896 return -EINVAL;
1897
1898 if (!unit_can_reload(u))
1899 return -EBADR;
1900
1901 state = unit_active_state(u);
1902 if (state == UNIT_RELOADING)
1903 return -EAGAIN;
1904
1905 if (state != UNIT_ACTIVE) {
1906 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1907 return -ENOEXEC;
1908 }
1909
1910 following = unit_following(u);
1911 if (following) {
1912 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1913 return unit_reload(following);
1914 }
1915
1916 unit_add_to_dbus_queue(u);
1917
1918 if (!UNIT_VTABLE(u)->reload) {
1919 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1920 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1921 return 0;
1922 }
1923
1924 return UNIT_VTABLE(u)->reload(u);
1925 }
1926
1927 bool unit_can_reload(Unit *u) {
1928 assert(u);
1929
1930 if (UNIT_VTABLE(u)->can_reload)
1931 return UNIT_VTABLE(u)->can_reload(u);
1932
1933 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1934 return true;
1935
1936 return UNIT_VTABLE(u)->reload;
1937 }
1938
1939 bool unit_is_unneeded(Unit *u) {
1940 static const UnitDependency deps[] = {
1941 UNIT_REQUIRED_BY,
1942 UNIT_REQUISITE_OF,
1943 UNIT_WANTED_BY,
1944 UNIT_BOUND_BY,
1945 };
1946 size_t j;
1947
1948 assert(u);
1949
1950 if (!u->stop_when_unneeded)
1951 return false;
1952
1953 /* Don't clean up while the unit is transitioning or is even inactive. */
1954 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1955 return false;
1956 if (u->job)
1957 return false;
1958
1959 for (j = 0; j < ELEMENTSOF(deps); j++) {
1960 Unit *other;
1961 Iterator i;
1962 void *v;
1963
1964 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1965 * restart, then don't clean this one up. */
1966
1967 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1968 if (other->job)
1969 return false;
1970
1971 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1972 return false;
1973
1974 if (unit_will_restart(other))
1975 return false;
1976 }
1977 }
1978
1979 return true;
1980 }
1981
1982 static void check_unneeded_dependencies(Unit *u) {
1983
1984 static const UnitDependency deps[] = {
1985 UNIT_REQUIRES,
1986 UNIT_REQUISITE,
1987 UNIT_WANTS,
1988 UNIT_BINDS_TO,
1989 };
1990 size_t j;
1991
1992 assert(u);
1993
1994 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1995
1996 for (j = 0; j < ELEMENTSOF(deps); j++) {
1997 Unit *other;
1998 Iterator i;
1999 void *v;
2000
2001 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2002 unit_submit_to_stop_when_unneeded_queue(other);
2003 }
2004 }
2005
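/* If the unit is active but one of the units it is bound to (BindsTo=) has
 * stopped, enqueues a stop job for this unit, too. This is rate-limited via
 * auto_stop_ratelimit, so that a unit whose stop keeps failing cannot drag us
 * into a stop loop. */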
2006 static void unit_check_binds_to(Unit *u) {
2007 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2008 bool stop = false;
2009 Unit *other;
2010 Iterator i;
2011 void *v;
2012 int r;
2013
2014 assert(u);
2015
2016 if (u->job)
2017 return;
2018
2019 if (unit_active_state(u) != UNIT_ACTIVE)
2020 return;
2021
2022 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2023 if (other->job)
2024 continue;
2025
2026 if (!other->coldplugged)
2027 /* We might yet create a job for the other unit… */
2028 continue;
2029
2030 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2031 continue;
2032
2033 stop = true;
2034 break;
2035 }
2036
2037 if (!stop)
2038 return;
2039
2040 /* If stopping a unit fails continuously we might enter a stop
2041 * loop here, hence stop acting on the service being
2042 * unnecessary after a while. */
2043 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2044 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2045 return;
2046 }
2047
2048 assert(other);
2049 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2050
2051 /* A unit we need to run is gone. Sniff. Let's stop this. */
2052 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2053 if (r < 0)
2054 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2055 }
2056
2057 static void retroactively_start_dependencies(Unit *u) {
2058 Iterator i;
2059 Unit *other;
2060 void *v;
2061
2062 assert(u);
2063 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2064
2065 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2066 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2067 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2068 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2069
2070 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2071 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2072 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2073 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2074
2075 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2076 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2077 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2078 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2079
2080 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2081 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2082 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2083
2084 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2085 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2086 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2087 }
2088
2089 static void retroactively_stop_dependencies(Unit *u) {
2090 Unit *other;
2091 Iterator i;
2092 void *v;
2093
2094 assert(u);
2095 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2096
2097 /* Pull down units which are bound to us recursively if enabled */
2098 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2099 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2100 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2101 }
2102
2103 void unit_start_on_failure(Unit *u) {
2104 Unit *other;
2105 Iterator i;
2106 void *v;
2107 int r;
2108
2109 assert(u);
2110
2111         if (hashmap_isempty(u->dependencies[UNIT_ON_FAILURE]))
2112 return;
2113
2114 log_unit_info(u, "Triggering OnFailure= dependencies.");
2115
2116 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2117 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2118
2119 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2120 if (r < 0)
2121 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2122 }
2123 }
2124
2125 void unit_trigger_notify(Unit *u) {
2126 Unit *other;
2127 Iterator i;
2128 void *v;
2129
2130 assert(u);
2131
2132 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2133 if (UNIT_VTABLE(other)->trigger_notify)
2134 UNIT_VTABLE(other)->trigger_notify(other, u);
2135 }
2136
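/* Raise (but never lower) the log level: to INFO once the "mentionworthy" threshold for a resource has been
 * crossed, and to NOTICE once the "noticeworthy" one has. */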
2137 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2138 if (condition_notice && log_level > LOG_NOTICE)
2139 return LOG_NOTICE;
2140 if (condition_info && log_level > LOG_INFO)
2141 return LOG_INFO;
2142 return log_level;
2143 }
2144
2145 static int unit_log_resources(Unit *u) {
2146 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2147 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2148 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2149         int log_level = LOG_DEBUG; /* May be raised if resource consumption crosses a threshold */
2150 size_t n_message_parts = 0, n_iovec = 0;
2151 char* message_parts[1 + 2 + 2 + 1], *t;
2152 nsec_t nsec = NSEC_INFINITY;
2153 CGroupIPAccountingMetric m;
2154 size_t i;
2155 int r;
2156 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2157 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2158 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2159 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2160 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2161 };
2162 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2163 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2164 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2165 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2166 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2167 };
2168
2169 assert(u);
2170
2171 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2172          * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2173 * information and the complete data in structured fields. */
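        /* For illustration, a record emitted here might look roughly like this (all values made up):
         *
         *   MESSAGE=foo.service: Consumed 2.1s CPU time, read 1.2M from disk, written 104.0K to disk.
         *   CPU_USAGE_NSEC=2100000000
         *   IO_METRIC_READ_BYTES=1258291
         *   IO_METRIC_WRITE_BYTES=106496
         */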
2174
2175 (void) unit_get_cpu_usage(u, &nsec);
2176 if (nsec != NSEC_INFINITY) {
2177 char buf[FORMAT_TIMESPAN_MAX] = "";
2178
2179 /* Format the CPU time for inclusion in the structured log message */
2180 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2181 r = log_oom();
2182 goto finish;
2183 }
2184 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2185
2186 /* Format the CPU time for inclusion in the human language message string */
2187 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2188 t = strjoin("consumed ", buf, " CPU time");
2189 if (!t) {
2190 r = log_oom();
2191 goto finish;
2192 }
2193
2194 message_parts[n_message_parts++] = t;
2195
2196                 log_level = raise_level(log_level,
2197                                         nsec > MENTIONWORTHY_CPU_NSEC,
2198                                         nsec > NOTICEWORTHY_CPU_NSEC);
2199 }
2200
2201 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2202 char buf[FORMAT_BYTES_MAX] = "";
2203 uint64_t value = UINT64_MAX;
2204
2205 assert(io_fields[k]);
2206
2207 (void) unit_get_io_accounting(u, k, k > 0, &value);
2208 if (value == UINT64_MAX)
2209 continue;
2210
2211 have_io_accounting = true;
2212 if (value > 0)
2213 any_io = true;
2214
2215 /* Format IO accounting data for inclusion in the structured log message */
2216 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2217 r = log_oom();
2218 goto finish;
2219 }
2220 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2221
2222 /* Format the IO accounting data for inclusion in the human language message string, but only
2223 * for the bytes counters (and not for the operations counters) */
2224 if (k == CGROUP_IO_READ_BYTES) {
2225 assert(!rr);
2226 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2227 if (!rr) {
2228 r = log_oom();
2229 goto finish;
2230 }
2231 } else if (k == CGROUP_IO_WRITE_BYTES) {
2232 assert(!wr);
2233 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2234 if (!wr) {
2235 r = log_oom();
2236 goto finish;
2237 }
2238 }
2239
2240 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2241 log_level = raise_level(log_level,
2242 value > MENTIONWORTHY_IO_BYTES,
2243 value > NOTICEWORTHY_IO_BYTES);
2244 }
2245
2246 if (have_io_accounting) {
2247 if (any_io) {
2248 if (rr)
2249 message_parts[n_message_parts++] = TAKE_PTR(rr);
2250 if (wr)
2251 message_parts[n_message_parts++] = TAKE_PTR(wr);
2252
2253 } else {
2254 char *k;
2255
2256 k = strdup("no IO");
2257 if (!k) {
2258 r = log_oom();
2259 goto finish;
2260 }
2261
2262 message_parts[n_message_parts++] = k;
2263 }
2264 }
2265
2266 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2267 char buf[FORMAT_BYTES_MAX] = "";
2268 uint64_t value = UINT64_MAX;
2269
2270 assert(ip_fields[m]);
2271
2272 (void) unit_get_ip_accounting(u, m, &value);
2273 if (value == UINT64_MAX)
2274 continue;
2275
2276 have_ip_accounting = true;
2277 if (value > 0)
2278 any_traffic = true;
2279
2280 /* Format IP accounting data for inclusion in the structured log message */
2281 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2282 r = log_oom();
2283 goto finish;
2284 }
2285 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2286
2287 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2288 * bytes counters (and not for the packets counters) */
2289 if (m == CGROUP_IP_INGRESS_BYTES) {
2290 assert(!igress);
2291 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2292 if (!igress) {
2293 r = log_oom();
2294 goto finish;
2295 }
2296 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2297 assert(!egress);
2298 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2299 if (!egress) {
2300 r = log_oom();
2301 goto finish;
2302 }
2303 }
2304
2305 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2306 log_level = raise_level(log_level,
2307 value > MENTIONWORTHY_IP_BYTES,
2308 value > NOTICEWORTHY_IP_BYTES);
2309 }
2310
2311 if (have_ip_accounting) {
2312 if (any_traffic) {
2313 if (igress)
2314 message_parts[n_message_parts++] = TAKE_PTR(igress);
2315 if (egress)
2316 message_parts[n_message_parts++] = TAKE_PTR(egress);
2317
2318 } else {
2319 char *k;
2320
2321 k = strdup("no IP traffic");
2322 if (!k) {
2323 r = log_oom();
2324 goto finish;
2325 }
2326
2327 message_parts[n_message_parts++] = k;
2328 }
2329 }
2330
2331 /* Is there any accounting data available at all? */
2332 if (n_iovec == 0) {
2333 r = 0;
2334 goto finish;
2335 }
2336
2337 if (n_message_parts == 0)
2338 t = strjoina("MESSAGE=", u->id, ": Completed.");
2339 else {
2340                 _cleanup_free_ char *joined = NULL;
2341
2342 message_parts[n_message_parts] = NULL;
2343
2344 joined = strv_join(message_parts, ", ");
2345 if (!joined) {
2346 r = log_oom();
2347 goto finish;
2348 }
2349
2350 joined[0] = ascii_toupper(joined[0]);
2351 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2352 }
2353
2354         /* The following four fields are allocated on the stack or are static strings; we hence don't want to free
2355          * them, and don't increase n_iovec for them. */
2356 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2357 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2358
2359 t = strjoina(u->manager->unit_log_field, u->id);
2360 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2361
2362 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2363 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2364
2365 log_struct_iovec(log_level, iovec, n_iovec + 4);
2366 r = 0;
2367
2368 finish:
2369 for (i = 0; i < n_message_parts; i++)
2370 free(message_parts[i]);
2371
2372 for (i = 0; i < n_iovec; i++)
2373 free(iovec[i].iov_base);
2374
2375 return r;
2376
2377 }
2378
2379 static void unit_update_on_console(Unit *u) {
2380 bool b;
2381
2382 assert(u);
2383
2384 b = unit_needs_console(u);
2385 if (u->on_console == b)
2386 return;
2387
2388 u->on_console = b;
2389 if (b)
2390 manager_ref_console(u->manager);
2391 else
2392 manager_unref_console(u->manager);
2393 }
2394
2395 static void unit_emit_audit_start(Unit *u) {
2396 assert(u);
2397
2398 if (u->type != UNIT_SERVICE)
2399 return;
2400
2401 /* Write audit record if we have just finished starting up */
2402 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2403 u->in_audit = true;
2404 }
2405
2406 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2407 assert(u);
2408
2409 if (u->type != UNIT_SERVICE)
2410 return;
2411
2412 if (u->in_audit) {
2413 /* Write audit record if we have just finished shutting down */
2414 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2415 u->in_audit = false;
2416 } else {
2417                 /* Hmm, if there was no start record written, write one now, so that we always have a nice pair */
2418 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2419
2420 if (state == UNIT_INACTIVE)
2421 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2422 }
2423 }
2424
2425 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2426 bool unexpected = false;
2427 JobResult result;
2428
2429 assert(j);
2430
2431 if (j->state == JOB_WAITING)
2432
2433 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2434 * due to EAGAIN. */
2435 job_add_to_run_queue(j);
2436
2437 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2438 * hence needs to invalidate jobs. */
2439
2440 switch (j->type) {
2441
2442 case JOB_START:
2443 case JOB_VERIFY_ACTIVE:
2444
2445 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2446 job_finish_and_invalidate(j, JOB_DONE, true, false);
2447 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2448 unexpected = true;
2449
2450 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2451 if (ns == UNIT_FAILED)
2452 result = JOB_FAILED;
2453 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2454 result = JOB_SKIPPED;
2455 else
2456 result = JOB_DONE;
2457
2458 job_finish_and_invalidate(j, result, true, false);
2459 }
2460 }
2461
2462 break;
2463
2464 case JOB_RELOAD:
2465 case JOB_RELOAD_OR_START:
2466 case JOB_TRY_RELOAD:
2467
2468 if (j->state == JOB_RUNNING) {
2469 if (ns == UNIT_ACTIVE)
2470 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2471 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2472 unexpected = true;
2473
2474 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2475 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2476 }
2477 }
2478
2479 break;
2480
2481 case JOB_STOP:
2482 case JOB_RESTART:
2483 case JOB_TRY_RESTART:
2484
2485 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2486 job_finish_and_invalidate(j, JOB_DONE, true, false);
2487 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2488 unexpected = true;
2489 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2490 }
2491
2492 break;
2493
2494 default:
2495 assert_not_reached("Job type unknown");
2496 }
2497
2498 return unexpected;
2499 }
2500
2501 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2502 const char *reason;
2503 Manager *m;
2504
2505 assert(u);
2506 assert(os < _UNIT_ACTIVE_STATE_MAX);
2507 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2508
2509 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2510 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2511 * remounted this function will be called too! */
2512
2513 m = u->manager;
2514
2515 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2516 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2517 unit_add_to_dbus_queue(u);
2518
2519 /* Update timestamps for state changes */
2520 if (!MANAGER_IS_RELOADING(m)) {
2521 dual_timestamp_get(&u->state_change_timestamp);
2522
2523 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2524 u->inactive_exit_timestamp = u->state_change_timestamp;
2525 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2526 u->inactive_enter_timestamp = u->state_change_timestamp;
2527
2528 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2529 u->active_enter_timestamp = u->state_change_timestamp;
2530 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2531 u->active_exit_timestamp = u->state_change_timestamp;
2532 }
2533
2534 /* Keep track of failed units */
2535 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2536
2537 /* Make sure the cgroup and state files are always removed when we become inactive */
2538 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2539 unit_prune_cgroup(u);
2540 unit_unlink_state_files(u);
2541 }
2542
2543 unit_update_on_console(u);
2544
2545 if (!MANAGER_IS_RELOADING(m)) {
2546 bool unexpected;
2547
2548 /* Let's propagate state changes to the job */
2549 if (u->job)
2550 unexpected = unit_process_job(u->job, ns, flags);
2551 else
2552 unexpected = true;
2553
2554 /* If this state change happened without being requested by a job, then let's retroactively start or
2555 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2556 * additional jobs just because something is already activated. */
2557
2558 if (unexpected) {
2559 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2560 retroactively_start_dependencies(u);
2561 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2562 retroactively_stop_dependencies(u);
2563 }
2564
2565                 /* Stop unneeded units regardless of whether going down was expected or not */
2566 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2567 check_unneeded_dependencies(u);
2568
2569 if (ns != os && ns == UNIT_FAILED) {
2570 log_unit_debug(u, "Unit entered failed state.");
2571
2572 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2573 unit_start_on_failure(u);
2574 }
2575
2576 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2577 /* This unit just finished starting up */
2578
2579 unit_emit_audit_start(u);
2580 manager_send_unit_plymouth(m, u);
2581 }
2582
2583 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2584 /* This unit just stopped/failed. */
2585
2586 unit_emit_audit_stop(u, ns);
2587 unit_log_resources(u);
2588 }
2589 }
2590
2591 manager_recheck_journal(m);
2592 manager_recheck_dbus(m);
2593
2594 unit_trigger_notify(u);
2595
2596 if (!MANAGER_IS_RELOADING(m)) {
2597                 /* Maybe we finished startup and are now ready to be stopped because we are unneeded? */
2598 unit_submit_to_stop_when_unneeded_queue(u);
2599
2600 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2601                  * something sets BindsTo= on a Type=oneshot unit, as these units go directly from starting to inactive,
2602 * without ever entering started.) */
2603 unit_check_binds_to(u);
2604
2605 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2606 reason = strjoina("unit ", u->id, " failed");
2607 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2608 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2609 reason = strjoina("unit ", u->id, " succeeded");
2610 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2611 }
2612 }
2613
2614 unit_add_to_gc_queue(u);
2615 }
2616
2617 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2618 int r;
2619
2620 assert(u);
2621 assert(pid_is_valid(pid));
2622
2623 /* Watch a specific PID */
2624
2625         /* The caller might be sure that this PID belongs to this unit only. Let's take this
2626          * opportunity to remove any stale references to this PID as they can be created
2627          * easily (when watching a process which is not our direct child). */
2628 if (exclusive)
2629 manager_unwatch_pid(u->manager, pid);
2630
2631 r = set_ensure_allocated(&u->pids, NULL);
2632 if (r < 0)
2633 return r;
2634
2635 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2636 if (r < 0)
2637 return r;
2638
2639 /* First try, let's add the unit keyed by "pid". */
2640 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2641 if (r == -EEXIST) {
2642 Unit **array;
2643 bool found = false;
2644 size_t n = 0;
2645
2646 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2647                  * to an array of Units rather than just a Unit) lists us already. */
2648
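                /* Illustrative layout (PID made up): watch_pids[4711] maps to the first watching unit
                 * directly, while watch_pids[-4711] maps to a NULL-terminated array of any further units
                 * watching the same PID. */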
2649 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2650 if (array)
2651 for (; array[n]; n++)
2652 if (array[n] == u)
2653 found = true;
2654
2655                 if (found) /* Found it already? If so, do nothing. */
2656 r = 0;
2657 else {
2658 Unit **new_array;
2659
2660 /* Allocate a new array */
2661 new_array = new(Unit*, n + 2);
2662 if (!new_array)
2663 return -ENOMEM;
2664
2665 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2666 new_array[n] = u;
2667 new_array[n+1] = NULL;
2668
2669 /* Add or replace the old array */
2670 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2671 if (r < 0) {
2672 free(new_array);
2673 return r;
2674 }
2675
2676 free(array);
2677 }
2678 } else if (r < 0)
2679 return r;
2680
2681 r = set_put(u->pids, PID_TO_PTR(pid));
2682 if (r < 0)
2683 return r;
2684
2685 return 0;
2686 }
2687
2688 void unit_unwatch_pid(Unit *u, pid_t pid) {
2689 Unit **array;
2690
2691 assert(u);
2692 assert(pid_is_valid(pid));
2693
2694 /* First let's drop the unit in case it's keyed as "pid". */
2695 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2696
2697 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2698 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2699 if (array) {
2700 size_t n, m = 0;
2701
2702 /* Let's iterate through the array, dropping our own entry */
2703 for (n = 0; array[n]; n++)
2704 if (array[n] != u)
2705 array[m++] = array[n];
2706 array[m] = NULL;
2707
2708 if (m == 0) {
2709 /* The array is now empty, remove the entire entry */
2710 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2711 free(array);
2712 }
2713 }
2714
2715 (void) set_remove(u->pids, PID_TO_PTR(pid));
2716 }
2717
2718 void unit_unwatch_all_pids(Unit *u) {
2719 assert(u);
2720
2721 while (!set_isempty(u->pids))
2722 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2723
2724 u->pids = set_free(u->pids);
2725 }
2726
2727 static void unit_tidy_watch_pids(Unit *u) {
2728 pid_t except1, except2;
2729 Iterator i;
2730 void *e;
2731
2732 assert(u);
2733
2734 /* Cleans dead PIDs from our list */
2735
2736 except1 = unit_main_pid(u);
2737 except2 = unit_control_pid(u);
2738
2739 SET_FOREACH(e, u->pids, i) {
2740 pid_t pid = PTR_TO_PID(e);
2741
2742 if (pid == except1 || pid == except2)
2743 continue;
2744
2745 if (!pid_is_unwaited(pid))
2746 unit_unwatch_pid(u, pid);
2747 }
2748 }
2749
2750 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2751 Unit *u = userdata;
2752
2753 assert(s);
2754 assert(u);
2755
2756 unit_tidy_watch_pids(u);
2757 unit_watch_all_pids(u);
2758
2759 /* If the PID set is empty now, then let's finish this off. */
2760 unit_synthesize_cgroup_empty_event(u);
2761
2762 return 0;
2763 }
2764
2765 int unit_enqueue_rewatch_pids(Unit *u) {
2766 int r;
2767
2768 assert(u);
2769
2770 if (!u->cgroup_path)
2771 return -ENOENT;
2772
2773 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2774 if (r < 0)
2775 return r;
2776 if (r > 0) /* On unified we can use proper notifications */
2777 return 0;
2778
2779 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2780 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2781 * involves issuing kill(pid, 0) on all processes we watch. */
2782
2783 if (!u->rewatch_pids_event_source) {
2784 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2785
2786 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2787 if (r < 0)
2788 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2789
2790 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2791 if (r < 0)
2792 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2793
2794 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2795
2796 u->rewatch_pids_event_source = TAKE_PTR(s);
2797 }
2798
2799 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2800 if (r < 0)
2801 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2802
2803 return 0;
2804 }
2805
2806 void unit_dequeue_rewatch_pids(Unit *u) {
2807 int r;
2808 assert(u);
2809
2810 if (!u->rewatch_pids_event_source)
2811 return;
2812
2813 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2814 if (r < 0)
2815 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2816
2817 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2818 }
2819
2820 bool unit_job_is_applicable(Unit *u, JobType j) {
2821 assert(u);
2822 assert(j >= 0 && j < _JOB_TYPE_MAX);
2823
2824 switch (j) {
2825
2826 case JOB_VERIFY_ACTIVE:
2827 case JOB_START:
2828 case JOB_NOP:
2829 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2830                  * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2831                  * jobs for them. */
2832 return true;
2833
2834 case JOB_STOP:
2835                 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2836                  * external events), hence it makes no sense to permit enqueuing such a request either. */
2837 return !u->perpetual;
2838
2839 case JOB_RESTART:
2840 case JOB_TRY_RESTART:
2841 return unit_can_stop(u) && unit_can_start(u);
2842
2843 case JOB_RELOAD:
2844 case JOB_TRY_RELOAD:
2845 return unit_can_reload(u);
2846
2847 case JOB_RELOAD_OR_START:
2848 return unit_can_reload(u) && unit_can_start(u);
2849
2850 default:
2851 assert_not_reached("Invalid job type");
2852 }
2853 }
2854
2855 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2856 assert(u);
2857
2858         /* Only warn about some dependency types */
2859 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2860 return;
2861
2862 if (streq_ptr(u->id, other))
2863 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2864 else
2865 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2866 }
2867
2868 static int unit_add_dependency_hashmap(
2869 Hashmap **h,
2870 Unit *other,
2871 UnitDependencyMask origin_mask,
2872 UnitDependencyMask destination_mask) {
2873
2874 UnitDependencyInfo info;
2875 int r;
2876
2877 assert(h);
2878 assert(other);
2879 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2880 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2881 assert(origin_mask > 0 || destination_mask > 0);
2882
2883 r = hashmap_ensure_allocated(h, NULL);
2884 if (r < 0)
2885 return r;
2886
2887 assert_cc(sizeof(void*) == sizeof(info));
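        /* Both masks fit into a single pointer-sized word (checked by the assert_cc() above), so the
         * dependency metadata is stored directly in the hashmap value and no separate allocation is needed
         * per dependency edge. */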
2888
2889 info.data = hashmap_get(*h, other);
2890 if (info.data) {
2891 /* Entry already exists. Add in our mask. */
2892
2893 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2894 FLAGS_SET(destination_mask, info.destination_mask))
2895 return 0; /* NOP */
2896
2897 info.origin_mask |= origin_mask;
2898 info.destination_mask |= destination_mask;
2899
2900 r = hashmap_update(*h, other, info.data);
2901 } else {
2902 info = (UnitDependencyInfo) {
2903 .origin_mask = origin_mask,
2904 .destination_mask = destination_mask,
2905 };
2906
2907 r = hashmap_put(*h, other, info.data);
2908 }
2909 if (r < 0)
2910 return r;
2911
2912 return 1;
2913 }
2914
2915 int unit_add_dependency(
2916 Unit *u,
2917 UnitDependency d,
2918 Unit *other,
2919 bool add_reference,
2920 UnitDependencyMask mask) {
2921
2922 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2923 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2924 [UNIT_WANTS] = UNIT_WANTED_BY,
2925 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2926 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2927 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2928 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2929 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2930 [UNIT_WANTED_BY] = UNIT_WANTS,
2931 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2932 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2933 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2934 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2935 [UNIT_BEFORE] = UNIT_AFTER,
2936 [UNIT_AFTER] = UNIT_BEFORE,
2937 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2938 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2939 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2940 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2941 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2942 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2943 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2944 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2945 };
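        /* Note that UNIT_ON_FAILURE deliberately has no inverse and that UNIT_JOINS_NAMESPACE_OF maps to
         * itself; the checks below skip adding a reverse entry in both cases. */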
2946 Unit *original_u = u, *original_other = other;
2947 int r;
2948
2949 assert(u);
2950 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2951 assert(other);
2952
2953 u = unit_follow_merge(u);
2954 other = unit_follow_merge(other);
2955
2956 /* We won't allow dependencies on ourselves. We will not
2957          * consider them an error, however. */
2958 if (u == other) {
2959 maybe_warn_about_dependency(original_u, original_other->id, d);
2960 return 0;
2961 }
2962
2963 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2964 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2965                 log_unit_warning(u, "Dependency %s=%s ignored (.device units cannot be delayed)", unit_dependency_to_string(d), other->id);
2966 return 0;
2967 }
2968
2969 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2970 if (r < 0)
2971 return r;
2972
2973 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2974 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2975 if (r < 0)
2976 return r;
2977 }
2978
2979 if (add_reference) {
2980 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2981 if (r < 0)
2982 return r;
2983
2984 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2985 if (r < 0)
2986 return r;
2987 }
2988
2989 unit_add_to_dbus_queue(u);
2990 return 0;
2991 }
2992
2993 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2994 int r;
2995
2996 assert(u);
2997
2998 r = unit_add_dependency(u, d, other, add_reference, mask);
2999 if (r < 0)
3000 return r;
3001
3002 return unit_add_dependency(u, e, other, add_reference, mask);
3003 }
3004
3005 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3006 int r;
3007
3008 assert(u);
3009 assert(name);
3010 assert(buf);
3011 assert(ret);
3012
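        /* Resolves a template name against our instance, e.g. (names made up) "foo@.service" resolved with
         * instance "bar" yields "foo@bar.service". Non-template names are passed through unchanged; *ret
         * points either into *buf or at the original name. */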
3013 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3014 *buf = NULL;
3015 *ret = name;
3016 return 0;
3017 }
3018
3019 if (u->instance)
3020 r = unit_name_replace_instance(name, u->instance, buf);
3021 else {
3022 _cleanup_free_ char *i = NULL;
3023
3024 r = unit_name_to_prefix(u->id, &i);
3025 if (r < 0)
3026 return r;
3027
3028 r = unit_name_replace_instance(name, i, buf);
3029 }
3030 if (r < 0)
3031 return r;
3032
3033 *ret = *buf;
3034 return 0;
3035 }
3036
3037 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3038 _cleanup_free_ char *buf = NULL;
3039 Unit *other;
3040 int r;
3041
3042 assert(u);
3043 assert(name);
3044
3045 r = resolve_template(u, name, &buf, &name);
3046 if (r < 0)
3047 return r;
3048
3049 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3050 if (r < 0)
3051 return r;
3052
3053 return unit_add_dependency(u, d, other, add_reference, mask);
3054 }
3055
3056 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3057 _cleanup_free_ char *buf = NULL;
3058 Unit *other;
3059 int r;
3060
3061 assert(u);
3062 assert(name);
3063
3064 r = resolve_template(u, name, &buf, &name);
3065 if (r < 0)
3066 return r;
3067
3068 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3069 if (r < 0)
3070 return r;
3071
3072 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3073 }
3074
3075 int set_unit_path(const char *p) {
3076 /* This is mostly for debug purposes */
3077 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3078 return -errno;
3079
3080 return 0;
3081 }
3082
3083 char *unit_dbus_path(Unit *u) {
3084 assert(u);
3085
3086 if (!u->id)
3087 return NULL;
3088
3089 return unit_dbus_path_from_name(u->id);
3090 }
3091
3092 char *unit_dbus_path_invocation_id(Unit *u) {
3093 assert(u);
3094
3095 if (sd_id128_is_null(u->invocation_id))
3096 return NULL;
3097
3098 return unit_dbus_path_from_name(u->invocation_id_string);
3099 }
3100
3101 int unit_set_slice(Unit *u, Unit *slice) {
3102 assert(u);
3103 assert(slice);
3104
3105         /* Sets the unit slice if it has not been set before. We are extra
3106          * careful to only allow this for units that actually have a
3107          * cgroup context. Also, we don't allow setting this for slices
3108          * (since the parent slice is derived from the name). Make
3109          * sure the unit we set is actually a slice. */
3110
3111 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3112 return -EOPNOTSUPP;
3113
3114 if (u->type == UNIT_SLICE)
3115 return -EINVAL;
3116
3117 if (unit_active_state(u) != UNIT_INACTIVE)
3118 return -EBUSY;
3119
3120 if (slice->type != UNIT_SLICE)
3121 return -EINVAL;
3122
3123 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3124 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3125 return -EPERM;
3126
3127 if (UNIT_DEREF(u->slice) == slice)
3128 return 0;
3129
3130 /* Disallow slice changes if @u is already bound to cgroups */
3131 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3132 return -EBUSY;
3133
3134 unit_ref_set(&u->slice, u, slice);
3135 return 1;
3136 }
3137
3138 int unit_set_default_slice(Unit *u) {
3139 const char *slice_name;
3140 Unit *slice;
3141 int r;
3142
3143 assert(u);
3144
3145 if (UNIT_ISSET(u->slice))
3146 return 0;
3147
3148 if (u->instance) {
3149 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3150
3151 /* Implicitly place all instantiated units in their
3152 * own per-template slice */
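                /* For example (unit name made up): "getty@tty1.service" would be placed in
                 * "system-getty.slice" on the system manager. */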
3153
3154 r = unit_name_to_prefix(u->id, &prefix);
3155 if (r < 0)
3156 return r;
3157
3158 /* The prefix is already escaped, but it might include
3159 * "-" which has a special meaning for slice units,
3160                  * hence escape it once more here. */
3161 escaped = unit_name_escape(prefix);
3162 if (!escaped)
3163 return -ENOMEM;
3164
3165 if (MANAGER_IS_SYSTEM(u->manager))
3166 slice_name = strjoina("system-", escaped, ".slice");
3167 else
3168 slice_name = strjoina(escaped, ".slice");
3169 } else
3170 slice_name =
3171 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3172 ? SPECIAL_SYSTEM_SLICE
3173 : SPECIAL_ROOT_SLICE;
3174
3175 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3176 if (r < 0)
3177 return r;
3178
3179 return unit_set_slice(u, slice);
3180 }
3181
3182 const char *unit_slice_name(Unit *u) {
3183 assert(u);
3184
3185 if (!UNIT_ISSET(u->slice))
3186 return NULL;
3187
3188 return UNIT_DEREF(u->slice)->id;
3189 }
3190
3191 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3192 _cleanup_free_ char *t = NULL;
3193 int r;
3194
3195 assert(u);
3196 assert(type);
3197 assert(_found);
3198
3199 r = unit_name_change_suffix(u->id, type, &t);
3200 if (r < 0)
3201 return r;
3202 if (unit_has_name(u, t))
3203 return -EINVAL;
3204
3205 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3206 assert(r < 0 || *_found != u);
3207 return r;
3208 }
3209
3210 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3211 const char *name, *old_owner, *new_owner;
3212 Unit *u = userdata;
3213 int r;
3214
3215 assert(message);
3216 assert(u);
3217
3218 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3219 if (r < 0) {
3220 bus_log_parse_error(r);
3221 return 0;
3222 }
3223
3224 old_owner = empty_to_null(old_owner);
3225 new_owner = empty_to_null(new_owner);
3226
3227 if (UNIT_VTABLE(u)->bus_name_owner_change)
3228 UNIT_VTABLE(u)->bus_name_owner_change(u, old_owner, new_owner);
3229
3230 return 0;
3231 }
3232
3233 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3234 const sd_bus_error *e;
3235 const char *new_owner;
3236 Unit *u = userdata;
3237 int r;
3238
3239 assert(message);
3240 assert(u);
3241
3242 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3243
3244 if (sd_bus_error_is_set(error)) {
3245 log_error("Failed to get name owner from bus: %s", error->message);
3246 return 0;
3247 }
3248
3249 e = sd_bus_message_get_error(message);
3250 if (sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3251 return 0;
3252
3253 if (e) {
3254 log_error("Unexpected error response from GetNameOwner: %s", e->message);
3255 return 0;
3256 }
3257
3258 r = sd_bus_message_read(message, "s", &new_owner);
3259 if (r < 0) {
3260 bus_log_parse_error(r);
3261 return 0;
3262 }
3263
3264 new_owner = empty_to_null(new_owner);
3265
3266 if (UNIT_VTABLE(u)->bus_name_owner_change)
3267 UNIT_VTABLE(u)->bus_name_owner_change(u, NULL, new_owner);
3268
3269 return 0;
3270 }
3271
3272 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3273 const char *match;
3274
3275 assert(u);
3276 assert(bus);
3277 assert(name);
3278
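        /* Subscribe to NameOwnerChanged signals matching this name, and additionally issue an asynchronous
         * GetNameOwner call so that an owner that already exists is picked up too (see
         * get_name_owner_handler() above). */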
3279 if (u->match_bus_slot)
3280 return -EBUSY;
3281
3282 match = strjoina("type='signal',"
3283 "sender='org.freedesktop.DBus',"
3284 "path='/org/freedesktop/DBus',"
3285 "interface='org.freedesktop.DBus',"
3286 "member='NameOwnerChanged',"
3287 "arg0='", name, "'");
3288
3289 int r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3290 if (r < 0)
3291 return r;
3292
3293 return sd_bus_call_method_async(bus,
3294 &u->get_name_owner_slot,
3295 "org.freedesktop.DBus",
3296 "/org/freedesktop/DBus",
3297 "org.freedesktop.DBus",
3298 "GetNameOwner",
3299 get_name_owner_handler,
3300 u,
3301 "s", name);
3302 }
3303
3304 int unit_watch_bus_name(Unit *u, const char *name) {
3305 int r;
3306
3307 assert(u);
3308 assert(name);
3309
3310 /* Watch a specific name on the bus. We only support one unit
3311 * watching each name for now. */
3312
3313 if (u->manager->api_bus) {
3314 /* If the bus is already available, install the match directly.
3315 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3316 r = unit_install_bus_match(u, u->manager->api_bus, name);
3317 if (r < 0)
3318 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3319 }
3320
3321 r = hashmap_put(u->manager->watch_bus, name, u);
3322 if (r < 0) {
3323 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3324 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3325 }
3326
3327 return 0;
3328 }
3329
3330 void unit_unwatch_bus_name(Unit *u, const char *name) {
3331 assert(u);
3332 assert(name);
3333
3334 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3335 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3336 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3337 }
3338
3339 bool unit_can_serialize(Unit *u) {
3340 assert(u);
3341
3342 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3343 }
3344
3345 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3346 _cleanup_free_ char *s = NULL;
3347 int r;
3348
3349 assert(f);
3350 assert(key);
3351
3352 if (mask == 0)
3353 return 0;
3354
3355 r = cg_mask_to_string(mask, &s);
3356 if (r < 0)
3357 return log_error_errno(r, "Failed to format cgroup mask: %m");
3358
3359 return serialize_item(f, key, s);
3360 }
3361
3362 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3363 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3364 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3365 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3366 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3367 };
3368
3369 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3370 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3371 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3372 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3373 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3374 };
3375
3376 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3377 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3378 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3379 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3380 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3381 };
3382
3383 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3384 CGroupIPAccountingMetric m;
3385 int r;
3386
3387 assert(u);
3388 assert(f);
3389 assert(fds);
3390
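        /* The serialization format is a flat sequence of "key=value" lines terminated by an empty line,
         * for example (values made up):
         *
         *   transient=no
         *   cpu-usage-base=1234567
         *   cgroup=/system.slice/foo.service
         */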
3391 if (unit_can_serialize(u)) {
3392 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3393 if (r < 0)
3394 return r;
3395 }
3396
3397 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3398
3399 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3400 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3401 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3402 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3403
3404 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3405 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3406
3407 if (dual_timestamp_is_set(&u->condition_timestamp))
3408 (void) serialize_bool(f, "condition-result", u->condition_result);
3409
3410 if (dual_timestamp_is_set(&u->assert_timestamp))
3411 (void) serialize_bool(f, "assert-result", u->assert_result);
3412
3413 (void) serialize_bool(f, "transient", u->transient);
3414 (void) serialize_bool(f, "in-audit", u->in_audit);
3415
3416 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3417 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3418 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3419 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_ratelimit_interval);
3420 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_ratelimit_burst);
3421
3422 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3423 if (u->cpu_usage_last != NSEC_INFINITY)
3424 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3425
3426 if (u->oom_kill_last > 0)
3427 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3428
3429 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3430 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3431
3432 if (u->io_accounting_last[im] != UINT64_MAX)
3433 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3434 }
3435
3436 if (u->cgroup_path)
3437 (void) serialize_item(f, "cgroup", u->cgroup_path);
3438
3439 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3440 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3441 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3442 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3443
3444 if (uid_is_valid(u->ref_uid))
3445 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3446 if (gid_is_valid(u->ref_gid))
3447 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3448
3449 if (!sd_id128_is_null(u->invocation_id))
3450 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3451
3452 bus_track_serialize(u->bus_track, f, "ref");
3453
3454 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3455 uint64_t v;
3456
3457 r = unit_get_ip_accounting(u, m, &v);
3458 if (r >= 0)
3459 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3460 }
3461
3462 if (serialize_jobs) {
3463 if (u->job) {
3464 fputs("job\n", f);
3465 job_serialize(u->job, f);
3466 }
3467
3468 if (u->nop_job) {
3469 fputs("job\n", f);
3470 job_serialize(u->nop_job, f);
3471 }
3472 }
3473
3474 /* End marker */
3475 fputc('\n', f);
3476 return 0;
3477 }
3478
3479 static int unit_deserialize_job(Unit *u, FILE *f) {
3480 _cleanup_(job_freep) Job *j = NULL;
3481 int r;
3482
3483 assert(u);
3484 assert(f);
3485
3486 j = job_new_raw(u);
3487 if (!j)
3488 return log_oom();
3489
3490 r = job_deserialize(j, f);
3491 if (r < 0)
3492 return r;
3493
3494 r = job_install_deserialized(j);
3495 if (r < 0)
3496 return r;
3497
3498 TAKE_PTR(j);
3499 return 0;
3500 }
3501
3502 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3503 int r;
3504
3505 assert(u);
3506 assert(f);
3507 assert(fds);
3508
3509 for (;;) {
3510 _cleanup_free_ char *line = NULL;
3511 char *l, *v;
3512 ssize_t m;
3513 size_t k;
3514
3515 r = read_line(f, LONG_LINE_MAX, &line);
3516 if (r < 0)
3517 return log_error_errno(r, "Failed to read serialization line: %m");
3518 if (r == 0) /* eof */
3519 break;
3520
3521 l = strstrip(line);
3522 if (isempty(l)) /* End marker */
3523 break;
3524
3525 k = strcspn(l, "=");
3526
3527 if (l[k] == '=') {
3528 l[k] = 0;
3529 v = l+k+1;
3530 } else
3531 v = l+k;
3532
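                /* If the line carries no '=', v ends up pointing at the terminating NUL, i.e. the value is
                 * the empty string; that is how bare "job" markers are told apart from the legacy "job=..."
                 * format below. */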
3533 if (streq(l, "job")) {
3534 if (v[0] == '\0') {
3535 /* New-style serialized job */
3536 r = unit_deserialize_job(u, f);
3537 if (r < 0)
3538 return r;
3539 } else /* Legacy for pre-44 */
3540                                 log_unit_warning(u, "Updating from such old systemd versions is unsupported, cannot deserialize job: %s", v);
3541 continue;
3542 } else if (streq(l, "state-change-timestamp")) {
3543 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3544 continue;
3545 } else if (streq(l, "inactive-exit-timestamp")) {
3546 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3547 continue;
3548 } else if (streq(l, "active-enter-timestamp")) {
3549 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3550 continue;
3551 } else if (streq(l, "active-exit-timestamp")) {
3552 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3553 continue;
3554 } else if (streq(l, "inactive-enter-timestamp")) {
3555 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3556 continue;
3557 } else if (streq(l, "condition-timestamp")) {
3558 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3559 continue;
3560 } else if (streq(l, "assert-timestamp")) {
3561 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3562 continue;
3563 } else if (streq(l, "condition-result")) {
3564
3565 r = parse_boolean(v);
3566 if (r < 0)
3567 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3568 else
3569 u->condition_result = r;
3570
3571 continue;
3572
3573 } else if (streq(l, "assert-result")) {
3574
3575 r = parse_boolean(v);
3576 if (r < 0)
3577 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3578 else
3579 u->assert_result = r;
3580
3581 continue;
3582
3583 } else if (streq(l, "transient")) {
3584
3585 r = parse_boolean(v);
3586 if (r < 0)
3587 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3588 else
3589 u->transient = r;
3590
3591 continue;
3592
3593 } else if (streq(l, "in-audit")) {
3594
3595 r = parse_boolean(v);
3596 if (r < 0)
3597 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3598 else
3599 u->in_audit = r;
3600
3601 continue;
3602
3603 } else if (streq(l, "exported-invocation-id")) {
3604
3605 r = parse_boolean(v);
3606 if (r < 0)
3607 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3608 else
3609 u->exported_invocation_id = r;
3610
3611 continue;
3612
3613 } else if (streq(l, "exported-log-level-max")) {
3614
3615 r = parse_boolean(v);
3616 if (r < 0)
3617 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3618 else
3619 u->exported_log_level_max = r;
3620
3621 continue;
3622
3623 } else if (streq(l, "exported-log-extra-fields")) {
3624
3625 r = parse_boolean(v);
3626 if (r < 0)
3627 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3628 else
3629 u->exported_log_extra_fields = r;
3630
3631 continue;
3632
3633 } else if (streq(l, "exported-log-rate-limit-interval")) {
3634
3635 r = parse_boolean(v);
3636 if (r < 0)
3637 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3638 else
3639 u->exported_log_ratelimit_interval = r;
3640
3641 continue;
3642
3643 } else if (streq(l, "exported-log-rate-limit-burst")) {
3644
3645 r = parse_boolean(v);
3646 if (r < 0)
3647 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3648 else
3649 u->exported_log_ratelimit_burst = r;
3650
3651 continue;
3652
3653 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3654
3655 r = safe_atou64(v, &u->cpu_usage_base);
3656 if (r < 0)
3657 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3658
3659 continue;
3660
3661 } else if (streq(l, "cpu-usage-last")) {
3662
3663 r = safe_atou64(v, &u->cpu_usage_last);
3664 if (r < 0)
3665 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3666
3667 continue;
3668
3669 } else if (streq(l, "oom-kill-last")) {
3670
3671 r = safe_atou64(v, &u->oom_kill_last);
3672 if (r < 0)
3673 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3674
3675 continue;
3676
3677 } else if (streq(l, "cgroup")) {
3678
3679 r = unit_set_cgroup_path(u, v);
3680 if (r < 0)
3681 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3682
3683 (void) unit_watch_cgroup(u);
3684 (void) unit_watch_cgroup_memory(u);
3685
3686 continue;
3687 } else if (streq(l, "cgroup-realized")) {
3688 int b;
3689
3690 b = parse_boolean(v);
3691 if (b < 0)
3692 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3693 else
3694 u->cgroup_realized = b;
3695
3696 continue;
3697
3698 } else if (streq(l, "cgroup-realized-mask")) {
3699
3700 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3701 if (r < 0)
3702 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3703 continue;
3704
3705 } else if (streq(l, "cgroup-enabled-mask")) {
3706
3707 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3708 if (r < 0)
3709 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3710 continue;
3711
3712 } else if (streq(l, "cgroup-invalidated-mask")) {
3713
3714 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3715 if (r < 0)
3716 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3717 continue;
3718
3719 } else if (streq(l, "ref-uid")) {
3720 uid_t uid;
3721
3722 r = parse_uid(v, &uid);
3723 if (r < 0)
3724 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3725 else
3726 unit_ref_uid_gid(u, uid, GID_INVALID);
3727
3728 continue;
3729
3730 } else if (streq(l, "ref-gid")) {
3731 gid_t gid;
3732
3733 r = parse_gid(v, &gid);
3734 if (r < 0)
3735 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3736 else
3737 unit_ref_uid_gid(u, UID_INVALID, gid);
3738
3739 continue;
3740
3741 } else if (streq(l, "ref")) {
3742
3743 r = strv_extend(&u->deserialized_refs, v);
3744 if (r < 0)
3745 return log_oom();
3746
3747 continue;
3748 } else if (streq(l, "invocation-id")) {
3749 sd_id128_t id;
3750
3751 r = sd_id128_from_string(v, &id);
3752 if (r < 0)
3753 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3754 else {
3755 r = unit_set_invocation_id(u, id);
3756 if (r < 0)
3757 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3758 }
3759
3760 continue;
3761 }
3762
3763 /* Check if this is an IP accounting metric serialization field */
3764 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3765 if (m >= 0) {
3766 uint64_t c;
3767
3768 r = safe_atou64(v, &c);
3769 if (r < 0)
3770 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3771 else
3772 u->ip_accounting_extra[m] = c;
3773 continue;
3774 }
3775
3776 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3777 if (m >= 0) {
3778 uint64_t c;
3779
3780 r = safe_atou64(v, &c);
3781 if (r < 0)
3782 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3783 else
3784 u->io_accounting_base[m] = c;
3785 continue;
3786 }
3787
3788 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3789 if (m >= 0) {
3790 uint64_t c;
3791
3792 r = safe_atou64(v, &c);
3793 if (r < 0)
3794 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3795 else
3796 u->io_accounting_last[m] = c;
3797 continue;
3798 }
3799
3800 if (unit_can_serialize(u)) {
3801 r = exec_runtime_deserialize_compat(u, l, v, fds);
3802 if (r < 0) {
3803 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3804 continue;
3805 }
3806
3807 /* Returns positive if key was handled by the call */
3808 if (r > 0)
3809 continue;
3810
3811 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3812 if (r < 0)
3813 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3814 }
3815 }
3816
3817 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3818          * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3819 * before 228 where the base for timeouts was not persistent across reboots. */
3820
3821 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3822 dual_timestamp_get(&u->state_change_timestamp);
3823
3824 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3825 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3826 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3827 unit_invalidate_cgroup_bpf(u);
3828
3829 return 0;
3830 }
3831
3832 int unit_deserialize_skip(FILE *f) {
3833 int r;
3834 assert(f);
3835
3836 /* Skip serialized data for this unit. We don't know what it is. */
3837
3838 for (;;) {
3839 _cleanup_free_ char *line = NULL;
3840 char *l;
3841
3842 r = read_line(f, LONG_LINE_MAX, &line);
3843 if (r < 0)
3844 return log_error_errno(r, "Failed to read serialization line: %m");
3845 if (r == 0)
3846 return 0;
3847
3848 l = strstrip(line);
3849
3850 /* End marker */
3851 if (isempty(l))
3852 return 1;
3853 }
3854 }
3855
3856 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3857 Unit *device;
3858 _cleanup_free_ char *e = NULL;
3859 int r;
3860
3861 assert(u);
3862
3863 /* Adds in links to the device node that this unit is based on */
3864 if (isempty(what))
3865 return 0;
3866
3867 if (!is_device_path(what))
3868 return 0;
3869
3870 /* When device units aren't supported (such as in a
3871 * container), don't create dependencies on them. */
3872 if (!unit_type_supported(UNIT_DEVICE))
3873 return 0;
3874
3875 r = unit_name_from_path(what, ".device", &e);
3876 if (r < 0)
3877 return r;
3878
3879 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3880 if (r < 0)
3881 return r;
3882
3883 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3884 dep = UNIT_BINDS_TO;
3885
3886 r = unit_add_two_dependencies(u, UNIT_AFTER,
3887 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3888 device, true, mask);
3889 if (r < 0)
3890 return r;
3891
3892 if (wants) {
3893 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3894 if (r < 0)
3895 return r;
3896 }
3897
3898 return 0;
3899 }
3900
3901 int unit_coldplug(Unit *u) {
3902 int r = 0, q;
3903 char **i;
3904 Job *uj;
3905
3906 assert(u);
3907
3908 /* Make sure we don't enter a loop when coldplugging recursively. */
3909 if (u->coldplugged)
3910 return 0;
3911
3912 u->coldplugged = true;
3913
3914 STRV_FOREACH(i, u->deserialized_refs) {
3915 q = bus_unit_track_add_name(u, *i);
3916 if (q < 0 && r >= 0)
3917 r = q;
3918 }
3919 u->deserialized_refs = strv_free(u->deserialized_refs);
3920
3921 if (UNIT_VTABLE(u)->coldplug) {
3922 q = UNIT_VTABLE(u)->coldplug(u);
3923 if (q < 0 && r >= 0)
3924 r = q;
3925 }
3926
3927 uj = u->job ?: u->nop_job;
3928 if (uj) {
3929 q = job_coldplug(uj);
3930 if (q < 0 && r >= 0)
3931 r = q;
3932 }
3933
3934 return r;
3935 }
3936
3937 void unit_catchup(Unit *u) {
3938 assert(u);
3939
3940 if (UNIT_VTABLE(u)->catchup)
3941 UNIT_VTABLE(u)->catchup(u);
3942 }
3943
3944 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3945 struct stat st;
3946
3947 if (!path)
3948 return false;
3949
3950 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3951 * are never out-of-date. */
3952 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3953 return false;
3954
3955 if (stat(path, &st) < 0)
3956 /* What, cannot access this anymore? */
3957 return true;
3958
3959 if (path_masked)
3960 /* For masked files check if they are still so */
3961 return !null_or_empty(&st);
3962 else
3963 /* For non-masked files check the mtime */
3964 return timespec_load(&st.st_mtim) > mtime;
3967 }
3968
3969 bool unit_need_daemon_reload(Unit *u) {
3970 _cleanup_strv_free_ char **t = NULL;
3971 char **path;
3972
3973 assert(u);
3974
3975 /* For unit files, we allow masking… */
3976 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3977 u->load_state == UNIT_MASKED))
3978 return true;
3979
3980 /* Source paths should not be masked… */
3981 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3982 return true;
3983
3984 if (u->load_state == UNIT_LOADED)
3985 (void) unit_find_dropin_paths(u, &t);
3986 if (!strv_equal(u->dropin_paths, t))
3987 return true;
3988
3989 /* … any drop-ins that are masked are simply omitted from the list. */
3990 STRV_FOREACH(path, u->dropin_paths)
3991 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3992 return true;
3993
3994 return false;
3995 }
3996
3997 void unit_reset_failed(Unit *u) {
3998 assert(u);
3999
4000 if (UNIT_VTABLE(u)->reset_failed)
4001 UNIT_VTABLE(u)->reset_failed(u);
4002
4003 ratelimit_reset(&u->start_ratelimit);
4004 u->start_limit_hit = false;
4005 }
4006
4007 Unit *unit_following(Unit *u) {
4008 assert(u);
4009
4010 if (UNIT_VTABLE(u)->following)
4011 return UNIT_VTABLE(u)->following(u);
4012
4013 return NULL;
4014 }
4015
4016 bool unit_stop_pending(Unit *u) {
4017 assert(u);
4018
4019 /* This call does not check the current state of the unit. It's
4020 * hence useful to be called from state change calls of the
4021 * unit itself, where the state isn't updated yet. This is
4022 * different from unit_inactive_or_pending() which checks both
4023 * the current state and for a queued job. */
4024
4025 return u->job && u->job->type == JOB_STOP;
4026 }
4027
4028 bool unit_inactive_or_pending(Unit *u) {
4029 assert(u);
4030
4031 /* Returns true if the unit is inactive or going down */
4032
4033 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4034 return true;
4035
4036 if (unit_stop_pending(u))
4037 return true;
4038
4039 return false;
4040 }
4041
4042 bool unit_active_or_pending(Unit *u) {
4043 assert(u);
4044
4045 /* Returns true if the unit is active or going up */
4046
4047 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4048 return true;
4049
4050 if (u->job &&
4051 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4052 return true;
4053
4054 return false;
4055 }
4056
4057 bool unit_will_restart_default(Unit *u) {
4058 assert(u);
4059
4060 if (!u->job)
4061 return false;
4062 if (u->job->type == JOB_START)
4063 return true;
4064
4065 return false;
4066 }
4067
4068 bool unit_will_restart(Unit *u) {
4069 assert(u);
4070
4071 if (!UNIT_VTABLE(u)->will_restart)
4072 return false;
4073
4074 return UNIT_VTABLE(u)->will_restart(u);
4075 }
4076
4077 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4078 assert(u);
4079 assert(w >= 0 && w < _KILL_WHO_MAX);
4080 assert(SIGNAL_VALID(signo));
4081
4082 if (!UNIT_VTABLE(u)->kill)
4083 return -EOPNOTSUPP;
4084
4085 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4086 }
4087
4088 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4089 _cleanup_set_free_ Set *pid_set = NULL;
4090 int r;
4091
4092 pid_set = set_new(NULL);
4093 if (!pid_set)
4094 return NULL;
4095
4096 /* Exclude the main/control pids from being killed via the cgroup */
4097 if (main_pid > 0) {
4098 r = set_put(pid_set, PID_TO_PTR(main_pid));
4099 if (r < 0)
4100 return NULL;
4101 }
4102
4103 if (control_pid > 0) {
4104 r = set_put(pid_set, PID_TO_PTR(control_pid));
4105 if (r < 0)
4106 return NULL;
4107 }
4108
4109 return TAKE_PTR(pid_set);
4110 }
4111
4112 int unit_kill_common(
4113 Unit *u,
4114 KillWho who,
4115 int signo,
4116 pid_t main_pid,
4117 pid_t control_pid,
4118 sd_bus_error *error) {
4119
4120 int r = 0;
4121 bool killed = false;
4122
4123 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4124 if (main_pid < 0)
4125 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4126 else if (main_pid == 0)
4127 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4128 }
4129
4130 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4131 if (control_pid < 0)
4132 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4133 else if (control_pid == 0)
4134 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4135 }
4136
4137 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4138 if (control_pid > 0) {
4139 if (kill(control_pid, signo) < 0)
4140 r = -errno;
4141 else
4142 killed = true;
4143 }
4144
4145 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4146 if (main_pid > 0) {
4147 if (kill(main_pid, signo) < 0)
4148 r = -errno;
4149 else
4150 killed = true;
4151 }
4152
4153 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4154 _cleanup_set_free_ Set *pid_set = NULL;
4155 int q;
4156
4157 /* Exclude the main/control pids from being killed via the cgroup */
4158 pid_set = unit_pid_set(main_pid, control_pid);
4159 if (!pid_set)
4160 return -ENOMEM;
4161
4162 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4163 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4164 r = q;
4165 else
4166 killed = true;
4167 }
4168
4169 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4170 return -ESRCH;
4171
4172 return r;
4173 }
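
/* A minimal sketch of how a unit type's vtable kill() might delegate to
 * unit_kill_common(); "Service", SERVICE() and its main_pid/control_pid fields are
 * assumptions standing in for whatever PIDs the concrete unit type tracks. */
#if 0
static int service_kill(Unit *u, KillWho who, int signo, sd_bus_error *error) {
        Service *s = SERVICE(u);

        assert(s);

        /* Hand over the PIDs this type knows about; unit_kill_common() does the
         * who/signo dispatch, including the cgroup-wide case. */
        return unit_kill_common(u, who, signo, s->main_pid, s->control_pid, error);
}
#endif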
4174
4175 int unit_following_set(Unit *u, Set **s) {
4176 assert(u);
4177 assert(s);
4178
4179 if (UNIT_VTABLE(u)->following_set)
4180 return UNIT_VTABLE(u)->following_set(u, s);
4181
4182 *s = NULL;
4183 return 0;
4184 }
4185
4186 UnitFileState unit_get_unit_file_state(Unit *u) {
4187 int r;
4188
4189 assert(u);
4190
4191 if (u->unit_file_state < 0 && u->fragment_path) {
4192 r = unit_file_get_state(
4193 u->manager->unit_file_scope,
4194 NULL,
4195 u->id,
4196 &u->unit_file_state);
4197 if (r < 0)
4198 u->unit_file_state = UNIT_FILE_BAD;
4199 }
4200
4201 return u->unit_file_state;
4202 }
4203
4204 int unit_get_unit_file_preset(Unit *u) {
4205 assert(u);
4206
4207 if (u->unit_file_preset < 0 && u->fragment_path)
4208 u->unit_file_preset = unit_file_query_preset(
4209 u->manager->unit_file_scope,
4210 NULL,
4211 basename(u->fragment_path));
4212
4213 return u->unit_file_preset;
4214 }
4215
4216 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4217 assert(ref);
4218 assert(source);
4219 assert(target);
4220
4221 if (ref->target)
4222 unit_ref_unset(ref);
4223
4224 ref->source = source;
4225 ref->target = target;
4226 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4227 return target;
4228 }
4229
4230 void unit_ref_unset(UnitRef *ref) {
4231 assert(ref);
4232
4233 if (!ref->target)
4234 return;
4235
4236 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4237 * be unreferenced now. */
4238 unit_add_to_gc_queue(ref->target);
4239
4240 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4241 ref->source = ref->target = NULL;
4242 }
4243
4244 static int user_from_unit_name(Unit *u, char **ret) {
4245
4246 static const uint8_t hash_key[] = {
4247 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4248 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4249 };
4250
4251 _cleanup_free_ char *n = NULL;
4252 int r;
4253
4254 r = unit_name_to_prefix(u->id, &n);
4255 if (r < 0)
4256 return r;
4257
4258 if (valid_user_group_name(n)) {
4259 *ret = TAKE_PTR(n);
4260 return 0;
4261 }
4262
4263 /* If we can't use the unit name as a user name, then let's hash it and use that */
4264 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4265 return -ENOMEM;
4266
4267 return 0;
4268 }
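
/* For illustration: for "foo.service" the prefix "foo" is a valid user name and is
 * returned as-is, while a prefix rejected by valid_user_group_name() is replaced by
 * a name of the form "_du0123456789abcdef", i.e. "_du" followed by the 16 hex digits
 * of siphash24() over the prefix, keyed with hash_key above (digits here invented). */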
4269
4270 int unit_patch_contexts(Unit *u) {
4271 CGroupContext *cc;
4272 ExecContext *ec;
4273 unsigned i;
4274 int r;
4275
4276 assert(u);
4277
4278 /* Patch in the manager defaults into the exec and cgroup
4279 * contexts, _after_ the rest of the settings have been
4280 * initialized */
4281
4282 ec = unit_get_exec_context(u);
4283 if (ec) {
4284 /* This only copies in the ones that need memory */
4285 for (i = 0; i < _RLIMIT_MAX; i++)
4286 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4287 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4288 if (!ec->rlimit[i])
4289 return -ENOMEM;
4290 }
4291
4292 if (MANAGER_IS_USER(u->manager) &&
4293 !ec->working_directory) {
4294
4295 r = get_home_dir(&ec->working_directory);
4296 if (r < 0)
4297 return r;
4298
4299 /* Allow user services to run, even if the
4300 * home directory is missing */
4301 ec->working_directory_missing_ok = true;
4302 }
4303
4304 if (ec->private_devices)
4305 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4306
4307 if (ec->protect_kernel_modules)
4308 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4309
4310 if (ec->dynamic_user) {
4311 if (!ec->user) {
4312 r = user_from_unit_name(u, &ec->user);
4313 if (r < 0)
4314 return r;
4315 }
4316
4317 if (!ec->group) {
4318 ec->group = strdup(ec->user);
4319 if (!ec->group)
4320 return -ENOMEM;
4321 }
4322
4323 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4324 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4325 * sandbox. */
4326
4327 ec->private_tmp = true;
4328 ec->remove_ipc = true;
4329 ec->protect_system = PROTECT_SYSTEM_STRICT;
4330 if (ec->protect_home == PROTECT_HOME_NO)
4331 ec->protect_home = PROTECT_HOME_READ_ONLY;
4332
4333 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4334 * them. */
4335 ec->no_new_privileges = true;
4336 ec->restrict_suid_sgid = true;
4337 }
4338 }
4339
4340 cc = unit_get_cgroup_context(u);
4341 if (cc && ec) {
4342
4343 if (ec->private_devices &&
4344 cc->device_policy == CGROUP_AUTO)
4345 cc->device_policy = CGROUP_CLOSED;
4346
4347 if (ec->root_image &&
4348 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4349
4350 /* When RootImage= is specified, the following devices are touched. */
4351 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4352 if (r < 0)
4353 return r;
4354
4355 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4356 if (r < 0)
4357 return r;
4358
4359 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4360 if (r < 0)
4361 return r;
4362 }
4363 }
4364
4365 return 0;
4366 }
4367
4368 ExecContext *unit_get_exec_context(Unit *u) {
4369 size_t offset;
4370 assert(u);
4371
4372 if (u->type < 0)
4373 return NULL;
4374
4375 offset = UNIT_VTABLE(u)->exec_context_offset;
4376 if (offset <= 0)
4377 return NULL;
4378
4379 return (ExecContext*) ((uint8_t*) u + offset);
4380 }
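
/* A sketch of where the offsets consumed by these accessors come from: each unit
 * type's vtable records offsetof() positions into its concrete unit struct. The
 * "Service" fields below are assumptions, shown only to illustrate the pattern. */
#if 0
const UnitVTable service_vtable = {
        .object_size = sizeof(Service),
        .exec_context_offset = offsetof(Service, exec_context),
        .cgroup_context_offset = offsetof(Service, cgroup_context),
        .kill_context_offset = offsetof(Service, kill_context),
        .exec_runtime_offset = offsetof(Service, exec_runtime),
        /* ... plus the per-type state machine callbacks ... */
};
#endif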
4381
4382 KillContext *unit_get_kill_context(Unit *u) {
4383 size_t offset;
4384 assert(u);
4385
4386 if (u->type < 0)
4387 return NULL;
4388
4389 offset = UNIT_VTABLE(u)->kill_context_offset;
4390 if (offset <= 0)
4391 return NULL;
4392
4393 return (KillContext*) ((uint8_t*) u + offset);
4394 }
4395
4396 CGroupContext *unit_get_cgroup_context(Unit *u) {
4397 size_t offset;
4398
4399 if (u->type < 0)
4400 return NULL;
4401
4402 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4403 if (offset <= 0)
4404 return NULL;
4405
4406 return (CGroupContext*) ((uint8_t*) u + offset);
4407 }
4408
4409 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4410 size_t offset;
4411
4412 if (u->type < 0)
4413 return NULL;
4414
4415 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4416 if (offset <= 0)
4417 return NULL;
4418
4419 return *(ExecRuntime**) ((uint8_t*) u + offset);
4420 }
4421
4422 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4423 assert(u);
4424
4425 if (UNIT_WRITE_FLAGS_NOOP(flags))
4426 return NULL;
4427
4428 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4429 return u->manager->lookup_paths.transient;
4430
4431 if (flags & UNIT_PERSISTENT)
4432 return u->manager->lookup_paths.persistent_control;
4433
4434 if (flags & UNIT_RUNTIME)
4435 return u->manager->lookup_paths.runtime_control;
4436
4437 return NULL;
4438 }
4439
4440 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4441 char *ret = NULL;
4442
4443 if (!s)
4444 return NULL;
4445
4446 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4447 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4448 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4449 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4450 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4451 * allocations. */
4452
4453 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4454 ret = specifier_escape(s);
4455 if (!ret)
4456 return NULL;
4457
4458 s = ret;
4459 }
4460
4461 if (flags & UNIT_ESCAPE_C) {
4462 char *a;
4463
4464 a = cescape(s);
4465 free(ret);
4466 if (!a)
4467 return NULL;
4468
4469 ret = a;
4470 }
4471
4472 if (buf) {
4473 *buf = ret;
4474 return ret ?: (char*) s;
4475 }
4476
4477 return ret ?: strdup(s);
4478 }
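
/* Usage sketch only: passing 'buf' lets a caller skip an allocation when no escaping
 * was needed, with any allocated copy owned by 'buf'. */
#if 0
_cleanup_free_ char *buf = NULL;
const char *escaped;

escaped = unit_escape_setting(setting, UNIT_ESCAPE_SPECIFIERS|UNIT_ESCAPE_C, &buf);
if (!escaped)
        return -ENOMEM;

/* use 'escaped'; 'buf' (possibly NULL) is freed automatically on scope exit */
#endif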
4479
4480 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4481 _cleanup_free_ char *result = NULL;
4482 size_t n = 0, allocated = 0;
4483 char **i;
4484
4485 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4486 * way suitable for ExecStart= stanzas */
4487
4488 STRV_FOREACH(i, l) {
4489 _cleanup_free_ char *buf = NULL;
4490 const char *p;
4491 size_t a;
4492 char *q;
4493
4494 p = unit_escape_setting(*i, flags, &buf);
4495 if (!p)
4496 return NULL;
4497
4498 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4499 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4500 return NULL;
4501
4502 q = result + n;
4503 if (n > 0)
4504 *(q++) = ' ';
4505
4506 *(q++) = '"';
4507 q = stpcpy(q, p);
4508 *(q++) = '"';
4509
4510 n += a;
4511 }
4512
4513 if (!GREEDY_REALLOC(result, allocated, n + 1))
4514 return NULL;
4515
4516 result[n] = 0;
4517
4518 return TAKE_PTR(result);
4519 }
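
/* Example, for illustration: with no escape flags set, l = { "echo", "hello world" }
 * is rendered as the single string
 *
 *     "echo" "hello world"
 *
 * i.e. each entry double-quoted and entries separated by single spaces, matching the
 * quoting expected in an ExecStart= assignment. */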
4520
4521 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4522 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4523 const char *dir, *wrapped;
4524 int r;
4525
4526 assert(u);
4527 assert(name);
4528 assert(data);
4529
4530 if (UNIT_WRITE_FLAGS_NOOP(flags))
4531 return 0;
4532
4533 data = unit_escape_setting(data, flags, &escaped);
4534 if (!data)
4535 return -ENOMEM;
4536
4537 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4538 * previous section header is the same */
4539
4540 if (flags & UNIT_PRIVATE) {
4541 if (!UNIT_VTABLE(u)->private_section)
4542 return -EINVAL;
4543
4544 if (!u->transient_file || u->last_section_private < 0)
4545 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4546 else if (u->last_section_private == 0)
4547 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4548 } else {
4549 if (!u->transient_file || u->last_section_private < 0)
4550 data = strjoina("[Unit]\n", data);
4551 else if (u->last_section_private > 0)
4552 data = strjoina("\n[Unit]\n", data);
4553 }
4554
4555 if (u->transient_file) {
4556 /* When this is a transient unit file still being created, then let's not create a new drop-in but instead
4557 * write to the transient unit file. */
4558 fputs(data, u->transient_file);
4559
4560 if (!endswith(data, "\n"))
4561 fputc('\n', u->transient_file);
4562
4563 /* Remember which section we wrote this entry to */
4564 u->last_section_private = !!(flags & UNIT_PRIVATE);
4565 return 0;
4566 }
4567
4568 dir = unit_drop_in_dir(u, flags);
4569 if (!dir)
4570 return -EINVAL;
4571
4572 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4573 "# or an equivalent operation. Do not edit.\n",
4574 data,
4575 "\n");
4576
4577 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4578 if (r < 0)
4579 return r;
4580
4581 (void) mkdir_p_label(p, 0755);
4582
4583 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4584 * recreate the cache after every drop-in we write. */
4585 if (u->manager->unit_path_cache) {
4586 r = set_put_strdup(u->manager->unit_path_cache, p);
4587 if (r < 0)
4588 return r;
4589 }
4590
4591 r = write_string_file_atomic_label(q, wrapped);
4592 if (r < 0)
4593 return r;
4594
4595 r = strv_push(&u->dropin_paths, q);
4596 if (r < 0)
4597 return r;
4598 q = NULL;
4599
4600 strv_uniq(u->dropin_paths);
4601
4602 u->dropin_mtime = now(CLOCK_REALTIME);
4603
4604 return 0;
4605 }
4606
4607 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4608 _cleanup_free_ char *p = NULL;
4609 va_list ap;
4610 int r;
4611
4612 assert(u);
4613 assert(name);
4614 assert(format);
4615
4616 if (UNIT_WRITE_FLAGS_NOOP(flags))
4617 return 0;
4618
4619 va_start(ap, format);
4620 r = vasprintf(&p, format, ap);
4621 va_end(ap);
4622
4623 if (r < 0)
4624 return -ENOMEM;
4625
4626 return unit_write_setting(u, flags, name, p);
4627 }
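
/* Usage sketch with an invented setting name, showing how a caller would persist a
 * formatted property through this wrapper: */
#if 0
r = unit_write_settingf(u, flags, "ExampleLimit",
                        "ExampleLimit=%" PRIu64, (uint64_t) value);
if (r < 0)
        return r;
#endif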
4628
4629 int unit_make_transient(Unit *u) {
4630 _cleanup_free_ char *path = NULL;
4631 FILE *f;
4632
4633 assert(u);
4634
4635 if (!UNIT_VTABLE(u)->can_transient)
4636 return -EOPNOTSUPP;
4637
4638 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4639
4640 path = path_join(u->manager->lookup_paths.transient, u->id);
4641 if (!path)
4642 return -ENOMEM;
4643
4644 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4645 * creating the transient unit, and is closed in unit_load() as soon as we start loading the file. */
4646
4647 RUN_WITH_UMASK(0022) {
4648 f = fopen(path, "we");
4649 if (!f)
4650 return -errno;
4651 }
4652
4653 safe_fclose(u->transient_file);
4654 u->transient_file = f;
4655
4656 free_and_replace(u->fragment_path, path);
4657
4658 u->source_path = mfree(u->source_path);
4659 u->dropin_paths = strv_free(u->dropin_paths);
4660 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4661
4662 u->load_state = UNIT_STUB;
4663 u->load_error = 0;
4664 u->transient = true;
4665
4666 unit_add_to_dbus_queue(u);
4667 unit_add_to_gc_queue(u);
4668
4669 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4670 u->transient_file);
4671
4672 return 0;
4673 }
4674
4675 static int log_kill(pid_t pid, int sig, void *userdata) {
4676 _cleanup_free_ char *comm = NULL;
4677
4678 (void) get_process_comm(pid, &comm);
4679
4680 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4681 * only, like for example systemd's own PAM stub process. */
4682 if (comm && comm[0] == '(')
4683 return 0;
4684
4685 log_unit_notice(userdata,
4686 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4687 pid,
4688 strna(comm),
4689 signal_to_string(sig));
4690
4691 return 1;
4692 }
4693
4694 static int operation_to_signal(KillContext *c, KillOperation k) {
4695 assert(c);
4696
4697 switch (k) {
4698
4699 case KILL_TERMINATE:
4700 case KILL_TERMINATE_AND_LOG:
4701 return c->kill_signal;
4702
4703 case KILL_KILL:
4704 return c->final_kill_signal;
4705
4706 case KILL_WATCHDOG:
4707 return c->watchdog_signal;
4708
4709 default:
4710 assert_not_reached("KillOperation unknown");
4711 }
4712 }
4713
4714 int unit_kill_context(
4715 Unit *u,
4716 KillContext *c,
4717 KillOperation k,
4718 pid_t main_pid,
4719 pid_t control_pid,
4720 bool main_pid_alien) {
4721
4722 bool wait_for_exit = false, send_sighup;
4723 cg_kill_log_func_t log_func = NULL;
4724 int sig, r;
4725
4726 assert(u);
4727 assert(c);
4728
4729 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4730 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4731
4732 if (c->kill_mode == KILL_NONE)
4733 return 0;
4734
4735 sig = operation_to_signal(c, k);
4736
4737 send_sighup =
4738 c->send_sighup &&
4739 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4740 sig != SIGHUP;
4741
4742 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4743 log_func = log_kill;
4744
4745 if (main_pid > 0) {
4746 if (log_func)
4747 log_func(main_pid, sig, u);
4748
4749 r = kill_and_sigcont(main_pid, sig);
4750 if (r < 0 && r != -ESRCH) {
4751 _cleanup_free_ char *comm = NULL;
4752 (void) get_process_comm(main_pid, &comm);
4753
4754 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4755 } else {
4756 if (!main_pid_alien)
4757 wait_for_exit = true;
4758
4759 if (r != -ESRCH && send_sighup)
4760 (void) kill(main_pid, SIGHUP);
4761 }
4762 }
4763
4764 if (control_pid > 0) {
4765 if (log_func)
4766 log_func(control_pid, sig, u);
4767
4768 r = kill_and_sigcont(control_pid, sig);
4769 if (r < 0 && r != -ESRCH) {
4770 _cleanup_free_ char *comm = NULL;
4771 (void) get_process_comm(control_pid, &comm);
4772
4773 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4774 } else {
4775 wait_for_exit = true;
4776
4777 if (r != -ESRCH && send_sighup)
4778 (void) kill(control_pid, SIGHUP);
4779 }
4780 }
4781
4782 if (u->cgroup_path &&
4783 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4784 _cleanup_set_free_ Set *pid_set = NULL;
4785
4786 /* Exclude the main/control pids from being killed via the cgroup */
4787 pid_set = unit_pid_set(main_pid, control_pid);
4788 if (!pid_set)
4789 return -ENOMEM;
4790
4791 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4792 sig,
4793 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4794 pid_set,
4795 log_func, u);
4796 if (r < 0) {
4797 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4798 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4799
4800 } else if (r > 0) {
4801
4802 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4803 * we are running in a container or if this is a delegation unit, simply because cgroup
4804 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4805 * of containers it can be confused easily by left-over directories in the cgroup — which
4806 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4807 * there we get proper events. Hence rely on them. */
4808
4809 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4810 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4811 wait_for_exit = true;
4812
4813 if (send_sighup) {
4814 set_free(pid_set);
4815
4816 pid_set = unit_pid_set(main_pid, control_pid);
4817 if (!pid_set)
4818 return -ENOMEM;
4819
4820 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4821 SIGHUP,
4822 CGROUP_IGNORE_SELF,
4823 pid_set,
4824 NULL, NULL);
4825 }
4826 }
4827 }
4828
4829 return wait_for_exit;
4830 }
4831
4832 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4833 _cleanup_free_ char *p = NULL;
4834 UnitDependencyInfo di;
4835 int r;
4836
4837 assert(u);
4838 assert(path);
4839
4840 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4841 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4842 * be). In addition, we build a prefix table for all possible prefixes so that newly appearing mount units can easily
4843 * determine which units to make themselves a dependency of. */
4844
4845 if (!path_is_absolute(path))
4846 return -EINVAL;
4847
4848 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4849 if (r < 0)
4850 return r;
4851
4852 p = strdup(path);
4853 if (!p)
4854 return -ENOMEM;
4855
4856 path = path_simplify(p, true);
4857
4858 if (!path_is_normalized(path))
4859 return -EPERM;
4860
4861 if (hashmap_contains(u->requires_mounts_for, path))
4862 return 0;
4863
4864 di = (UnitDependencyInfo) {
4865 .origin_mask = mask
4866 };
4867
4868 r = hashmap_put(u->requires_mounts_for, path, di.data);
4869 if (r < 0)
4870 return r;
4871 p = NULL;
4872
4873 char prefix[strlen(path) + 1];
4874 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4875 Set *x;
4876
4877 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4878 if (!x) {
4879 _cleanup_free_ char *q = NULL;
4880
4881 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4882 if (r < 0)
4883 return r;
4884
4885 q = strdup(prefix);
4886 if (!q)
4887 return -ENOMEM;
4888
4889 x = set_new(NULL);
4890 if (!x)
4891 return -ENOMEM;
4892
4893 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4894 if (r < 0) {
4895 set_free(x);
4896 return r;
4897 }
4898 q = NULL;
4899 }
4900
4901 r = set_put(x, u);
4902 if (r < 0)
4903 return r;
4904 }
4905
4906 return 0;
4907 }
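
/* For illustration: for the path "/var/lib/foo", PATH_FOREACH_PREFIX_MORE visits
 * every leading prefix up to and including the path itself, so this unit is
 * registered in the units_requiring_mounts_for set of each of them. A mount unit
 * appearing later can then find every unit that needs its mount point with a single
 * hashmap lookup. */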
4908
4909 int unit_setup_exec_runtime(Unit *u) {
4910 ExecRuntime **rt;
4911 size_t offset;
4912 Unit *other;
4913 Iterator i;
4914 void *v;
4915 int r;
4916
4917 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4918 assert(offset > 0);
4919
4920 /* Check whether there already is an ExecRuntime for this unit. */
4921 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4922 if (*rt)
4923 return 0;
4924
4925 /* Try to get it from somebody else */
4926 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4927 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4928 if (r == 1)
4929 return 1;
4930 }
4931
4932 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4933 }
4934
4935 int unit_setup_dynamic_creds(Unit *u) {
4936 ExecContext *ec;
4937 DynamicCreds *dcreds;
4938 size_t offset;
4939
4940 assert(u);
4941
4942 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4943 assert(offset > 0);
4944 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4945
4946 ec = unit_get_exec_context(u);
4947 assert(ec);
4948
4949 if (!ec->dynamic_user)
4950 return 0;
4951
4952 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4953 }
4954
4955 bool unit_type_supported(UnitType t) {
4956 if (_unlikely_(t < 0))
4957 return false;
4958 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4959 return false;
4960
4961 if (!unit_vtable[t]->supported)
4962 return true;
4963
4964 return unit_vtable[t]->supported();
4965 }
4966
4967 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4968 int r;
4969
4970 assert(u);
4971 assert(where);
4972
4973 r = dir_is_empty(where);
4974 if (r > 0 || r == -ENOTDIR)
4975 return;
4976 if (r < 0) {
4977 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4978 return;
4979 }
4980
4981 log_struct(LOG_NOTICE,
4982 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4983 LOG_UNIT_ID(u),
4984 LOG_UNIT_INVOCATION_ID(u),
4985 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4986 "WHERE=%s", where);
4987 }
4988
4989 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4990 _cleanup_free_ char *canonical_where = NULL;
4991 int r;
4992
4993 assert(u);
4994 assert(where);
4995
4996 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4997 if (r < 0) {
4998 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4999 return 0;
5000 }
5001
5002 /* We will happily ignore a trailing slash (or any redundant slashes) */
5003 if (path_equal(where, canonical_where))
5004 return 0;
5005
5006 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5007 log_struct(LOG_ERR,
5008 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5009 LOG_UNIT_ID(u),
5010 LOG_UNIT_INVOCATION_ID(u),
5011 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5012 "WHERE=%s", where);
5013
5014 return -ELOOP;
5015 }
5016
5017 bool unit_is_pristine(Unit *u) {
5018 assert(u);
5019
5020 /* Check if the unit already exists or is already around,
5021 * in a number of different ways. Note that to cater for unit
5022 * types such as slice, we are generally fine with units that
5023 * are marked UNIT_LOADED even though nothing was actually
5024 * loaded, as those unit types don't require a file on disk. */
5025
5026 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5027 !u->fragment_path &&
5028 !u->source_path &&
5029 strv_isempty(u->dropin_paths) &&
5030 !u->job &&
5031 !u->merged_into;
5032 }
5033
5034 pid_t unit_control_pid(Unit *u) {
5035 assert(u);
5036
5037 if (UNIT_VTABLE(u)->control_pid)
5038 return UNIT_VTABLE(u)->control_pid(u);
5039
5040 return 0;
5041 }
5042
5043 pid_t unit_main_pid(Unit *u) {
5044 assert(u);
5045
5046 if (UNIT_VTABLE(u)->main_pid)
5047 return UNIT_VTABLE(u)->main_pid(u);
5048
5049 return 0;
5050 }
5051
5052 static void unit_unref_uid_internal(
5053 Unit *u,
5054 uid_t *ref_uid,
5055 bool destroy_now,
5056 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5057
5058 assert(u);
5059 assert(ref_uid);
5060 assert(_manager_unref_uid);
5061
5062 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5063 * gid_t are actually the same type, with the same validity rules.
5064 *
5065 * Drops a reference to UID/GID from a unit. */
5066
5067 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5068 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5069
5070 if (!uid_is_valid(*ref_uid))
5071 return;
5072
5073 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5074 *ref_uid = UID_INVALID;
5075 }
5076
5077 void unit_unref_uid(Unit *u, bool destroy_now) {
5078 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5079 }
5080
5081 void unit_unref_gid(Unit *u, bool destroy_now) {
5082 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5083 }
5084
5085 static int unit_ref_uid_internal(
5086 Unit *u,
5087 uid_t *ref_uid,
5088 uid_t uid,
5089 bool clean_ipc,
5090 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5091
5092 int r;
5093
5094 assert(u);
5095 assert(ref_uid);
5096 assert(uid_is_valid(uid));
5097 assert(_manager_ref_uid);
5098
5099 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5100 * are actually the same type, and have the same validity rules.
5101 *
5102 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5103 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5104 * drops to zero. */
5105
5106 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5107 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5108
5109 if (*ref_uid == uid)
5110 return 0;
5111
5112 if (uid_is_valid(*ref_uid)) /* Already set? */
5113 return -EBUSY;
5114
5115 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5116 if (r < 0)
5117 return r;
5118
5119 *ref_uid = uid;
5120 return 1;
5121 }
5122
5123 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5124 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5125 }
5126
5127 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5128 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5129 }
5130
5131 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5132 int r = 0, q = 0;
5133
5134 assert(u);
5135
5136 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5137
5138 if (uid_is_valid(uid)) {
5139 r = unit_ref_uid(u, uid, clean_ipc);
5140 if (r < 0)
5141 return r;
5142 }
5143
5144 if (gid_is_valid(gid)) {
5145 q = unit_ref_gid(u, gid, clean_ipc);
5146 if (q < 0) {
5147 if (r > 0)
5148 unit_unref_uid(u, false);
5149
5150 return q;
5151 }
5152 }
5153
5154 return r > 0 || q > 0;
5155 }
5156
5157 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5158 ExecContext *c;
5159 int r;
5160
5161 assert(u);
5162
5163 c = unit_get_exec_context(u);
5164
5165 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5166 if (r < 0)
5167 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5168
5169 return r;
5170 }
5171
5172 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5173 assert(u);
5174
5175 unit_unref_uid(u, destroy_now);
5176 unit_unref_gid(u, destroy_now);
5177 }
5178
5179 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5180 int r;
5181
5182 assert(u);
5183
5184 /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user/group name
5185 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5186 * objects when no service references the UID/GID anymore. */
5187
5188 r = unit_ref_uid_gid(u, uid, gid);
5189 if (r > 0)
5190 unit_add_to_dbus_queue(u);
5191 }
5192
5193 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5194 int r;
5195
5196 assert(u);
5197
5198 /* Set the invocation ID for this unit. If this fails, we don't roll back, but reset the whole thing. */
5199
5200 if (sd_id128_equal(u->invocation_id, id))
5201 return 0;
5202
5203 if (!sd_id128_is_null(u->invocation_id))
5204 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5205
5206 if (sd_id128_is_null(id)) {
5207 r = 0;
5208 goto reset;
5209 }
5210
5211 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5212 if (r < 0)
5213 goto reset;
5214
5215 u->invocation_id = id;
5216 sd_id128_to_string(id, u->invocation_id_string);
5217
5218 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5219 if (r < 0)
5220 goto reset;
5221
5222 return 0;
5223
5224 reset:
5225 u->invocation_id = SD_ID128_NULL;
5226 u->invocation_id_string[0] = 0;
5227 return r;
5228 }
5229
5230 int unit_acquire_invocation_id(Unit *u) {
5231 sd_id128_t id;
5232 int r;
5233
5234 assert(u);
5235
5236 r = sd_id128_randomize(&id);
5237 if (r < 0)
5238 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5239
5240 r = unit_set_invocation_id(u, id);
5241 if (r < 0)
5242 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5243
5244 unit_add_to_dbus_queue(u);
5245 return 0;
5246 }
5247
5248 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5249 int r;
5250
5251 assert(u);
5252 assert(p);
5253
5254 /* Copy parameters from manager */
5255 r = manager_get_effective_environment(u->manager, &p->environment);
5256 if (r < 0)
5257 return r;
5258
5259 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5260 p->cgroup_supported = u->manager->cgroup_supported;
5261 p->prefix = u->manager->prefix;
5262 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5263
5264 /* Copy parameters from unit */
5265 p->cgroup_path = u->cgroup_path;
5266 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5267
5268 return 0;
5269 }
5270
5271 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5272 int r;
5273
5274 assert(u);
5275 assert(ret);
5276
5277 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5278 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5279
5280 (void) unit_realize_cgroup(u);
5281
5282 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5283 if (r != 0)
5284 return r;
5285
5286 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5287 (void) ignore_signals(SIGPIPE, -1);
5288
5289 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5290
5291 if (u->cgroup_path) {
5292 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5293 if (r < 0) {
5294 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5295 _exit(EXIT_CGROUP);
5296 }
5297 }
5298
5299 return 0;
5300 }
5301
5302 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5303 pid_t pid;
5304 int r;
5305
5306 assert(u);
5307 assert(ret_pid);
5308
5309 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5310 if (r < 0)
5311 return r;
5312 if (r == 0) {
5313 int ret = EXIT_SUCCESS;
5314 char **i;
5315
5316 STRV_FOREACH(i, paths) {
5317 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5318 if (r < 0) {
5319 log_error_errno(r, "Failed to remove '%s': %m", *i);
5320 ret = EXIT_FAILURE;
5321 }
5322 }
5323
5324 _exit(ret);
5325 }
5326
5327 r = unit_watch_pid(u, pid, true);
5328 if (r < 0)
5329 return r;
5330
5331 *ret_pid = pid;
5332 return 0;
5333 }
5334
5335 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5336 assert(u);
5337 assert(d >= 0);
5338 assert(d < _UNIT_DEPENDENCY_MAX);
5339 assert(other);
5340
5341 if (di.origin_mask == 0 && di.destination_mask == 0) {
5342 /* No bit set anymore, let's drop the whole entry */
5343 assert_se(hashmap_remove(u->dependencies[d], other));
5344 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5345 } else
5346 /* Mask was reduced, let's update the entry */
5347 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5348 }
5349
5350 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5351 UnitDependency d;
5352
5353 assert(u);
5354
5355 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5356
5357 if (mask == 0)
5358 return;
5359
5360 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5361 bool done;
5362
5363 do {
5364 UnitDependencyInfo di;
5365 Unit *other;
5366 Iterator i;
5367
5368 done = true;
5369
5370 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5371 UnitDependency q;
5372
5373 if ((di.origin_mask & ~mask) == di.origin_mask)
5374 continue;
5375 di.origin_mask &= ~mask;
5376 unit_update_dependency_mask(u, d, other, di);
5377
5378 /* We updated the dependency from our unit to the other unit now. But most dependencies
5379 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5380 * all dependency types on the other unit and delete all those which point to us and
5381 * have the right mask set. */
5382
5383 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5384 UnitDependencyInfo dj;
5385
5386 dj.data = hashmap_get(other->dependencies[q], u);
5387 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5388 continue;
5389 dj.destination_mask &= ~mask;
5390
5391 unit_update_dependency_mask(other, q, u, dj);
5392 }
5393
5394 unit_add_to_gc_queue(other);
5395
5396 done = false;
5397 break;
5398 }
5399
5400 } while (!done);
5401 }
5402 }
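
/* A worked example of the pruning above, with assumed mask values: suppose u gained
 * Wants=other once from a unit file and once from a udev rule, so the forward entry
 * carries origin_mask = UNIT_DEPENDENCY_FILE|UNIT_DEPENDENCY_UDEV. Calling
 * unit_remove_dependencies(u, UNIT_DEPENDENCY_UDEV) reduces the mask to
 * UNIT_DEPENDENCY_FILE and the entry survives; removing UNIT_DEPENDENCY_FILE as well
 * empties the mask, dropping both the forward entry and the mirrored reverse entry
 * (whose destination_mask tracks us) on 'other'. */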
5403
5404 static int unit_export_invocation_id(Unit *u) {
5405 const char *p;
5406 int r;
5407
5408 assert(u);
5409
5410 if (u->exported_invocation_id)
5411 return 0;
5412
5413 if (sd_id128_is_null(u->invocation_id))
5414 return 0;
5415
5416 p = strjoina("/run/systemd/units/invocation:", u->id);
5417 r = symlink_atomic(u->invocation_id_string, p);
5418 if (r < 0)
5419 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5420
5421 u->exported_invocation_id = true;
5422 return 0;
5423 }
5424
5425 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5426 const char *p;
5427 char buf[2];
5428 int r;
5429
5430 assert(u);
5431 assert(c);
5432
5433 if (u->exported_log_level_max)
5434 return 0;
5435
5436 if (c->log_level_max < 0)
5437 return 0;
5438
5439 assert(c->log_level_max <= 7);
5440
5441 buf[0] = '0' + c->log_level_max;
5442 buf[1] = 0;
5443
5444 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5445 r = symlink_atomic(buf, p);
5446 if (r < 0)
5447 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5448
5449 u->exported_log_level_max = true;
5450 return 0;
5451 }
5452
5453 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5454 _cleanup_close_ int fd = -1;
5455 struct iovec *iovec;
5456 const char *p;
5457 char *pattern;
5458 le64_t *sizes;
5459 ssize_t n;
5460 size_t i;
5461 int r;
5462
5463 if (u->exported_log_extra_fields)
5464 return 0;
5465
5466 if (c->n_log_extra_fields <= 0)
5467 return 0;
5468
5469 sizes = newa(le64_t, c->n_log_extra_fields);
5470 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5471
5472 for (i = 0; i < c->n_log_extra_fields; i++) {
5473 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5474
5475 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5476 iovec[i*2+1] = c->log_extra_fields[i];
5477 }
5478
5479 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5480 pattern = strjoina(p, ".XXXXXX");
5481
5482 fd = mkostemp_safe(pattern);
5483 if (fd < 0)
5484 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5485
5486 n = writev(fd, iovec, c->n_log_extra_fields*2);
5487 if (n < 0) {
5488 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5489 goto fail;
5490 }
5491
5492 (void) fchmod(fd, 0644);
5493
5494 if (rename(pattern, p) < 0) {
5495 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5496 goto fail;
5497 }
5498
5499 u->exported_log_extra_fields = true;
5500 return 0;
5501
5502 fail:
5503 (void) unlink(pattern);
5504 return r;
5505 }
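
/* Spelling out the format written above: the file is a sequence of (little-endian
 * 64-bit length, raw "FIELD=value" bytes) pairs, one per extra field. A hypothetical
 * reader, assuming an open readable 'fd', would look roughly like this: */
#if 0
for (;;) {
        le64_t l;
        ssize_t n;

        n = read(fd, &l, sizeof(l));
        if (n == 0)
                break;                          /* clean EOF */
        if (n != sizeof(l))
                return -EIO;                    /* truncated length word */

        uint64_t sz = le64toh(l);
        _cleanup_free_ char *field = malloc(sz + 1);
        if (!field)
                return -ENOMEM;
        if (read(fd, field, sz) != (ssize_t) sz)
                return -EIO;                    /* truncated field data */
        field[sz] = 0;

        /* 'field' now holds one NUL-terminated "FIELD=value" entry */
}
#endif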
5506
5507 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5508 _cleanup_free_ char *buf = NULL;
5509 const char *p;
5510 int r;
5511
5512 assert(u);
5513 assert(c);
5514
5515 if (u->exported_log_ratelimit_interval)
5516 return 0;
5517
5518 if (c->log_ratelimit_interval_usec == 0)
5519 return 0;
5520
5521 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5522
5523 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5524 return log_oom();
5525
5526 r = symlink_atomic(buf, p);
5527 if (r < 0)
5528 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5529
5530 u->exported_log_ratelimit_interval = true;
5531 return 0;
5532 }
5533
5534 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5535 _cleanup_free_ char *buf = NULL;
5536 const char *p;
5537 int r;
5538
5539 assert(u);
5540 assert(c);
5541
5542 if (u->exported_log_ratelimit_burst)
5543 return 0;
5544
5545 if (c->log_ratelimit_burst == 0)
5546 return 0;
5547
5548 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5549
5550 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5551 return log_oom();
5552
5553 r = symlink_atomic(buf, p);
5554 if (r < 0)
5555 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5556
5557 u->exported_log_ratelimit_burst = true;
5558 return 0;
5559 }
5560
5561 void unit_export_state_files(Unit *u) {
5562 const ExecContext *c;
5563
5564 assert(u);
5565
5566 if (!u->id)
5567 return;
5568
5569 if (!MANAGER_IS_SYSTEM(u->manager))
5570 return;
5571
5572 if (MANAGER_IS_TEST_RUN(u->manager))
5573 return;
5574
5575 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5576 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5577 * the IPC system itself and PID 1 also log to the journal.
5578 *
5579 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as an
5580 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5581 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5582 * namespace at least.
5583 *
5584 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5585 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5586 * them with one. */
5587
5588 (void) unit_export_invocation_id(u);
5589
5590 c = unit_get_exec_context(u);
5591 if (c) {
5592 (void) unit_export_log_level_max(u, c);
5593 (void) unit_export_log_extra_fields(u, c);
5594 (void) unit_export_log_ratelimit_interval(u, c);
5595 (void) unit_export_log_ratelimit_burst(u, c);
5596 }
5597 }
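
/* For illustration, after this call /run/systemd/units/ may contain entries like the
 * following for a unit foo.service (values invented):
 *
 *     invocation:foo.service           -> symlink, target is the invocation ID string
 *     log-level-max:foo.service        -> symlink, target is a single digit, e.g. "4"
 *     log-rate-limit-burst:foo.service -> symlink, target e.g. "1000"
 *     log-extra-fields:foo.service        regular file, binary format described above
 */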
5598
5599 void unit_unlink_state_files(Unit *u) {
5600 const char *p;
5601
5602 assert(u);
5603
5604 if (!u->id)
5605 return;
5606
5607 if (!MANAGER_IS_SYSTEM(u->manager))
5608 return;
5609
5610 /* Undoes the effect of unit_export_state() */
5611
5612 if (u->exported_invocation_id) {
5613 p = strjoina("/run/systemd/units/invocation:", u->id);
5614 (void) unlink(p);
5615
5616 u->exported_invocation_id = false;
5617 }
5618
5619 if (u->exported_log_level_max) {
5620 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5621 (void) unlink(p);
5622
5623 u->exported_log_level_max = false;
5624 }
5625
5626 if (u->exported_log_extra_fields) {
5627 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5628 (void) unlink(p);
5629
5630 u->exported_log_extra_fields = false;
5631 }
5632
5633 if (u->exported_log_ratelimit_interval) {
5634 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5635 (void) unlink(p);
5636
5637 u->exported_log_ratelimit_interval = false;
5638 }
5639
5640 if (u->exported_log_ratelimit_burst) {
5641 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5642 (void) unlink(p);
5643
5644 u->exported_log_ratelimit_burst = false;
5645 }
5646 }
5647
5648 int unit_prepare_exec(Unit *u) {
5649 int r;
5650
5651 assert(u);
5652
5653 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5654 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5655 r = bpf_firewall_load_custom(u);
5656 if (r < 0)
5657 return r;
5658
5659 /* Prepares everything so that we can fork off a process for this unit */
5660
5661 (void) unit_realize_cgroup(u);
5662
5663 if (u->reset_accounting) {
5664 (void) unit_reset_accounting(u);
5665 u->reset_accounting = false;
5666 }
5667
5668 unit_export_state_files(u);
5669
5670 r = unit_setup_exec_runtime(u);
5671 if (r < 0)
5672 return r;
5673
5674 r = unit_setup_dynamic_creds(u);
5675 if (r < 0)
5676 return r;
5677
5678 return 0;
5679 }
5680
5681 static int log_leftover(pid_t pid, int sig, void *userdata) {
5682 _cleanup_free_ char *comm = NULL;
5683
5684 (void) get_process_comm(pid, &comm);
5685
5686 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5687 return 0;
5688
5689 log_unit_warning(userdata,
5690 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5691 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5692 pid, strna(comm));
5693
5694 return 1;
5695 }
5696
5697 int unit_warn_leftover_processes(Unit *u) {
5698 assert(u);
5699
5700 (void) unit_pick_cgroup_path(u);
5701
5702 if (!u->cgroup_path)
5703 return 0;
5704
5705 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5706 }
5707
5708 bool unit_needs_console(Unit *u) {
5709 ExecContext *ec;
5710 UnitActiveState state;
5711
5712 assert(u);
5713
5714 state = unit_active_state(u);
5715
5716 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5717 return false;
5718
5719 if (UNIT_VTABLE(u)->needs_console)
5720 return UNIT_VTABLE(u)->needs_console(u);
5721
5722 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5723 ec = unit_get_exec_context(u);
5724 if (!ec)
5725 return false;
5726
5727 return exec_context_may_touch_console(ec);
5728 }
5729
5730 const char *unit_label_path(Unit *u) {
5731 const char *p;
5732
5733 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5734 * when validating access checks. */
5735
5736 p = u->source_path ?: u->fragment_path;
5737 if (!p)
5738 return NULL;
5739
5740 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5741 if (path_equal(p, "/dev/null"))
5742 return NULL;
5743
5744 return p;
5745 }
5746
5747 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5748 int r;
5749
5750 assert(u);
5751
5752 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5753 * and not a kernel thread either */
5754
5755 /* First, a simple range check */
5756 if (!pid_is_valid(pid))
5757 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5758
5759 /* Some extra safety check */
5760 if (pid == 1 || pid == getpid_cached())
5761 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5762
5763 /* Don't even begin to bother with kernel threads */
5764 r = is_kernel_thread(pid);
5765 if (r == -ESRCH)
5766 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5767 if (r < 0)
5768 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5769 if (r > 0)
5770 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5771
5772 return 0;
5773 }
5774
5775 void unit_log_success(Unit *u) {
5776 assert(u);
5777
5778 log_struct(LOG_INFO,
5779 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5780 LOG_UNIT_ID(u),
5781 LOG_UNIT_INVOCATION_ID(u),
5782 LOG_UNIT_MESSAGE(u, "Succeeded."));
5783 }
5784
5785 void unit_log_failure(Unit *u, const char *result) {
5786 assert(u);
5787 assert(result);
5788
5789 log_struct(LOG_WARNING,
5790 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5791 LOG_UNIT_ID(u),
5792 LOG_UNIT_INVOCATION_ID(u),
5793 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5794 "UNIT_RESULT=%s", result);
5795 }
5796
5797 void unit_log_skip(Unit *u, const char *result) {
5798 assert(u);
5799 assert(result);
5800
5801 log_struct(LOG_INFO,
5802 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5803 LOG_UNIT_ID(u),
5804 LOG_UNIT_INVOCATION_ID(u),
5805 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5806 "UNIT_RESULT=%s", result);
5807 }
5808
5809 void unit_log_process_exit(
5810 Unit *u,
5811 const char *kind,
5812 const char *command,
5813 bool success,
5814 int code,
5815 int status) {
5816
5817 int level;
5818
5819 assert(u);
5820 assert(kind);
5821
5822 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5823 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5824 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5825 * WARNING. */
5826 if (success)
5827 level = LOG_DEBUG;
5828 else if (code == CLD_EXITED)
5829 level = LOG_NOTICE;
5830 else
5831 level = LOG_WARNING;
5832
5833 log_struct(level,
5834 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5835 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5836 kind,
5837 sigchld_code_to_string(code), status,
5838 strna(code == CLD_EXITED
5839 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5840 : signal_to_string(status))),
5841 "EXIT_CODE=%s", sigchld_code_to_string(code),
5842 "EXIT_STATUS=%i", status,
5843 "COMMAND=%s", strna(command),
5844 LOG_UNIT_ID(u),
5845 LOG_UNIT_INVOCATION_ID(u));
5846 }
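
/* Example of the human-readable message this produces, with invented values:
 * "Main process exited, code=exited, status=1/FAILURE" for a plain non-zero exit, or
 * "Main process exited, code=killed, status=9/KILL" when the process died from a
 * signal; the 'kind' argument supplies the "Main process" part. */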
5847
5848 int unit_exit_status(Unit *u) {
5849 assert(u);
5850
5851 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5852 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5853 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5854 * service process has exited abnormally (signal/coredump). */
5855
5856 if (!UNIT_VTABLE(u)->exit_status)
5857 return -EOPNOTSUPP;
5858
5859 return UNIT_VTABLE(u)->exit_status(u);
5860 }
5861
5862 int unit_failure_action_exit_status(Unit *u) {
5863 int r;
5864
5865 assert(u);
5866
5867 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5868
5869 if (u->failure_action_exit_status >= 0)
5870 return u->failure_action_exit_status;
5871
5872 r = unit_exit_status(u);
5873 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5874 return 255;
5875
5876 return r;
5877 }
5878
5879 int unit_success_action_exit_status(Unit *u) {
5880 int r;
5881
5882 assert(u);
5883
5884 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5885
5886 if (u->success_action_exit_status >= 0)
5887 return u->success_action_exit_status;
5888
5889 r = unit_exit_status(u);
5890 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5891 return 255;
5892
5893 return r;
5894 }
5895
5896 int unit_test_trigger_loaded(Unit *u) {
5897 Unit *trigger;
5898
5899 /* Tests whether the unit to trigger is loaded */
5900
5901 trigger = UNIT_TRIGGER(u);
5902 if (!trigger)
5903 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5904 "Refusing to start, no unit to trigger.");
5905 if (trigger->load_state != UNIT_LOADED)
5906 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5907 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5908
5909 return 0;
5910 }
5911
5912 void unit_destroy_runtime_directory(Unit *u, const ExecContext *context) {
5913 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
5914 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
5915 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
5916 }
5917
5918 int unit_clean(Unit *u, ExecCleanMask mask) {
5919 UnitActiveState state;
5920
5921 assert(u);
5922
5923 /* Special return values:
5924 *
5925 * -EOPNOTSUPP → cleaning not supported for this unit type
5926 * -EUNATCH → cleaning not defined for this resource type
5927 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5928 * a job queued or similar
5929 */
5930
5931 if (!UNIT_VTABLE(u)->clean)
5932 return -EOPNOTSUPP;
5933
5934 if (mask == 0)
5935 return -EUNATCH;
5936
5937 if (u->load_state != UNIT_LOADED)
5938 return -EBUSY;
5939
5940 if (u->job)
5941 return -EBUSY;
5942
5943 state = unit_active_state(u);
5944 if (!IN_SET(state, UNIT_INACTIVE))
5945 return -EBUSY;
5946
5947 return UNIT_VTABLE(u)->clean(u, mask);
5948 }
5949
5950 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5951 assert(u);
5952
5953 if (!UNIT_VTABLE(u)->clean ||
5954 u->load_state != UNIT_LOADED) {
5955 *ret = 0;
5956 return 0;
5957 }
5958
5959 /* When the clean() method is set, can_clean() really should be set too */
5960 assert(UNIT_VTABLE(u)->can_clean);
5961
5962 return UNIT_VTABLE(u)->can_clean(u, ret);
5963 }
5964
5965 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5966 [COLLECT_INACTIVE] = "inactive",
5967 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5968 };
5969
5970 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
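
/* A rough sketch of what DEFINE_STRING_TABLE_LOOKUP() expands to here; the
 * authoritative definition lives in string-table.h. Approximately: */
#if 0
const char *collect_mode_to_string(CollectMode i) {
        if (i < 0 || i >= (CollectMode) ELEMENTSOF(collect_mode_table))
                return NULL;

        return collect_mode_table[i];
}

CollectMode collect_mode_from_string(const char *s) {
        return (CollectMode) string_table_lookup(collect_mode_table, ELEMENTSOF(collect_mode_table), s);
}
#endif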