]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/core/unit.c
Merge pull request #13207 from keszybz/symbolic-exit-code-names
[thirdparty/systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <string.h>
6 #include <sys/prctl.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9
10 #include "sd-id128.h"
11 #include "sd-messages.h"
12
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bpf-firewall.h"
16 #include "bus-common-errors.h"
17 #include "bus-util.h"
18 #include "cgroup-util.h"
19 #include "dbus-unit.h"
20 #include "dbus.h"
21 #include "dropin.h"
22 #include "escape.h"
23 #include "execute.h"
24 #include "fd-util.h"
25 #include "fileio-label.h"
26 #include "fileio.h"
27 #include "format-util.h"
28 #include "fs-util.h"
29 #include "id128-util.h"
30 #include "io-util.h"
31 #include "install.h"
32 #include "load-dropin.h"
33 #include "load-fragment.h"
34 #include "log.h"
35 #include "macro.h"
36 #include "missing.h"
37 #include "mkdir.h"
38 #include "parse-util.h"
39 #include "path-util.h"
40 #include "process-util.h"
41 #include "serialize.h"
42 #include "set.h"
43 #include "signal-util.h"
44 #include "sparse-endian.h"
45 #include "special.h"
46 #include "specifier.h"
47 #include "stat-util.h"
48 #include "stdio-util.h"
49 #include "string-table.h"
50 #include "string-util.h"
51 #include "strv.h"
52 #include "terminal-util.h"
53 #include "tmpfile-util.h"
54 #include "umask-util.h"
55 #include "unit-name.h"
56 #include "unit.h"
57 #include "user-util.h"
58 #include "virt.h"
59
60 /* Thresholds for logging at INFO level about resource consumption */
61 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
62 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
63 #define MENTIONWORTHY_IP_BYTES (0ULL)
64
65 /* Thresholds for logging at INFO level about resource consumption */
66 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
67 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
68 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
69
70 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
71 [UNIT_SERVICE] = &service_vtable,
72 [UNIT_SOCKET] = &socket_vtable,
73 [UNIT_TARGET] = &target_vtable,
74 [UNIT_DEVICE] = &device_vtable,
75 [UNIT_MOUNT] = &mount_vtable,
76 [UNIT_AUTOMOUNT] = &automount_vtable,
77 [UNIT_SWAP] = &swap_vtable,
78 [UNIT_TIMER] = &timer_vtable,
79 [UNIT_PATH] = &path_vtable,
80 [UNIT_SLICE] = &slice_vtable,
81 [UNIT_SCOPE] = &scope_vtable,
82 };
83
84 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
85
86 Unit *unit_new(Manager *m, size_t size) {
87 Unit *u;
88
89 assert(m);
90 assert(size >= sizeof(Unit));
91
92 u = malloc0(size);
93 if (!u)
94 return NULL;
95
96 u->names = set_new(&string_hash_ops);
97 if (!u->names)
98 return mfree(u);
99
100 u->manager = m;
101 u->type = _UNIT_TYPE_INVALID;
102 u->default_dependencies = true;
103 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
104 u->unit_file_preset = -1;
105 u->on_failure_job_mode = JOB_REPLACE;
106 u->cgroup_control_inotify_wd = -1;
107 u->cgroup_memory_inotify_wd = -1;
108 u->job_timeout = USEC_INFINITY;
109 u->job_running_timeout = USEC_INFINITY;
110 u->ref_uid = UID_INVALID;
111 u->ref_gid = GID_INVALID;
112 u->cpu_usage_last = NSEC_INFINITY;
113 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
114 u->failure_action_exit_status = u->success_action_exit_status = -1;
115
116 u->ip_accounting_ingress_map_fd = -1;
117 u->ip_accounting_egress_map_fd = -1;
118 u->ipv4_allow_map_fd = -1;
119 u->ipv6_allow_map_fd = -1;
120 u->ipv4_deny_map_fd = -1;
121 u->ipv6_deny_map_fd = -1;
122
123 u->last_section_private = -1;
124
125 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
126 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
127
128 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
129 u->io_accounting_last[i] = UINT64_MAX;
130
131 return u;
132 }
133
134 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 _cleanup_(unit_freep) Unit *u = NULL;
136 int r;
137
138 u = unit_new(m, size);
139 if (!u)
140 return -ENOMEM;
141
142 r = unit_add_name(u, name);
143 if (r < 0)
144 return r;
145
146 *ret = TAKE_PTR(u);
147
148 return r;
149 }
150
151 bool unit_has_name(const Unit *u, const char *name) {
152 assert(u);
153 assert(name);
154
155 return set_contains(u->names, (char*) name);
156 }
157
158 static void unit_init(Unit *u) {
159 CGroupContext *cc;
160 ExecContext *ec;
161 KillContext *kc;
162
163 assert(u);
164 assert(u->manager);
165 assert(u->type >= 0);
166
167 cc = unit_get_cgroup_context(u);
168 if (cc) {
169 cgroup_context_init(cc);
170
171 /* Copy in the manager defaults into the cgroup
172 * context, _before_ the rest of the settings have
173 * been initialized */
174
175 cc->cpu_accounting = u->manager->default_cpu_accounting;
176 cc->io_accounting = u->manager->default_io_accounting;
177 cc->blockio_accounting = u->manager->default_blockio_accounting;
178 cc->memory_accounting = u->manager->default_memory_accounting;
179 cc->tasks_accounting = u->manager->default_tasks_accounting;
180 cc->ip_accounting = u->manager->default_ip_accounting;
181
182 if (u->type != UNIT_SLICE)
183 cc->tasks_max = u->manager->default_tasks_max;
184 }
185
186 ec = unit_get_exec_context(u);
187 if (ec) {
188 exec_context_init(ec);
189
190 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
191 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
192 }
193
194 kc = unit_get_kill_context(u);
195 if (kc)
196 kill_context_init(kc);
197
198 if (UNIT_VTABLE(u)->init)
199 UNIT_VTABLE(u)->init(u);
200 }
201
202 int unit_add_name(Unit *u, const char *text) {
203 _cleanup_free_ char *s = NULL, *i = NULL;
204 UnitType t;
205 int r;
206
207 assert(u);
208 assert(text);
209
210 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
211
212 if (!u->instance)
213 return -EINVAL;
214
215 r = unit_name_replace_instance(text, u->instance, &s);
216 if (r < 0)
217 return r;
218 } else {
219 s = strdup(text);
220 if (!s)
221 return -ENOMEM;
222 }
223
224 if (set_contains(u->names, s))
225 return 0;
226 if (hashmap_contains(u->manager->units, s))
227 return -EEXIST;
228
229 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
230 return -EINVAL;
231
232 t = unit_name_to_type(s);
233 if (t < 0)
234 return -EINVAL;
235
236 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
237 return -EINVAL;
238
239 r = unit_name_to_instance(s, &i);
240 if (r < 0)
241 return r;
242
243 if (i && !unit_type_may_template(t))
244 return -EINVAL;
245
246 /* Ensure that this unit is either instanced or not instanced,
247 * but not both. Note that we do allow names with different
248 * instance names however! */
249 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
250 return -EINVAL;
251
252 if (!unit_type_may_alias(t) && !set_isempty(u->names))
253 return -EEXIST;
254
255 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
256 return -E2BIG;
257
258 r = set_put(u->names, s);
259 if (r < 0)
260 return r;
261 assert(r > 0);
262
263 r = hashmap_put(u->manager->units, s, u);
264 if (r < 0) {
265 (void) set_remove(u->names, s);
266 return r;
267 }
268
269 if (u->type == _UNIT_TYPE_INVALID) {
270 u->type = t;
271 u->id = s;
272 u->instance = TAKE_PTR(i);
273
274 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
275
276 unit_init(u);
277 }
278
279 s = NULL;
280
281 unit_add_to_dbus_queue(u);
282 return 0;
283 }
284
285 int unit_choose_id(Unit *u, const char *name) {
286 _cleanup_free_ char *t = NULL;
287 char *s, *i;
288 int r;
289
290 assert(u);
291 assert(name);
292
293 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
294
295 if (!u->instance)
296 return -EINVAL;
297
298 r = unit_name_replace_instance(name, u->instance, &t);
299 if (r < 0)
300 return r;
301
302 name = t;
303 }
304
305 /* Selects one of the names of this unit as the id */
306 s = set_get(u->names, (char*) name);
307 if (!s)
308 return -ENOENT;
309
310 /* Determine the new instance from the new id */
311 r = unit_name_to_instance(s, &i);
312 if (r < 0)
313 return r;
314
315 u->id = s;
316
317 free(u->instance);
318 u->instance = i;
319
320 unit_add_to_dbus_queue(u);
321
322 return 0;
323 }
324
325 int unit_set_description(Unit *u, const char *description) {
326 int r;
327
328 assert(u);
329
330 r = free_and_strdup(&u->description, empty_to_null(description));
331 if (r < 0)
332 return r;
333 if (r > 0)
334 unit_add_to_dbus_queue(u);
335
336 return 0;
337 }
338
339 bool unit_may_gc(Unit *u) {
340 UnitActiveState state;
341 int r;
342
343 assert(u);
344
345 /* Checks whether the unit is ready to be unloaded for garbage collection.
346 * Returns true when the unit may be collected, and false if there's some
347 * reason to keep it loaded.
348 *
349 * References from other units are *not* checked here. Instead, this is done
350 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
351 */
352
353 if (u->job)
354 return false;
355
356 if (u->nop_job)
357 return false;
358
359 state = unit_active_state(u);
360
361 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
362 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
363 UNIT_VTABLE(u)->release_resources)
364 UNIT_VTABLE(u)->release_resources(u);
365
366 if (u->perpetual)
367 return false;
368
369 if (sd_bus_track_count(u->bus_track) > 0)
370 return false;
371
372 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
373 switch (u->collect_mode) {
374
375 case COLLECT_INACTIVE:
376 if (state != UNIT_INACTIVE)
377 return false;
378
379 break;
380
381 case COLLECT_INACTIVE_OR_FAILED:
382 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
383 return false;
384
385 break;
386
387 default:
388 assert_not_reached("Unknown garbage collection mode");
389 }
390
391 if (u->cgroup_path) {
392 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
393 * around. Units with active processes should never be collected. */
394
395 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
396 if (r < 0)
397 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
398 if (r <= 0)
399 return false;
400 }
401
402 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
403 return false;
404
405 return true;
406 }
407
408 void unit_add_to_load_queue(Unit *u) {
409 assert(u);
410 assert(u->type != _UNIT_TYPE_INVALID);
411
412 if (u->load_state != UNIT_STUB || u->in_load_queue)
413 return;
414
415 LIST_PREPEND(load_queue, u->manager->load_queue, u);
416 u->in_load_queue = true;
417 }
418
419 void unit_add_to_cleanup_queue(Unit *u) {
420 assert(u);
421
422 if (u->in_cleanup_queue)
423 return;
424
425 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
426 u->in_cleanup_queue = true;
427 }
428
429 void unit_add_to_gc_queue(Unit *u) {
430 assert(u);
431
432 if (u->in_gc_queue || u->in_cleanup_queue)
433 return;
434
435 if (!unit_may_gc(u))
436 return;
437
438 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
439 u->in_gc_queue = true;
440 }
441
442 void unit_add_to_dbus_queue(Unit *u) {
443 assert(u);
444 assert(u->type != _UNIT_TYPE_INVALID);
445
446 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
447 return;
448
449 /* Shortcut things if nobody cares */
450 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
451 sd_bus_track_count(u->bus_track) <= 0 &&
452 set_isempty(u->manager->private_buses)) {
453 u->sent_dbus_new_signal = true;
454 return;
455 }
456
457 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
458 u->in_dbus_queue = true;
459 }
460
461 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
462 assert(u);
463
464 if (u->in_stop_when_unneeded_queue)
465 return;
466
467 if (!u->stop_when_unneeded)
468 return;
469
470 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
471 return;
472
473 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
474 u->in_stop_when_unneeded_queue = true;
475 }
476
477 static void bidi_set_free(Unit *u, Hashmap *h) {
478 Unit *other;
479 Iterator i;
480 void *v;
481
482 assert(u);
483
484 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
485
486 HASHMAP_FOREACH_KEY(v, other, h, i) {
487 UnitDependency d;
488
489 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
490 hashmap_remove(other->dependencies[d], u);
491
492 unit_add_to_gc_queue(other);
493 }
494
495 hashmap_free(h);
496 }
497
498 static void unit_remove_transient(Unit *u) {
499 char **i;
500
501 assert(u);
502
503 if (!u->transient)
504 return;
505
506 if (u->fragment_path)
507 (void) unlink(u->fragment_path);
508
509 STRV_FOREACH(i, u->dropin_paths) {
510 _cleanup_free_ char *p = NULL, *pp = NULL;
511
512 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
513 if (!p)
514 continue;
515
516 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
517 if (!pp)
518 continue;
519
520 /* Only drop transient drop-ins */
521 if (!path_equal(u->manager->lookup_paths.transient, pp))
522 continue;
523
524 (void) unlink(*i);
525 (void) rmdir(p);
526 }
527 }
528
529 static void unit_free_requires_mounts_for(Unit *u) {
530 assert(u);
531
532 for (;;) {
533 _cleanup_free_ char *path;
534
535 path = hashmap_steal_first_key(u->requires_mounts_for);
536 if (!path)
537 break;
538 else {
539 char s[strlen(path) + 1];
540
541 PATH_FOREACH_PREFIX_MORE(s, path) {
542 char *y;
543 Set *x;
544
545 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
546 if (!x)
547 continue;
548
549 (void) set_remove(x, u);
550
551 if (set_isempty(x)) {
552 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
553 free(y);
554 set_free(x);
555 }
556 }
557 }
558 }
559
560 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
561 }
562
563 static void unit_done(Unit *u) {
564 ExecContext *ec;
565 CGroupContext *cc;
566
567 assert(u);
568
569 if (u->type < 0)
570 return;
571
572 if (UNIT_VTABLE(u)->done)
573 UNIT_VTABLE(u)->done(u);
574
575 ec = unit_get_exec_context(u);
576 if (ec)
577 exec_context_done(ec);
578
579 cc = unit_get_cgroup_context(u);
580 if (cc)
581 cgroup_context_done(cc);
582 }
583
584 void unit_free(Unit *u) {
585 UnitDependency d;
586 Iterator i;
587 char *t;
588
589 if (!u)
590 return;
591
592 if (UNIT_ISSET(u->slice)) {
593 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
594 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
595
596 /* And make sure the parent is realized again, updating cgroup memberships */
597 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
598 }
599
600 u->transient_file = safe_fclose(u->transient_file);
601
602 if (!MANAGER_IS_RELOADING(u->manager))
603 unit_remove_transient(u);
604
605 bus_unit_send_removed_signal(u);
606
607 unit_done(u);
608
609 unit_dequeue_rewatch_pids(u);
610
611 sd_bus_slot_unref(u->match_bus_slot);
612 sd_bus_track_unref(u->bus_track);
613 u->deserialized_refs = strv_free(u->deserialized_refs);
614
615 unit_free_requires_mounts_for(u);
616
617 SET_FOREACH(t, u->names, i)
618 hashmap_remove_value(u->manager->units, t, u);
619
620 if (!sd_id128_is_null(u->invocation_id))
621 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
622
623 if (u->job) {
624 Job *j = u->job;
625 job_uninstall(j);
626 job_free(j);
627 }
628
629 if (u->nop_job) {
630 Job *j = u->nop_job;
631 job_uninstall(j);
632 job_free(j);
633 }
634
635 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
636 bidi_set_free(u, u->dependencies[d]);
637
638 if (u->on_console)
639 manager_unref_console(u->manager);
640
641 unit_release_cgroup(u);
642
643 if (!MANAGER_IS_RELOADING(u->manager))
644 unit_unlink_state_files(u);
645
646 unit_unref_uid_gid(u, false);
647
648 (void) manager_update_failed_units(u->manager, u, false);
649 set_remove(u->manager->startup_units, u);
650
651 unit_unwatch_all_pids(u);
652
653 unit_ref_unset(&u->slice);
654 while (u->refs_by_target)
655 unit_ref_unset(u->refs_by_target);
656
657 if (u->type != _UNIT_TYPE_INVALID)
658 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
659
660 if (u->in_load_queue)
661 LIST_REMOVE(load_queue, u->manager->load_queue, u);
662
663 if (u->in_dbus_queue)
664 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
665
666 if (u->in_gc_queue)
667 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
668
669 if (u->in_cgroup_realize_queue)
670 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
671
672 if (u->in_cgroup_empty_queue)
673 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
674
675 if (u->in_cleanup_queue)
676 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
677
678 if (u->in_target_deps_queue)
679 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
680
681 if (u->in_stop_when_unneeded_queue)
682 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
683
684 safe_close(u->ip_accounting_ingress_map_fd);
685 safe_close(u->ip_accounting_egress_map_fd);
686
687 safe_close(u->ipv4_allow_map_fd);
688 safe_close(u->ipv6_allow_map_fd);
689 safe_close(u->ipv4_deny_map_fd);
690 safe_close(u->ipv6_deny_map_fd);
691
692 bpf_program_unref(u->ip_bpf_ingress);
693 bpf_program_unref(u->ip_bpf_ingress_installed);
694 bpf_program_unref(u->ip_bpf_egress);
695 bpf_program_unref(u->ip_bpf_egress_installed);
696
697 set_free(u->ip_bpf_custom_ingress);
698 set_free(u->ip_bpf_custom_egress);
699 set_free(u->ip_bpf_custom_ingress_installed);
700 set_free(u->ip_bpf_custom_egress_installed);
701
702 bpf_program_unref(u->bpf_device_control_installed);
703
704 condition_free_list(u->conditions);
705 condition_free_list(u->asserts);
706
707 free(u->description);
708 strv_free(u->documentation);
709 free(u->fragment_path);
710 free(u->source_path);
711 strv_free(u->dropin_paths);
712 free(u->instance);
713
714 free(u->job_timeout_reboot_arg);
715
716 set_free_free(u->names);
717
718 free(u->reboot_arg);
719
720 free(u);
721 }
722
723 UnitActiveState unit_active_state(Unit *u) {
724 assert(u);
725
726 if (u->load_state == UNIT_MERGED)
727 return unit_active_state(unit_follow_merge(u));
728
729 /* After a reload it might happen that a unit is not correctly
730 * loaded but still has a process around. That's why we won't
731 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
732
733 return UNIT_VTABLE(u)->active_state(u);
734 }
735
736 const char* unit_sub_state_to_string(Unit *u) {
737 assert(u);
738
739 return UNIT_VTABLE(u)->sub_state_to_string(u);
740 }
741
742 static int set_complete_move(Set **s, Set **other) {
743 assert(s);
744 assert(other);
745
746 if (!other)
747 return 0;
748
749 if (*s)
750 return set_move(*s, *other);
751 else
752 *s = TAKE_PTR(*other);
753
754 return 0;
755 }
756
757 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
758 assert(s);
759 assert(other);
760
761 if (!*other)
762 return 0;
763
764 if (*s)
765 return hashmap_move(*s, *other);
766 else
767 *s = TAKE_PTR(*other);
768
769 return 0;
770 }
771
772 static int merge_names(Unit *u, Unit *other) {
773 char *t;
774 Iterator i;
775 int r;
776
777 assert(u);
778 assert(other);
779
780 r = set_complete_move(&u->names, &other->names);
781 if (r < 0)
782 return r;
783
784 set_free_free(other->names);
785 other->names = NULL;
786 other->id = NULL;
787
788 SET_FOREACH(t, u->names, i)
789 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
790
791 return 0;
792 }
793
794 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
795 unsigned n_reserve;
796
797 assert(u);
798 assert(other);
799 assert(d < _UNIT_DEPENDENCY_MAX);
800
801 /*
802 * If u does not have this dependency set allocated, there is no need
803 * to reserve anything. In that case other's set will be transferred
804 * as a whole to u by complete_move().
805 */
806 if (!u->dependencies[d])
807 return 0;
808
809 /* merge_dependencies() will skip a u-on-u dependency */
810 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
811
812 return hashmap_reserve(u->dependencies[d], n_reserve);
813 }
814
815 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
816 Iterator i;
817 Unit *back;
818 void *v;
819 int r;
820
821 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
822
823 assert(u);
824 assert(other);
825 assert(d < _UNIT_DEPENDENCY_MAX);
826
827 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
828 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
829 UnitDependency k;
830
831 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
832 * pointers back, and let's fix them up, to instead point to 'u'. */
833
834 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
835 if (back == u) {
836 /* Do not add dependencies between u and itself. */
837 if (hashmap_remove(back->dependencies[k], other))
838 maybe_warn_about_dependency(u, other_id, k);
839 } else {
840 UnitDependencyInfo di_u, di_other, di_merged;
841
842 /* Let's drop this dependency between "back" and "other", and let's create it between
843 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
844 * and any such dependency which might already exist */
845
846 di_other.data = hashmap_get(back->dependencies[k], other);
847 if (!di_other.data)
848 continue; /* dependency isn't set, let's try the next one */
849
850 di_u.data = hashmap_get(back->dependencies[k], u);
851
852 di_merged = (UnitDependencyInfo) {
853 .origin_mask = di_u.origin_mask | di_other.origin_mask,
854 .destination_mask = di_u.destination_mask | di_other.destination_mask,
855 };
856
857 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
858 if (r < 0)
859 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
860 assert(r >= 0);
861
862 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
863 }
864 }
865
866 }
867
868 /* Also do not move dependencies on u to itself */
869 back = hashmap_remove(other->dependencies[d], u);
870 if (back)
871 maybe_warn_about_dependency(u, other_id, d);
872
873 /* The move cannot fail. The caller must have performed a reservation. */
874 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
875
876 other->dependencies[d] = hashmap_free(other->dependencies[d]);
877 }
878
879 int unit_merge(Unit *u, Unit *other) {
880 UnitDependency d;
881 const char *other_id = NULL;
882 int r;
883
884 assert(u);
885 assert(other);
886 assert(u->manager == other->manager);
887 assert(u->type != _UNIT_TYPE_INVALID);
888
889 other = unit_follow_merge(other);
890
891 if (other == u)
892 return 0;
893
894 if (u->type != other->type)
895 return -EINVAL;
896
897 if (!u->instance != !other->instance)
898 return -EINVAL;
899
900 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
901 return -EEXIST;
902
903 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
904 return -EEXIST;
905
906 if (other->job)
907 return -EEXIST;
908
909 if (other->nop_job)
910 return -EEXIST;
911
912 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
913 return -EEXIST;
914
915 if (other->id)
916 other_id = strdupa(other->id);
917
918 /* Make reservations to ensure merge_dependencies() won't fail */
919 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
920 r = reserve_dependencies(u, other, d);
921 /*
922 * We don't rollback reservations if we fail. We don't have
923 * a way to undo reservations. A reservation is not a leak.
924 */
925 if (r < 0)
926 return r;
927 }
928
929 /* Merge names */
930 r = merge_names(u, other);
931 if (r < 0)
932 return r;
933
934 /* Redirect all references */
935 while (other->refs_by_target)
936 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
937
938 /* Merge dependencies */
939 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
940 merge_dependencies(u, other, other_id, d);
941
942 other->load_state = UNIT_MERGED;
943 other->merged_into = u;
944
945 /* If there is still some data attached to the other node, we
946 * don't need it anymore, and can free it. */
947 if (other->load_state != UNIT_STUB)
948 if (UNIT_VTABLE(other)->done)
949 UNIT_VTABLE(other)->done(other);
950
951 unit_add_to_dbus_queue(u);
952 unit_add_to_cleanup_queue(other);
953
954 return 0;
955 }
956
957 int unit_merge_by_name(Unit *u, const char *name) {
958 _cleanup_free_ char *s = NULL;
959 Unit *other;
960 int r;
961
962 assert(u);
963 assert(name);
964
965 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
966 if (!u->instance)
967 return -EINVAL;
968
969 r = unit_name_replace_instance(name, u->instance, &s);
970 if (r < 0)
971 return r;
972
973 name = s;
974 }
975
976 other = manager_get_unit(u->manager, name);
977 if (other)
978 return unit_merge(u, other);
979
980 return unit_add_name(u, name);
981 }
982
983 Unit* unit_follow_merge(Unit *u) {
984 assert(u);
985
986 while (u->load_state == UNIT_MERGED)
987 assert_se(u = u->merged_into);
988
989 return u;
990 }
991
992 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
993 ExecDirectoryType dt;
994 char **dp;
995 int r;
996
997 assert(u);
998 assert(c);
999
1000 if (c->working_directory && !c->working_directory_missing_ok) {
1001 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1002 if (r < 0)
1003 return r;
1004 }
1005
1006 if (c->root_directory) {
1007 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1008 if (r < 0)
1009 return r;
1010 }
1011
1012 if (c->root_image) {
1013 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1014 if (r < 0)
1015 return r;
1016 }
1017
1018 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1019 if (!u->manager->prefix[dt])
1020 continue;
1021
1022 STRV_FOREACH(dp, c->directories[dt].paths) {
1023 _cleanup_free_ char *p;
1024
1025 p = path_join(u->manager->prefix[dt], *dp);
1026 if (!p)
1027 return -ENOMEM;
1028
1029 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1030 if (r < 0)
1031 return r;
1032 }
1033 }
1034
1035 if (!MANAGER_IS_SYSTEM(u->manager))
1036 return 0;
1037
1038 if (c->private_tmp) {
1039 const char *p;
1040
1041 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1042 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1043 if (r < 0)
1044 return r;
1045 }
1046
1047 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1048 if (r < 0)
1049 return r;
1050 }
1051
1052 if (!IN_SET(c->std_output,
1053 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1054 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1055 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1056 !IN_SET(c->std_error,
1057 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1058 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1059 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1060 return 0;
1061
1062 /* If syslog or kernel logging is requested, make sure our own
1063 * logging daemon is run first. */
1064
1065 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1066 if (r < 0)
1067 return r;
1068
1069 return 0;
1070 }
1071
1072 const char *unit_description(Unit *u) {
1073 assert(u);
1074
1075 if (u->description)
1076 return u->description;
1077
1078 return strna(u->id);
1079 }
1080
1081 const char *unit_status_string(Unit *u) {
1082 assert(u);
1083
1084 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1085 return u->id;
1086
1087 return unit_description(u);
1088 }
1089
1090 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1091 const struct {
1092 UnitDependencyMask mask;
1093 const char *name;
1094 } table[] = {
1095 { UNIT_DEPENDENCY_FILE, "file" },
1096 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1097 { UNIT_DEPENDENCY_DEFAULT, "default" },
1098 { UNIT_DEPENDENCY_UDEV, "udev" },
1099 { UNIT_DEPENDENCY_PATH, "path" },
1100 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1101 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1102 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1103 };
1104 size_t i;
1105
1106 assert(f);
1107 assert(kind);
1108 assert(space);
1109
1110 for (i = 0; i < ELEMENTSOF(table); i++) {
1111
1112 if (mask == 0)
1113 break;
1114
1115 if (FLAGS_SET(mask, table[i].mask)) {
1116 if (*space)
1117 fputc(' ', f);
1118 else
1119 *space = true;
1120
1121 fputs(kind, f);
1122 fputs("-", f);
1123 fputs(table[i].name, f);
1124
1125 mask &= ~table[i].mask;
1126 }
1127 }
1128
1129 assert(mask == 0);
1130 }
1131
1132 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1133 char *t, **j;
1134 UnitDependency d;
1135 Iterator i;
1136 const char *prefix2;
1137 char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1138 Unit *following;
1139 _cleanup_set_free_ Set *following_set = NULL;
1140 const char *n;
1141 CGroupMask m;
1142 int r;
1143
1144 assert(u);
1145 assert(u->type >= 0);
1146
1147 prefix = strempty(prefix);
1148 prefix2 = strjoina(prefix, "\t");
1149
1150 fprintf(f,
1151 "%s-> Unit %s:\n",
1152 prefix, u->id);
1153
1154 SET_FOREACH(t, u->names, i)
1155 if (!streq(t, u->id))
1156 fprintf(f, "%s\tAlias: %s\n", prefix, t);
1157
1158 fprintf(f,
1159 "%s\tDescription: %s\n"
1160 "%s\tInstance: %s\n"
1161 "%s\tUnit Load State: %s\n"
1162 "%s\tUnit Active State: %s\n"
1163 "%s\tState Change Timestamp: %s\n"
1164 "%s\tInactive Exit Timestamp: %s\n"
1165 "%s\tActive Enter Timestamp: %s\n"
1166 "%s\tActive Exit Timestamp: %s\n"
1167 "%s\tInactive Enter Timestamp: %s\n"
1168 "%s\tMay GC: %s\n"
1169 "%s\tNeed Daemon Reload: %s\n"
1170 "%s\tTransient: %s\n"
1171 "%s\tPerpetual: %s\n"
1172 "%s\tGarbage Collection Mode: %s\n"
1173 "%s\tSlice: %s\n"
1174 "%s\tCGroup: %s\n"
1175 "%s\tCGroup realized: %s\n",
1176 prefix, unit_description(u),
1177 prefix, strna(u->instance),
1178 prefix, unit_load_state_to_string(u->load_state),
1179 prefix, unit_active_state_to_string(unit_active_state(u)),
1180 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1181 prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1182 prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1183 prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1184 prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1185 prefix, yes_no(unit_may_gc(u)),
1186 prefix, yes_no(unit_need_daemon_reload(u)),
1187 prefix, yes_no(u->transient),
1188 prefix, yes_no(u->perpetual),
1189 prefix, collect_mode_to_string(u->collect_mode),
1190 prefix, strna(unit_slice_name(u)),
1191 prefix, strna(u->cgroup_path),
1192 prefix, yes_no(u->cgroup_realized));
1193
1194 if (u->cgroup_realized_mask != 0) {
1195 _cleanup_free_ char *s = NULL;
1196 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1197 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1198 }
1199
1200 if (u->cgroup_enabled_mask != 0) {
1201 _cleanup_free_ char *s = NULL;
1202 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1203 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1204 }
1205
1206 m = unit_get_own_mask(u);
1207 if (m != 0) {
1208 _cleanup_free_ char *s = NULL;
1209 (void) cg_mask_to_string(m, &s);
1210 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1211 }
1212
1213 m = unit_get_members_mask(u);
1214 if (m != 0) {
1215 _cleanup_free_ char *s = NULL;
1216 (void) cg_mask_to_string(m, &s);
1217 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1218 }
1219
1220 m = unit_get_delegate_mask(u);
1221 if (m != 0) {
1222 _cleanup_free_ char *s = NULL;
1223 (void) cg_mask_to_string(m, &s);
1224 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1225 }
1226
1227 if (!sd_id128_is_null(u->invocation_id))
1228 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1229 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1230
1231 STRV_FOREACH(j, u->documentation)
1232 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1233
1234 following = unit_following(u);
1235 if (following)
1236 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1237
1238 r = unit_following_set(u, &following_set);
1239 if (r >= 0) {
1240 Unit *other;
1241
1242 SET_FOREACH(other, following_set, i)
1243 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1244 }
1245
1246 if (u->fragment_path)
1247 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1248
1249 if (u->source_path)
1250 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1251
1252 STRV_FOREACH(j, u->dropin_paths)
1253 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1254
1255 if (u->failure_action != EMERGENCY_ACTION_NONE)
1256 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1257 if (u->failure_action_exit_status >= 0)
1258 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1259 if (u->success_action != EMERGENCY_ACTION_NONE)
1260 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1261 if (u->success_action_exit_status >= 0)
1262 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1263
1264 if (u->job_timeout != USEC_INFINITY)
1265 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1266
1267 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1268 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1269
1270 if (u->job_timeout_reboot_arg)
1271 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1272
1273 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1274 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1275
1276 if (dual_timestamp_is_set(&u->condition_timestamp))
1277 fprintf(f,
1278 "%s\tCondition Timestamp: %s\n"
1279 "%s\tCondition Result: %s\n",
1280 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1281 prefix, yes_no(u->condition_result));
1282
1283 if (dual_timestamp_is_set(&u->assert_timestamp))
1284 fprintf(f,
1285 "%s\tAssert Timestamp: %s\n"
1286 "%s\tAssert Result: %s\n",
1287 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1288 prefix, yes_no(u->assert_result));
1289
1290 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1291 UnitDependencyInfo di;
1292 Unit *other;
1293
1294 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1295 bool space = false;
1296
1297 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1298
1299 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1300 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1301
1302 fputs(")\n", f);
1303 }
1304 }
1305
1306 if (!hashmap_isempty(u->requires_mounts_for)) {
1307 UnitDependencyInfo di;
1308 const char *path;
1309
1310 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1311 bool space = false;
1312
1313 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1314
1315 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1316 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1317
1318 fputs(")\n", f);
1319 }
1320 }
1321
1322 if (u->load_state == UNIT_LOADED) {
1323
1324 fprintf(f,
1325 "%s\tStopWhenUnneeded: %s\n"
1326 "%s\tRefuseManualStart: %s\n"
1327 "%s\tRefuseManualStop: %s\n"
1328 "%s\tDefaultDependencies: %s\n"
1329 "%s\tOnFailureJobMode: %s\n"
1330 "%s\tIgnoreOnIsolate: %s\n",
1331 prefix, yes_no(u->stop_when_unneeded),
1332 prefix, yes_no(u->refuse_manual_start),
1333 prefix, yes_no(u->refuse_manual_stop),
1334 prefix, yes_no(u->default_dependencies),
1335 prefix, job_mode_to_string(u->on_failure_job_mode),
1336 prefix, yes_no(u->ignore_on_isolate));
1337
1338 if (UNIT_VTABLE(u)->dump)
1339 UNIT_VTABLE(u)->dump(u, f, prefix2);
1340
1341 } else if (u->load_state == UNIT_MERGED)
1342 fprintf(f,
1343 "%s\tMerged into: %s\n",
1344 prefix, u->merged_into->id);
1345 else if (u->load_state == UNIT_ERROR)
1346 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1347
1348 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1349 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1350
1351 if (u->job)
1352 job_dump(u->job, f, prefix2);
1353
1354 if (u->nop_job)
1355 job_dump(u->nop_job, f, prefix2);
1356 }
1357
1358 /* Common implementation for multiple backends */
1359 int unit_load_fragment_and_dropin(Unit *u) {
1360 int r;
1361
1362 assert(u);
1363
1364 /* Load a .{service,socket,...} file */
1365 r = unit_load_fragment(u);
1366 if (r < 0)
1367 return r;
1368
1369 if (u->load_state == UNIT_STUB)
1370 return -ENOENT;
1371
1372 /* Load drop-in directory data. If u is an alias, we might be reloading the
1373 * target unit needlessly. But we cannot be sure which drops-ins have already
1374 * been loaded and which not, at least without doing complicated book-keeping,
1375 * so let's always reread all drop-ins. */
1376 return unit_load_dropin(unit_follow_merge(u));
1377 }
1378
1379 /* Common implementation for multiple backends */
1380 int unit_load_fragment_and_dropin_optional(Unit *u) {
1381 int r;
1382
1383 assert(u);
1384
1385 /* Same as unit_load_fragment_and_dropin(), but whether
1386 * something can be loaded or not doesn't matter. */
1387
1388 /* Load a .service/.socket/.slice/… file */
1389 r = unit_load_fragment(u);
1390 if (r < 0)
1391 return r;
1392
1393 if (u->load_state == UNIT_STUB)
1394 u->load_state = UNIT_LOADED;
1395
1396 /* Load drop-in directory data */
1397 return unit_load_dropin(unit_follow_merge(u));
1398 }
1399
1400 void unit_add_to_target_deps_queue(Unit *u) {
1401 Manager *m = u->manager;
1402
1403 assert(u);
1404
1405 if (u->in_target_deps_queue)
1406 return;
1407
1408 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1409 u->in_target_deps_queue = true;
1410 }
1411
1412 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1413 assert(u);
1414 assert(target);
1415
1416 if (target->type != UNIT_TARGET)
1417 return 0;
1418
1419 /* Only add the dependency if both units are loaded, so that
1420 * that loop check below is reliable */
1421 if (u->load_state != UNIT_LOADED ||
1422 target->load_state != UNIT_LOADED)
1423 return 0;
1424
1425 /* If either side wants no automatic dependencies, then let's
1426 * skip this */
1427 if (!u->default_dependencies ||
1428 !target->default_dependencies)
1429 return 0;
1430
1431 /* Don't create loops */
1432 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1433 return 0;
1434
1435 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1436 }
1437
1438 static int unit_add_slice_dependencies(Unit *u) {
1439 UnitDependencyMask mask;
1440 assert(u);
1441
1442 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1443 return 0;
1444
1445 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1446 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1447 relationship). */
1448 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1449
1450 if (UNIT_ISSET(u->slice))
1451 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1452
1453 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1454 return 0;
1455
1456 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1457 }
1458
1459 static int unit_add_mount_dependencies(Unit *u) {
1460 UnitDependencyInfo di;
1461 const char *path;
1462 Iterator i;
1463 int r;
1464
1465 assert(u);
1466
1467 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1468 char prefix[strlen(path) + 1];
1469
1470 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1471 _cleanup_free_ char *p = NULL;
1472 Unit *m;
1473
1474 r = unit_name_from_path(prefix, ".mount", &p);
1475 if (r < 0)
1476 return r;
1477
1478 m = manager_get_unit(u->manager, p);
1479 if (!m) {
1480 /* Make sure to load the mount unit if
1481 * it exists. If so the dependencies
1482 * on this unit will be added later
1483 * during the loading of the mount
1484 * unit. */
1485 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1486 continue;
1487 }
1488 if (m == u)
1489 continue;
1490
1491 if (m->load_state != UNIT_LOADED)
1492 continue;
1493
1494 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1495 if (r < 0)
1496 return r;
1497
1498 if (m->fragment_path) {
1499 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1500 if (r < 0)
1501 return r;
1502 }
1503 }
1504 }
1505
1506 return 0;
1507 }
1508
1509 static int unit_add_startup_units(Unit *u) {
1510 CGroupContext *c;
1511 int r;
1512
1513 c = unit_get_cgroup_context(u);
1514 if (!c)
1515 return 0;
1516
1517 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1518 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1519 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1520 return 0;
1521
1522 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1523 if (r < 0)
1524 return r;
1525
1526 return set_put(u->manager->startup_units, u);
1527 }
1528
1529 int unit_load(Unit *u) {
1530 int r;
1531
1532 assert(u);
1533
1534 if (u->in_load_queue) {
1535 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1536 u->in_load_queue = false;
1537 }
1538
1539 if (u->type == _UNIT_TYPE_INVALID)
1540 return -EINVAL;
1541
1542 if (u->load_state != UNIT_STUB)
1543 return 0;
1544
1545 if (u->transient_file) {
1546 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1547 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1548
1549 r = fflush_and_check(u->transient_file);
1550 if (r < 0)
1551 goto fail;
1552
1553 u->transient_file = safe_fclose(u->transient_file);
1554 u->fragment_mtime = now(CLOCK_REALTIME);
1555 }
1556
1557 if (UNIT_VTABLE(u)->load) {
1558 r = UNIT_VTABLE(u)->load(u);
1559 if (r < 0)
1560 goto fail;
1561 }
1562
1563 if (u->load_state == UNIT_STUB) {
1564 r = -ENOENT;
1565 goto fail;
1566 }
1567
1568 if (u->load_state == UNIT_LOADED) {
1569 unit_add_to_target_deps_queue(u);
1570
1571 r = unit_add_slice_dependencies(u);
1572 if (r < 0)
1573 goto fail;
1574
1575 r = unit_add_mount_dependencies(u);
1576 if (r < 0)
1577 goto fail;
1578
1579 r = unit_add_startup_units(u);
1580 if (r < 0)
1581 goto fail;
1582
1583 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1584 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1585 r = -ENOEXEC;
1586 goto fail;
1587 }
1588
1589 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1590 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1591
1592 /* We finished loading, let's ensure our parents recalculate the members mask */
1593 unit_invalidate_cgroup_members_masks(u);
1594 }
1595
1596 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1597
1598 unit_add_to_dbus_queue(unit_follow_merge(u));
1599 unit_add_to_gc_queue(u);
1600
1601 return 0;
1602
1603 fail:
1604 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1605 * return ENOEXEC to ensure units are placed in this state after loading */
1606
1607 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1608 r == -ENOEXEC ? UNIT_BAD_SETTING :
1609 UNIT_ERROR;
1610 u->load_error = r;
1611
1612 unit_add_to_dbus_queue(u);
1613 unit_add_to_gc_queue(u);
1614
1615 return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1616 }
1617
1618 _printf_(7, 8)
1619 static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1620 Unit *u = userdata;
1621 va_list ap;
1622 int r;
1623
1624 va_start(ap, format);
1625 if (u)
1626 r = log_object_internalv(level, error, file, line, func,
1627 u->manager->unit_log_field,
1628 u->id,
1629 u->manager->invocation_log_field,
1630 u->invocation_id_string,
1631 format, ap);
1632 else
1633 r = log_internalv(level, error, file, line, func, format, ap);
1634 va_end(ap);
1635
1636 return r;
1637 }
1638
1639 static bool unit_test_condition(Unit *u) {
1640 assert(u);
1641
1642 dual_timestamp_get(&u->condition_timestamp);
1643 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1644
1645 unit_add_to_dbus_queue(u);
1646
1647 return u->condition_result;
1648 }
1649
1650 static bool unit_test_assert(Unit *u) {
1651 assert(u);
1652
1653 dual_timestamp_get(&u->assert_timestamp);
1654 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1655
1656 unit_add_to_dbus_queue(u);
1657
1658 return u->assert_result;
1659 }
1660
1661 void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1662 const char *d;
1663
1664 d = unit_status_string(u);
1665 if (log_get_show_color())
1666 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1667
1668 DISABLE_WARNING_FORMAT_NONLITERAL;
1669 manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1670 REENABLE_WARNING;
1671 }
1672
1673 int unit_test_start_limit(Unit *u) {
1674 const char *reason;
1675
1676 assert(u);
1677
1678 if (ratelimit_below(&u->start_limit)) {
1679 u->start_limit_hit = false;
1680 return 0;
1681 }
1682
1683 log_unit_warning(u, "Start request repeated too quickly.");
1684 u->start_limit_hit = true;
1685
1686 reason = strjoina("unit ", u->id, " failed");
1687
1688 emergency_action(u->manager, u->start_limit_action,
1689 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1690 u->reboot_arg, -1, reason);
1691
1692 return -ECANCELED;
1693 }
1694
1695 bool unit_shall_confirm_spawn(Unit *u) {
1696 assert(u);
1697
1698 if (manager_is_confirm_spawn_disabled(u->manager))
1699 return false;
1700
1701 /* For some reasons units remaining in the same process group
1702 * as PID 1 fail to acquire the console even if it's not used
1703 * by any process. So skip the confirmation question for them. */
1704 return !unit_get_exec_context(u)->same_pgrp;
1705 }
1706
1707 static bool unit_verify_deps(Unit *u) {
1708 Unit *other;
1709 Iterator j;
1710 void *v;
1711
1712 assert(u);
1713
1714 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1715 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1716 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1717 * conjunction with After= as for them any such check would make things entirely racy. */
1718
1719 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1720
1721 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1722 continue;
1723
1724 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1725 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1726 return false;
1727 }
1728 }
1729
1730 return true;
1731 }
1732
1733 /* Errors that aren't really errors:
1734 * -EALREADY: Unit is already started.
1735 * -ECOMM: Condition failed
1736 * -EAGAIN: An operation is already in progress. Retry later.
1737 *
1738 * Errors that are real errors:
1739 * -EBADR: This unit type does not support starting.
1740 * -ECANCELED: Start limit hit, too many requests for now
1741 * -EPROTO: Assert failed
1742 * -EINVAL: Unit not loaded
1743 * -EOPNOTSUPP: Unit type not supported
1744 * -ENOLINK: The necessary dependencies are not fulfilled.
1745 * -ESTALE: This unit has been started before and can't be started a second time
1746 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1747 */
1748 int unit_start(Unit *u) {
1749 UnitActiveState state;
1750 Unit *following;
1751 int r;
1752
1753 assert(u);
1754
1755 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1756 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1757 * waiting is finished. */
1758 state = unit_active_state(u);
1759 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1760 return -EALREADY;
1761 if (state == UNIT_MAINTENANCE)
1762 return -EAGAIN;
1763
1764 /* Units that aren't loaded cannot be started */
1765 if (u->load_state != UNIT_LOADED)
1766 return -EINVAL;
1767
1768 /* Refuse starting scope units more than once */
1769 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1770 return -ESTALE;
1771
1772 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1773 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1774 * recheck the condition in that case. */
1775 if (state != UNIT_ACTIVATING &&
1776 !unit_test_condition(u)) {
1777
1778 /* Let's also check the start limit here. Normally, the start limit is only checked by the
1779 * .start() method of the unit type after it did some additional checks verifying everything
1780 * is in order (so that those other checks can propagate errors properly). However, if a
1781 * condition check doesn't hold we don't get that far but we should still ensure we are not
1782 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
1783 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
1784 * hence the chance is big that any triggering unit for us will trigger us again. Note this
1785 * condition check is a bit different from the condition check inside the per-unit .start()
1786 * function, as this one will not change the unit's state in any way (and we shouldn't here,
1787 * after all the condition failed). */
1788
1789 r = unit_test_start_limit(u);
1790 if (r < 0)
1791 return r;
1792
1793 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1794 }
1795
1796 /* If the asserts failed, fail the entire job */
1797 if (state != UNIT_ACTIVATING &&
1798 !unit_test_assert(u))
1799 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1800
1801 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1802 * condition checks, so that we rather return condition check errors (which are usually not
1803 * considered a true failure) than "not supported" errors (which are considered a failure).
1804 */
1805 if (!unit_type_supported(u->type))
1806 return -EOPNOTSUPP;
1807
1808 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1809 * should have taken care of this already, but let's check this here again. After all, our
1810 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1811 if (!unit_verify_deps(u))
1812 return -ENOLINK;
1813
1814 /* Forward to the main object, if we aren't it. */
1815 following = unit_following(u);
1816 if (following) {
1817 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1818 return unit_start(following);
1819 }
1820
1821 /* If it is stopped, but we cannot start it, then fail */
1822 if (!UNIT_VTABLE(u)->start)
1823 return -EBADR;
1824
1825 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1826 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1827 * waits for a holdoff timer to elapse before it will start again. */
1828
1829 unit_add_to_dbus_queue(u);
1830
1831 return UNIT_VTABLE(u)->start(u);
1832 }
1833
1834 bool unit_can_start(Unit *u) {
1835 assert(u);
1836
1837 if (u->load_state != UNIT_LOADED)
1838 return false;
1839
1840 if (!unit_type_supported(u->type))
1841 return false;
1842
1843 /* Scope units may be started only once */
1844 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1845 return false;
1846
1847 return !!UNIT_VTABLE(u)->start;
1848 }
1849
1850 bool unit_can_isolate(Unit *u) {
1851 assert(u);
1852
1853 return unit_can_start(u) &&
1854 u->allow_isolate;
1855 }
1856
1857 /* Errors:
1858 * -EBADR: This unit type does not support stopping.
1859 * -EALREADY: Unit is already stopped.
1860 * -EAGAIN: An operation is already in progress. Retry later.
1861 */
1862 int unit_stop(Unit *u) {
1863 UnitActiveState state;
1864 Unit *following;
1865
1866 assert(u);
1867
1868 state = unit_active_state(u);
1869 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1870 return -EALREADY;
1871
1872 following = unit_following(u);
1873 if (following) {
1874 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1875 return unit_stop(following);
1876 }
1877
1878 if (!UNIT_VTABLE(u)->stop)
1879 return -EBADR;
1880
1881 unit_add_to_dbus_queue(u);
1882
1883 return UNIT_VTABLE(u)->stop(u);
1884 }
1885
1886 bool unit_can_stop(Unit *u) {
1887 assert(u);
1888
1889 if (!unit_type_supported(u->type))
1890 return false;
1891
1892 if (u->perpetual)
1893 return false;
1894
1895 return !!UNIT_VTABLE(u)->stop;
1896 }
1897
1898 /* Errors:
1899 * -EBADR: This unit type does not support reloading.
1900 * -ENOEXEC: Unit is not started.
1901 * -EAGAIN: An operation is already in progress. Retry later.
1902 */
1903 int unit_reload(Unit *u) {
1904 UnitActiveState state;
1905 Unit *following;
1906
1907 assert(u);
1908
1909 if (u->load_state != UNIT_LOADED)
1910 return -EINVAL;
1911
1912 if (!unit_can_reload(u))
1913 return -EBADR;
1914
1915 state = unit_active_state(u);
1916 if (state == UNIT_RELOADING)
1917 return -EAGAIN;
1918
1919 if (state != UNIT_ACTIVE) {
1920 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1921 return -ENOEXEC;
1922 }
1923
1924 following = unit_following(u);
1925 if (following) {
1926 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1927 return unit_reload(following);
1928 }
1929
1930 unit_add_to_dbus_queue(u);
1931
1932 if (!UNIT_VTABLE(u)->reload) {
1933 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1934 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1935 return 0;
1936 }
1937
1938 return UNIT_VTABLE(u)->reload(u);
1939 }
1940
1941 bool unit_can_reload(Unit *u) {
1942 assert(u);
1943
1944 if (UNIT_VTABLE(u)->can_reload)
1945 return UNIT_VTABLE(u)->can_reload(u);
1946
1947 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1948 return true;
1949
1950 return UNIT_VTABLE(u)->reload;
1951 }
1952
1953 bool unit_is_unneeded(Unit *u) {
1954 static const UnitDependency deps[] = {
1955 UNIT_REQUIRED_BY,
1956 UNIT_REQUISITE_OF,
1957 UNIT_WANTED_BY,
1958 UNIT_BOUND_BY,
1959 };
1960 size_t j;
1961
1962 assert(u);
1963
1964 if (!u->stop_when_unneeded)
1965 return false;
1966
1967 /* Don't clean up while the unit is transitioning or is even inactive. */
1968 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1969 return false;
1970 if (u->job)
1971 return false;
1972
1973 for (j = 0; j < ELEMENTSOF(deps); j++) {
1974 Unit *other;
1975 Iterator i;
1976 void *v;
1977
1978 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1979 * restart, then don't clean this one up. */
1980
1981 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1982 if (other->job)
1983 return false;
1984
1985 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1986 return false;
1987
1988 if (unit_will_restart(other))
1989 return false;
1990 }
1991 }
1992
1993 return true;
1994 }
1995
1996 static void check_unneeded_dependencies(Unit *u) {
1997
1998 static const UnitDependency deps[] = {
1999 UNIT_REQUIRES,
2000 UNIT_REQUISITE,
2001 UNIT_WANTS,
2002 UNIT_BINDS_TO,
2003 };
2004 size_t j;
2005
2006 assert(u);
2007
2008 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2009
2010 for (j = 0; j < ELEMENTSOF(deps); j++) {
2011 Unit *other;
2012 Iterator i;
2013 void *v;
2014
2015 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2016 unit_submit_to_stop_when_unneeded_queue(other);
2017 }
2018 }
2019
2020 static void unit_check_binds_to(Unit *u) {
2021 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2022 bool stop = false;
2023 Unit *other;
2024 Iterator i;
2025 void *v;
2026 int r;
2027
2028 assert(u);
2029
2030 if (u->job)
2031 return;
2032
2033 if (unit_active_state(u) != UNIT_ACTIVE)
2034 return;
2035
2036 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2037 if (other->job)
2038 continue;
2039
2040 if (!other->coldplugged)
2041 /* We might yet create a job for the other unit… */
2042 continue;
2043
2044 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2045 continue;
2046
2047 stop = true;
2048 break;
2049 }
2050
2051 if (!stop)
2052 return;
2053
2054 /* If stopping a unit fails continuously we might enter a stop
2055 * loop here, hence stop acting on the service being
2056 * unnecessary after a while. */
2057 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2058 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2059 return;
2060 }
2061
2062 assert(other);
2063 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2064
2065 /* A unit we need to run is gone. Sniff. Let's stop this. */
2066 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2067 if (r < 0)
2068 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2069 }
2070
2071 static void retroactively_start_dependencies(Unit *u) {
2072 Iterator i;
2073 Unit *other;
2074 void *v;
2075
2076 assert(u);
2077 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2078
2079 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2080 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2081 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2082 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2083
2084 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2085 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2086 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2087 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2088
2089 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2090 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2091 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2092 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2093
2094 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2095 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2096 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2097
2098 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2099 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2100 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2101 }
2102
2103 static void retroactively_stop_dependencies(Unit *u) {
2104 Unit *other;
2105 Iterator i;
2106 void *v;
2107
2108 assert(u);
2109 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2110
2111 /* Pull down units which are bound to us recursively if enabled */
2112 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2113 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2114 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2115 }
2116
2117 void unit_start_on_failure(Unit *u) {
2118 Unit *other;
2119 Iterator i;
2120 void *v;
2121 int r;
2122
2123 assert(u);
2124
2125 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2126 return;
2127
2128 log_unit_info(u, "Triggering OnFailure= dependencies.");
2129
2130 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2131 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2132
2133 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2134 if (r < 0)
2135 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2136 }
2137 }
2138
2139 void unit_trigger_notify(Unit *u) {
2140 Unit *other;
2141 Iterator i;
2142 void *v;
2143
2144 assert(u);
2145
2146 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2147 if (UNIT_VTABLE(other)->trigger_notify)
2148 UNIT_VTABLE(other)->trigger_notify(other, u);
2149 }
2150
2151 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2152 if (condition_notice && log_level > LOG_NOTICE)
2153 return LOG_NOTICE;
2154 if (condition_info && log_level > LOG_INFO)
2155 return LOG_INFO;
2156 return log_level;
2157 }
2158
2159 static int unit_log_resources(Unit *u) {
2160 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2161 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2162 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2163 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a treshold */
2164 size_t n_message_parts = 0, n_iovec = 0;
2165 char* message_parts[1 + 2 + 2 + 1], *t;
2166 nsec_t nsec = NSEC_INFINITY;
2167 CGroupIPAccountingMetric m;
2168 size_t i;
2169 int r;
2170 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2171 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2172 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2173 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2174 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2175 };
2176 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2177 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2178 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2179 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2180 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2181 };
2182
2183 assert(u);
2184
2185 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2186 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2187 * information and the complete data in structured fields. */
2188
2189 (void) unit_get_cpu_usage(u, &nsec);
2190 if (nsec != NSEC_INFINITY) {
2191 char buf[FORMAT_TIMESPAN_MAX] = "";
2192
2193 /* Format the CPU time for inclusion in the structured log message */
2194 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2195 r = log_oom();
2196 goto finish;
2197 }
2198 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2199
2200 /* Format the CPU time for inclusion in the human language message string */
2201 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2202 t = strjoin("consumed ", buf, " CPU time");
2203 if (!t) {
2204 r = log_oom();
2205 goto finish;
2206 }
2207
2208 message_parts[n_message_parts++] = t;
2209
2210 log_level = raise_level(log_level,
2211 nsec > NOTICEWORTHY_CPU_NSEC,
2212 nsec > MENTIONWORTHY_CPU_NSEC);
2213 }
2214
2215 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2216 char buf[FORMAT_BYTES_MAX] = "";
2217 uint64_t value = UINT64_MAX;
2218
2219 assert(io_fields[k]);
2220
2221 (void) unit_get_io_accounting(u, k, k > 0, &value);
2222 if (value == UINT64_MAX)
2223 continue;
2224
2225 have_io_accounting = true;
2226 if (value > 0)
2227 any_io = true;
2228
2229 /* Format IO accounting data for inclusion in the structured log message */
2230 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2231 r = log_oom();
2232 goto finish;
2233 }
2234 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2235
2236 /* Format the IO accounting data for inclusion in the human language message string, but only
2237 * for the bytes counters (and not for the operations counters) */
2238 if (k == CGROUP_IO_READ_BYTES) {
2239 assert(!rr);
2240 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2241 if (!rr) {
2242 r = log_oom();
2243 goto finish;
2244 }
2245 } else if (k == CGROUP_IO_WRITE_BYTES) {
2246 assert(!wr);
2247 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2248 if (!wr) {
2249 r = log_oom();
2250 goto finish;
2251 }
2252 }
2253
2254 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2255 log_level = raise_level(log_level,
2256 value > MENTIONWORTHY_IO_BYTES,
2257 value > NOTICEWORTHY_IO_BYTES);
2258 }
2259
2260 if (have_io_accounting) {
2261 if (any_io) {
2262 if (rr)
2263 message_parts[n_message_parts++] = TAKE_PTR(rr);
2264 if (wr)
2265 message_parts[n_message_parts++] = TAKE_PTR(wr);
2266
2267 } else {
2268 char *k;
2269
2270 k = strdup("no IO");
2271 if (!k) {
2272 r = log_oom();
2273 goto finish;
2274 }
2275
2276 message_parts[n_message_parts++] = k;
2277 }
2278 }
2279
2280 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2281 char buf[FORMAT_BYTES_MAX] = "";
2282 uint64_t value = UINT64_MAX;
2283
2284 assert(ip_fields[m]);
2285
2286 (void) unit_get_ip_accounting(u, m, &value);
2287 if (value == UINT64_MAX)
2288 continue;
2289
2290 have_ip_accounting = true;
2291 if (value > 0)
2292 any_traffic = true;
2293
2294 /* Format IP accounting data for inclusion in the structured log message */
2295 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2296 r = log_oom();
2297 goto finish;
2298 }
2299 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2300
2301 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2302 * bytes counters (and not for the packets counters) */
2303 if (m == CGROUP_IP_INGRESS_BYTES) {
2304 assert(!igress);
2305 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2306 if (!igress) {
2307 r = log_oom();
2308 goto finish;
2309 }
2310 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2311 assert(!egress);
2312 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2313 if (!egress) {
2314 r = log_oom();
2315 goto finish;
2316 }
2317 }
2318
2319 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2320 log_level = raise_level(log_level,
2321 value > MENTIONWORTHY_IP_BYTES,
2322 value > NOTICEWORTHY_IP_BYTES);
2323 }
2324
2325 if (have_ip_accounting) {
2326 if (any_traffic) {
2327 if (igress)
2328 message_parts[n_message_parts++] = TAKE_PTR(igress);
2329 if (egress)
2330 message_parts[n_message_parts++] = TAKE_PTR(egress);
2331
2332 } else {
2333 char *k;
2334
2335 k = strdup("no IP traffic");
2336 if (!k) {
2337 r = log_oom();
2338 goto finish;
2339 }
2340
2341 message_parts[n_message_parts++] = k;
2342 }
2343 }
2344
2345 /* Is there any accounting data available at all? */
2346 if (n_iovec == 0) {
2347 r = 0;
2348 goto finish;
2349 }
2350
2351 if (n_message_parts == 0)
2352 t = strjoina("MESSAGE=", u->id, ": Completed.");
2353 else {
2354 _cleanup_free_ char *joined;
2355
2356 message_parts[n_message_parts] = NULL;
2357
2358 joined = strv_join(message_parts, ", ");
2359 if (!joined) {
2360 r = log_oom();
2361 goto finish;
2362 }
2363
2364 joined[0] = ascii_toupper(joined[0]);
2365 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2366 }
2367
2368 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2369 * and hence don't increase n_iovec for them */
2370 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2371 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2372
2373 t = strjoina(u->manager->unit_log_field, u->id);
2374 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2375
2376 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2377 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2378
2379 log_struct_iovec(log_level, iovec, n_iovec + 4);
2380 r = 0;
2381
2382 finish:
2383 for (i = 0; i < n_message_parts; i++)
2384 free(message_parts[i]);
2385
2386 for (i = 0; i < n_iovec; i++)
2387 free(iovec[i].iov_base);
2388
2389 return r;
2390
2391 }
2392
2393 static void unit_update_on_console(Unit *u) {
2394 bool b;
2395
2396 assert(u);
2397
2398 b = unit_needs_console(u);
2399 if (u->on_console == b)
2400 return;
2401
2402 u->on_console = b;
2403 if (b)
2404 manager_ref_console(u->manager);
2405 else
2406 manager_unref_console(u->manager);
2407 }
2408
2409 static void unit_emit_audit_start(Unit *u) {
2410 assert(u);
2411
2412 if (u->type != UNIT_SERVICE)
2413 return;
2414
2415 /* Write audit record if we have just finished starting up */
2416 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2417 u->in_audit = true;
2418 }
2419
2420 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2421 assert(u);
2422
2423 if (u->type != UNIT_SERVICE)
2424 return;
2425
2426 if (u->in_audit) {
2427 /* Write audit record if we have just finished shutting down */
2428 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2429 u->in_audit = false;
2430 } else {
2431 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2432 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2433
2434 if (state == UNIT_INACTIVE)
2435 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2436 }
2437 }
2438
2439 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2440 bool unexpected = false;
2441 JobResult result;
2442
2443 assert(j);
2444
2445 if (j->state == JOB_WAITING)
2446
2447 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2448 * due to EAGAIN. */
2449 job_add_to_run_queue(j);
2450
2451 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2452 * hence needs to invalidate jobs. */
2453
2454 switch (j->type) {
2455
2456 case JOB_START:
2457 case JOB_VERIFY_ACTIVE:
2458
2459 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2460 job_finish_and_invalidate(j, JOB_DONE, true, false);
2461 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2462 unexpected = true;
2463
2464 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2465 if (ns == UNIT_FAILED)
2466 result = JOB_FAILED;
2467 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2468 result = JOB_SKIPPED;
2469 else
2470 result = JOB_DONE;
2471
2472 job_finish_and_invalidate(j, result, true, false);
2473 }
2474 }
2475
2476 break;
2477
2478 case JOB_RELOAD:
2479 case JOB_RELOAD_OR_START:
2480 case JOB_TRY_RELOAD:
2481
2482 if (j->state == JOB_RUNNING) {
2483 if (ns == UNIT_ACTIVE)
2484 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2485 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2486 unexpected = true;
2487
2488 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2489 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2490 }
2491 }
2492
2493 break;
2494
2495 case JOB_STOP:
2496 case JOB_RESTART:
2497 case JOB_TRY_RESTART:
2498
2499 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2500 job_finish_and_invalidate(j, JOB_DONE, true, false);
2501 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2502 unexpected = true;
2503 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2504 }
2505
2506 break;
2507
2508 default:
2509 assert_not_reached("Job type unknown");
2510 }
2511
2512 return unexpected;
2513 }
2514
2515 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2516 const char *reason;
2517 Manager *m;
2518
2519 assert(u);
2520 assert(os < _UNIT_ACTIVE_STATE_MAX);
2521 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2522
2523 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2524 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2525 * remounted this function will be called too! */
2526
2527 m = u->manager;
2528
2529 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2530 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2531 unit_add_to_dbus_queue(u);
2532
2533 /* Update timestamps for state changes */
2534 if (!MANAGER_IS_RELOADING(m)) {
2535 dual_timestamp_get(&u->state_change_timestamp);
2536
2537 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2538 u->inactive_exit_timestamp = u->state_change_timestamp;
2539 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2540 u->inactive_enter_timestamp = u->state_change_timestamp;
2541
2542 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2543 u->active_enter_timestamp = u->state_change_timestamp;
2544 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2545 u->active_exit_timestamp = u->state_change_timestamp;
2546 }
2547
2548 /* Keep track of failed units */
2549 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2550
2551 /* Make sure the cgroup and state files are always removed when we become inactive */
2552 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2553 unit_prune_cgroup(u);
2554 unit_unlink_state_files(u);
2555 }
2556
2557 unit_update_on_console(u);
2558
2559 if (!MANAGER_IS_RELOADING(m)) {
2560 bool unexpected;
2561
2562 /* Let's propagate state changes to the job */
2563 if (u->job)
2564 unexpected = unit_process_job(u->job, ns, flags);
2565 else
2566 unexpected = true;
2567
2568 /* If this state change happened without being requested by a job, then let's retroactively start or
2569 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2570 * additional jobs just because something is already activated. */
2571
2572 if (unexpected) {
2573 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2574 retroactively_start_dependencies(u);
2575 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2576 retroactively_stop_dependencies(u);
2577 }
2578
2579 /* stop unneeded units regardless if going down was expected or not */
2580 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2581 check_unneeded_dependencies(u);
2582
2583 if (ns != os && ns == UNIT_FAILED) {
2584 log_unit_debug(u, "Unit entered failed state.");
2585
2586 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2587 unit_start_on_failure(u);
2588 }
2589
2590 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2591 /* This unit just finished starting up */
2592
2593 unit_emit_audit_start(u);
2594 manager_send_unit_plymouth(m, u);
2595 }
2596
2597 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2598 /* This unit just stopped/failed. */
2599
2600 unit_emit_audit_stop(u, ns);
2601 unit_log_resources(u);
2602 }
2603 }
2604
2605 manager_recheck_journal(m);
2606 manager_recheck_dbus(m);
2607
2608 unit_trigger_notify(u);
2609
2610 if (!MANAGER_IS_RELOADING(m)) {
2611 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2612 unit_submit_to_stop_when_unneeded_queue(u);
2613
2614 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2615 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2616 * without ever entering started.) */
2617 unit_check_binds_to(u);
2618
2619 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2620 reason = strjoina("unit ", u->id, " failed");
2621 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2622 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2623 reason = strjoina("unit ", u->id, " succeeded");
2624 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2625 }
2626 }
2627
2628 unit_add_to_gc_queue(u);
2629 }
2630
2631 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2632 int r;
2633
2634 assert(u);
2635 assert(pid_is_valid(pid));
2636
2637 /* Watch a specific PID */
2638
2639 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2640 * opportunity to remove any stalled references to this PID as they can be created
2641 * easily (when watching a process which is not our direct child). */
2642 if (exclusive)
2643 manager_unwatch_pid(u->manager, pid);
2644
2645 r = set_ensure_allocated(&u->pids, NULL);
2646 if (r < 0)
2647 return r;
2648
2649 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2650 if (r < 0)
2651 return r;
2652
2653 /* First try, let's add the unit keyed by "pid". */
2654 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2655 if (r == -EEXIST) {
2656 Unit **array;
2657 bool found = false;
2658 size_t n = 0;
2659
2660 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2661 * to an array of Units rather than just a Unit), lists us already. */
2662
2663 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2664 if (array)
2665 for (; array[n]; n++)
2666 if (array[n] == u)
2667 found = true;
2668
2669 if (found) /* Found it already? if so, do nothing */
2670 r = 0;
2671 else {
2672 Unit **new_array;
2673
2674 /* Allocate a new array */
2675 new_array = new(Unit*, n + 2);
2676 if (!new_array)
2677 return -ENOMEM;
2678
2679 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2680 new_array[n] = u;
2681 new_array[n+1] = NULL;
2682
2683 /* Add or replace the old array */
2684 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2685 if (r < 0) {
2686 free(new_array);
2687 return r;
2688 }
2689
2690 free(array);
2691 }
2692 } else if (r < 0)
2693 return r;
2694
2695 r = set_put(u->pids, PID_TO_PTR(pid));
2696 if (r < 0)
2697 return r;
2698
2699 return 0;
2700 }
2701
2702 void unit_unwatch_pid(Unit *u, pid_t pid) {
2703 Unit **array;
2704
2705 assert(u);
2706 assert(pid_is_valid(pid));
2707
2708 /* First let's drop the unit in case it's keyed as "pid". */
2709 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2710
2711 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2712 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2713 if (array) {
2714 size_t n, m = 0;
2715
2716 /* Let's iterate through the array, dropping our own entry */
2717 for (n = 0; array[n]; n++)
2718 if (array[n] != u)
2719 array[m++] = array[n];
2720 array[m] = NULL;
2721
2722 if (m == 0) {
2723 /* The array is now empty, remove the entire entry */
2724 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2725 free(array);
2726 }
2727 }
2728
2729 (void) set_remove(u->pids, PID_TO_PTR(pid));
2730 }
2731
2732 void unit_unwatch_all_pids(Unit *u) {
2733 assert(u);
2734
2735 while (!set_isempty(u->pids))
2736 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2737
2738 u->pids = set_free(u->pids);
2739 }
2740
2741 static void unit_tidy_watch_pids(Unit *u) {
2742 pid_t except1, except2;
2743 Iterator i;
2744 void *e;
2745
2746 assert(u);
2747
2748 /* Cleans dead PIDs from our list */
2749
2750 except1 = unit_main_pid(u);
2751 except2 = unit_control_pid(u);
2752
2753 SET_FOREACH(e, u->pids, i) {
2754 pid_t pid = PTR_TO_PID(e);
2755
2756 if (pid == except1 || pid == except2)
2757 continue;
2758
2759 if (!pid_is_unwaited(pid))
2760 unit_unwatch_pid(u, pid);
2761 }
2762 }
2763
2764 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2765 Unit *u = userdata;
2766
2767 assert(s);
2768 assert(u);
2769
2770 unit_tidy_watch_pids(u);
2771 unit_watch_all_pids(u);
2772
2773 /* If the PID set is empty now, then let's finish this off. */
2774 unit_synthesize_cgroup_empty_event(u);
2775
2776 return 0;
2777 }
2778
2779 int unit_enqueue_rewatch_pids(Unit *u) {
2780 int r;
2781
2782 assert(u);
2783
2784 if (!u->cgroup_path)
2785 return -ENOENT;
2786
2787 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2788 if (r < 0)
2789 return r;
2790 if (r > 0) /* On unified we can use proper notifications */
2791 return 0;
2792
2793 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2794 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2795 * involves issuing kill(pid, 0) on all processes we watch. */
2796
2797 if (!u->rewatch_pids_event_source) {
2798 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2799
2800 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2801 if (r < 0)
2802 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2803
2804 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2805 if (r < 0)
2806 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2807
2808 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2809
2810 u->rewatch_pids_event_source = TAKE_PTR(s);
2811 }
2812
2813 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2814 if (r < 0)
2815 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2816
2817 return 0;
2818 }
2819
2820 void unit_dequeue_rewatch_pids(Unit *u) {
2821 int r;
2822 assert(u);
2823
2824 if (!u->rewatch_pids_event_source)
2825 return;
2826
2827 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2828 if (r < 0)
2829 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2830
2831 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2832 }
2833
2834 bool unit_job_is_applicable(Unit *u, JobType j) {
2835 assert(u);
2836 assert(j >= 0 && j < _JOB_TYPE_MAX);
2837
2838 switch (j) {
2839
2840 case JOB_VERIFY_ACTIVE:
2841 case JOB_START:
2842 case JOB_NOP:
2843 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2844 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2845 * jobs for it. */
2846 return true;
2847
2848 case JOB_STOP:
2849 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2850 * external events), hence it makes no sense to permit enqueing such a request either. */
2851 return !u->perpetual;
2852
2853 case JOB_RESTART:
2854 case JOB_TRY_RESTART:
2855 return unit_can_stop(u) && unit_can_start(u);
2856
2857 case JOB_RELOAD:
2858 case JOB_TRY_RELOAD:
2859 return unit_can_reload(u);
2860
2861 case JOB_RELOAD_OR_START:
2862 return unit_can_reload(u) && unit_can_start(u);
2863
2864 default:
2865 assert_not_reached("Invalid job type");
2866 }
2867 }
2868
2869 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2870 assert(u);
2871
2872 /* Only warn about some unit types */
2873 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2874 return;
2875
2876 if (streq_ptr(u->id, other))
2877 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2878 else
2879 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2880 }
2881
2882 static int unit_add_dependency_hashmap(
2883 Hashmap **h,
2884 Unit *other,
2885 UnitDependencyMask origin_mask,
2886 UnitDependencyMask destination_mask) {
2887
2888 UnitDependencyInfo info;
2889 int r;
2890
2891 assert(h);
2892 assert(other);
2893 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2894 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2895 assert(origin_mask > 0 || destination_mask > 0);
2896
2897 r = hashmap_ensure_allocated(h, NULL);
2898 if (r < 0)
2899 return r;
2900
2901 assert_cc(sizeof(void*) == sizeof(info));
2902
2903 info.data = hashmap_get(*h, other);
2904 if (info.data) {
2905 /* Entry already exists. Add in our mask. */
2906
2907 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2908 FLAGS_SET(destination_mask, info.destination_mask))
2909 return 0; /* NOP */
2910
2911 info.origin_mask |= origin_mask;
2912 info.destination_mask |= destination_mask;
2913
2914 r = hashmap_update(*h, other, info.data);
2915 } else {
2916 info = (UnitDependencyInfo) {
2917 .origin_mask = origin_mask,
2918 .destination_mask = destination_mask,
2919 };
2920
2921 r = hashmap_put(*h, other, info.data);
2922 }
2923 if (r < 0)
2924 return r;
2925
2926 return 1;
2927 }
2928
2929 int unit_add_dependency(
2930 Unit *u,
2931 UnitDependency d,
2932 Unit *other,
2933 bool add_reference,
2934 UnitDependencyMask mask) {
2935
2936 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2937 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2938 [UNIT_WANTS] = UNIT_WANTED_BY,
2939 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2940 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2941 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2942 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2943 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2944 [UNIT_WANTED_BY] = UNIT_WANTS,
2945 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2946 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2947 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2948 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2949 [UNIT_BEFORE] = UNIT_AFTER,
2950 [UNIT_AFTER] = UNIT_BEFORE,
2951 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2952 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2953 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2954 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2955 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2956 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2957 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2958 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2959 };
2960 Unit *original_u = u, *original_other = other;
2961 int r;
2962
2963 assert(u);
2964 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2965 assert(other);
2966
2967 u = unit_follow_merge(u);
2968 other = unit_follow_merge(other);
2969
2970 /* We won't allow dependencies on ourselves. We will not
2971 * consider them an error however. */
2972 if (u == other) {
2973 maybe_warn_about_dependency(original_u, original_other->id, d);
2974 return 0;
2975 }
2976
2977 if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2978 (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2979 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2980 return 0;
2981 }
2982
2983 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2984 if (r < 0)
2985 return r;
2986
2987 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2988 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2989 if (r < 0)
2990 return r;
2991 }
2992
2993 if (add_reference) {
2994 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2995 if (r < 0)
2996 return r;
2997
2998 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2999 if (r < 0)
3000 return r;
3001 }
3002
3003 unit_add_to_dbus_queue(u);
3004 return 0;
3005 }
3006
3007 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3008 int r;
3009
3010 assert(u);
3011
3012 r = unit_add_dependency(u, d, other, add_reference, mask);
3013 if (r < 0)
3014 return r;
3015
3016 return unit_add_dependency(u, e, other, add_reference, mask);
3017 }
3018
3019 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3020 int r;
3021
3022 assert(u);
3023 assert(name);
3024 assert(buf);
3025 assert(ret);
3026
3027 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3028 *buf = NULL;
3029 *ret = name;
3030 return 0;
3031 }
3032
3033 if (u->instance)
3034 r = unit_name_replace_instance(name, u->instance, buf);
3035 else {
3036 _cleanup_free_ char *i = NULL;
3037
3038 r = unit_name_to_prefix(u->id, &i);
3039 if (r < 0)
3040 return r;
3041
3042 r = unit_name_replace_instance(name, i, buf);
3043 }
3044 if (r < 0)
3045 return r;
3046
3047 *ret = *buf;
3048 return 0;
3049 }
3050
3051 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3052 _cleanup_free_ char *buf = NULL;
3053 Unit *other;
3054 int r;
3055
3056 assert(u);
3057 assert(name);
3058
3059 r = resolve_template(u, name, &buf, &name);
3060 if (r < 0)
3061 return r;
3062
3063 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3064 if (r < 0)
3065 return r;
3066
3067 return unit_add_dependency(u, d, other, add_reference, mask);
3068 }
3069
3070 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3071 _cleanup_free_ char *buf = NULL;
3072 Unit *other;
3073 int r;
3074
3075 assert(u);
3076 assert(name);
3077
3078 r = resolve_template(u, name, &buf, &name);
3079 if (r < 0)
3080 return r;
3081
3082 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3083 if (r < 0)
3084 return r;
3085
3086 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3087 }
3088
3089 int set_unit_path(const char *p) {
3090 /* This is mostly for debug purposes */
3091 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3092 return -errno;
3093
3094 return 0;
3095 }
3096
3097 char *unit_dbus_path(Unit *u) {
3098 assert(u);
3099
3100 if (!u->id)
3101 return NULL;
3102
3103 return unit_dbus_path_from_name(u->id);
3104 }
3105
3106 char *unit_dbus_path_invocation_id(Unit *u) {
3107 assert(u);
3108
3109 if (sd_id128_is_null(u->invocation_id))
3110 return NULL;
3111
3112 return unit_dbus_path_from_name(u->invocation_id_string);
3113 }
3114
3115 int unit_set_slice(Unit *u, Unit *slice) {
3116 assert(u);
3117 assert(slice);
3118
3119 /* Sets the unit slice if it has not been set before. Is extra
3120 * careful, to only allow this for units that actually have a
3121 * cgroup context. Also, we don't allow to set this for slices
3122 * (since the parent slice is derived from the name). Make
3123 * sure the unit we set is actually a slice. */
3124
3125 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3126 return -EOPNOTSUPP;
3127
3128 if (u->type == UNIT_SLICE)
3129 return -EINVAL;
3130
3131 if (unit_active_state(u) != UNIT_INACTIVE)
3132 return -EBUSY;
3133
3134 if (slice->type != UNIT_SLICE)
3135 return -EINVAL;
3136
3137 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3138 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3139 return -EPERM;
3140
3141 if (UNIT_DEREF(u->slice) == slice)
3142 return 0;
3143
3144 /* Disallow slice changes if @u is already bound to cgroups */
3145 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3146 return -EBUSY;
3147
3148 unit_ref_set(&u->slice, u, slice);
3149 return 1;
3150 }
3151
3152 int unit_set_default_slice(Unit *u) {
3153 const char *slice_name;
3154 Unit *slice;
3155 int r;
3156
3157 assert(u);
3158
3159 if (UNIT_ISSET(u->slice))
3160 return 0;
3161
3162 if (u->instance) {
3163 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3164
3165 /* Implicitly place all instantiated units in their
3166 * own per-template slice */
3167
3168 r = unit_name_to_prefix(u->id, &prefix);
3169 if (r < 0)
3170 return r;
3171
3172 /* The prefix is already escaped, but it might include
3173 * "-" which has a special meaning for slice units,
3174 * hence escape it here extra. */
3175 escaped = unit_name_escape(prefix);
3176 if (!escaped)
3177 return -ENOMEM;
3178
3179 if (MANAGER_IS_SYSTEM(u->manager))
3180 slice_name = strjoina("system-", escaped, ".slice");
3181 else
3182 slice_name = strjoina(escaped, ".slice");
3183 } else
3184 slice_name =
3185 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3186 ? SPECIAL_SYSTEM_SLICE
3187 : SPECIAL_ROOT_SLICE;
3188
3189 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3190 if (r < 0)
3191 return r;
3192
3193 return unit_set_slice(u, slice);
3194 }
3195
3196 const char *unit_slice_name(Unit *u) {
3197 assert(u);
3198
3199 if (!UNIT_ISSET(u->slice))
3200 return NULL;
3201
3202 return UNIT_DEREF(u->slice)->id;
3203 }
3204
3205 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3206 _cleanup_free_ char *t = NULL;
3207 int r;
3208
3209 assert(u);
3210 assert(type);
3211 assert(_found);
3212
3213 r = unit_name_change_suffix(u->id, type, &t);
3214 if (r < 0)
3215 return r;
3216 if (unit_has_name(u, t))
3217 return -EINVAL;
3218
3219 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3220 assert(r < 0 || *_found != u);
3221 return r;
3222 }
3223
3224 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3225 const char *name, *old_owner, *new_owner;
3226 Unit *u = userdata;
3227 int r;
3228
3229 assert(message);
3230 assert(u);
3231
3232 r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3233 if (r < 0) {
3234 bus_log_parse_error(r);
3235 return 0;
3236 }
3237
3238 old_owner = empty_to_null(old_owner);
3239 new_owner = empty_to_null(new_owner);
3240
3241 if (UNIT_VTABLE(u)->bus_name_owner_change)
3242 UNIT_VTABLE(u)->bus_name_owner_change(u, name, old_owner, new_owner);
3243
3244 return 0;
3245 }
3246
3247 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3248 const char *match;
3249
3250 assert(u);
3251 assert(bus);
3252 assert(name);
3253
3254 if (u->match_bus_slot)
3255 return -EBUSY;
3256
3257 match = strjoina("type='signal',"
3258 "sender='org.freedesktop.DBus',"
3259 "path='/org/freedesktop/DBus',"
3260 "interface='org.freedesktop.DBus',"
3261 "member='NameOwnerChanged',"
3262 "arg0='", name, "'");
3263
3264 return sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3265 }
3266
3267 int unit_watch_bus_name(Unit *u, const char *name) {
3268 int r;
3269
3270 assert(u);
3271 assert(name);
3272
3273 /* Watch a specific name on the bus. We only support one unit
3274 * watching each name for now. */
3275
3276 if (u->manager->api_bus) {
3277 /* If the bus is already available, install the match directly.
3278 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3279 r = unit_install_bus_match(u, u->manager->api_bus, name);
3280 if (r < 0)
3281 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3282 }
3283
3284 r = hashmap_put(u->manager->watch_bus, name, u);
3285 if (r < 0) {
3286 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3287 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3288 }
3289
3290 return 0;
3291 }
3292
3293 void unit_unwatch_bus_name(Unit *u, const char *name) {
3294 assert(u);
3295 assert(name);
3296
3297 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3298 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3299 }
3300
3301 bool unit_can_serialize(Unit *u) {
3302 assert(u);
3303
3304 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3305 }
3306
3307 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3308 _cleanup_free_ char *s = NULL;
3309 int r;
3310
3311 assert(f);
3312 assert(key);
3313
3314 if (mask == 0)
3315 return 0;
3316
3317 r = cg_mask_to_string(mask, &s);
3318 if (r < 0)
3319 return log_error_errno(r, "Failed to format cgroup mask: %m");
3320
3321 return serialize_item(f, key, s);
3322 }
3323
3324 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3325 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3326 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3327 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3328 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3329 };
3330
3331 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3332 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3333 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3334 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3335 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3336 };
3337
3338 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3339 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3340 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3341 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3342 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3343 };
3344
3345 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3346 CGroupIPAccountingMetric m;
3347 int r;
3348
3349 assert(u);
3350 assert(f);
3351 assert(fds);
3352
3353 if (unit_can_serialize(u)) {
3354 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3355 if (r < 0)
3356 return r;
3357 }
3358
3359 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3360
3361 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3362 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3363 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3364 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3365
3366 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3367 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3368
3369 if (dual_timestamp_is_set(&u->condition_timestamp))
3370 (void) serialize_bool(f, "condition-result", u->condition_result);
3371
3372 if (dual_timestamp_is_set(&u->assert_timestamp))
3373 (void) serialize_bool(f, "assert-result", u->assert_result);
3374
3375 (void) serialize_bool(f, "transient", u->transient);
3376 (void) serialize_bool(f, "in-audit", u->in_audit);
3377
3378 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3379 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3380 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3381 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3382 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3383
3384 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3385 if (u->cpu_usage_last != NSEC_INFINITY)
3386 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3387
3388 if (u->oom_kill_last > 0)
3389 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3390
3391 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3392 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3393
3394 if (u->io_accounting_last[im] != UINT64_MAX)
3395 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3396 }
3397
3398 if (u->cgroup_path)
3399 (void) serialize_item(f, "cgroup", u->cgroup_path);
3400
3401 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3402 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3403 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3404 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3405
3406 if (uid_is_valid(u->ref_uid))
3407 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3408 if (gid_is_valid(u->ref_gid))
3409 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3410
3411 if (!sd_id128_is_null(u->invocation_id))
3412 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3413
3414 bus_track_serialize(u->bus_track, f, "ref");
3415
3416 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3417 uint64_t v;
3418
3419 r = unit_get_ip_accounting(u, m, &v);
3420 if (r >= 0)
3421 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3422 }
3423
3424 if (serialize_jobs) {
3425 if (u->job) {
3426 fputs("job\n", f);
3427 job_serialize(u->job, f);
3428 }
3429
3430 if (u->nop_job) {
3431 fputs("job\n", f);
3432 job_serialize(u->nop_job, f);
3433 }
3434 }
3435
3436 /* End marker */
3437 fputc('\n', f);
3438 return 0;
3439 }
3440
3441 static int unit_deserialize_job(Unit *u, FILE *f) {
3442 _cleanup_(job_freep) Job *j = NULL;
3443 int r;
3444
3445 assert(u);
3446 assert(f);
3447
3448 j = job_new_raw(u);
3449 if (!j)
3450 return log_oom();
3451
3452 r = job_deserialize(j, f);
3453 if (r < 0)
3454 return r;
3455
3456 r = job_install_deserialized(j);
3457 if (r < 0)
3458 return r;
3459
3460 TAKE_PTR(j);
3461 return 0;
3462 }
3463
3464 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3465 int r;
3466
3467 assert(u);
3468 assert(f);
3469 assert(fds);
3470
3471 for (;;) {
3472 _cleanup_free_ char *line = NULL;
3473 char *l, *v;
3474 ssize_t m;
3475 size_t k;
3476
3477 r = read_line(f, LONG_LINE_MAX, &line);
3478 if (r < 0)
3479 return log_error_errno(r, "Failed to read serialization line: %m");
3480 if (r == 0) /* eof */
3481 break;
3482
3483 l = strstrip(line);
3484 if (isempty(l)) /* End marker */
3485 break;
3486
3487 k = strcspn(l, "=");
3488
3489 if (l[k] == '=') {
3490 l[k] = 0;
3491 v = l+k+1;
3492 } else
3493 v = l+k;
3494
3495 if (streq(l, "job")) {
3496 if (v[0] == '\0') {
3497 /* New-style serialized job */
3498 r = unit_deserialize_job(u, f);
3499 if (r < 0)
3500 return r;
3501 } else /* Legacy for pre-44 */
3502 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3503 continue;
3504 } else if (streq(l, "state-change-timestamp")) {
3505 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3506 continue;
3507 } else if (streq(l, "inactive-exit-timestamp")) {
3508 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3509 continue;
3510 } else if (streq(l, "active-enter-timestamp")) {
3511 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3512 continue;
3513 } else if (streq(l, "active-exit-timestamp")) {
3514 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3515 continue;
3516 } else if (streq(l, "inactive-enter-timestamp")) {
3517 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3518 continue;
3519 } else if (streq(l, "condition-timestamp")) {
3520 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3521 continue;
3522 } else if (streq(l, "assert-timestamp")) {
3523 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3524 continue;
3525 } else if (streq(l, "condition-result")) {
3526
3527 r = parse_boolean(v);
3528 if (r < 0)
3529 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3530 else
3531 u->condition_result = r;
3532
3533 continue;
3534
3535 } else if (streq(l, "assert-result")) {
3536
3537 r = parse_boolean(v);
3538 if (r < 0)
3539 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3540 else
3541 u->assert_result = r;
3542
3543 continue;
3544
3545 } else if (streq(l, "transient")) {
3546
3547 r = parse_boolean(v);
3548 if (r < 0)
3549 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3550 else
3551 u->transient = r;
3552
3553 continue;
3554
3555 } else if (streq(l, "in-audit")) {
3556
3557 r = parse_boolean(v);
3558 if (r < 0)
3559 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3560 else
3561 u->in_audit = r;
3562
3563 continue;
3564
3565 } else if (streq(l, "exported-invocation-id")) {
3566
3567 r = parse_boolean(v);
3568 if (r < 0)
3569 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3570 else
3571 u->exported_invocation_id = r;
3572
3573 continue;
3574
3575 } else if (streq(l, "exported-log-level-max")) {
3576
3577 r = parse_boolean(v);
3578 if (r < 0)
3579 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3580 else
3581 u->exported_log_level_max = r;
3582
3583 continue;
3584
3585 } else if (streq(l, "exported-log-extra-fields")) {
3586
3587 r = parse_boolean(v);
3588 if (r < 0)
3589 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3590 else
3591 u->exported_log_extra_fields = r;
3592
3593 continue;
3594
3595 } else if (streq(l, "exported-log-rate-limit-interval")) {
3596
3597 r = parse_boolean(v);
3598 if (r < 0)
3599 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3600 else
3601 u->exported_log_rate_limit_interval = r;
3602
3603 continue;
3604
3605 } else if (streq(l, "exported-log-rate-limit-burst")) {
3606
3607 r = parse_boolean(v);
3608 if (r < 0)
3609 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3610 else
3611 u->exported_log_rate_limit_burst = r;
3612
3613 continue;
3614
3615 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3616
3617 r = safe_atou64(v, &u->cpu_usage_base);
3618 if (r < 0)
3619 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3620
3621 continue;
3622
3623 } else if (streq(l, "cpu-usage-last")) {
3624
3625 r = safe_atou64(v, &u->cpu_usage_last);
3626 if (r < 0)
3627 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3628
3629 continue;
3630
3631 } else if (streq(l, "oom-kill-last")) {
3632
3633 r = safe_atou64(v, &u->oom_kill_last);
3634 if (r < 0)
3635 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3636
3637 continue;
3638
3639 } else if (streq(l, "cgroup")) {
3640
3641 r = unit_set_cgroup_path(u, v);
3642 if (r < 0)
3643 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3644
3645 (void) unit_watch_cgroup(u);
3646 (void) unit_watch_cgroup_memory(u);
3647
3648 continue;
3649 } else if (streq(l, "cgroup-realized")) {
3650 int b;
3651
3652 b = parse_boolean(v);
3653 if (b < 0)
3654 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3655 else
3656 u->cgroup_realized = b;
3657
3658 continue;
3659
3660 } else if (streq(l, "cgroup-realized-mask")) {
3661
3662 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3663 if (r < 0)
3664 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3665 continue;
3666
3667 } else if (streq(l, "cgroup-enabled-mask")) {
3668
3669 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3670 if (r < 0)
3671 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3672 continue;
3673
3674 } else if (streq(l, "cgroup-invalidated-mask")) {
3675
3676 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3677 if (r < 0)
3678 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3679 continue;
3680
3681 } else if (streq(l, "ref-uid")) {
3682 uid_t uid;
3683
3684 r = parse_uid(v, &uid);
3685 if (r < 0)
3686 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3687 else
3688 unit_ref_uid_gid(u, uid, GID_INVALID);
3689
3690 continue;
3691
3692 } else if (streq(l, "ref-gid")) {
3693 gid_t gid;
3694
3695 r = parse_gid(v, &gid);
3696 if (r < 0)
3697 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3698 else
3699 unit_ref_uid_gid(u, UID_INVALID, gid);
3700
3701 continue;
3702
3703 } else if (streq(l, "ref")) {
3704
3705 r = strv_extend(&u->deserialized_refs, v);
3706 if (r < 0)
3707 return log_oom();
3708
3709 continue;
3710 } else if (streq(l, "invocation-id")) {
3711 sd_id128_t id;
3712
3713 r = sd_id128_from_string(v, &id);
3714 if (r < 0)
3715 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3716 else {
3717 r = unit_set_invocation_id(u, id);
3718 if (r < 0)
3719 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3720 }
3721
3722 continue;
3723 }
3724
3725 /* Check if this is an IP accounting metric serialization field */
3726 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3727 if (m >= 0) {
3728 uint64_t c;
3729
3730 r = safe_atou64(v, &c);
3731 if (r < 0)
3732 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3733 else
3734 u->ip_accounting_extra[m] = c;
3735 continue;
3736 }
3737
3738 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3739 if (m >= 0) {
3740 uint64_t c;
3741
3742 r = safe_atou64(v, &c);
3743 if (r < 0)
3744 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3745 else
3746 u->io_accounting_base[m] = c;
3747 continue;
3748 }
3749
3750 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3751 if (m >= 0) {
3752 uint64_t c;
3753
3754 r = safe_atou64(v, &c);
3755 if (r < 0)
3756 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3757 else
3758 u->io_accounting_last[m] = c;
3759 continue;
3760 }
3761
3762 if (unit_can_serialize(u)) {
3763 r = exec_runtime_deserialize_compat(u, l, v, fds);
3764 if (r < 0) {
3765 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3766 continue;
3767 }
3768
3769 /* Returns positive if key was handled by the call */
3770 if (r > 0)
3771 continue;
3772
3773 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3774 if (r < 0)
3775 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3776 }
3777 }
3778
3779 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3780 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3781 * before 228 where the base for timeouts was not persistent across reboots. */
3782
3783 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3784 dual_timestamp_get(&u->state_change_timestamp);
3785
3786 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3787 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3788 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3789 unit_invalidate_cgroup_bpf(u);
3790
3791 return 0;
3792 }
3793
3794 int unit_deserialize_skip(FILE *f) {
3795 int r;
3796 assert(f);
3797
3798 /* Skip serialized data for this unit. We don't know what it is. */
3799
3800 for (;;) {
3801 _cleanup_free_ char *line = NULL;
3802 char *l;
3803
3804 r = read_line(f, LONG_LINE_MAX, &line);
3805 if (r < 0)
3806 return log_error_errno(r, "Failed to read serialization line: %m");
3807 if (r == 0)
3808 return 0;
3809
3810 l = strstrip(line);
3811
3812 /* End marker */
3813 if (isempty(l))
3814 return 1;
3815 }
3816 }
3817
3818 int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3819 Unit *device;
3820 _cleanup_free_ char *e = NULL;
3821 int r;
3822
3823 assert(u);
3824
3825 /* Adds in links to the device node that this unit is based on */
3826 if (isempty(what))
3827 return 0;
3828
3829 if (!is_device_path(what))
3830 return 0;
3831
3832 /* When device units aren't supported (such as in a
3833 * container), don't create dependencies on them. */
3834 if (!unit_type_supported(UNIT_DEVICE))
3835 return 0;
3836
3837 r = unit_name_from_path(what, ".device", &e);
3838 if (r < 0)
3839 return r;
3840
3841 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3842 if (r < 0)
3843 return r;
3844
3845 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3846 dep = UNIT_BINDS_TO;
3847
3848 r = unit_add_two_dependencies(u, UNIT_AFTER,
3849 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3850 device, true, mask);
3851 if (r < 0)
3852 return r;
3853
3854 if (wants) {
3855 r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3856 if (r < 0)
3857 return r;
3858 }
3859
3860 return 0;
3861 }
3862
3863 int unit_coldplug(Unit *u) {
3864 int r = 0, q;
3865 char **i;
3866
3867 assert(u);
3868
3869 /* Make sure we don't enter a loop, when coldplugging recursively. */
3870 if (u->coldplugged)
3871 return 0;
3872
3873 u->coldplugged = true;
3874
3875 STRV_FOREACH(i, u->deserialized_refs) {
3876 q = bus_unit_track_add_name(u, *i);
3877 if (q < 0 && r >= 0)
3878 r = q;
3879 }
3880 u->deserialized_refs = strv_free(u->deserialized_refs);
3881
3882 if (UNIT_VTABLE(u)->coldplug) {
3883 q = UNIT_VTABLE(u)->coldplug(u);
3884 if (q < 0 && r >= 0)
3885 r = q;
3886 }
3887
3888 if (u->job) {
3889 q = job_coldplug(u->job);
3890 if (q < 0 && r >= 0)
3891 r = q;
3892 }
3893
3894 return r;
3895 }
3896
3897 void unit_catchup(Unit *u) {
3898 assert(u);
3899
3900 if (UNIT_VTABLE(u)->catchup)
3901 UNIT_VTABLE(u)->catchup(u);
3902 }
3903
3904 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3905 struct stat st;
3906
3907 if (!path)
3908 return false;
3909
3910 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3911 * are never out-of-date. */
3912 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3913 return false;
3914
3915 if (stat(path, &st) < 0)
3916 /* What, cannot access this anymore? */
3917 return true;
3918
3919 if (path_masked)
3920 /* For masked files check if they are still so */
3921 return !null_or_empty(&st);
3922 else
3923 /* For non-empty files check the mtime */
3924 return timespec_load(&st.st_mtim) > mtime;
3925
3926 return false;
3927 }
3928
3929 bool unit_need_daemon_reload(Unit *u) {
3930 _cleanup_strv_free_ char **t = NULL;
3931 char **path;
3932
3933 assert(u);
3934
3935 /* For unit files, we allow masking… */
3936 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3937 u->load_state == UNIT_MASKED))
3938 return true;
3939
3940 /* Source paths should not be masked… */
3941 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3942 return true;
3943
3944 if (u->load_state == UNIT_LOADED)
3945 (void) unit_find_dropin_paths(u, &t);
3946 if (!strv_equal(u->dropin_paths, t))
3947 return true;
3948
3949 /* … any drop-ins that are masked are simply omitted from the list. */
3950 STRV_FOREACH(path, u->dropin_paths)
3951 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
3952 return true;
3953
3954 return false;
3955 }
3956
3957 void unit_reset_failed(Unit *u) {
3958 assert(u);
3959
3960 if (UNIT_VTABLE(u)->reset_failed)
3961 UNIT_VTABLE(u)->reset_failed(u);
3962
3963 RATELIMIT_RESET(u->start_limit);
3964 u->start_limit_hit = false;
3965 }
3966
3967 Unit *unit_following(Unit *u) {
3968 assert(u);
3969
3970 if (UNIT_VTABLE(u)->following)
3971 return UNIT_VTABLE(u)->following(u);
3972
3973 return NULL;
3974 }
3975
3976 bool unit_stop_pending(Unit *u) {
3977 assert(u);
3978
3979 /* This call does check the current state of the unit. It's
3980 * hence useful to be called from state change calls of the
3981 * unit itself, where the state isn't updated yet. This is
3982 * different from unit_inactive_or_pending() which checks both
3983 * the current state and for a queued job. */
3984
3985 return u->job && u->job->type == JOB_STOP;
3986 }
3987
3988 bool unit_inactive_or_pending(Unit *u) {
3989 assert(u);
3990
3991 /* Returns true if the unit is inactive or going down */
3992
3993 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3994 return true;
3995
3996 if (unit_stop_pending(u))
3997 return true;
3998
3999 return false;
4000 }
4001
4002 bool unit_active_or_pending(Unit *u) {
4003 assert(u);
4004
4005 /* Returns true if the unit is active or going up */
4006
4007 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4008 return true;
4009
4010 if (u->job &&
4011 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4012 return true;
4013
4014 return false;
4015 }
4016
4017 bool unit_will_restart(Unit *u) {
4018 assert(u);
4019
4020 if (!UNIT_VTABLE(u)->will_restart)
4021 return false;
4022
4023 return UNIT_VTABLE(u)->will_restart(u);
4024 }
4025
4026 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4027 assert(u);
4028 assert(w >= 0 && w < _KILL_WHO_MAX);
4029 assert(SIGNAL_VALID(signo));
4030
4031 if (!UNIT_VTABLE(u)->kill)
4032 return -EOPNOTSUPP;
4033
4034 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4035 }
4036
4037 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4038 _cleanup_set_free_ Set *pid_set = NULL;
4039 int r;
4040
4041 pid_set = set_new(NULL);
4042 if (!pid_set)
4043 return NULL;
4044
4045 /* Exclude the main/control pids from being killed via the cgroup */
4046 if (main_pid > 0) {
4047 r = set_put(pid_set, PID_TO_PTR(main_pid));
4048 if (r < 0)
4049 return NULL;
4050 }
4051
4052 if (control_pid > 0) {
4053 r = set_put(pid_set, PID_TO_PTR(control_pid));
4054 if (r < 0)
4055 return NULL;
4056 }
4057
4058 return TAKE_PTR(pid_set);
4059 }
4060
4061 int unit_kill_common(
4062 Unit *u,
4063 KillWho who,
4064 int signo,
4065 pid_t main_pid,
4066 pid_t control_pid,
4067 sd_bus_error *error) {
4068
4069 int r = 0;
4070 bool killed = false;
4071
4072 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4073 if (main_pid < 0)
4074 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4075 else if (main_pid == 0)
4076 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4077 }
4078
4079 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4080 if (control_pid < 0)
4081 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4082 else if (control_pid == 0)
4083 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4084 }
4085
4086 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4087 if (control_pid > 0) {
4088 if (kill(control_pid, signo) < 0)
4089 r = -errno;
4090 else
4091 killed = true;
4092 }
4093
4094 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4095 if (main_pid > 0) {
4096 if (kill(main_pid, signo) < 0)
4097 r = -errno;
4098 else
4099 killed = true;
4100 }
4101
4102 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4103 _cleanup_set_free_ Set *pid_set = NULL;
4104 int q;
4105
4106 /* Exclude the main/control pids from being killed via the cgroup */
4107 pid_set = unit_pid_set(main_pid, control_pid);
4108 if (!pid_set)
4109 return -ENOMEM;
4110
4111 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4112 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4113 r = q;
4114 else
4115 killed = true;
4116 }
4117
4118 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4119 return -ESRCH;
4120
4121 return r;
4122 }
4123
4124 int unit_following_set(Unit *u, Set **s) {
4125 assert(u);
4126 assert(s);
4127
4128 if (UNIT_VTABLE(u)->following_set)
4129 return UNIT_VTABLE(u)->following_set(u, s);
4130
4131 *s = NULL;
4132 return 0;
4133 }
4134
4135 UnitFileState unit_get_unit_file_state(Unit *u) {
4136 int r;
4137
4138 assert(u);
4139
4140 if (u->unit_file_state < 0 && u->fragment_path) {
4141 r = unit_file_get_state(
4142 u->manager->unit_file_scope,
4143 NULL,
4144 u->id,
4145 &u->unit_file_state);
4146 if (r < 0)
4147 u->unit_file_state = UNIT_FILE_BAD;
4148 }
4149
4150 return u->unit_file_state;
4151 }
4152
4153 int unit_get_unit_file_preset(Unit *u) {
4154 assert(u);
4155
4156 if (u->unit_file_preset < 0 && u->fragment_path)
4157 u->unit_file_preset = unit_file_query_preset(
4158 u->manager->unit_file_scope,
4159 NULL,
4160 basename(u->fragment_path));
4161
4162 return u->unit_file_preset;
4163 }
4164
4165 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4166 assert(ref);
4167 assert(source);
4168 assert(target);
4169
4170 if (ref->target)
4171 unit_ref_unset(ref);
4172
4173 ref->source = source;
4174 ref->target = target;
4175 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4176 return target;
4177 }
4178
4179 void unit_ref_unset(UnitRef *ref) {
4180 assert(ref);
4181
4182 if (!ref->target)
4183 return;
4184
4185 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4186 * be unreferenced now. */
4187 unit_add_to_gc_queue(ref->target);
4188
4189 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4190 ref->source = ref->target = NULL;
4191 }
4192
4193 static int user_from_unit_name(Unit *u, char **ret) {
4194
4195 static const uint8_t hash_key[] = {
4196 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4197 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4198 };
4199
4200 _cleanup_free_ char *n = NULL;
4201 int r;
4202
4203 r = unit_name_to_prefix(u->id, &n);
4204 if (r < 0)
4205 return r;
4206
4207 if (valid_user_group_name(n)) {
4208 *ret = TAKE_PTR(n);
4209 return 0;
4210 }
4211
4212 /* If we can't use the unit name as a user name, then let's hash it and use that */
4213 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4214 return -ENOMEM;
4215
4216 return 0;
4217 }
4218
4219 int unit_patch_contexts(Unit *u) {
4220 CGroupContext *cc;
4221 ExecContext *ec;
4222 unsigned i;
4223 int r;
4224
4225 assert(u);
4226
4227 /* Patch in the manager defaults into the exec and cgroup
4228 * contexts, _after_ the rest of the settings have been
4229 * initialized */
4230
4231 ec = unit_get_exec_context(u);
4232 if (ec) {
4233 /* This only copies in the ones that need memory */
4234 for (i = 0; i < _RLIMIT_MAX; i++)
4235 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4236 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4237 if (!ec->rlimit[i])
4238 return -ENOMEM;
4239 }
4240
4241 if (MANAGER_IS_USER(u->manager) &&
4242 !ec->working_directory) {
4243
4244 r = get_home_dir(&ec->working_directory);
4245 if (r < 0)
4246 return r;
4247
4248 /* Allow user services to run, even if the
4249 * home directory is missing */
4250 ec->working_directory_missing_ok = true;
4251 }
4252
4253 if (ec->private_devices)
4254 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4255
4256 if (ec->protect_kernel_modules)
4257 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4258
4259 if (ec->dynamic_user) {
4260 if (!ec->user) {
4261 r = user_from_unit_name(u, &ec->user);
4262 if (r < 0)
4263 return r;
4264 }
4265
4266 if (!ec->group) {
4267 ec->group = strdup(ec->user);
4268 if (!ec->group)
4269 return -ENOMEM;
4270 }
4271
4272 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4273 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4274 * sandbox. */
4275
4276 ec->private_tmp = true;
4277 ec->remove_ipc = true;
4278 ec->protect_system = PROTECT_SYSTEM_STRICT;
4279 if (ec->protect_home == PROTECT_HOME_NO)
4280 ec->protect_home = PROTECT_HOME_READ_ONLY;
4281
4282 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4283 * them. */
4284 ec->no_new_privileges = true;
4285 ec->restrict_suid_sgid = true;
4286 }
4287 }
4288
4289 cc = unit_get_cgroup_context(u);
4290 if (cc && ec) {
4291
4292 if (ec->private_devices &&
4293 cc->device_policy == CGROUP_AUTO)
4294 cc->device_policy = CGROUP_CLOSED;
4295
4296 if (ec->root_image &&
4297 (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4298
4299 /* When RootImage= is specified, the following devices are touched. */
4300 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4301 if (r < 0)
4302 return r;
4303
4304 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4305 if (r < 0)
4306 return r;
4307
4308 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4309 if (r < 0)
4310 return r;
4311 }
4312 }
4313
4314 return 0;
4315 }
4316
4317 ExecContext *unit_get_exec_context(Unit *u) {
4318 size_t offset;
4319 assert(u);
4320
4321 if (u->type < 0)
4322 return NULL;
4323
4324 offset = UNIT_VTABLE(u)->exec_context_offset;
4325 if (offset <= 0)
4326 return NULL;
4327
4328 return (ExecContext*) ((uint8_t*) u + offset);
4329 }
4330
4331 KillContext *unit_get_kill_context(Unit *u) {
4332 size_t offset;
4333 assert(u);
4334
4335 if (u->type < 0)
4336 return NULL;
4337
4338 offset = UNIT_VTABLE(u)->kill_context_offset;
4339 if (offset <= 0)
4340 return NULL;
4341
4342 return (KillContext*) ((uint8_t*) u + offset);
4343 }
4344
4345 CGroupContext *unit_get_cgroup_context(Unit *u) {
4346 size_t offset;
4347
4348 if (u->type < 0)
4349 return NULL;
4350
4351 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4352 if (offset <= 0)
4353 return NULL;
4354
4355 return (CGroupContext*) ((uint8_t*) u + offset);
4356 }
4357
4358 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4359 size_t offset;
4360
4361 if (u->type < 0)
4362 return NULL;
4363
4364 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4365 if (offset <= 0)
4366 return NULL;
4367
4368 return *(ExecRuntime**) ((uint8_t*) u + offset);
4369 }
4370
4371 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4372 assert(u);
4373
4374 if (UNIT_WRITE_FLAGS_NOOP(flags))
4375 return NULL;
4376
4377 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4378 return u->manager->lookup_paths.transient;
4379
4380 if (flags & UNIT_PERSISTENT)
4381 return u->manager->lookup_paths.persistent_control;
4382
4383 if (flags & UNIT_RUNTIME)
4384 return u->manager->lookup_paths.runtime_control;
4385
4386 return NULL;
4387 }
4388
4389 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4390 char *ret = NULL;
4391
4392 if (!s)
4393 return NULL;
4394
4395 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4396 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4397 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4398 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4399 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4400 * allocations. */
4401
4402 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4403 ret = specifier_escape(s);
4404 if (!ret)
4405 return NULL;
4406
4407 s = ret;
4408 }
4409
4410 if (flags & UNIT_ESCAPE_C) {
4411 char *a;
4412
4413 a = cescape(s);
4414 free(ret);
4415 if (!a)
4416 return NULL;
4417
4418 ret = a;
4419 }
4420
4421 if (buf) {
4422 *buf = ret;
4423 return ret ?: (char*) s;
4424 }
4425
4426 return ret ?: strdup(s);
4427 }
4428
4429 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4430 _cleanup_free_ char *result = NULL;
4431 size_t n = 0, allocated = 0;
4432 char **i;
4433
4434 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4435 * way suitable for ExecStart= stanzas */
4436
4437 STRV_FOREACH(i, l) {
4438 _cleanup_free_ char *buf = NULL;
4439 const char *p;
4440 size_t a;
4441 char *q;
4442
4443 p = unit_escape_setting(*i, flags, &buf);
4444 if (!p)
4445 return NULL;
4446
4447 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4448 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4449 return NULL;
4450
4451 q = result + n;
4452 if (n > 0)
4453 *(q++) = ' ';
4454
4455 *(q++) = '"';
4456 q = stpcpy(q, p);
4457 *(q++) = '"';
4458
4459 n += a;
4460 }
4461
4462 if (!GREEDY_REALLOC(result, allocated, n + 1))
4463 return NULL;
4464
4465 result[n] = 0;
4466
4467 return TAKE_PTR(result);
4468 }
4469
4470 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4471 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4472 const char *dir, *wrapped;
4473 int r;
4474
4475 assert(u);
4476 assert(name);
4477 assert(data);
4478
4479 if (UNIT_WRITE_FLAGS_NOOP(flags))
4480 return 0;
4481
4482 data = unit_escape_setting(data, flags, &escaped);
4483 if (!data)
4484 return -ENOMEM;
4485
4486 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4487 * previous section header is the same */
4488
4489 if (flags & UNIT_PRIVATE) {
4490 if (!UNIT_VTABLE(u)->private_section)
4491 return -EINVAL;
4492
4493 if (!u->transient_file || u->last_section_private < 0)
4494 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4495 else if (u->last_section_private == 0)
4496 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4497 } else {
4498 if (!u->transient_file || u->last_section_private < 0)
4499 data = strjoina("[Unit]\n", data);
4500 else if (u->last_section_private > 0)
4501 data = strjoina("\n[Unit]\n", data);
4502 }
4503
4504 if (u->transient_file) {
4505 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4506 * write to the transient unit file. */
4507 fputs(data, u->transient_file);
4508
4509 if (!endswith(data, "\n"))
4510 fputc('\n', u->transient_file);
4511
4512 /* Remember which section we wrote this entry to */
4513 u->last_section_private = !!(flags & UNIT_PRIVATE);
4514 return 0;
4515 }
4516
4517 dir = unit_drop_in_dir(u, flags);
4518 if (!dir)
4519 return -EINVAL;
4520
4521 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4522 "# or an equivalent operation. Do not edit.\n",
4523 data,
4524 "\n");
4525
4526 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4527 if (r < 0)
4528 return r;
4529
4530 (void) mkdir_p_label(p, 0755);
4531 r = write_string_file_atomic_label(q, wrapped);
4532 if (r < 0)
4533 return r;
4534
4535 r = strv_push(&u->dropin_paths, q);
4536 if (r < 0)
4537 return r;
4538 q = NULL;
4539
4540 strv_uniq(u->dropin_paths);
4541
4542 u->dropin_mtime = now(CLOCK_REALTIME);
4543
4544 return 0;
4545 }
4546
4547 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4548 _cleanup_free_ char *p = NULL;
4549 va_list ap;
4550 int r;
4551
4552 assert(u);
4553 assert(name);
4554 assert(format);
4555
4556 if (UNIT_WRITE_FLAGS_NOOP(flags))
4557 return 0;
4558
4559 va_start(ap, format);
4560 r = vasprintf(&p, format, ap);
4561 va_end(ap);
4562
4563 if (r < 0)
4564 return -ENOMEM;
4565
4566 return unit_write_setting(u, flags, name, p);
4567 }
4568
4569 int unit_make_transient(Unit *u) {
4570 _cleanup_free_ char *path = NULL;
4571 FILE *f;
4572
4573 assert(u);
4574
4575 if (!UNIT_VTABLE(u)->can_transient)
4576 return -EOPNOTSUPP;
4577
4578 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4579
4580 path = path_join(u->manager->lookup_paths.transient, u->id);
4581 if (!path)
4582 return -ENOMEM;
4583
4584 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4585 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4586
4587 RUN_WITH_UMASK(0022) {
4588 f = fopen(path, "we");
4589 if (!f)
4590 return -errno;
4591 }
4592
4593 safe_fclose(u->transient_file);
4594 u->transient_file = f;
4595
4596 free_and_replace(u->fragment_path, path);
4597
4598 u->source_path = mfree(u->source_path);
4599 u->dropin_paths = strv_free(u->dropin_paths);
4600 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4601
4602 u->load_state = UNIT_STUB;
4603 u->load_error = 0;
4604 u->transient = true;
4605
4606 unit_add_to_dbus_queue(u);
4607 unit_add_to_gc_queue(u);
4608
4609 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4610 u->transient_file);
4611
4612 return 0;
4613 }
4614
4615 static int log_kill(pid_t pid, int sig, void *userdata) {
4616 _cleanup_free_ char *comm = NULL;
4617
4618 (void) get_process_comm(pid, &comm);
4619
4620 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4621 only, like for example systemd's own PAM stub process. */
4622 if (comm && comm[0] == '(')
4623 return 0;
4624
4625 log_unit_notice(userdata,
4626 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4627 pid,
4628 strna(comm),
4629 signal_to_string(sig));
4630
4631 return 1;
4632 }
4633
4634 static int operation_to_signal(KillContext *c, KillOperation k) {
4635 assert(c);
4636
4637 switch (k) {
4638
4639 case KILL_TERMINATE:
4640 case KILL_TERMINATE_AND_LOG:
4641 return c->kill_signal;
4642
4643 case KILL_KILL:
4644 return c->final_kill_signal;
4645
4646 case KILL_WATCHDOG:
4647 return c->watchdog_signal;
4648
4649 default:
4650 assert_not_reached("KillOperation unknown");
4651 }
4652 }
4653
4654 int unit_kill_context(
4655 Unit *u,
4656 KillContext *c,
4657 KillOperation k,
4658 pid_t main_pid,
4659 pid_t control_pid,
4660 bool main_pid_alien) {
4661
4662 bool wait_for_exit = false, send_sighup;
4663 cg_kill_log_func_t log_func = NULL;
4664 int sig, r;
4665
4666 assert(u);
4667 assert(c);
4668
4669 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4670 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4671
4672 if (c->kill_mode == KILL_NONE)
4673 return 0;
4674
4675 sig = operation_to_signal(c, k);
4676
4677 send_sighup =
4678 c->send_sighup &&
4679 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4680 sig != SIGHUP;
4681
4682 if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4683 log_func = log_kill;
4684
4685 if (main_pid > 0) {
4686 if (log_func)
4687 log_func(main_pid, sig, u);
4688
4689 r = kill_and_sigcont(main_pid, sig);
4690 if (r < 0 && r != -ESRCH) {
4691 _cleanup_free_ char *comm = NULL;
4692 (void) get_process_comm(main_pid, &comm);
4693
4694 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4695 } else {
4696 if (!main_pid_alien)
4697 wait_for_exit = true;
4698
4699 if (r != -ESRCH && send_sighup)
4700 (void) kill(main_pid, SIGHUP);
4701 }
4702 }
4703
4704 if (control_pid > 0) {
4705 if (log_func)
4706 log_func(control_pid, sig, u);
4707
4708 r = kill_and_sigcont(control_pid, sig);
4709 if (r < 0 && r != -ESRCH) {
4710 _cleanup_free_ char *comm = NULL;
4711 (void) get_process_comm(control_pid, &comm);
4712
4713 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4714 } else {
4715 wait_for_exit = true;
4716
4717 if (r != -ESRCH && send_sighup)
4718 (void) kill(control_pid, SIGHUP);
4719 }
4720 }
4721
4722 if (u->cgroup_path &&
4723 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4724 _cleanup_set_free_ Set *pid_set = NULL;
4725
4726 /* Exclude the main/control pids from being killed via the cgroup */
4727 pid_set = unit_pid_set(main_pid, control_pid);
4728 if (!pid_set)
4729 return -ENOMEM;
4730
4731 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4732 sig,
4733 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4734 pid_set,
4735 log_func, u);
4736 if (r < 0) {
4737 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4738 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4739
4740 } else if (r > 0) {
4741
4742 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4743 * we are running in a container or if this is a delegation unit, simply because cgroup
4744 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4745 * of containers it can be confused easily by left-over directories in the cgroup — which
4746 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4747 * there we get proper events. Hence rely on them. */
4748
4749 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4750 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4751 wait_for_exit = true;
4752
4753 if (send_sighup) {
4754 set_free(pid_set);
4755
4756 pid_set = unit_pid_set(main_pid, control_pid);
4757 if (!pid_set)
4758 return -ENOMEM;
4759
4760 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4761 SIGHUP,
4762 CGROUP_IGNORE_SELF,
4763 pid_set,
4764 NULL, NULL);
4765 }
4766 }
4767 }
4768
4769 return wait_for_exit;
4770 }
4771
4772 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4773 _cleanup_free_ char *p = NULL;
4774 UnitDependencyInfo di;
4775 int r;
4776
4777 assert(u);
4778 assert(path);
4779
4780 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4781 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4782 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4783 * determine which units to make themselves a dependency of. */
4784
4785 if (!path_is_absolute(path))
4786 return -EINVAL;
4787
4788 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4789 if (r < 0)
4790 return r;
4791
4792 p = strdup(path);
4793 if (!p)
4794 return -ENOMEM;
4795
4796 path = path_simplify(p, true);
4797
4798 if (!path_is_normalized(path))
4799 return -EPERM;
4800
4801 if (hashmap_contains(u->requires_mounts_for, path))
4802 return 0;
4803
4804 di = (UnitDependencyInfo) {
4805 .origin_mask = mask
4806 };
4807
4808 r = hashmap_put(u->requires_mounts_for, path, di.data);
4809 if (r < 0)
4810 return r;
4811 p = NULL;
4812
4813 char prefix[strlen(path) + 1];
4814 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4815 Set *x;
4816
4817 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4818 if (!x) {
4819 _cleanup_free_ char *q = NULL;
4820
4821 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4822 if (r < 0)
4823 return r;
4824
4825 q = strdup(prefix);
4826 if (!q)
4827 return -ENOMEM;
4828
4829 x = set_new(NULL);
4830 if (!x)
4831 return -ENOMEM;
4832
4833 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4834 if (r < 0) {
4835 set_free(x);
4836 return r;
4837 }
4838 q = NULL;
4839 }
4840
4841 r = set_put(x, u);
4842 if (r < 0)
4843 return r;
4844 }
4845
4846 return 0;
4847 }
4848
4849 int unit_setup_exec_runtime(Unit *u) {
4850 ExecRuntime **rt;
4851 size_t offset;
4852 Unit *other;
4853 Iterator i;
4854 void *v;
4855 int r;
4856
4857 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4858 assert(offset > 0);
4859
4860 /* Check if there already is an ExecRuntime for this unit? */
4861 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4862 if (*rt)
4863 return 0;
4864
4865 /* Try to get it from somebody else */
4866 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4867 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4868 if (r == 1)
4869 return 1;
4870 }
4871
4872 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4873 }
4874
4875 int unit_setup_dynamic_creds(Unit *u) {
4876 ExecContext *ec;
4877 DynamicCreds *dcreds;
4878 size_t offset;
4879
4880 assert(u);
4881
4882 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4883 assert(offset > 0);
4884 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4885
4886 ec = unit_get_exec_context(u);
4887 assert(ec);
4888
4889 if (!ec->dynamic_user)
4890 return 0;
4891
4892 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4893 }
4894
4895 bool unit_type_supported(UnitType t) {
4896 if (_unlikely_(t < 0))
4897 return false;
4898 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4899 return false;
4900
4901 if (!unit_vtable[t]->supported)
4902 return true;
4903
4904 return unit_vtable[t]->supported();
4905 }
4906
4907 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4908 int r;
4909
4910 assert(u);
4911 assert(where);
4912
4913 r = dir_is_empty(where);
4914 if (r > 0 || r == -ENOTDIR)
4915 return;
4916 if (r < 0) {
4917 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4918 return;
4919 }
4920
4921 log_struct(LOG_NOTICE,
4922 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4923 LOG_UNIT_ID(u),
4924 LOG_UNIT_INVOCATION_ID(u),
4925 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4926 "WHERE=%s", where);
4927 }
4928
4929 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4930 _cleanup_free_ char *canonical_where = NULL;
4931 int r;
4932
4933 assert(u);
4934 assert(where);
4935
4936 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
4937 if (r < 0) {
4938 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4939 return 0;
4940 }
4941
4942 /* We will happily ignore a trailing slash (or any redundant slashes) */
4943 if (path_equal(where, canonical_where))
4944 return 0;
4945
4946 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4947 log_struct(LOG_ERR,
4948 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4949 LOG_UNIT_ID(u),
4950 LOG_UNIT_INVOCATION_ID(u),
4951 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4952 "WHERE=%s", where);
4953
4954 return -ELOOP;
4955 }
4956
4957 bool unit_is_pristine(Unit *u) {
4958 assert(u);
4959
4960 /* Check if the unit already exists or is already around,
4961 * in a number of different ways. Note that to cater for unit
4962 * types such as slice, we are generally fine with units that
4963 * are marked UNIT_LOADED even though nothing was actually
4964 * loaded, as those unit types don't require a file on disk. */
4965
4966 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4967 u->fragment_path ||
4968 u->source_path ||
4969 !strv_isempty(u->dropin_paths) ||
4970 u->job ||
4971 u->merged_into);
4972 }
4973
4974 pid_t unit_control_pid(Unit *u) {
4975 assert(u);
4976
4977 if (UNIT_VTABLE(u)->control_pid)
4978 return UNIT_VTABLE(u)->control_pid(u);
4979
4980 return 0;
4981 }
4982
4983 pid_t unit_main_pid(Unit *u) {
4984 assert(u);
4985
4986 if (UNIT_VTABLE(u)->main_pid)
4987 return UNIT_VTABLE(u)->main_pid(u);
4988
4989 return 0;
4990 }
4991
4992 static void unit_unref_uid_internal(
4993 Unit *u,
4994 uid_t *ref_uid,
4995 bool destroy_now,
4996 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
4997
4998 assert(u);
4999 assert(ref_uid);
5000 assert(_manager_unref_uid);
5001
5002 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5003 * gid_t are actually the same time, with the same validity rules.
5004 *
5005 * Drops a reference to UID/GID from a unit. */
5006
5007 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5008 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5009
5010 if (!uid_is_valid(*ref_uid))
5011 return;
5012
5013 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5014 *ref_uid = UID_INVALID;
5015 }
5016
5017 void unit_unref_uid(Unit *u, bool destroy_now) {
5018 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5019 }
5020
5021 void unit_unref_gid(Unit *u, bool destroy_now) {
5022 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5023 }
5024
5025 static int unit_ref_uid_internal(
5026 Unit *u,
5027 uid_t *ref_uid,
5028 uid_t uid,
5029 bool clean_ipc,
5030 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5031
5032 int r;
5033
5034 assert(u);
5035 assert(ref_uid);
5036 assert(uid_is_valid(uid));
5037 assert(_manager_ref_uid);
5038
5039 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5040 * are actually the same type, and have the same validity rules.
5041 *
5042 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5043 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5044 * drops to zero. */
5045
5046 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5047 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5048
5049 if (*ref_uid == uid)
5050 return 0;
5051
5052 if (uid_is_valid(*ref_uid)) /* Already set? */
5053 return -EBUSY;
5054
5055 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5056 if (r < 0)
5057 return r;
5058
5059 *ref_uid = uid;
5060 return 1;
5061 }
5062
5063 int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5064 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5065 }
5066
5067 int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5068 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5069 }
5070
5071 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5072 int r = 0, q = 0;
5073
5074 assert(u);
5075
5076 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5077
5078 if (uid_is_valid(uid)) {
5079 r = unit_ref_uid(u, uid, clean_ipc);
5080 if (r < 0)
5081 return r;
5082 }
5083
5084 if (gid_is_valid(gid)) {
5085 q = unit_ref_gid(u, gid, clean_ipc);
5086 if (q < 0) {
5087 if (r > 0)
5088 unit_unref_uid(u, false);
5089
5090 return q;
5091 }
5092 }
5093
5094 return r > 0 || q > 0;
5095 }
5096
5097 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5098 ExecContext *c;
5099 int r;
5100
5101 assert(u);
5102
5103 c = unit_get_exec_context(u);
5104
5105 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5106 if (r < 0)
5107 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5108
5109 return r;
5110 }
5111
5112 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5113 assert(u);
5114
5115 unit_unref_uid(u, destroy_now);
5116 unit_unref_gid(u, destroy_now);
5117 }
5118
5119 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5120 int r;
5121
5122 assert(u);
5123
5124 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5125 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5126 * objects when no service references the UID/GID anymore. */
5127
5128 r = unit_ref_uid_gid(u, uid, gid);
5129 if (r > 0)
5130 unit_add_to_dbus_queue(u);
5131 }
5132
5133 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5134 int r;
5135
5136 assert(u);
5137
5138 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5139
5140 if (sd_id128_equal(u->invocation_id, id))
5141 return 0;
5142
5143 if (!sd_id128_is_null(u->invocation_id))
5144 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5145
5146 if (sd_id128_is_null(id)) {
5147 r = 0;
5148 goto reset;
5149 }
5150
5151 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5152 if (r < 0)
5153 goto reset;
5154
5155 u->invocation_id = id;
5156 sd_id128_to_string(id, u->invocation_id_string);
5157
5158 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5159 if (r < 0)
5160 goto reset;
5161
5162 return 0;
5163
5164 reset:
5165 u->invocation_id = SD_ID128_NULL;
5166 u->invocation_id_string[0] = 0;
5167 return r;
5168 }
5169
5170 int unit_acquire_invocation_id(Unit *u) {
5171 sd_id128_t id;
5172 int r;
5173
5174 assert(u);
5175
5176 r = sd_id128_randomize(&id);
5177 if (r < 0)
5178 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5179
5180 r = unit_set_invocation_id(u, id);
5181 if (r < 0)
5182 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5183
5184 unit_add_to_dbus_queue(u);
5185 return 0;
5186 }
5187
5188 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5189 int r;
5190
5191 assert(u);
5192 assert(p);
5193
5194 /* Copy parameters from manager */
5195 r = manager_get_effective_environment(u->manager, &p->environment);
5196 if (r < 0)
5197 return r;
5198
5199 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5200 p->cgroup_supported = u->manager->cgroup_supported;
5201 p->prefix = u->manager->prefix;
5202 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5203
5204 /* Copy parameters from unit */
5205 p->cgroup_path = u->cgroup_path;
5206 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5207
5208 return 0;
5209 }
5210
5211 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5212 int r;
5213
5214 assert(u);
5215 assert(ret);
5216
5217 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5218 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5219
5220 (void) unit_realize_cgroup(u);
5221
5222 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5223 if (r != 0)
5224 return r;
5225
5226 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5227 (void) ignore_signals(SIGPIPE, -1);
5228
5229 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5230
5231 if (u->cgroup_path) {
5232 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5233 if (r < 0) {
5234 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5235 _exit(EXIT_CGROUP);
5236 }
5237 }
5238
5239 return 0;
5240 }
5241
5242 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5243 assert(u);
5244 assert(d >= 0);
5245 assert(d < _UNIT_DEPENDENCY_MAX);
5246 assert(other);
5247
5248 if (di.origin_mask == 0 && di.destination_mask == 0) {
5249 /* No bit set anymore, let's drop the whole entry */
5250 assert_se(hashmap_remove(u->dependencies[d], other));
5251 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5252 } else
5253 /* Mask was reduced, let's update the entry */
5254 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5255 }
5256
5257 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5258 UnitDependency d;
5259
5260 assert(u);
5261
5262 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5263
5264 if (mask == 0)
5265 return;
5266
5267 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5268 bool done;
5269
5270 do {
5271 UnitDependencyInfo di;
5272 Unit *other;
5273 Iterator i;
5274
5275 done = true;
5276
5277 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5278 UnitDependency q;
5279
5280 if ((di.origin_mask & ~mask) == di.origin_mask)
5281 continue;
5282 di.origin_mask &= ~mask;
5283 unit_update_dependency_mask(u, d, other, di);
5284
5285 /* We updated the dependency from our unit to the other unit now. But most dependencies
5286 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5287 * all dependency types on the other unit and delete all those which point to us and
5288 * have the right mask set. */
5289
5290 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5291 UnitDependencyInfo dj;
5292
5293 dj.data = hashmap_get(other->dependencies[q], u);
5294 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5295 continue;
5296 dj.destination_mask &= ~mask;
5297
5298 unit_update_dependency_mask(other, q, u, dj);
5299 }
5300
5301 unit_add_to_gc_queue(other);
5302
5303 done = false;
5304 break;
5305 }
5306
5307 } while (!done);
5308 }
5309 }
5310
5311 static int unit_export_invocation_id(Unit *u) {
5312 const char *p;
5313 int r;
5314
5315 assert(u);
5316
5317 if (u->exported_invocation_id)
5318 return 0;
5319
5320 if (sd_id128_is_null(u->invocation_id))
5321 return 0;
5322
5323 p = strjoina("/run/systemd/units/invocation:", u->id);
5324 r = symlink_atomic(u->invocation_id_string, p);
5325 if (r < 0)
5326 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5327
5328 u->exported_invocation_id = true;
5329 return 0;
5330 }
5331
5332 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5333 const char *p;
5334 char buf[2];
5335 int r;
5336
5337 assert(u);
5338 assert(c);
5339
5340 if (u->exported_log_level_max)
5341 return 0;
5342
5343 if (c->log_level_max < 0)
5344 return 0;
5345
5346 assert(c->log_level_max <= 7);
5347
5348 buf[0] = '0' + c->log_level_max;
5349 buf[1] = 0;
5350
5351 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5352 r = symlink_atomic(buf, p);
5353 if (r < 0)
5354 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5355
5356 u->exported_log_level_max = true;
5357 return 0;
5358 }
5359
5360 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5361 _cleanup_close_ int fd = -1;
5362 struct iovec *iovec;
5363 const char *p;
5364 char *pattern;
5365 le64_t *sizes;
5366 ssize_t n;
5367 size_t i;
5368 int r;
5369
5370 if (u->exported_log_extra_fields)
5371 return 0;
5372
5373 if (c->n_log_extra_fields <= 0)
5374 return 0;
5375
5376 sizes = newa(le64_t, c->n_log_extra_fields);
5377 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5378
5379 for (i = 0; i < c->n_log_extra_fields; i++) {
5380 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5381
5382 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5383 iovec[i*2+1] = c->log_extra_fields[i];
5384 }
5385
5386 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5387 pattern = strjoina(p, ".XXXXXX");
5388
5389 fd = mkostemp_safe(pattern);
5390 if (fd < 0)
5391 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5392
5393 n = writev(fd, iovec, c->n_log_extra_fields*2);
5394 if (n < 0) {
5395 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5396 goto fail;
5397 }
5398
5399 (void) fchmod(fd, 0644);
5400
5401 if (rename(pattern, p) < 0) {
5402 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5403 goto fail;
5404 }
5405
5406 u->exported_log_extra_fields = true;
5407 return 0;
5408
5409 fail:
5410 (void) unlink(pattern);
5411 return r;
5412 }
5413
5414 static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5415 _cleanup_free_ char *buf = NULL;
5416 const char *p;
5417 int r;
5418
5419 assert(u);
5420 assert(c);
5421
5422 if (u->exported_log_rate_limit_interval)
5423 return 0;
5424
5425 if (c->log_rate_limit_interval_usec == 0)
5426 return 0;
5427
5428 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5429
5430 if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5431 return log_oom();
5432
5433 r = symlink_atomic(buf, p);
5434 if (r < 0)
5435 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5436
5437 u->exported_log_rate_limit_interval = true;
5438 return 0;
5439 }
5440
5441 static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5442 _cleanup_free_ char *buf = NULL;
5443 const char *p;
5444 int r;
5445
5446 assert(u);
5447 assert(c);
5448
5449 if (u->exported_log_rate_limit_burst)
5450 return 0;
5451
5452 if (c->log_rate_limit_burst == 0)
5453 return 0;
5454
5455 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5456
5457 if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5458 return log_oom();
5459
5460 r = symlink_atomic(buf, p);
5461 if (r < 0)
5462 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5463
5464 u->exported_log_rate_limit_burst = true;
5465 return 0;
5466 }
5467
5468 void unit_export_state_files(Unit *u) {
5469 const ExecContext *c;
5470
5471 assert(u);
5472
5473 if (!u->id)
5474 return;
5475
5476 if (!MANAGER_IS_SYSTEM(u->manager))
5477 return;
5478
5479 if (MANAGER_IS_TEST_RUN(u->manager))
5480 return;
5481
5482 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5483 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5484 * the IPC system itself and PID 1 also log to the journal.
5485 *
5486 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5487 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5488 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5489 * namespace at least.
5490 *
5491 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5492 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5493 * them with one. */
5494
5495 (void) unit_export_invocation_id(u);
5496
5497 c = unit_get_exec_context(u);
5498 if (c) {
5499 (void) unit_export_log_level_max(u, c);
5500 (void) unit_export_log_extra_fields(u, c);
5501 (void) unit_export_log_rate_limit_interval(u, c);
5502 (void) unit_export_log_rate_limit_burst(u, c);
5503 }
5504 }
5505
5506 void unit_unlink_state_files(Unit *u) {
5507 const char *p;
5508
5509 assert(u);
5510
5511 if (!u->id)
5512 return;
5513
5514 if (!MANAGER_IS_SYSTEM(u->manager))
5515 return;
5516
5517 /* Undoes the effect of unit_export_state() */
5518
5519 if (u->exported_invocation_id) {
5520 p = strjoina("/run/systemd/units/invocation:", u->id);
5521 (void) unlink(p);
5522
5523 u->exported_invocation_id = false;
5524 }
5525
5526 if (u->exported_log_level_max) {
5527 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5528 (void) unlink(p);
5529
5530 u->exported_log_level_max = false;
5531 }
5532
5533 if (u->exported_log_extra_fields) {
5534 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5535 (void) unlink(p);
5536
5537 u->exported_log_extra_fields = false;
5538 }
5539
5540 if (u->exported_log_rate_limit_interval) {
5541 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5542 (void) unlink(p);
5543
5544 u->exported_log_rate_limit_interval = false;
5545 }
5546
5547 if (u->exported_log_rate_limit_burst) {
5548 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5549 (void) unlink(p);
5550
5551 u->exported_log_rate_limit_burst = false;
5552 }
5553 }
5554
5555 int unit_prepare_exec(Unit *u) {
5556 int r;
5557
5558 assert(u);
5559
5560 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5561 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5562 r = bpf_firewall_load_custom(u);
5563 if (r < 0)
5564 return r;
5565
5566 /* Prepares everything so that we can fork of a process for this unit */
5567
5568 (void) unit_realize_cgroup(u);
5569
5570 if (u->reset_accounting) {
5571 (void) unit_reset_accounting(u);
5572 u->reset_accounting = false;
5573 }
5574
5575 unit_export_state_files(u);
5576
5577 r = unit_setup_exec_runtime(u);
5578 if (r < 0)
5579 return r;
5580
5581 r = unit_setup_dynamic_creds(u);
5582 if (r < 0)
5583 return r;
5584
5585 return 0;
5586 }
5587
5588 static int log_leftover(pid_t pid, int sig, void *userdata) {
5589 _cleanup_free_ char *comm = NULL;
5590
5591 (void) get_process_comm(pid, &comm);
5592
5593 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5594 return 0;
5595
5596 log_unit_warning(userdata,
5597 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5598 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5599 pid, strna(comm));
5600
5601 return 1;
5602 }
5603
5604 int unit_warn_leftover_processes(Unit *u) {
5605 assert(u);
5606
5607 (void) unit_pick_cgroup_path(u);
5608
5609 if (!u->cgroup_path)
5610 return 0;
5611
5612 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5613 }
5614
5615 bool unit_needs_console(Unit *u) {
5616 ExecContext *ec;
5617 UnitActiveState state;
5618
5619 assert(u);
5620
5621 state = unit_active_state(u);
5622
5623 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5624 return false;
5625
5626 if (UNIT_VTABLE(u)->needs_console)
5627 return UNIT_VTABLE(u)->needs_console(u);
5628
5629 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5630 ec = unit_get_exec_context(u);
5631 if (!ec)
5632 return false;
5633
5634 return exec_context_may_touch_console(ec);
5635 }
5636
5637 const char *unit_label_path(Unit *u) {
5638 const char *p;
5639
5640 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5641 * when validating access checks. */
5642
5643 p = u->source_path ?: u->fragment_path;
5644 if (!p)
5645 return NULL;
5646
5647 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5648 if (path_equal(p, "/dev/null"))
5649 return NULL;
5650
5651 return p;
5652 }
5653
5654 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5655 int r;
5656
5657 assert(u);
5658
5659 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5660 * and not a kernel thread either */
5661
5662 /* First, a simple range check */
5663 if (!pid_is_valid(pid))
5664 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5665
5666 /* Some extra safety check */
5667 if (pid == 1 || pid == getpid_cached())
5668 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5669
5670 /* Don't even begin to bother with kernel threads */
5671 r = is_kernel_thread(pid);
5672 if (r == -ESRCH)
5673 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5674 if (r < 0)
5675 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5676 if (r > 0)
5677 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5678
5679 return 0;
5680 }
5681
5682 void unit_log_success(Unit *u) {
5683 assert(u);
5684
5685 log_struct(LOG_INFO,
5686 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5687 LOG_UNIT_ID(u),
5688 LOG_UNIT_INVOCATION_ID(u),
5689 LOG_UNIT_MESSAGE(u, "Succeeded."));
5690 }
5691
5692 void unit_log_failure(Unit *u, const char *result) {
5693 assert(u);
5694 assert(result);
5695
5696 log_struct(LOG_WARNING,
5697 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5698 LOG_UNIT_ID(u),
5699 LOG_UNIT_INVOCATION_ID(u),
5700 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5701 "UNIT_RESULT=%s", result);
5702 }
5703
5704 void unit_log_skip(Unit *u, const char *result) {
5705 assert(u);
5706 assert(result);
5707
5708 log_struct(LOG_INFO,
5709 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5710 LOG_UNIT_ID(u),
5711 LOG_UNIT_INVOCATION_ID(u),
5712 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5713 "UNIT_RESULT=%s", result);
5714 }
5715
5716 void unit_log_process_exit(
5717 Unit *u,
5718 int level,
5719 const char *kind,
5720 const char *command,
5721 int code,
5722 int status) {
5723
5724 assert(u);
5725 assert(kind);
5726
5727 if (code != CLD_EXITED)
5728 level = LOG_WARNING;
5729
5730 log_struct(level,
5731 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5732 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5733 kind,
5734 sigchld_code_to_string(code), status,
5735 strna(code == CLD_EXITED
5736 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5737 : signal_to_string(status))),
5738 "EXIT_CODE=%s", sigchld_code_to_string(code),
5739 "EXIT_STATUS=%i", status,
5740 "COMMAND=%s", strna(command),
5741 LOG_UNIT_ID(u),
5742 LOG_UNIT_INVOCATION_ID(u));
5743 }
5744
5745 int unit_exit_status(Unit *u) {
5746 assert(u);
5747
5748 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5749 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5750 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5751 * service process has exited abnormally (signal/coredump). */
5752
5753 if (!UNIT_VTABLE(u)->exit_status)
5754 return -EOPNOTSUPP;
5755
5756 return UNIT_VTABLE(u)->exit_status(u);
5757 }
5758
5759 int unit_failure_action_exit_status(Unit *u) {
5760 int r;
5761
5762 assert(u);
5763
5764 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5765
5766 if (u->failure_action_exit_status >= 0)
5767 return u->failure_action_exit_status;
5768
5769 r = unit_exit_status(u);
5770 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5771 return 255;
5772
5773 return r;
5774 }
5775
5776 int unit_success_action_exit_status(Unit *u) {
5777 int r;
5778
5779 assert(u);
5780
5781 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5782
5783 if (u->success_action_exit_status >= 0)
5784 return u->success_action_exit_status;
5785
5786 r = unit_exit_status(u);
5787 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5788 return 255;
5789
5790 return r;
5791 }
5792
5793 int unit_test_trigger_loaded(Unit *u) {
5794 Unit *trigger;
5795
5796 /* Tests whether the unit to trigger is loaded */
5797
5798 trigger = UNIT_TRIGGER(u);
5799 if (!trigger)
5800 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5801 "Refusing to start, no unit to trigger.");
5802 if (trigger->load_state != UNIT_LOADED)
5803 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5804 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5805
5806 return 0;
5807 }
5808
5809 int unit_clean(Unit *u, ExecCleanMask mask) {
5810 UnitActiveState state;
5811
5812 assert(u);
5813
5814 /* Special return values:
5815 *
5816 * -EOPNOTSUPP → cleaning not supported for this unit type
5817 * -EUNATCH → cleaning not defined for this resource type
5818 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5819 * a job queued or similar
5820 */
5821
5822 if (!UNIT_VTABLE(u)->clean)
5823 return -EOPNOTSUPP;
5824
5825 if (mask == 0)
5826 return -EUNATCH;
5827
5828 if (u->load_state != UNIT_LOADED)
5829 return -EBUSY;
5830
5831 if (u->job)
5832 return -EBUSY;
5833
5834 state = unit_active_state(u);
5835 if (!IN_SET(state, UNIT_INACTIVE))
5836 return -EBUSY;
5837
5838 return UNIT_VTABLE(u)->clean(u, mask);
5839 }
5840
5841 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5842 assert(u);
5843
5844 if (!UNIT_VTABLE(u)->clean ||
5845 u->load_state != UNIT_LOADED) {
5846 *ret = 0;
5847 return 0;
5848 }
5849
5850 /* When the clean() method is set, can_clean() really should be set too */
5851 assert(UNIT_VTABLE(u)->can_clean);
5852
5853 return UNIT_VTABLE(u)->can_clean(u, ret);
5854 }
5855
5856 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5857 [COLLECT_INACTIVE] = "inactive",
5858 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5859 };
5860
5861 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);