/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "io-util.h"
#include "install.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing_audit.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
#define MENTIONWORTHY_IP_BYTES (0ULL)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
#define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
#define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
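
/* These thresholds feed the resource-accounting log messages emitted when a unit stops:
 * consumption above the "mentionworthy" values raises the message to INFO, above the
 * "noticeworthy" values to NOTICE (see raise_level() further down). Note that
 * MENTIONWORTHY_IP_BYTES is 0, i.e. any IP traffic at all is considered worth mentioning. */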

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);

Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}
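
/* A note on the "size" parameter above: each unit type embeds Unit as the first member of its
 * own, larger structure (Service, Mount, etc.), and the full type-specific object is allocated
 * here in one go. That is why "size" must be at least sizeof(Unit) and why callers may cast
 * the result to the type-specific object. */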

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup context, _before_ the rest of the
                 * settings have been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return r;
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return -EEXIST;

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -EINVAL;

        t = unit_name_to_type(s);
        if (t < 0)
                return -EINVAL;

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return -EINVAL;

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        if (i && !unit_type_may_template(t))
                return -EINVAL;

        /* Ensure that this unit is either instanced or not instanced, but not both. Note that we
         * do allow names with different instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return -EINVAL;

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return -EEXIST;

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return -E2BIG;

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return r;
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
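
/* A note on naming: the first name added to a fresh unit fixes its type and becomes u->id; any
 * further names added through this function (possible only for types for which
 * unit_type_may_alias() returns true) act as aliases that resolve to the same Unit object. */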

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true
         * when the unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in
         * unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}

static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}

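/* A companion to unit_require_mounts_for(): the manager keeps a reverse map
 * (manager->units_requiring_mounts_for) from each path prefix to the set of units that
 * registered a path below it. The helper below drops this unit from the set of every prefix of
 * every path it had registered, and frees map entries that become empty. */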
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        set_free(u->ip_bpf_custom_ingress);
        set_free(u->ip_bpf_custom_egress);
        set_free(u->ip_bpf_custom_ingress_installed);
        set_free(u->ip_bpf_custom_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly loaded but still has a
         * process around. That's why we won't shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int set_complete_move(Set **s, Set **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return set_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return hashmap_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}

static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}

static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependent unit, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }
                }

        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
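
/* Merging comes into play when two Unit objects turn out to be the same unit under different
 * names, for instance when an Alias= is discovered after both names have already been
 * instantiated. merge_dependencies() above makes the merge transparent to third parties: every
 * unit that pointed at "other" is rewritten to point at "u" instead, with the
 * origin/destination dependency masks of any pre-existing duplicate entries OR'ed together. */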

int unit_merge(Unit *u, Unit *other) {
        UnitDependency d;
        const char *other_id = NULL;
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!u->instance != !other->instance)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        if (other->id)
                other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we don't need it anymore, and
         * can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = path_join(u->manager->prefix[dt], *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
                return 0;

        /* If syslog or kernel logging is requested, make sure our own logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
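
/* For example, a service with PrivateTmp=yes therefore implicitly gains
 * RequiresMountsFor=/tmp /var/tmp and After=systemd-tmpfiles-setup.service, and a service
 * logging to the journal implicitly gains After=systemd-journald.socket. */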

const char *unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char *unit_status_string(Unit *u) {
        assert(u);

        if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
                return u->id;

        return unit_description(u);
}

static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (mask == 0)
                        break;

                if (FLAGS_SET(mask, table[i].mask)) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}

void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

        fprintf(f,
                "%s-> Unit %s:\n",
                prefix, u->id);

        SET_FOREACH(t, u->names, i)
                if (!streq(t, u->id))
                        fprintf(f, "%s\tAlias: %s\n", prefix, t);

        fprintf(f,
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tMay GC: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }

        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_delegate_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
        }

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->failure_action_exit_status >= 0)
                fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
        if (u->success_action_exit_status >= 0)
                fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the target unit
         * needlessly. But we cannot be sure which drop-ins have already been loaded and which
         * not, at least without doing complicated book-keeping, so let's always reread all
         * drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that the loop check below is
         * reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}
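
/* Invoked while processing the target dependency queue for every unit a target pulls in via
 * Requires=, Wants= and friends: unless default dependencies are disabled on either side, the
 * target is ordered After= the pulled-in unit, which is why a target normally only becomes
 * active once the units it groups have started. */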

static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if it exists. If so the
                                 * dependencies on this unit will be added later during the
                                 * loading of the mount unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
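
/* Example of the effect: since PATH_FOREACH_PREFIX_MORE walks every prefix of a path including
 * the path itself, a unit with RequiresMountsFor=/var/lib/foo is ordered After= each
 * corresponding mount unit that exists (-.mount, var.mount, var-lib.mount, var-lib-foo.mount),
 * and additionally gains Requires= on those that have a unit file of their own. */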

static int unit_add_startup_units(Unit *u) {
        CGroupContext *c;
        int r;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
            c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
            c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
                return 0;

        r = set_ensure_allocated(&u->manager->startup_units, NULL);
        if (r < 0)
                return r;

        return set_put(u->manager->startup_units, u);
}

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependency specified but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->assert_result;
}

void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_status_string(u);
        if (log_get_show_color())
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}

int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason units remaining in the same process group as PID 1 fail to acquire the
         * console even if it's not used by any process. So skip the confirmation question for
         * them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 *         -EALREADY:   Unit is already started.
 *         -ECOMM:      Condition failed
 *         -EAGAIN:     An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:      This unit type does not support starting.
 *         -ECANCELED:  Start limit hit, too many requests for now
 *         -EPROTO:     Assert failed
 *         -EINVAL:     Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK:    The necessary dependencies are not fulfilled.
 *         -ESTALE:     This unit has been started before and can't be started a second time
 *         -ENOENT:     This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
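
/* An illustrative (non-systemd) caller sketch: since some of the negative returns above are
 * benign, code driving unit_start() typically wants something like
 *
 *         r = unit_start(u);
 *         if (r < 0 && !IN_SET(r, -EALREADY, -ECOMM, -EAGAIN))
 *                 return log_unit_error_errno(u, r, "Failed to start: %m");
 *
 * i.e. treat "already started", "condition failed" and "try again later" differently from the
 * real errors listed before the function. */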

bool unit_can_start(Unit *u) {
        assert(u);

        if (u->load_state != UNIT_LOADED)
                return false;

        if (!unit_type_supported(u->type))
                return false;

        /* Scope units may be started only once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
                return false;

        return !!UNIT_VTABLE(u)->start;
}

bool unit_can_isolate(Unit *u) {
        assert(u);

        return unit_can_start(u) &&
                u->allow_isolate;
}

/* Errors:
 *         -EBADR:    This unit type does not support stopping.
 *         -EALREADY: Unit is already stopped.
 *         -EAGAIN:   An operation is already in progress. Retry later.
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}

bool unit_can_stop(Unit *u) {
        assert(u);

        if (!unit_type_supported(u->type))
                return false;

        if (u->perpetual)
                return false;

        return !!UNIT_VTABLE(u)->stop;
}

/* Errors:
 *         -EBADR:   This unit type does not support reloading.
 *         -ENOEXEC: Unit is not started.
 *         -EAGAIN:  An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}

bool unit_can_reload(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_reload)
                return UNIT_VTABLE(u)->can_reload(u);

        if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
                return true;

        return UNIT_VTABLE(u)->reload;
}

bool unit_is_unneeded(Unit *u) {
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}

static void check_unneeded_dependencies(Unit *u) {

        static const UnitDependency deps[] = {
                UNIT_REQUIRES,
                UNIT_REQUISITE,
                UNIT_WANTS,
                UNIT_BINDS_TO,
        };
        size_t j;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
                        unit_submit_to_stop_when_unneeded_queue(other);
        }
}
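
/* Taken together: when a unit stops, check_unneeded_dependencies() enqueues everything it was
 * pulling in (Requires=, Requisite=, Wants=, BindsTo=) on the stop-when-unneeded queue; the
 * manager later runs unit_is_unneeded() on each queued unit and, for those with
 * StopWhenUnneeded=yes that no active unit references anymore, enqueues a stop job. */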
1982
1983 static void unit_check_binds_to(Unit *u) {
1984 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1985 bool stop = false;
1986 Unit *other;
1987 Iterator i;
1988 void *v;
1989 int r;
1990
1991 assert(u);
1992
1993 if (u->job)
1994 return;
1995
1996 if (unit_active_state(u) != UNIT_ACTIVE)
1997 return;
1998
1999 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2000 if (other->job)
2001 continue;
2002
2003 if (!other->coldplugged)
2004 /* We might yet create a job for the other unit… */
2005 continue;
2006
2007 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2008 continue;
2009
2010 stop = true;
2011 break;
2012 }
2013
2014 if (!stop)
2015 return;
2016
2017 /* If stopping a unit fails continuously we might enter a stop
2018 * loop here, hence stop acting on the service being
2019 * unnecessary after a while. */
2020 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2021 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2022 return;
2023 }
2024
2025 assert(other);
2026 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2027
2028 /* A unit we need to run is gone. Sniff. Let's stop this. */
2029 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2030 if (r < 0)
2031 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2032 }
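
/* Illustrative example (hypothetical unit names): if foo.service declares BindsTo=bar.device
 * and bar.device goes inactive while foo.service is active, this function notices the dead
 * dependency on a later state-change notification and enqueues a JOB_STOP for foo.service,
 * subject to the auto_stop_ratelimit above. */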
2033
2034 static void retroactively_start_dependencies(Unit *u) {
2035 Iterator i;
2036 Unit *other;
2037 void *v;
2038
2039 assert(u);
2040 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2041
2042 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2043 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2044 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2045 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2046
2047 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2048 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2049 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2050 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2051
2052 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2053 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2054 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2055 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2056
2057 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2058 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2059 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2060
2061 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2062 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2063 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2064 }
2065
2066 static void retroactively_stop_dependencies(Unit *u) {
2067 Unit *other;
2068 Iterator i;
2069 void *v;
2070
2071 assert(u);
2072 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2073
2074 /* Pull down units which are bound to us recursively if enabled */
2075 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2076 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2077 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2078 }
2079
2080 void unit_start_on_failure(Unit *u) {
2081 Unit *other;
2082 Iterator i;
2083 void *v;
2084 int r;
2085
2086 assert(u);
2087
2088 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2089 return;
2090
2091 log_unit_info(u, "Triggering OnFailure= dependencies.");
2092
2093 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2094 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2095
2096 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2097 if (r < 0)
2098 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2099 }
2100 }
2101
2102 void unit_trigger_notify(Unit *u) {
2103 Unit *other;
2104 Iterator i;
2105 void *v;
2106
2107 assert(u);
2108
2109 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2110 if (UNIT_VTABLE(other)->trigger_notify)
2111 UNIT_VTABLE(other)->trigger_notify(other, u);
2112 }
2113
2114 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2115 if (condition_notice && log_level > LOG_NOTICE)
2116 return LOG_NOTICE;
2117 if (condition_info && log_level > LOG_INFO)
2118 return LOG_INFO;
2119 return log_level;
2120 }
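
/* Worked example, using the thresholds defined at the top of this file (1s mention-worthy,
 * 10min notice-worthy for CPU time): starting from LOG_DEBUG, 30s of CPU time exceeds only
 * the mention-worthy threshold and yields LOG_INFO, while 15min exceeds both and yields
 * LOG_NOTICE. Smaller syslog values mean higher priority, hence the '>' comparisons above. */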
2121
2122 static int unit_log_resources(Unit *u) {
2123 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2124 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2125 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2126 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
2127 size_t n_message_parts = 0, n_iovec = 0;
2128 char* message_parts[1 + 2 + 2 + 1], *t;
2129 nsec_t nsec = NSEC_INFINITY;
2130 CGroupIPAccountingMetric m;
2131 size_t i;
2132 int r;
2133 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2134 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2135 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2136 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2137 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2138 };
2139 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2140 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2141 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2142 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2143 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2144 };
2145
2146 assert(u);
2147
2148 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2149 * accounting was enabled for the unit. It does this in two ways: a friendly human-readable string with reduced
2150 * information and the complete data in structured fields. */
2151
2152 (void) unit_get_cpu_usage(u, &nsec);
2153 if (nsec != NSEC_INFINITY) {
2154 char buf[FORMAT_TIMESPAN_MAX] = "";
2155
2156 /* Format the CPU time for inclusion in the structured log message */
2157 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2158 r = log_oom();
2159 goto finish;
2160 }
2161 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2162
2163 /* Format the CPU time for inclusion in the human language message string */
2164 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2165 t = strjoin("consumed ", buf, " CPU time");
2166 if (!t) {
2167 r = log_oom();
2168 goto finish;
2169 }
2170
2171 message_parts[n_message_parts++] = t;
2172
2173 log_level = raise_level(log_level,
2174 nsec > MENTIONWORTHY_CPU_NSEC,
2175 nsec > NOTICEWORTHY_CPU_NSEC);
2176 }
2177
2178 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2179 char buf[FORMAT_BYTES_MAX] = "";
2180 uint64_t value = UINT64_MAX;
2181
2182 assert(io_fields[k]);
2183
2184 (void) unit_get_io_accounting(u, k, k > 0, &value);
2185 if (value == UINT64_MAX)
2186 continue;
2187
2188 have_io_accounting = true;
2189 if (value > 0)
2190 any_io = true;
2191
2192 /* Format IO accounting data for inclusion in the structured log message */
2193 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2194 r = log_oom();
2195 goto finish;
2196 }
2197 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2198
2199 /* Format the IO accounting data for inclusion in the human language message string, but only
2200 * for the bytes counters (and not for the operations counters) */
2201 if (k == CGROUP_IO_READ_BYTES) {
2202 assert(!rr);
2203 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2204 if (!rr) {
2205 r = log_oom();
2206 goto finish;
2207 }
2208 } else if (k == CGROUP_IO_WRITE_BYTES) {
2209 assert(!wr);
2210 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2211 if (!wr) {
2212 r = log_oom();
2213 goto finish;
2214 }
2215 }
2216
2217 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2218 log_level = raise_level(log_level,
2219 value > MENTIONWORTHY_IO_BYTES,
2220 value > NOTICEWORTHY_IO_BYTES);
2221 }
2222
2223 if (have_io_accounting) {
2224 if (any_io) {
2225 if (rr)
2226 message_parts[n_message_parts++] = TAKE_PTR(rr);
2227 if (wr)
2228 message_parts[n_message_parts++] = TAKE_PTR(wr);
2229
2230 } else {
2231 char *k;
2232
2233 k = strdup("no IO");
2234 if (!k) {
2235 r = log_oom();
2236 goto finish;
2237 }
2238
2239 message_parts[n_message_parts++] = k;
2240 }
2241 }
2242
2243 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2244 char buf[FORMAT_BYTES_MAX] = "";
2245 uint64_t value = UINT64_MAX;
2246
2247 assert(ip_fields[m]);
2248
2249 (void) unit_get_ip_accounting(u, m, &value);
2250 if (value == UINT64_MAX)
2251 continue;
2252
2253 have_ip_accounting = true;
2254 if (value > 0)
2255 any_traffic = true;
2256
2257 /* Format IP accounting data for inclusion in the structured log message */
2258 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2259 r = log_oom();
2260 goto finish;
2261 }
2262 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2263
2264 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2265 * bytes counters (and not for the packets counters) */
2266 if (m == CGROUP_IP_INGRESS_BYTES) {
2267 assert(!igress);
2268 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2269 if (!igress) {
2270 r = log_oom();
2271 goto finish;
2272 }
2273 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2274 assert(!egress);
2275 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2276 if (!egress) {
2277 r = log_oom();
2278 goto finish;
2279 }
2280 }
2281
2282 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2283 log_level = raise_level(log_level,
2284 value > MENTIONWORTHY_IP_BYTES,
2285 value > NOTICEWORTHY_IP_BYTES);
2286 }
2287
2288 if (have_ip_accounting) {
2289 if (any_traffic) {
2290 if (igress)
2291 message_parts[n_message_parts++] = TAKE_PTR(igress);
2292 if (egress)
2293 message_parts[n_message_parts++] = TAKE_PTR(egress);
2294
2295 } else {
2296 char *k;
2297
2298 k = strdup("no IP traffic");
2299 if (!k) {
2300 r = log_oom();
2301 goto finish;
2302 }
2303
2304 message_parts[n_message_parts++] = k;
2305 }
2306 }
2307
2308 /* Is there any accounting data available at all? */
2309 if (n_iovec == 0) {
2310 r = 0;
2311 goto finish;
2312 }
2313
2314 if (n_message_parts == 0)
2315 t = strjoina("MESSAGE=", u->id, ": Completed.");
2316 else {
2317 _cleanup_free_ char *joined = NULL;
2318
2319 message_parts[n_message_parts] = NULL;
2320
2321 joined = strv_join(message_parts, ", ");
2322 if (!joined) {
2323 r = log_oom();
2324 goto finish;
2325 }
2326
2327 joined[0] = ascii_toupper(joined[0]);
2328 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2329 }
2330
2331 /* The following four fields are allocated on the stack or are static strings, hence we don't want to
2332 * free them, and hence don't increase n_iovec for them. */
2333 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2334 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2335
2336 t = strjoina(u->manager->unit_log_field, u->id);
2337 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2338
2339 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2340 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2341
2342 log_struct_iovec(log_level, iovec, n_iovec + 4);
2343 r = 0;
2344
2345 finish:
2346 for (i = 0; i < n_message_parts; i++)
2347 free(message_parts[i]);
2348
2349 for (i = 0; i < n_iovec; i++)
2350 free(iovec[i].iov_base);
2351
2352 return r;
2354 }
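
/* Illustrative sketch of the resulting human-readable journal line as assembled above
 * (hypothetical unit name, example values):
 *
 *     MESSAGE=foo.service: Consumed 2.5s CPU time, read 1.0M from disk, written 4.0K to disk.
 *
 * or "foo.service: Completed." if no message parts were collected, accompanied by the
 * structured CPU_USAGE_NSEC=, IO_METRIC_*= and IP_METRIC_*= fields emitted above. */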
2355
2356 static void unit_update_on_console(Unit *u) {
2357 bool b;
2358
2359 assert(u);
2360
2361 b = unit_needs_console(u);
2362 if (u->on_console == b)
2363 return;
2364
2365 u->on_console = b;
2366 if (b)
2367 manager_ref_console(u->manager);
2368 else
2369 manager_unref_console(u->manager);
2370 }
2371
2372 static void unit_emit_audit_start(Unit *u) {
2373 assert(u);
2374
2375 if (u->type != UNIT_SERVICE)
2376 return;
2377
2378 /* Write audit record if we have just finished starting up */
2379 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2380 u->in_audit = true;
2381 }
2382
2383 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2384 assert(u);
2385
2386 if (u->type != UNIT_SERVICE)
2387 return;
2388
2389 if (u->in_audit) {
2390 /* Write audit record if we have just finished shutting down */
2391 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2392 u->in_audit = false;
2393 } else {
2394 /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */
2395 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2396
2397 if (state == UNIT_INACTIVE)
2398 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2399 }
2400 }
2401
2402 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2403 bool unexpected = false;
2404 JobResult result;
2405
2406 assert(j);
2407
2408 if (j->state == JOB_WAITING)
2409
2410 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2411 * due to EAGAIN. */
2412 job_add_to_run_queue(j);
2413
2414 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2415 * hence needs to invalidate jobs. */
2416
2417 switch (j->type) {
2418
2419 case JOB_START:
2420 case JOB_VERIFY_ACTIVE:
2421
2422 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2423 job_finish_and_invalidate(j, JOB_DONE, true, false);
2424 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2425 unexpected = true;
2426
2427 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2428 if (ns == UNIT_FAILED)
2429 result = JOB_FAILED;
2430 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2431 result = JOB_SKIPPED;
2432 else
2433 result = JOB_DONE;
2434
2435 job_finish_and_invalidate(j, result, true, false);
2436 }
2437 }
2438
2439 break;
2440
2441 case JOB_RELOAD:
2442 case JOB_RELOAD_OR_START:
2443 case JOB_TRY_RELOAD:
2444
2445 if (j->state == JOB_RUNNING) {
2446 if (ns == UNIT_ACTIVE)
2447 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2448 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2449 unexpected = true;
2450
2451 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2452 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2453 }
2454 }
2455
2456 break;
2457
2458 case JOB_STOP:
2459 case JOB_RESTART:
2460 case JOB_TRY_RESTART:
2461
2462 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2463 job_finish_and_invalidate(j, JOB_DONE, true, false);
2464 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2465 unexpected = true;
2466 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2467 }
2468
2469 break;
2470
2471 default:
2472 assert_not_reached("Job type unknown");
2473 }
2474
2475 return unexpected;
2476 }
2477
2478 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2479 const char *reason;
2480 Manager *m;
2481
2482 assert(u);
2483 assert(os < _UNIT_ACTIVE_STATE_MAX);
2484 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2485
2486 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2487 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2488 * remounted this function will be called too! */
2489
2490 m = u->manager;
2491
2492 /* Let's enqueue the change signal early. In case this unit has a job associated we want this unit to be in
2493 * the bus queue, so that any queued job change signal will force out the unit change signal first. */
2494 unit_add_to_dbus_queue(u);
2495
2496 /* Update timestamps for state changes */
2497 if (!MANAGER_IS_RELOADING(m)) {
2498 dual_timestamp_get(&u->state_change_timestamp);
2499
2500 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2501 u->inactive_exit_timestamp = u->state_change_timestamp;
2502 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2503 u->inactive_enter_timestamp = u->state_change_timestamp;
2504
2505 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2506 u->active_enter_timestamp = u->state_change_timestamp;
2507 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2508 u->active_exit_timestamp = u->state_change_timestamp;
2509 }
2510
2511 /* Keep track of failed units */
2512 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2513
2514 /* Make sure the cgroup and state files are always removed when we become inactive */
2515 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2516 unit_prune_cgroup(u);
2517 unit_unlink_state_files(u);
2518 }
2519
2520 unit_update_on_console(u);
2521
2522 if (!MANAGER_IS_RELOADING(m)) {
2523 bool unexpected;
2524
2525 /* Let's propagate state changes to the job */
2526 if (u->job)
2527 unexpected = unit_process_job(u->job, ns, flags);
2528 else
2529 unexpected = true;
2530
2531 /* If this state change happened without being requested by a job, then let's retroactively start or
2532 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2533 * additional jobs just because something is already activated. */
2534
2535 if (unexpected) {
2536 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2537 retroactively_start_dependencies(u);
2538 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2539 retroactively_stop_dependencies(u);
2540 }
2541
2542 /* Stop unneeded units regardless of whether going down was expected or not */
2543 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2544 check_unneeded_dependencies(u);
2545
2546 if (ns != os && ns == UNIT_FAILED) {
2547 log_unit_debug(u, "Unit entered failed state.");
2548
2549 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2550 unit_start_on_failure(u);
2551 }
2552
2553 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2554 /* This unit just finished starting up */
2555
2556 unit_emit_audit_start(u);
2557 manager_send_unit_plymouth(m, u);
2558 }
2559
2560 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2561 /* This unit just stopped/failed. */
2562
2563 unit_emit_audit_stop(u, ns);
2564 unit_log_resources(u);
2565 }
2566 }
2567
2568 manager_recheck_journal(m);
2569 manager_recheck_dbus(m);
2570
2571 unit_trigger_notify(u);
2572
2573 if (!MANAGER_IS_RELOADING(m)) {
2574 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2575 unit_submit_to_stop_when_unneeded_queue(u);
2576
2577 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2578 * something uses BindsTo= on a Type=oneshot unit, as these units go directly from starting to inactive,
2579 * without ever entering started.) */
2580 unit_check_binds_to(u);
2581
2582 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2583 reason = strjoina("unit ", u->id, " failed");
2584 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2585 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2586 reason = strjoina("unit ", u->id, " succeeded");
2587 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2588 }
2589 }
2590
2591 unit_add_to_gc_queue(u);
2592 }
2593
2594 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2595 int r;
2596
2597 assert(u);
2598 assert(pid_is_valid(pid));
2599
2600 /* Watch a specific PID */
2601
2602 /* The caller may know that this PID belongs to this unit exclusively. Let's take this
2603 * opportunity to remove any stale references to this PID, as they can be created
2604 * easily (when watching a process which is not our direct child). */
2605 if (exclusive)
2606 manager_unwatch_pid(u->manager, pid);
2607
2608 r = set_ensure_allocated(&u->pids, NULL);
2609 if (r < 0)
2610 return r;
2611
2612 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2613 if (r < 0)
2614 return r;
2615
2616 /* First try, let's add the unit keyed by "pid". */
2617 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2618 if (r == -EEXIST) {
2619 Unit **array;
2620 bool found = false;
2621 size_t n = 0;
2622
2623 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2624 * to an array of Units rather than just a Unit) already lists us. */
2625
2626 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2627 if (array)
2628 for (; array[n]; n++)
2629 if (array[n] == u)
2630 found = true;
2631
2632 if (found) /* Found it already? If so, do nothing. */
2633 r = 0;
2634 else {
2635 Unit **new_array;
2636
2637 /* Allocate a new array */
2638 new_array = new(Unit*, n + 2);
2639 if (!new_array)
2640 return -ENOMEM;
2641
2642 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2643 new_array[n] = u;
2644 new_array[n+1] = NULL;
2645
2646 /* Add or replace the old array */
2647 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2648 if (r < 0) {
2649 free(new_array);
2650 return r;
2651 }
2652
2653 free(array);
2654 }
2655 } else if (r < 0)
2656 return r;
2657
2658 r = set_put(u->pids, PID_TO_PTR(pid));
2659 if (r < 0)
2660 return r;
2661
2662 return 0;
2663 }
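
/* Illustrative sketch of the watch_pids keying scheme implemented above (hypothetical PID):
 * the first unit watching PID 4711 is stored directly under the positive key, any further
 * watchers under the negated key as a NULL-terminated array:
 *
 *     watch_pids[PID_TO_PTR(4711)]  -> u1
 *     watch_pids[PID_TO_PTR(-4711)] -> { u2, u3, NULL }
 *
 * which keeps the common single-watcher case free of extra allocations. */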
2664
2665 void unit_unwatch_pid(Unit *u, pid_t pid) {
2666 Unit **array;
2667
2668 assert(u);
2669 assert(pid_is_valid(pid));
2670
2671 /* First let's drop the unit in case it's keyed as "pid". */
2672 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2673
2674 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2675 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2676 if (array) {
2677 size_t n, m = 0;
2678
2679 /* Let's iterate through the array, dropping our own entry */
2680 for (n = 0; array[n]; n++)
2681 if (array[n] != u)
2682 array[m++] = array[n];
2683 array[m] = NULL;
2684
2685 if (m == 0) {
2686 /* The array is now empty, remove the entire entry */
2687 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2688 free(array);
2689 }
2690 }
2691
2692 (void) set_remove(u->pids, PID_TO_PTR(pid));
2693 }
2694
2695 void unit_unwatch_all_pids(Unit *u) {
2696 assert(u);
2697
2698 while (!set_isempty(u->pids))
2699 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2700
2701 u->pids = set_free(u->pids);
2702 }
2703
2704 static void unit_tidy_watch_pids(Unit *u) {
2705 pid_t except1, except2;
2706 Iterator i;
2707 void *e;
2708
2709 assert(u);
2710
2711 /* Cleans dead PIDs from our list */
2712
2713 except1 = unit_main_pid(u);
2714 except2 = unit_control_pid(u);
2715
2716 SET_FOREACH(e, u->pids, i) {
2717 pid_t pid = PTR_TO_PID(e);
2718
2719 if (pid == except1 || pid == except2)
2720 continue;
2721
2722 if (!pid_is_unwaited(pid))
2723 unit_unwatch_pid(u, pid);
2724 }
2725 }
2726
2727 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2728 Unit *u = userdata;
2729
2730 assert(s);
2731 assert(u);
2732
2733 unit_tidy_watch_pids(u);
2734 unit_watch_all_pids(u);
2735
2736 /* If the PID set is empty now, then let's finish this off. */
2737 unit_synthesize_cgroup_empty_event(u);
2738
2739 return 0;
2740 }
2741
2742 int unit_enqueue_rewatch_pids(Unit *u) {
2743 int r;
2744
2745 assert(u);
2746
2747 if (!u->cgroup_path)
2748 return -ENOENT;
2749
2750 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2751 if (r < 0)
2752 return r;
2753 if (r > 0) /* On unified we can use proper notifications */
2754 return 0;
2755
2756 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2757 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2758 * involves issuing kill(pid, 0) on all processes we watch. */
2759
2760 if (!u->rewatch_pids_event_source) {
2761 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2762
2763 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2764 if (r < 0)
2765 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2766
2767 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2768 if (r < 0)
2769 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2770
2771 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2772
2773 u->rewatch_pids_event_source = TAKE_PTR(s);
2774 }
2775
2776 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2777 if (r < 0)
2778 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2779
2780 return 0;
2781 }
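
/* Minimal sketch of the sd-event pattern used above, assuming an existing sd_event *e,
 * callback on_defer() and opaque userdata: a defer source at SD_EVENT_PRIORITY_IDLE only
 * runs once nothing more urgent is pending, and SD_EVENT_ONESHOT disables it again after a
 * single dispatch, so re-arming it via sd_event_source_set_enabled() yields a cheap
 * "run once, later" queue:
 *
 *     sd_event_source *s = NULL;
 *     sd_event_add_defer(e, &s, on_defer, userdata);
 *     sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
 *     sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
 */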
2782
2783 void unit_dequeue_rewatch_pids(Unit *u) {
2784 int r;
2785 assert(u);
2786
2787 if (!u->rewatch_pids_event_source)
2788 return;
2789
2790 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2791 if (r < 0)
2792 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2793
2794 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2795 }
2796
2797 bool unit_job_is_applicable(Unit *u, JobType j) {
2798 assert(u);
2799 assert(j >= 0 && j < _JOB_TYPE_MAX);
2800
2801 switch (j) {
2802
2803 case JOB_VERIFY_ACTIVE:
2804 case JOB_START:
2805 case JOB_NOP:
2806 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2807 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2808 * jobs for it. */
2809 return true;
2810
2811 case JOB_STOP:
2812 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2813 * external events), hence it makes no sense to permit enqueuing such a request either. */
2814 return !u->perpetual;
2815
2816 case JOB_RESTART:
2817 case JOB_TRY_RESTART:
2818 return unit_can_stop(u) && unit_can_start(u);
2819
2820 case JOB_RELOAD:
2821 case JOB_TRY_RELOAD:
2822 return unit_can_reload(u);
2823
2824 case JOB_RELOAD_OR_START:
2825 return unit_can_reload(u) && unit_can_start(u);
2826
2827 default:
2828 assert_not_reached("Invalid job type");
2829 }
2830 }
2831
2832 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2833 assert(u);
2834
2835 /* Only warn about some unit types */
2836 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2837 return;
2838
2839 if (streq_ptr(u->id, other))
2840 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2841 else
2842 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2843 }
2844
2845 static int unit_add_dependency_hashmap(
2846 Hashmap **h,
2847 Unit *other,
2848 UnitDependencyMask origin_mask,
2849 UnitDependencyMask destination_mask) {
2850
2851 UnitDependencyInfo info;
2852 int r;
2853
2854 assert(h);
2855 assert(other);
2856 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2857 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2858 assert(origin_mask > 0 || destination_mask > 0);
2859
2860 r = hashmap_ensure_allocated(h, NULL);
2861 if (r < 0)
2862 return r;
2863
2864 assert_cc(sizeof(void*) == sizeof(info));
2865
2866 info.data = hashmap_get(*h, other);
2867 if (info.data) {
2868 /* Entry already exists. Add in our mask. */
2869
2870 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2871 FLAGS_SET(destination_mask, info.destination_mask))
2872 return 0; /* NOP */
2873
2874 info.origin_mask |= origin_mask;
2875 info.destination_mask |= destination_mask;
2876
2877 r = hashmap_update(*h, other, info.data);
2878 } else {
2879 info = (UnitDependencyInfo) {
2880 .origin_mask = origin_mask,
2881 .destination_mask = destination_mask,
2882 };
2883
2884 r = hashmap_put(*h, other, info.data);
2885 }
2886 if (r < 0)
2887 return r;
2888
2889 return 1;
2890 }
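
/* Sketch of the mask packing relied on above: UnitDependencyInfo (declared in unit.h,
 * roughly as below) overlays the two 16-bit masks with a single void*, hence the
 * assert_cc(), so the hashmap stores the dependency metadata directly as its value pointer
 * without any separate allocation:
 *
 *     typedef union UnitDependencyInfo {
 *             void *data;
 *             struct {
 *                     UnitDependencyMask origin_mask:16;
 *                     UnitDependencyMask destination_mask:16;
 *             } _packed_;
 *     } UnitDependencyInfo;
 */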
2891
2892 int unit_add_dependency(
2893 Unit *u,
2894 UnitDependency d,
2895 Unit *other,
2896 bool add_reference,
2897 UnitDependencyMask mask) {
2898
2899 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2900 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2901 [UNIT_WANTS] = UNIT_WANTED_BY,
2902 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2903 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2904 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2905 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2906 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2907 [UNIT_WANTED_BY] = UNIT_WANTS,
2908 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2909 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2910 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2911 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2912 [UNIT_BEFORE] = UNIT_AFTER,
2913 [UNIT_AFTER] = UNIT_BEFORE,
2914 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2915 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2916 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2917 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2918 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2919 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2920 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2921 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2922 };
2923 Unit *original_u = u, *original_other = other;
2924 int r;
2925
2926 assert(u);
2927 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2928 assert(other);
2929
2930 u = unit_follow_merge(u);
2931 other = unit_follow_merge(other);
2932
2933 /* We won't allow dependencies on ourselves. We will not
2934 * consider them an error, however. */
2935 if (u == other) {
2936 maybe_warn_about_dependency(original_u, original_other->id, d);
2937 return 0;
2938 }
2939
2940 if (d == UNIT_AFTER && UNIT_VTABLE(u)->refuse_after) {
2941 log_unit_warning(u, "Requested dependency After=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(u->type));
2942 return 0;
2943 }
2944
2945 if (d == UNIT_BEFORE && UNIT_VTABLE(other)->refuse_after) {
2946 log_unit_warning(u, "Requested dependency Before=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(other->type));
2947 return 0;
2948 }
2949
2950 if (d == UNIT_ON_FAILURE && !UNIT_VTABLE(u)->can_fail) {
2951 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
2952 return 0;
2953 }
2954
2955 if (d == UNIT_TRIGGERS && !UNIT_VTABLE(u)->can_trigger)
2956 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2957 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
2958 if (d == UNIT_TRIGGERED_BY && !UNIT_VTABLE(other)->can_trigger)
2959 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2960 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
2961
2962 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2963 if (r < 0)
2964 return r;
2965
2966 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2967 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2968 if (r < 0)
2969 return r;
2970 }
2971
2972 if (add_reference) {
2973 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2974 if (r < 0)
2975 return r;
2976
2977 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
2978 if (r < 0)
2979 return r;
2980 }
2981
2982 unit_add_to_dbus_queue(u);
2983 return 0;
2984 }
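
/* Illustrative example (hypothetical units a and b): calling
 *
 *     unit_add_dependency(a, UNIT_REQUIRES, b, true, UNIT_DEPENDENCY_FILE);
 *
 * records UNIT_REQUIRES on a, the inverse UNIT_REQUIRED_BY on b (per inverse_table above),
 * and, since add_reference is true, additionally UNIT_REFERENCES on a and UNIT_REFERENCED_BY
 * on b. */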
2985
2986 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2987 int r;
2988
2989 assert(u);
2990
2991 r = unit_add_dependency(u, d, other, add_reference, mask);
2992 if (r < 0)
2993 return r;
2994
2995 return unit_add_dependency(u, e, other, add_reference, mask);
2996 }
2997
2998 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2999 int r;
3000
3001 assert(u);
3002 assert(name);
3003 assert(buf);
3004 assert(ret);
3005
3006 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3007 *buf = NULL;
3008 *ret = name;
3009 return 0;
3010 }
3011
3012 if (u->instance)
3013 r = unit_name_replace_instance(name, u->instance, buf);
3014 else {
3015 _cleanup_free_ char *i = NULL;
3016
3017 r = unit_name_to_prefix(u->id, &i);
3018 if (r < 0)
3019 return r;
3020
3021 r = unit_name_replace_instance(name, i, buf);
3022 }
3023 if (r < 0)
3024 return r;
3025
3026 *ret = *buf;
3027 return 0;
3028 }
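
/* Worked example (hypothetical names): for the instantiated unit foo@bar.service, resolving
 * the template name "dep@.service" yields "dep@bar.service" via unit_name_replace_instance(),
 * while a non-template name such as "dep.service" is passed through unchanged with *buf left
 * NULL. */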
3029
3030 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3031 _cleanup_free_ char *buf = NULL;
3032 Unit *other;
3033 int r;
3034
3035 assert(u);
3036 assert(name);
3037
3038 r = resolve_template(u, name, &buf, &name);
3039 if (r < 0)
3040 return r;
3041
3042 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3043 if (r < 0)
3044 return r;
3045
3046 return unit_add_dependency(u, d, other, add_reference, mask);
3047 }
3048
3049 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3050 _cleanup_free_ char *buf = NULL;
3051 Unit *other;
3052 int r;
3053
3054 assert(u);
3055 assert(name);
3056
3057 r = resolve_template(u, name, &buf, &name);
3058 if (r < 0)
3059 return r;
3060
3061 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3062 if (r < 0)
3063 return r;
3064
3065 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3066 }
3067
3068 int set_unit_path(const char *p) {
3069 /* This is mostly for debug purposes */
3070 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3071 return -errno;
3072
3073 return 0;
3074 }
3075
3076 char *unit_dbus_path(Unit *u) {
3077 assert(u);
3078
3079 if (!u->id)
3080 return NULL;
3081
3082 return unit_dbus_path_from_name(u->id);
3083 }
3084
3085 char *unit_dbus_path_invocation_id(Unit *u) {
3086 assert(u);
3087
3088 if (sd_id128_is_null(u->invocation_id))
3089 return NULL;
3090
3091 return unit_dbus_path_from_name(u->invocation_id_string);
3092 }
3093
3094 int unit_set_slice(Unit *u, Unit *slice) {
3095 assert(u);
3096 assert(slice);
3097
3098 /* Sets the unit's slice if it has not been set before. We are
3099 * extra careful to only allow this for units that actually have
3100 * a cgroup context. Also, we don't allow setting this for slices
3101 * (since the parent slice is derived from the name). Make
3102 * sure the unit we set as slice is actually a slice. */
3103
3104 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3105 return -EOPNOTSUPP;
3106
3107 if (u->type == UNIT_SLICE)
3108 return -EINVAL;
3109
3110 if (unit_active_state(u) != UNIT_INACTIVE)
3111 return -EBUSY;
3112
3113 if (slice->type != UNIT_SLICE)
3114 return -EINVAL;
3115
3116 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3117 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3118 return -EPERM;
3119
3120 if (UNIT_DEREF(u->slice) == slice)
3121 return 0;
3122
3123 /* Disallow slice changes if @u is already bound to cgroups */
3124 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3125 return -EBUSY;
3126
3127 unit_ref_set(&u->slice, u, slice);
3128 return 1;
3129 }
3130
3131 int unit_set_default_slice(Unit *u) {
3132 const char *slice_name;
3133 Unit *slice;
3134 int r;
3135
3136 assert(u);
3137
3138 if (UNIT_ISSET(u->slice))
3139 return 0;
3140
3141 if (u->instance) {
3142 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3143
3144 /* Implicitly place all instantiated units in their
3145 * own per-template slice */
3146
3147 r = unit_name_to_prefix(u->id, &prefix);
3148 if (r < 0)
3149 return r;
3150
3151 /* The prefix is already escaped, but it might include
3152 * "-" which has a special meaning for slice units,
3153 * hence escape it here extra. */
3154 escaped = unit_name_escape(prefix);
3155 if (!escaped)
3156 return -ENOMEM;
3157
3158 if (MANAGER_IS_SYSTEM(u->manager))
3159 slice_name = strjoina("system-", escaped, ".slice");
3160 else
3161 slice_name = strjoina(escaped, ".slice");
3162 } else
3163 slice_name =
3164 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3165 ? SPECIAL_SYSTEM_SLICE
3166 : SPECIAL_ROOT_SLICE;
3167
3168 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3169 if (r < 0)
3170 return r;
3171
3172 return unit_set_slice(u, slice);
3173 }
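
/* Worked example: for the instance unit getty@tty1.service in the system manager the prefix
 * is "getty", so the implicit default slice becomes system-getty.slice. Non-instantiated
 * units default to system.slice in the system manager, and to the root slice in the user
 * manager or for init.scope. */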
3174
3175 const char *unit_slice_name(Unit *u) {
3176 assert(u);
3177
3178 if (!UNIT_ISSET(u->slice))
3179 return NULL;
3180
3181 return UNIT_DEREF(u->slice)->id;
3182 }
3183
3184 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3185 _cleanup_free_ char *t = NULL;
3186 int r;
3187
3188 assert(u);
3189 assert(type);
3190 assert(_found);
3191
3192 r = unit_name_change_suffix(u->id, type, &t);
3193 if (r < 0)
3194 return r;
3195 if (unit_has_name(u, t))
3196 return -EINVAL;
3197
3198 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3199 assert(r < 0 || *_found != u);
3200 return r;
3201 }
3202
3203 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3204 const char *new_owner;
3205 Unit *u = userdata;
3206 int r;
3207
3208 assert(message);
3209 assert(u);
3210
3211 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3212 if (r < 0) {
3213 bus_log_parse_error(r);
3214 return 0;
3215 }
3216
3217 if (UNIT_VTABLE(u)->bus_name_owner_change)
3218 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3219
3220 return 0;
3221 }
3222
3223 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3224 const sd_bus_error *e;
3225 const char *new_owner;
3226 Unit *u = userdata;
3227 int r;
3228
3229 assert(message);
3230 assert(u);
3231
3232 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3233
3234 e = sd_bus_message_get_error(message);
3235 if (e) {
3236 if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3237 log_unit_error(u, "Unexpected error response from GetNameOwner(): %s", e->message);
3238
3239 new_owner = NULL;
3240 } else {
3241 r = sd_bus_message_read(message, "s", &new_owner);
3242 if (r < 0)
3243 return bus_log_parse_error(r);
3244
3245 assert(!isempty(new_owner));
3246 }
3247
3248 if (UNIT_VTABLE(u)->bus_name_owner_change)
3249 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3250
3251 return 0;
3252 }
3253
3254 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3255 const char *match;
3256 int r;
3257
3258 assert(u);
3259 assert(bus);
3260 assert(name);
3261
3262 if (u->match_bus_slot || u->get_name_owner_slot)
3263 return -EBUSY;
3264
3265 match = strjoina("type='signal',"
3266 "sender='org.freedesktop.DBus',"
3267 "path='/org/freedesktop/DBus',"
3268 "interface='org.freedesktop.DBus',"
3269 "member='NameOwnerChanged',"
3270 "arg0='", name, "'");
3271
3272 r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3273 if (r < 0)
3274 return r;
3275
3276 r = sd_bus_call_method_async(
3277 bus,
3278 &u->get_name_owner_slot,
3279 "org.freedesktop.DBus",
3280 "/org/freedesktop/DBus",
3281 "org.freedesktop.DBus",
3282 "GetNameOwner",
3283 get_name_owner_handler,
3284 u,
3285 "s", name);
3286 if (r < 0) {
3287 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3288 return r;
3289 }
3290
3291 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3292 return 0;
3293 }
3294
3295 int unit_watch_bus_name(Unit *u, const char *name) {
3296 int r;
3297
3298 assert(u);
3299 assert(name);
3300
3301 /* Watch a specific name on the bus. We only support one unit
3302 * watching each name for now. */
3303
3304 if (u->manager->api_bus) {
3305 /* If the bus is already available, install the match directly.
3306 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3307 r = unit_install_bus_match(u, u->manager->api_bus, name);
3308 if (r < 0)
3309 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3310 }
3311
3312 r = hashmap_put(u->manager->watch_bus, name, u);
3313 if (r < 0) {
3314 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3315 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3316 return log_warning_errno(r, "Failed to add bus name to hashmap: %m");
3317 }
3318
3319 return 0;
3320 }
3321
3322 void unit_unwatch_bus_name(Unit *u, const char *name) {
3323 assert(u);
3324 assert(name);
3325
3326 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3327 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3328 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3329 }
3330
3331 bool unit_can_serialize(Unit *u) {
3332 assert(u);
3333
3334 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3335 }
3336
3337 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3338 _cleanup_free_ char *s = NULL;
3339 int r;
3340
3341 assert(f);
3342 assert(key);
3343
3344 if (mask == 0)
3345 return 0;
3346
3347 r = cg_mask_to_string(mask, &s);
3348 if (r < 0)
3349 return log_error_errno(r, "Failed to format cgroup mask: %m");
3350
3351 return serialize_item(f, key, s);
3352 }
3353
3354 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3355 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3356 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3357 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3358 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3359 };
3360
3361 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3362 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3363 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3364 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3365 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3366 };
3367
3368 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3369 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3370 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3371 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3372 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3373 };
3374
3375 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3376 CGroupIPAccountingMetric m;
3377 int r;
3378
3379 assert(u);
3380 assert(f);
3381 assert(fds);
3382
3383 if (unit_can_serialize(u)) {
3384 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3385 if (r < 0)
3386 return r;
3387 }
3388
3389 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3390
3391 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3392 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3393 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3394 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3395
3396 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3397 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3398
3399 if (dual_timestamp_is_set(&u->condition_timestamp))
3400 (void) serialize_bool(f, "condition-result", u->condition_result);
3401
3402 if (dual_timestamp_is_set(&u->assert_timestamp))
3403 (void) serialize_bool(f, "assert-result", u->assert_result);
3404
3405 (void) serialize_bool(f, "transient", u->transient);
3406 (void) serialize_bool(f, "in-audit", u->in_audit);
3407
3408 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3409 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3410 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3411 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_ratelimit_interval);
3412 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_ratelimit_burst);
3413
3414 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3415 if (u->cpu_usage_last != NSEC_INFINITY)
3416 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3417
3418 if (u->oom_kill_last > 0)
3419 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3420
3421 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3422 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3423
3424 if (u->io_accounting_last[im] != UINT64_MAX)
3425 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3426 }
3427
3428 if (u->cgroup_path)
3429 (void) serialize_item(f, "cgroup", u->cgroup_path);
3430
3431 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3432 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3433 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3434 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3435
3436 if (uid_is_valid(u->ref_uid))
3437 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3438 if (gid_is_valid(u->ref_gid))
3439 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3440
3441 if (!sd_id128_is_null(u->invocation_id))
3442 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3443
3444 bus_track_serialize(u->bus_track, f, "ref");
3445
3446 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3447 uint64_t v;
3448
3449 r = unit_get_ip_accounting(u, m, &v);
3450 if (r >= 0)
3451 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3452 }
3453
3454 if (serialize_jobs) {
3455 if (u->job) {
3456 fputs("job\n", f);
3457 job_serialize(u->job, f);
3458 }
3459
3460 if (u->nop_job) {
3461 fputs("job\n", f);
3462 job_serialize(u->nop_job, f);
3463 }
3464 }
3465
3466 /* End marker */
3467 fputc('\n', f);
3468 return 0;
3469 }
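
/* Illustrative excerpt of the serialization stream produced above (example values): plain
 * key=value lines, an optional "job" marker followed by the job's own serialization, and an
 * empty line as end marker:
 *
 *     state-change-timestamp=1589017486124432 107244741
 *     transient=no
 *     cgroup=/system.slice/foo.service
 *     <empty line>
 */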
3470
3471 static int unit_deserialize_job(Unit *u, FILE *f) {
3472 _cleanup_(job_freep) Job *j = NULL;
3473 int r;
3474
3475 assert(u);
3476 assert(f);
3477
3478 j = job_new_raw(u);
3479 if (!j)
3480 return log_oom();
3481
3482 r = job_deserialize(j, f);
3483 if (r < 0)
3484 return r;
3485
3486 r = job_install_deserialized(j);
3487 if (r < 0)
3488 return r;
3489
3490 TAKE_PTR(j);
3491 return 0;
3492 }
3493
3494 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3495 int r;
3496
3497 assert(u);
3498 assert(f);
3499 assert(fds);
3500
3501 for (;;) {
3502 _cleanup_free_ char *line = NULL;
3503 char *l, *v;
3504 ssize_t m;
3505 size_t k;
3506
3507 r = read_line(f, LONG_LINE_MAX, &line);
3508 if (r < 0)
3509 return log_error_errno(r, "Failed to read serialization line: %m");
3510 if (r == 0) /* eof */
3511 break;
3512
3513 l = strstrip(line);
3514 if (isempty(l)) /* End marker */
3515 break;
3516
3517 k = strcspn(l, "=");
3518
3519 if (l[k] == '=') {
3520 l[k] = 0;
3521 v = l+k+1;
3522 } else
3523 v = l+k;
3524
3525 if (streq(l, "job")) {
3526 if (v[0] == '\0') {
3527 /* New-style serialized job */
3528 r = unit_deserialize_job(u, f);
3529 if (r < 0)
3530 return r;
3531 } else /* Legacy for pre-44 */
3532 log_unit_warning(u, "Updates from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3533 continue;
3534 } else if (streq(l, "state-change-timestamp")) {
3535 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3536 continue;
3537 } else if (streq(l, "inactive-exit-timestamp")) {
3538 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3539 continue;
3540 } else if (streq(l, "active-enter-timestamp")) {
3541 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3542 continue;
3543 } else if (streq(l, "active-exit-timestamp")) {
3544 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3545 continue;
3546 } else if (streq(l, "inactive-enter-timestamp")) {
3547 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3548 continue;
3549 } else if (streq(l, "condition-timestamp")) {
3550 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3551 continue;
3552 } else if (streq(l, "assert-timestamp")) {
3553 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3554 continue;
3555 } else if (streq(l, "condition-result")) {
3556
3557 r = parse_boolean(v);
3558 if (r < 0)
3559 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3560 else
3561 u->condition_result = r;
3562
3563 continue;
3564
3565 } else if (streq(l, "assert-result")) {
3566
3567 r = parse_boolean(v);
3568 if (r < 0)
3569 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3570 else
3571 u->assert_result = r;
3572
3573 continue;
3574
3575 } else if (streq(l, "transient")) {
3576
3577 r = parse_boolean(v);
3578 if (r < 0)
3579 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3580 else
3581 u->transient = r;
3582
3583 continue;
3584
3585 } else if (streq(l, "in-audit")) {
3586
3587 r = parse_boolean(v);
3588 if (r < 0)
3589 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3590 else
3591 u->in_audit = r;
3592
3593 continue;
3594
3595 } else if (streq(l, "exported-invocation-id")) {
3596
3597 r = parse_boolean(v);
3598 if (r < 0)
3599 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3600 else
3601 u->exported_invocation_id = r;
3602
3603 continue;
3604
3605 } else if (streq(l, "exported-log-level-max")) {
3606
3607 r = parse_boolean(v);
3608 if (r < 0)
3609 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3610 else
3611 u->exported_log_level_max = r;
3612
3613 continue;
3614
3615 } else if (streq(l, "exported-log-extra-fields")) {
3616
3617 r = parse_boolean(v);
3618 if (r < 0)
3619 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3620 else
3621 u->exported_log_extra_fields = r;
3622
3623 continue;
3624
3625 } else if (streq(l, "exported-log-rate-limit-interval")) {
3626
3627 r = parse_boolean(v);
3628 if (r < 0)
3629 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3630 else
3631 u->exported_log_ratelimit_interval = r;
3632
3633 continue;
3634
3635 } else if (streq(l, "exported-log-rate-limit-burst")) {
3636
3637 r = parse_boolean(v);
3638 if (r < 0)
3639 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3640 else
3641 u->exported_log_ratelimit_burst = r;
3642
3643 continue;
3644
3645 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3646
3647 r = safe_atou64(v, &u->cpu_usage_base);
3648 if (r < 0)
3649 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3650
3651 continue;
3652
3653 } else if (streq(l, "cpu-usage-last")) {
3654
3655 r = safe_atou64(v, &u->cpu_usage_last);
3656 if (r < 0)
3657 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3658
3659 continue;
3660
3661 } else if (streq(l, "oom-kill-last")) {
3662
3663 r = safe_atou64(v, &u->oom_kill_last);
3664 if (r < 0)
3665 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3666
3667 continue;
3668
3669 } else if (streq(l, "cgroup")) {
3670
3671 r = unit_set_cgroup_path(u, v);
3672 if (r < 0)
3673 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3674
3675 (void) unit_watch_cgroup(u);
3676 (void) unit_watch_cgroup_memory(u);
3677
3678 continue;
3679 } else if (streq(l, "cgroup-realized")) {
3680 int b;
3681
3682 b = parse_boolean(v);
3683 if (b < 0)
3684 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3685 else
3686 u->cgroup_realized = b;
3687
3688 continue;
3689
3690 } else if (streq(l, "cgroup-realized-mask")) {
3691
3692 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3693 if (r < 0)
3694 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3695 continue;
3696
3697 } else if (streq(l, "cgroup-enabled-mask")) {
3698
3699 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3700 if (r < 0)
3701 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3702 continue;
3703
3704 } else if (streq(l, "cgroup-invalidated-mask")) {
3705
3706 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3707 if (r < 0)
3708 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3709 continue;
3710
3711 } else if (streq(l, "ref-uid")) {
3712 uid_t uid;
3713
3714 r = parse_uid(v, &uid);
3715 if (r < 0)
3716 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3717 else
3718 unit_ref_uid_gid(u, uid, GID_INVALID);
3719
3720 continue;
3721
3722 } else if (streq(l, "ref-gid")) {
3723 gid_t gid;
3724
3725 r = parse_gid(v, &gid);
3726 if (r < 0)
3727 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3728 else
3729 unit_ref_uid_gid(u, UID_INVALID, gid);
3730
3731 continue;
3732
3733 } else if (streq(l, "ref")) {
3734
3735 r = strv_extend(&u->deserialized_refs, v);
3736 if (r < 0)
3737 return log_oom();
3738
3739 continue;
3740 } else if (streq(l, "invocation-id")) {
3741 sd_id128_t id;
3742
3743 r = sd_id128_from_string(v, &id);
3744 if (r < 0)
3745 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3746 else {
3747 r = unit_set_invocation_id(u, id);
3748 if (r < 0)
3749 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3750 }
3751
3752 continue;
3753 }
3754
3755 /* Check if this is an IP accounting metric serialization field */
3756 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3757 if (m >= 0) {
3758 uint64_t c;
3759
3760 r = safe_atou64(v, &c);
3761 if (r < 0)
3762 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3763 else
3764 u->ip_accounting_extra[m] = c;
3765 continue;
3766 }
3767
3768 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3769 if (m >= 0) {
3770 uint64_t c;
3771
3772 r = safe_atou64(v, &c);
3773 if (r < 0)
3774 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3775 else
3776 u->io_accounting_base[m] = c;
3777 continue;
3778 }
3779
3780 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3781 if (m >= 0) {
3782 uint64_t c;
3783
3784 r = safe_atou64(v, &c);
3785 if (r < 0)
3786 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3787 else
3788 u->io_accounting_last[m] = c;
3789 continue;
3790 }
3791
3792 if (unit_can_serialize(u)) {
3793 r = exec_runtime_deserialize_compat(u, l, v, fds);
3794 if (r < 0) {
3795 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3796 continue;
3797 }
3798
3799 /* Returns positive if key was handled by the call */
3800 if (r > 0)
3801 continue;
3802
3803 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3804 if (r < 0)
3805 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3806 }
3807 }
3808
3809 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3810 * useful, so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3811 * before 228 where the base for timeouts was not persistent across reboots. */
3812
3813 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3814 dual_timestamp_get(&u->state_change_timestamp);
3815
3816 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3817 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3818 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3819 unit_invalidate_cgroup_bpf(u);
3820
3821 return 0;
3822 }
3823
3824 int unit_deserialize_skip(FILE *f) {
3825 int r;
3826 assert(f);
3827
3828 /* Skip serialized data for this unit. We don't know what it is. */
3829
3830 for (;;) {
3831 _cleanup_free_ char *line = NULL;
3832 char *l;
3833
3834 r = read_line(f, LONG_LINE_MAX, &line);
3835 if (r < 0)
3836 return log_error_errno(r, "Failed to read serialization line: %m");
3837 if (r == 0)
3838 return 0;
3839
3840 l = strstrip(line);
3841
3842 /* End marker */
3843 if (isempty(l))
3844 return 1;
3845 }
3846 }
3847
3848 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3849 _cleanup_free_ char *e = NULL;
3850 Unit *device;
3851 int r;
3852
3853 assert(u);
3854
3855 /* Adds in links to the device node that this unit is based on */
3856 if (isempty(what))
3857 return 0;
3858
3859 if (!is_device_path(what))
3860 return 0;
3861
3862 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3863 if (!unit_type_supported(UNIT_DEVICE))
3864 return 0;
3865
3866 r = unit_name_from_path(what, ".device", &e);
3867 if (r < 0)
3868 return r;
3869
3870 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3871 if (r < 0)
3872 return r;
3873
3874 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3875 dep = UNIT_BINDS_TO;
3876
3877 return unit_add_two_dependencies(u, UNIT_AFTER,
3878 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3879 device, true, mask);
3880 }
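/* A minimal sketch of the name translation performed above, assuming the usual unit
 * name escaping rules (leading "/" dropped, "/" mapped to "-"); the device path is a
 * made-up example:
 *
 *     _cleanup_free_ char *e = NULL;
 *     assert_se(unit_name_from_path("/dev/sda1", ".device", &e) >= 0);
 *     assert_se(streq(e, "dev-sda1.device"));
 */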
3881
3882 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3883 _cleanup_free_ char *escaped = NULL, *target = NULL;
3884 int r;
3885
3886 assert(u);
3887
3888 if (isempty(what))
3889 return 0;
3890
3891 if (!path_startswith(what, "/dev/"))
3892 return 0;
3893
3894 /* If we don't support devices, then also don't bother with blockdev@.target */
3895 if (!unit_type_supported(UNIT_DEVICE))
3896 return 0;
3897
3898 r = unit_name_path_escape(what, &escaped);
3899 if (r < 0)
3900 return r;
3901
3902 r = unit_name_build("blockdev", escaped, ".target", &target);
3903 if (r < 0)
3904 return r;
3905
3906 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3907 }
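/* Worked example for the two steps above, with a hypothetical device path:
 * unit_name_path_escape("/dev/sda1") yields "dev-sda1", and
 * unit_name_build("blockdev", "dev-sda1", ".target") then yields
 * "blockdev@dev-sda1.target", i.e. the per-device instance of the template that this
 * unit is ordered after. */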
3908
3909 int unit_coldplug(Unit *u) {
3910 int r = 0, q;
3911 char **i;
3912 Job *uj;
3913
3914 assert(u);
3915
3916 /* Make sure we don't enter a loop when coldplugging recursively. */
3917 if (u->coldplugged)
3918 return 0;
3919
3920 u->coldplugged = true;
3921
3922 STRV_FOREACH(i, u->deserialized_refs) {
3923 q = bus_unit_track_add_name(u, *i);
3924 if (q < 0 && r >= 0)
3925 r = q;
3926 }
3927 u->deserialized_refs = strv_free(u->deserialized_refs);
3928
3929 if (UNIT_VTABLE(u)->coldplug) {
3930 q = UNIT_VTABLE(u)->coldplug(u);
3931 if (q < 0 && r >= 0)
3932 r = q;
3933 }
3934
3935 uj = u->job ?: u->nop_job;
3936 if (uj) {
3937 q = job_coldplug(uj);
3938 if (q < 0 && r >= 0)
3939 r = q;
3940 }
3941
3942 return r;
3943 }
3944
3945 void unit_catchup(Unit *u) {
3946 assert(u);
3947
3948 if (UNIT_VTABLE(u)->catchup)
3949 UNIT_VTABLE(u)->catchup(u);
3950 }
3951
3952 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3953 struct stat st;
3954
3955 if (!path)
3956 return false;
3957
3958 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3959 * are never out-of-date. */
3960 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3961 return false;
3962
3963 if (stat(path, &st) < 0)
3964 /* What, cannot access this anymore? */
3965 return true;
3966
3967 if (path_masked)
3968 /* For masked files check if they are still so */
3969 return !null_or_empty(&st);
3970 else
3971 /* For non-empty files check the mtime */
3972 return timespec_load(&st.st_mtim) > mtime;
3975 }
3976
3977 bool unit_need_daemon_reload(Unit *u) {
3978 _cleanup_strv_free_ char **t = NULL;
3979 char **path;
3980
3981 assert(u);
3982
3983 /* For unit files, we allow masking… */
3984 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3985 u->load_state == UNIT_MASKED))
3986 return true;
3987
3988 /* Source paths should not be masked… */
3989 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3990 return true;
3991
3992 if (u->load_state == UNIT_LOADED)
3993 (void) unit_find_dropin_paths(u, &t);
3994 if (!strv_equal(u->dropin_paths, t))
3995 return true;
3996
3997 /* … any drop-ins that are masked are simply omitted from the list. */
3998 STRV_FOREACH(path, u->dropin_paths)
3999 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4000 return true;
4001
4002 return false;
4003 }
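/* Illustrative use only: this predicate is what backs the unit's NeedDaemonReload
 * D-Bus property, so a caller might do something like the following (the log message
 * is a hypothetical example, not systemd's exact wording):
 *
 *     if (unit_need_daemon_reload(u))
 *             log_unit_info(u, "Unit file changed on disk; a daemon-reload is recommended.");
 */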
4004
4005 void unit_reset_failed(Unit *u) {
4006 assert(u);
4007
4008 if (UNIT_VTABLE(u)->reset_failed)
4009 UNIT_VTABLE(u)->reset_failed(u);
4010
4011 ratelimit_reset(&u->start_ratelimit);
4012 u->start_limit_hit = false;
4013 }
4014
4015 Unit *unit_following(Unit *u) {
4016 assert(u);
4017
4018 if (UNIT_VTABLE(u)->following)
4019 return UNIT_VTABLE(u)->following(u);
4020
4021 return NULL;
4022 }
4023
4024 bool unit_stop_pending(Unit *u) {
4025 assert(u);
4026
4027 /* This call does not check the current state of the unit. It's
4028 * hence useful to be called from state change calls of the
4029 * unit itself, where the state isn't updated yet. This is
4030 * different from unit_inactive_or_pending() which checks both
4031 * the current state and for a queued job. */
4032
4033 return unit_has_job_type(u, JOB_STOP);
4034 }
4035
4036 bool unit_inactive_or_pending(Unit *u) {
4037 assert(u);
4038
4039 /* Returns true if the unit is inactive or going down */
4040
4041 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4042 return true;
4043
4044 if (unit_stop_pending(u))
4045 return true;
4046
4047 return false;
4048 }
4049
4050 bool unit_active_or_pending(Unit *u) {
4051 assert(u);
4052
4053 /* Returns true if the unit is active or going up */
4054
4055 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4056 return true;
4057
4058 if (u->job &&
4059 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4060 return true;
4061
4062 return false;
4063 }
4064
4065 bool unit_will_restart_default(Unit *u) {
4066 assert(u);
4067
4068 return unit_has_job_type(u, JOB_START);
4069 }
4070
4071 bool unit_will_restart(Unit *u) {
4072 assert(u);
4073
4074 if (!UNIT_VTABLE(u)->will_restart)
4075 return false;
4076
4077 return UNIT_VTABLE(u)->will_restart(u);
4078 }
4079
4080 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4081 assert(u);
4082 assert(w >= 0 && w < _KILL_WHO_MAX);
4083 assert(SIGNAL_VALID(signo));
4084
4085 if (!UNIT_VTABLE(u)->kill)
4086 return -EOPNOTSUPP;
4087
4088 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4089 }
4090
4091 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4092 _cleanup_set_free_ Set *pid_set = NULL;
4093 int r;
4094
4095 pid_set = set_new(NULL);
4096 if (!pid_set)
4097 return NULL;
4098
4099 /* Exclude the main/control pids from being killed via the cgroup */
4100 if (main_pid > 0) {
4101 r = set_put(pid_set, PID_TO_PTR(main_pid));
4102 if (r < 0)
4103 return NULL;
4104 }
4105
4106 if (control_pid > 0) {
4107 r = set_put(pid_set, PID_TO_PTR(control_pid));
4108 if (r < 0)
4109 return NULL;
4110 }
4111
4112 return TAKE_PTR(pid_set);
4113 }
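/* Usage sketch, with made-up PIDs. PID_TO_PTR() stores the PID directly in the
 * pointer value, so the set needs no per-entry allocations:
 *
 *     _cleanup_set_free_ Set *s = unit_pid_set(4711, 4712);
 *     if (!s)
 *             return -ENOMEM;
 *     assert_se(set_contains(s, PID_TO_PTR(4711)));
 */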
4114
4115 int unit_kill_common(
4116 Unit *u,
4117 KillWho who,
4118 int signo,
4119 pid_t main_pid,
4120 pid_t control_pid,
4121 sd_bus_error *error) {
4122
4123 int r = 0;
4124 bool killed = false;
4125
4126 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4127 if (main_pid < 0)
4128 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4129 else if (main_pid == 0)
4130 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4131 }
4132
4133 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4134 if (control_pid < 0)
4135 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4136 else if (control_pid == 0)
4137 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4138 }
4139
4140 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4141 if (control_pid > 0) {
4142 if (kill(control_pid, signo) < 0)
4143 r = -errno;
4144 else
4145 killed = true;
4146 }
4147
4148 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4149 if (main_pid > 0) {
4150 if (kill(main_pid, signo) < 0)
4151 r = -errno;
4152 else
4153 killed = true;
4154 }
4155
4156 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4157 _cleanup_set_free_ Set *pid_set = NULL;
4158 int q;
4159
4160 /* Exclude the main/control pids from being killed via the cgroup */
4161 pid_set = unit_pid_set(main_pid, control_pid);
4162 if (!pid_set)
4163 return -ENOMEM;
4164
4165 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4166 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4167 r = q;
4168 else
4169 killed = true;
4170 }
4171
4172 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4173 return -ESRCH;
4174
4175 return r;
4176 }
4177
4178 int unit_following_set(Unit *u, Set **s) {
4179 assert(u);
4180 assert(s);
4181
4182 if (UNIT_VTABLE(u)->following_set)
4183 return UNIT_VTABLE(u)->following_set(u, s);
4184
4185 *s = NULL;
4186 return 0;
4187 }
4188
4189 UnitFileState unit_get_unit_file_state(Unit *u) {
4190 int r;
4191
4192 assert(u);
4193
4194 if (u->unit_file_state < 0 && u->fragment_path) {
4195 r = unit_file_get_state(
4196 u->manager->unit_file_scope,
4197 NULL,
4198 u->id,
4199 &u->unit_file_state);
4200 if (r < 0)
4201 u->unit_file_state = UNIT_FILE_BAD;
4202 }
4203
4204 return u->unit_file_state;
4205 }
4206
4207 int unit_get_unit_file_preset(Unit *u) {
4208 assert(u);
4209
4210 if (u->unit_file_preset < 0 && u->fragment_path)
4211 u->unit_file_preset = unit_file_query_preset(
4212 u->manager->unit_file_scope,
4213 NULL,
4214 basename(u->fragment_path));
4215
4216 return u->unit_file_preset;
4217 }
4218
4219 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4220 assert(ref);
4221 assert(source);
4222 assert(target);
4223
4224 if (ref->target)
4225 unit_ref_unset(ref);
4226
4227 ref->source = source;
4228 ref->target = target;
4229 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4230 return target;
4231 }
4232
4233 void unit_ref_unset(UnitRef *ref) {
4234 assert(ref);
4235
4236 if (!ref->target)
4237 return;
4238
4239 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4240 * be unreferenced now. */
4241 unit_add_to_gc_queue(ref->target);
4242
4243 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4244 ref->source = ref->target = NULL;
4245 }
4246
4247 static int user_from_unit_name(Unit *u, char **ret) {
4248
4249 static const uint8_t hash_key[] = {
4250 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4251 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4252 };
4253
4254 _cleanup_free_ char *n = NULL;
4255 int r;
4256
4257 r = unit_name_to_prefix(u->id, &n);
4258 if (r < 0)
4259 return r;
4260
4261 if (valid_user_group_name(n)) {
4262 *ret = TAKE_PTR(n);
4263 return 0;
4264 }
4265
4266 /* If we can't use the unit name as a user name, then let's hash it and use that */
4267 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4268 return -ENOMEM;
4269
4270 return 0;
4271 }
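/* Examples of the mapping above (unit names made up): for "foo.service" the prefix
 * "foo" is a valid user name and is used as-is; for a prefix that is not a valid
 * user name (say, one exceeding the length limit), the result is "_du" followed by
 * 16 hex digits of the prefix's keyed siphash24(), i.e. something of the shape
 * "_du91a3b2c4d5e6f708". */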
4272
4273 int unit_patch_contexts(Unit *u) {
4274 CGroupContext *cc;
4275 ExecContext *ec;
4276 unsigned i;
4277 int r;
4278
4279 assert(u);
4280
4281 /* Patch in the manager defaults into the exec and cgroup
4282 * contexts, _after_ the rest of the settings have been
4283 * initialized */
4284
4285 ec = unit_get_exec_context(u);
4286 if (ec) {
4287 /* This only copies in the ones that need memory */
4288 for (i = 0; i < _RLIMIT_MAX; i++)
4289 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4290 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4291 if (!ec->rlimit[i])
4292 return -ENOMEM;
4293 }
4294
4295 if (MANAGER_IS_USER(u->manager) &&
4296 !ec->working_directory) {
4297
4298 r = get_home_dir(&ec->working_directory);
4299 if (r < 0)
4300 return r;
4301
4302 /* Allow user services to run, even if the
4303 * home directory is missing */
4304 ec->working_directory_missing_ok = true;
4305 }
4306
4307 if (ec->private_devices)
4308 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4309
4310 if (ec->protect_kernel_modules)
4311 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4312
4313 if (ec->protect_kernel_logs)
4314 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4315
4316 if (ec->dynamic_user) {
4317 if (!ec->user) {
4318 r = user_from_unit_name(u, &ec->user);
4319 if (r < 0)
4320 return r;
4321 }
4322
4323 if (!ec->group) {
4324 ec->group = strdup(ec->user);
4325 if (!ec->group)
4326 return -ENOMEM;
4327 }
4328
4329 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4330 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4331 * sandbox. */
4332
4333 ec->private_tmp = true;
4334 ec->remove_ipc = true;
4335 ec->protect_system = PROTECT_SYSTEM_STRICT;
4336 if (ec->protect_home == PROTECT_HOME_NO)
4337 ec->protect_home = PROTECT_HOME_READ_ONLY;
4338
4339 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4340 * them. */
4341 ec->no_new_privileges = true;
4342 ec->restrict_suid_sgid = true;
4343 }
4344 }
4345
4346 cc = unit_get_cgroup_context(u);
4347 if (cc && ec) {
4348
4349 if (ec->private_devices &&
4350 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4351 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4352
4353 if (ec->root_image &&
4354 (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
4355
4356 /* When RootImage= is specified, the following devices are touched. */
4357 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4358 if (r < 0)
4359 return r;
4360
4361 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4362 if (r < 0)
4363 return r;
4364
4365 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4366 if (r < 0)
4367 return r;
4368
4369 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices */
4370 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "modprobe@loop.service", true, UNIT_DEPENDENCY_FILE);
4371 if (r < 0)
4372 return r;
4373 }
4374 }
4375
4376 return 0;
4377 }
4378
4379 ExecContext *unit_get_exec_context(Unit *u) {
4380 size_t offset;
4381 assert(u);
4382
4383 if (u->type < 0)
4384 return NULL;
4385
4386 offset = UNIT_VTABLE(u)->exec_context_offset;
4387 if (offset <= 0)
4388 return NULL;
4389
4390 return (ExecContext*) ((uint8_t*) u + offset);
4391 }
4392
4393 KillContext *unit_get_kill_context(Unit *u) {
4394 size_t offset;
4395 assert(u);
4396
4397 if (u->type < 0)
4398 return NULL;
4399
4400 offset = UNIT_VTABLE(u)->kill_context_offset;
4401 if (offset <= 0)
4402 return NULL;
4403
4404 return (KillContext*) ((uint8_t*) u + offset);
4405 }
4406
4407 CGroupContext *unit_get_cgroup_context(Unit *u) {
4408 size_t offset;
4409
4410 if (u->type < 0)
4411 return NULL;
4412
4413 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4414 if (offset <= 0)
4415 return NULL;
4416
4417 return (CGroupContext*) ((uint8_t*) u + offset);
4418 }
4419
4420 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4421 size_t offset;
4422
4423 if (u->type < 0)
4424 return NULL;
4425
4426 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4427 if (offset <= 0)
4428 return NULL;
4429
4430 return *(ExecRuntime**) ((uint8_t*) u + offset);
4431 }
4432
4433 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4434 assert(u);
4435
4436 if (UNIT_WRITE_FLAGS_NOOP(flags))
4437 return NULL;
4438
4439 if (u->transient) /* Always redirect drop-ins for transient units into the transient directory. */
4440 return u->manager->lookup_paths.transient;
4441
4442 if (flags & UNIT_PERSISTENT)
4443 return u->manager->lookup_paths.persistent_control;
4444
4445 if (flags & UNIT_RUNTIME)
4446 return u->manager->lookup_paths.runtime_control;
4447
4448 return NULL;
4449 }
4450
4451 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4452 char *ret = NULL;
4453
4454 if (!s)
4455 return NULL;
4456
4457 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4458 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4459 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4460 * escaped version, but, when buf is passed, *buf only contains a pointer if an allocation was necessary. If buf is
4461 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4462 * allocations. */
4463
4464 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4465 ret = specifier_escape(s);
4466 if (!ret)
4467 return NULL;
4468
4469 s = ret;
4470 }
4471
4472 if (flags & UNIT_ESCAPE_C) {
4473 char *a;
4474
4475 a = cescape(s);
4476 free(ret);
4477 if (!a)
4478 return NULL;
4479
4480 ret = a;
4481 }
4482
4483 if (buf) {
4484 *buf = ret;
4485 return ret ?: (char*) s;
4486 }
4487
4488 return ret ?: strdup(s);
4489 }
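/* Usage sketch for the ownership contract described above (input made up):
 *
 *     _cleanup_free_ char *buf = NULL;
 *     const char *escaped = unit_escape_setting("50%", UNIT_ESCAPE_SPECIFIERS, &buf);
 *     if (!escaped)
 *             return -ENOMEM;
 *
 * Here "50%" needs escaping, hence escaped points to the allocation "50%%" that is
 * also stored in buf; for an input without '%' characters, buf would stay NULL and
 * escaped would alias the input string. */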
4490
4491 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4492 _cleanup_free_ char *result = NULL;
4493 size_t n = 0, allocated = 0;
4494 char **i;
4495
4496 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4497 * way suitable for ExecStart= stanzas */
4498
4499 STRV_FOREACH(i, l) {
4500 _cleanup_free_ char *buf = NULL;
4501 const char *p;
4502 size_t a;
4503 char *q;
4504
4505 p = unit_escape_setting(*i, flags, &buf);
4506 if (!p)
4507 return NULL;
4508
4509 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4510 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4511 return NULL;
4512
4513 q = result + n;
4514 if (n > 0)
4515 *(q++) = ' ';
4516
4517 *(q++) = '"';
4518 q = stpcpy(q, p);
4519 *(q++) = '"';
4520
4521 n += a;
4522 }
4523
4524 if (!GREEDY_REALLOC(result, allocated, n + 1))
4525 return NULL;
4526
4527 result[n] = 0;
4528
4529 return TAKE_PTR(result);
4530 }
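/* Worked example (argument vector made up): for l = { "/bin/echo", "hello world" }
 * and UNIT_ESCAPE_C, the result is
 *
 *     "/bin/echo" "hello world"
 *
 * i.e. each entry C-escaped and double-quoted, joined by single spaces, which is a
 * form that ExecStart= parsing accepts back. */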
4531
4532 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4533 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4534 const char *dir, *wrapped;
4535 int r;
4536
4537 assert(u);
4538 assert(name);
4539 assert(data);
4540
4541 if (UNIT_WRITE_FLAGS_NOOP(flags))
4542 return 0;
4543
4544 data = unit_escape_setting(data, flags, &escaped);
4545 if (!data)
4546 return -ENOMEM;
4547
4548 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4549 * previous section header is the same */
4550
4551 if (flags & UNIT_PRIVATE) {
4552 if (!UNIT_VTABLE(u)->private_section)
4553 return -EINVAL;
4554
4555 if (!u->transient_file || u->last_section_private < 0)
4556 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4557 else if (u->last_section_private == 0)
4558 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4559 } else {
4560 if (!u->transient_file || u->last_section_private < 0)
4561 data = strjoina("[Unit]\n", data);
4562 else if (u->last_section_private > 0)
4563 data = strjoina("\n[Unit]\n", data);
4564 }
4565
4566 if (u->transient_file) {
4567 /* When this is a transient unit file being created, let's not create a new drop-in but instead
4568 * write to the transient unit file. */
4569 fputs(data, u->transient_file);
4570
4571 if (!endswith(data, "\n"))
4572 fputc('\n', u->transient_file);
4573
4574 /* Remember which section we wrote this entry to */
4575 u->last_section_private = !!(flags & UNIT_PRIVATE);
4576 return 0;
4577 }
4578
4579 dir = unit_drop_in_dir(u, flags);
4580 if (!dir)
4581 return -EINVAL;
4582
4583 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4584 "# or an equivalent operation. Do not edit.\n",
4585 data,
4586 "\n");
4587
4588 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4589 if (r < 0)
4590 return r;
4591
4592 (void) mkdir_p_label(p, 0755);
4593
4594 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4595 * recreate the cache after every drop-in we write. */
4596 if (u->manager->unit_path_cache) {
4597 r = set_put_strdup(u->manager->unit_path_cache, p);
4598 if (r < 0)
4599 return r;
4600 }
4601
4602 r = write_string_file_atomic_label(q, wrapped);
4603 if (r < 0)
4604 return r;
4605
4606 r = strv_push(&u->dropin_paths, q);
4607 if (r < 0)
4608 return r;
4609 q = NULL;
4610
4611 strv_uniq(u->dropin_paths);
4612
4613 u->dropin_mtime = now(CLOCK_REALTIME);
4614
4615 return 0;
4616 }
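/* Illustrative result, under the assumption of a system-manager unit "foo.service"
 * and setting name "memory" written with UNIT_PERSISTENT: the drop-in would land in
 * a path shaped like
 *
 *     /etc/systemd/system.control/foo.service.d/50-memory.conf
 *
 * containing the "Do not edit" header, the section prefix and the data. The unit and
 * setting names are made up; the directory comes from lookup_paths.persistent_control
 * and may differ per installation. */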
4617
4618 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4619 _cleanup_free_ char *p = NULL;
4620 va_list ap;
4621 int r;
4622
4623 assert(u);
4624 assert(name);
4625 assert(format);
4626
4627 if (UNIT_WRITE_FLAGS_NOOP(flags))
4628 return 0;
4629
4630 va_start(ap, format);
4631 r = vasprintf(&p, format, ap);
4632 va_end(ap);
4633
4634 if (r < 0)
4635 return -ENOMEM;
4636
4637 return unit_write_setting(u, flags, name, p);
4638 }
4639
4640 int unit_make_transient(Unit *u) {
4641 _cleanup_free_ char *path = NULL;
4642 FILE *f;
4643
4644 assert(u);
4645
4646 if (!UNIT_VTABLE(u)->can_transient)
4647 return -EOPNOTSUPP;
4648
4649 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4650
4651 path = path_join(u->manager->lookup_paths.transient, u->id);
4652 if (!path)
4653 return -ENOMEM;
4654
4655 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4656 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4657
4658 RUN_WITH_UMASK(0022) {
4659 f = fopen(path, "we");
4660 if (!f)
4661 return -errno;
4662 }
4663
4664 safe_fclose(u->transient_file);
4665 u->transient_file = f;
4666
4667 free_and_replace(u->fragment_path, path);
4668
4669 u->source_path = mfree(u->source_path);
4670 u->dropin_paths = strv_free(u->dropin_paths);
4671 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4672
4673 u->load_state = UNIT_STUB;
4674 u->load_error = 0;
4675 u->transient = true;
4676
4677 unit_add_to_dbus_queue(u);
4678 unit_add_to_gc_queue(u);
4679
4680 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4681 u->transient_file);
4682
4683 return 0;
4684 }
4685
4686 static int log_kill(pid_t pid, int sig, void *userdata) {
4687 _cleanup_free_ char *comm = NULL;
4688
4689 (void) get_process_comm(pid, &comm);
4690
4691 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4692 only, like for example systemd's own PAM stub process. */
4693 if (comm && comm[0] == '(')
4694 return 0;
4695
4696 log_unit_notice(userdata,
4697 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4698 pid,
4699 strna(comm),
4700 signal_to_string(sig));
4701
4702 return 1;
4703 }
4704
4705 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4706 assert(c);
4707
4708 switch (k) {
4709
4710 case KILL_TERMINATE:
4711 case KILL_TERMINATE_AND_LOG:
4712 *noteworthy = false;
4713 return c->kill_signal;
4714
4715 case KILL_RESTART:
4716 *noteworthy = false;
4717 return restart_kill_signal(c);
4718
4719 case KILL_KILL:
4720 *noteworthy = true;
4721 return c->final_kill_signal;
4722
4723 case KILL_WATCHDOG:
4724 *noteworthy = true;
4725 return c->watchdog_signal;
4726
4727 default:
4728 assert_not_reached("KillOperation unknown");
4729 }
4730 }
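/* With stock kill context defaults (KillSignal=SIGTERM, RestartKillSignal= unset and
 * thus falling back to KillSignal=, FinalKillSignal=SIGKILL, WatchdogSignal=SIGABRT),
 * the mapping above works out to:
 *
 *     KILL_TERMINATE, KILL_TERMINATE_AND_LOG → SIGTERM  (not noteworthy)
 *     KILL_RESTART                           → SIGTERM  (not noteworthy)
 *     KILL_KILL                              → SIGKILL  (noteworthy)
 *     KILL_WATCHDOG                          → SIGABRT  (noteworthy)
 */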
4731
4732 int unit_kill_context(
4733 Unit *u,
4734 KillContext *c,
4735 KillOperation k,
4736 pid_t main_pid,
4737 pid_t control_pid,
4738 bool main_pid_alien) {
4739
4740 bool wait_for_exit = false, send_sighup;
4741 cg_kill_log_func_t log_func = NULL;
4742 int sig, r;
4743
4744 assert(u);
4745 assert(c);
4746
4747 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4748 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4749
4750 if (c->kill_mode == KILL_NONE)
4751 return 0;
4752
4753 bool noteworthy;
4754 sig = operation_to_signal(c, k, &noteworthy);
4755 if (noteworthy)
4756 log_func = log_kill;
4757
4758 send_sighup =
4759 c->send_sighup &&
4760 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4761 sig != SIGHUP;
4762
4763 if (main_pid > 0) {
4764 if (log_func)
4765 log_func(main_pid, sig, u);
4766
4767 r = kill_and_sigcont(main_pid, sig);
4768 if (r < 0 && r != -ESRCH) {
4769 _cleanup_free_ char *comm = NULL;
4770 (void) get_process_comm(main_pid, &comm);
4771
4772 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4773 } else {
4774 if (!main_pid_alien)
4775 wait_for_exit = true;
4776
4777 if (r != -ESRCH && send_sighup)
4778 (void) kill(main_pid, SIGHUP);
4779 }
4780 }
4781
4782 if (control_pid > 0) {
4783 if (log_func)
4784 log_func(control_pid, sig, u);
4785
4786 r = kill_and_sigcont(control_pid, sig);
4787 if (r < 0 && r != -ESRCH) {
4788 _cleanup_free_ char *comm = NULL;
4789 (void) get_process_comm(control_pid, &comm);
4790
4791 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4792 } else {
4793 wait_for_exit = true;
4794
4795 if (r != -ESRCH && send_sighup)
4796 (void) kill(control_pid, SIGHUP);
4797 }
4798 }
4799
4800 if (u->cgroup_path &&
4801 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4802 _cleanup_set_free_ Set *pid_set = NULL;
4803
4804 /* Exclude the main/control pids from being killed via the cgroup */
4805 pid_set = unit_pid_set(main_pid, control_pid);
4806 if (!pid_set)
4807 return -ENOMEM;
4808
4809 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4810 sig,
4811 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4812 pid_set,
4813 log_func, u);
4814 if (r < 0) {
4815 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4816 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4817
4818 } else if (r > 0) {
4819
4820 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4821 * we are running in a container or if this is a delegation unit, simply because cgroup
4822 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4823 * of containers it can be confused easily by left-over directories in the cgroup — which
4824 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4825 * there we get proper events. Hence rely on them. */
4826
4827 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4828 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4829 wait_for_exit = true;
4830
4831 if (send_sighup) {
4832 set_free(pid_set);
4833
4834 pid_set = unit_pid_set(main_pid, control_pid);
4835 if (!pid_set)
4836 return -ENOMEM;
4837
4838 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4839 SIGHUP,
4840 CGROUP_IGNORE_SELF,
4841 pid_set,
4842 NULL, NULL);
4843 }
4844 }
4845 }
4846
4847 return wait_for_exit;
4848 }
4849
4850 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4851 _cleanup_free_ char *p = NULL;
4852 UnitDependencyInfo di;
4853 int r;
4854
4855 assert(u);
4856 assert(path);
4857
4858 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4859 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4860 * be). However, we build a prefix table for all possible prefixes so that newly appearing mount units can easily
4861 * determine which units to make themselves a dependency of. */
4862
4863 if (!path_is_absolute(path))
4864 return -EINVAL;
4865
4866 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4867 if (r < 0)
4868 return r;
4869
4870 p = strdup(path);
4871 if (!p)
4872 return -ENOMEM;
4873
4874 path = path_simplify(p, true);
4875
4876 if (!path_is_normalized(path))
4877 return -EPERM;
4878
4879 if (hashmap_contains(u->requires_mounts_for, path))
4880 return 0;
4881
4882 di = (UnitDependencyInfo) {
4883 .origin_mask = mask
4884 };
4885
4886 r = hashmap_put(u->requires_mounts_for, path, di.data);
4887 if (r < 0)
4888 return r;
4889 p = NULL;
4890
4891 char prefix[strlen(path) + 1];
4892 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4893 Set *x;
4894
4895 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4896 if (!x) {
4897 _cleanup_free_ char *q = NULL;
4898
4899 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4900 if (r < 0)
4901 return r;
4902
4903 q = strdup(prefix);
4904 if (!q)
4905 return -ENOMEM;
4906
4907 x = set_new(NULL);
4908 if (!x)
4909 return -ENOMEM;
4910
4911 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4912 if (r < 0) {
4913 set_free(x);
4914 return r;
4915 }
4916 q = NULL;
4917 }
4918
4919 r = set_put(x, u);
4920 if (r < 0)
4921 return r;
4922 }
4923
4924 return 0;
4925 }
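/* Worked example for the prefix table, with a made-up path: registering
 * "/var/lib/foo" makes PATH_FOREACH_PREFIX_MORE() visit "/", "/var", "/var/lib" and
 * "/var/lib/foo" itself, adding this unit to the set stored under each of those
 * keys. A mount unit appearing later for any of these mount points can then find all
 * affected units with a single hashmap lookup. */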
4926
4927 int unit_setup_exec_runtime(Unit *u) {
4928 ExecRuntime **rt;
4929 size_t offset;
4930 Unit *other;
4931 Iterator i;
4932 void *v;
4933 int r;
4934
4935 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4936 assert(offset > 0);
4937
4938 /* Check whether there already is an ExecRuntime for this unit. */
4939 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4940 if (*rt)
4941 return 0;
4942
4943 /* Try to get it from somebody else */
4944 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4945 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4946 if (r == 1)
4947 return 1;
4948 }
4949
4950 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4951 }
4952
4953 int unit_setup_dynamic_creds(Unit *u) {
4954 ExecContext *ec;
4955 DynamicCreds *dcreds;
4956 size_t offset;
4957
4958 assert(u);
4959
4960 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4961 assert(offset > 0);
4962 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4963
4964 ec = unit_get_exec_context(u);
4965 assert(ec);
4966
4967 if (!ec->dynamic_user)
4968 return 0;
4969
4970 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4971 }
4972
4973 bool unit_type_supported(UnitType t) {
4974 if (_unlikely_(t < 0))
4975 return false;
4976 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4977 return false;
4978
4979 if (!unit_vtable[t]->supported)
4980 return true;
4981
4982 return unit_vtable[t]->supported();
4983 }
4984
4985 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4986 int r;
4987
4988 assert(u);
4989 assert(where);
4990
4991 r = dir_is_empty(where);
4992 if (r > 0 || r == -ENOTDIR)
4993 return;
4994 if (r < 0) {
4995 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4996 return;
4997 }
4998
4999 log_struct(LOG_NOTICE,
5000 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5001 LOG_UNIT_ID(u),
5002 LOG_UNIT_INVOCATION_ID(u),
5003 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5004 "WHERE=%s", where);
5005 }
5006
5007 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5008 _cleanup_free_ char *canonical_where = NULL;
5009 int r;
5010
5011 assert(u);
5012 assert(where);
5013
5014 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5015 if (r < 0) {
5016 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5017 return 0;
5018 }
5019
5020 /* We will happily ignore a trailing slash (or any redundant slashes) */
5021 if (path_equal(where, canonical_where))
5022 return 0;
5023
5024 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5025 log_struct(LOG_ERR,
5026 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5027 LOG_UNIT_ID(u),
5028 LOG_UNIT_INVOCATION_ID(u),
5029 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5030 "WHERE=%s", where);
5031
5032 return -ELOOP;
5033 }
5034
5035 bool unit_is_pristine(Unit *u) {
5036 assert(u);
5037
5038 /* Check if the unit already exists or is already around,
5039 * in a number of different ways. Note that to cater for unit
5040 * types such as slice, we are generally fine with units that
5041 * are marked UNIT_LOADED even though nothing was actually
5042 * loaded, as those unit types don't require a file on disk. */
5043
5044 return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5045 !u->fragment_path &&
5046 !u->source_path &&
5047 strv_isempty(u->dropin_paths) &&
5048 !u->job &&
5049 !u->merged_into;
5050 }
5051
5052 pid_t unit_control_pid(Unit *u) {
5053 assert(u);
5054
5055 if (UNIT_VTABLE(u)->control_pid)
5056 return UNIT_VTABLE(u)->control_pid(u);
5057
5058 return 0;
5059 }
5060
5061 pid_t unit_main_pid(Unit *u) {
5062 assert(u);
5063
5064 if (UNIT_VTABLE(u)->main_pid)
5065 return UNIT_VTABLE(u)->main_pid(u);
5066
5067 return 0;
5068 }
5069
5070 static void unit_unref_uid_internal(
5071 Unit *u,
5072 uid_t *ref_uid,
5073 bool destroy_now,
5074 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5075
5076 assert(u);
5077 assert(ref_uid);
5078 assert(_manager_unref_uid);
5079
5080 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5081 * gid_t are actually the same type, with the same validity rules.
5082 *
5083 * Drops a reference to UID/GID from a unit. */
5084
5085 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5086 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5087
5088 if (!uid_is_valid(*ref_uid))
5089 return;
5090
5091 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5092 *ref_uid = UID_INVALID;
5093 }
5094
5095 static void unit_unref_uid(Unit *u, bool destroy_now) {
5096 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5097 }
5098
5099 static void unit_unref_gid(Unit *u, bool destroy_now) {
5100 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5101 }
5102
5103 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5104 assert(u);
5105
5106 unit_unref_uid(u, destroy_now);
5107 unit_unref_gid(u, destroy_now);
5108 }
5109
5110 static int unit_ref_uid_internal(
5111 Unit *u,
5112 uid_t *ref_uid,
5113 uid_t uid,
5114 bool clean_ipc,
5115 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5116
5117 int r;
5118
5119 assert(u);
5120 assert(ref_uid);
5121 assert(uid_is_valid(uid));
5122 assert(_manager_ref_uid);
5123
5124 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5125 * are actually the same type, and have the same validity rules.
5126 *
5127 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5128 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5129 * drops to zero. */
5130
5131 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5132 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5133
5134 if (*ref_uid == uid)
5135 return 0;
5136
5137 if (uid_is_valid(*ref_uid)) /* Already set? */
5138 return -EBUSY;
5139
5140 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5141 if (r < 0)
5142 return r;
5143
5144 *ref_uid = uid;
5145 return 1;
5146 }
5147
5148 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5149 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5150 }
5151
5152 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5153 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5154 }
5155
5156 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5157 int r = 0, q = 0;
5158
5159 assert(u);
5160
5161 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5162
5163 if (uid_is_valid(uid)) {
5164 r = unit_ref_uid(u, uid, clean_ipc);
5165 if (r < 0)
5166 return r;
5167 }
5168
5169 if (gid_is_valid(gid)) {
5170 q = unit_ref_gid(u, gid, clean_ipc);
5171 if (q < 0) {
5172 if (r > 0)
5173 unit_unref_uid(u, false);
5174
5175 return q;
5176 }
5177 }
5178
5179 return r > 0 || q > 0;
5180 }
5181
5182 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5183 ExecContext *c;
5184 int r;
5185
5186 assert(u);
5187
5188 c = unit_get_exec_context(u);
5189
5190 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5191 if (r < 0)
5192 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5193
5194 return r;
5195 }
5196
5197 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5198 int r;
5199
5200 assert(u);
5201
5202 /* This is invoked whenever one of the forked off processes lets us know the UID/GID that its user or group name
5203 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5204 * objects when no service references the UID/GID anymore. */
5205
5206 r = unit_ref_uid_gid(u, uid, gid);
5207 if (r > 0)
5208 unit_add_to_dbus_queue(u);
5209 }
5210
5211 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5212 int r;
5213
5214 assert(u);
5215
5216 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5217
5218 if (sd_id128_equal(u->invocation_id, id))
5219 return 0;
5220
5221 if (!sd_id128_is_null(u->invocation_id))
5222 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5223
5224 if (sd_id128_is_null(id)) {
5225 r = 0;
5226 goto reset;
5227 }
5228
5229 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5230 if (r < 0)
5231 goto reset;
5232
5233 u->invocation_id = id;
5234 sd_id128_to_string(id, u->invocation_id_string);
5235
5236 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5237 if (r < 0)
5238 goto reset;
5239
5240 return 0;
5241
5242 reset:
5243 u->invocation_id = SD_ID128_NULL;
5244 u->invocation_id_string[0] = 0;
5245 return r;
5246 }
5247
5248 int unit_acquire_invocation_id(Unit *u) {
5249 sd_id128_t id;
5250 int r;
5251
5252 assert(u);
5253
5254 r = sd_id128_randomize(&id);
5255 if (r < 0)
5256 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5257
5258 r = unit_set_invocation_id(u, id);
5259 if (r < 0)
5260 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5261
5262 unit_add_to_dbus_queue(u);
5263 return 0;
5264 }
5265
5266 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5267 int r;
5268
5269 assert(u);
5270 assert(p);
5271
5272 /* Copy parameters from manager */
5273 r = manager_get_effective_environment(u->manager, &p->environment);
5274 if (r < 0)
5275 return r;
5276
5277 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5278 p->cgroup_supported = u->manager->cgroup_supported;
5279 p->prefix = u->manager->prefix;
5280 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5281
5282 /* Copy parameters from unit */
5283 p->cgroup_path = u->cgroup_path;
5284 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5285
5286 return 0;
5287 }
5288
5289 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5290 int r;
5291
5292 assert(u);
5293 assert(ret);
5294
5295 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5296 * and > 0 in the parent. *ret is always filled in with the child's PID. */
5297
5298 (void) unit_realize_cgroup(u);
5299
5300 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5301 if (r != 0)
5302 return r;
5303
5304 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5305 (void) ignore_signals(SIGPIPE, -1);
5306
5307 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5308
5309 if (u->cgroup_path) {
5310 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5311 if (r < 0) {
5312 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5313 _exit(EXIT_CGROUP);
5314 }
5315 }
5316
5317 return 0;
5318 }
5319
5320 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5321 pid_t pid;
5322 int r;
5323
5324 assert(u);
5325 assert(ret_pid);
5326
5327 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5328 if (r < 0)
5329 return r;
5330 if (r == 0) {
5331 int ret = EXIT_SUCCESS;
5332 char **i;
5333
5334 STRV_FOREACH(i, paths) {
5335 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5336 if (r < 0) {
5337 log_error_errno(r, "Failed to remove '%s': %m", *i);
5338 ret = EXIT_FAILURE;
5339 }
5340 }
5341
5342 _exit(ret);
5343 }
5344
5345 r = unit_watch_pid(u, pid, true);
5346 if (r < 0)
5347 return r;
5348
5349 *ret_pid = pid;
5350 return 0;
5351 }
5352
5353 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5354 assert(u);
5355 assert(d >= 0);
5356 assert(d < _UNIT_DEPENDENCY_MAX);
5357 assert(other);
5358
5359 if (di.origin_mask == 0 && di.destination_mask == 0) {
5360 /* No bit set anymore, let's drop the whole entry */
5361 assert_se(hashmap_remove(u->dependencies[d], other));
5362 log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5363 } else
5364 /* Mask was reduced, let's update the entry */
5365 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5366 }
5367
5368 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5369 UnitDependency d;
5370
5371 assert(u);
5372
5373 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5374
5375 if (mask == 0)
5376 return;
5377
5378 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5379 bool done;
5380
5381 do {
5382 UnitDependencyInfo di;
5383 Unit *other;
5384 Iterator i;
5385
5386 done = true;
5387
5388 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5389 UnitDependency q;
5390
5391 if ((di.origin_mask & ~mask) == di.origin_mask)
5392 continue;
5393 di.origin_mask &= ~mask;
5394 unit_update_dependency_mask(u, d, other, di);
5395
5396 /* We updated the dependency from our unit to the other unit now. But most dependencies
5397 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5398 * all dependency types on the other unit and delete all those which point to us and
5399 * have the right mask set. */
5400
5401 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5402 UnitDependencyInfo dj;
5403
5404 dj.data = hashmap_get(other->dependencies[q], u);
5405 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5406 continue;
5407 dj.destination_mask &= ~mask;
5408
5409 unit_update_dependency_mask(other, q, u, dj);
5410 }
5411
5412 unit_add_to_gc_queue(other);
5413
5414 done = false;
5415 break;
5416 }
5417
5418 } while (!done);
5419 }
5420 }
5421
5422 static int unit_get_invocation_path(Unit *u, char **ret) {
5423 char *p;
5424 int r;
5425
5426 assert(u);
5427 assert(ret);
5428
5429 if (MANAGER_IS_SYSTEM(u->manager))
5430 p = strjoin("/run/systemd/units/invocation:", u->id);
5431 else {
5432 _cleanup_free_ char *user_path = NULL;
5433 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5434 if (r < 0)
5435 return r;
5436 p = strjoin(user_path, u->id);
5437 }
5438
5439 if (!p)
5440 return -ENOMEM;
5441
5442 *ret = p;
5443 return 0;
5444 }
5445
5446 static int unit_export_invocation_id(Unit *u) {
5447 _cleanup_free_ char *p = NULL;
5448 int r;
5449
5450 assert(u);
5451
5452 if (u->exported_invocation_id)
5453 return 0;
5454
5455 if (sd_id128_is_null(u->invocation_id))
5456 return 0;
5457
5458 r = unit_get_invocation_path(u, &p);
5459 if (r < 0)
5460 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5461
5462 r = symlink_atomic(u->invocation_id_string, p);
5463 if (r < 0)
5464 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5465
5466 u->exported_invocation_id = true;
5467 return 0;
5468 }
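/* The "file" created above is a symlink whose *target* carries the data, e.g. (unit
 * name and ID made up):
 *
 *     /run/systemd/units/invocation:foo.service -> 4ac6deb52dc44ad1a56ccd4ac26b69b4
 *
 * A consumer such as journald can thus read it with a single readlink(), without
 * having to open() anything. */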
5469
5470 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5471 const char *p;
5472 char buf[2];
5473 int r;
5474
5475 assert(u);
5476 assert(c);
5477
5478 if (u->exported_log_level_max)
5479 return 0;
5480
5481 if (c->log_level_max < 0)
5482 return 0;
5483
5484 assert(c->log_level_max <= 7);
5485
5486 buf[0] = '0' + c->log_level_max;
5487 buf[1] = 0;
5488
5489 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5490 r = symlink_atomic(buf, p);
5491 if (r < 0)
5492 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5493
5494 u->exported_log_level_max = true;
5495 return 0;
5496 }
5497
5498 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5499 _cleanup_close_ int fd = -1;
5500 struct iovec *iovec;
5501 const char *p;
5502 char *pattern;
5503 le64_t *sizes;
5504 ssize_t n;
5505 size_t i;
5506 int r;
5507
5508 if (u->exported_log_extra_fields)
5509 return 0;
5510
5511 if (c->n_log_extra_fields <= 0)
5512 return 0;
5513
5514 sizes = newa(le64_t, c->n_log_extra_fields);
5515 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5516
5517 for (i = 0; i < c->n_log_extra_fields; i++) {
5518 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5519
5520 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5521 iovec[i*2+1] = c->log_extra_fields[i];
5522 }
5523
5524 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5525 pattern = strjoina(p, ".XXXXXX");
5526
5527 fd = mkostemp_safe(pattern);
5528 if (fd < 0)
5529 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5530
5531 n = writev(fd, iovec, c->n_log_extra_fields*2);
5532 if (n < 0) {
5533 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5534 goto fail;
5535 }
5536
5537 (void) fchmod(fd, 0644);
5538
5539 if (rename(pattern, p) < 0) {
5540 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5541 goto fail;
5542 }
5543
5544 u->exported_log_extra_fields = true;
5545 return 0;
5546
5547 fail:
5548 (void) unlink(pattern);
5549 return r;
5550 }
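/* Serialization format sketch: for each extra field an 8-byte little-endian length
 * is written, followed by the raw "FIELD=value" payload. A single made-up field
 * "FOO=bar" (7 bytes) would serialize as:
 *
 *     07 00 00 00 00 00 00 00  46 4f 4f 3d 62 61 72
 */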
5551
5552 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5553 _cleanup_free_ char *buf = NULL;
5554 const char *p;
5555 int r;
5556
5557 assert(u);
5558 assert(c);
5559
5560 if (u->exported_log_ratelimit_interval)
5561 return 0;
5562
5563 if (c->log_ratelimit_interval_usec == 0)
5564 return 0;
5565
5566 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5567
5568 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5569 return log_oom();
5570
5571 r = symlink_atomic(buf, p);
5572 if (r < 0)
5573 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5574
5575 u->exported_log_ratelimit_interval = true;
5576 return 0;
5577 }
5578
5579 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5580 _cleanup_free_ char *buf = NULL;
5581 const char *p;
5582 int r;
5583
5584 assert(u);
5585 assert(c);
5586
5587 if (u->exported_log_ratelimit_burst)
5588 return 0;
5589
5590 if (c->log_ratelimit_burst == 0)
5591 return 0;
5592
5593 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5594
5595 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5596 return log_oom();
5597
5598 r = symlink_atomic(buf, p);
5599 if (r < 0)
5600 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5601
5602 u->exported_log_ratelimit_burst = true;
5603 return 0;
5604 }
5605
5606 void unit_export_state_files(Unit *u) {
5607 const ExecContext *c;
5608
5609 assert(u);
5610
5611 if (!u->id)
5612 return;
5613
5614 if (MANAGER_IS_TEST_RUN(u->manager))
5615 return;
5616
5617 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5618 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5619 * the IPC system itself and PID 1 also log to the journal.
5620 *
5621 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as an
5622 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5623 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5624 * namespace at least.
5625 *
5626 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5627 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5628 * them with one. */
5629
5630 (void) unit_export_invocation_id(u);
5631
5632 if (!MANAGER_IS_SYSTEM(u->manager))
5633 return;
5634
5635 c = unit_get_exec_context(u);
5636 if (c) {
5637 (void) unit_export_log_level_max(u, c);
5638 (void) unit_export_log_extra_fields(u, c);
5639 (void) unit_export_log_ratelimit_interval(u, c);
5640 (void) unit_export_log_ratelimit_burst(u, c);
5641 }
5642 }
5643
5644 void unit_unlink_state_files(Unit *u) {
5645 const char *p;
5646
5647 assert(u);
5648
5649 if (!u->id)
5650 return;
5651
5652 /* Undoes the effect of unit_export_state_files() */
5653
5654 if (u->exported_invocation_id) {
5655 _cleanup_free_ char *invocation_path = NULL;
5656 int r = unit_get_invocation_path(u, &invocation_path);
5657 if (r >= 0) {
5658 (void) unlink(invocation_path);
5659 u->exported_invocation_id = false;
5660 }
5661 }
5662
5663 if (!MANAGER_IS_SYSTEM(u->manager))
5664 return;
5665
5666 if (u->exported_log_level_max) {
5667 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5668 (void) unlink(p);
5669
5670 u->exported_log_level_max = false;
5671 }
5672
5673 if (u->exported_log_extra_fields) {
5674 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5675 (void) unlink(p);
5676
5677 u->exported_log_extra_fields = false;
5678 }
5679
5680 if (u->exported_log_ratelimit_interval) {
5681 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5682 (void) unlink(p);
5683
5684 u->exported_log_ratelimit_interval = false;
5685 }
5686
5687 if (u->exported_log_ratelimit_burst) {
5688 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5689 (void) unlink(p);
5690
5691 u->exported_log_ratelimit_burst = false;
5692 }
5693 }
5694
5695 int unit_prepare_exec(Unit *u) {
5696 int r;
5697
5698 assert(u);
5699
5700 /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5701 * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5702 r = bpf_firewall_load_custom(u);
5703 if (r < 0)
5704 return r;
5705
5706 /* Prepares everything so that we can fork off a process for this unit */
5707
5708 (void) unit_realize_cgroup(u);
5709
5710 if (u->reset_accounting) {
5711 (void) unit_reset_accounting(u);
5712 u->reset_accounting = false;
5713 }
5714
5715 unit_export_state_files(u);
5716
5717 r = unit_setup_exec_runtime(u);
5718 if (r < 0)
5719 return r;
5720
5721 r = unit_setup_dynamic_creds(u);
5722 if (r < 0)
5723 return r;
5724
5725 return 0;
5726 }
5727
5728 static int log_leftover(pid_t pid, int sig, void *userdata) {
5729 _cleanup_free_ char *comm = NULL;
5730
5731 (void) get_process_comm(pid, &comm);
5732
5733 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5734 return 0;
5735
5736 log_unit_warning(userdata,
5737 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5738 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5739 pid, strna(comm));
5740
5741 return 1;
5742 }
5743
5744 int unit_warn_leftover_processes(Unit *u) {
5745 assert(u);
5746
5747 (void) unit_pick_cgroup_path(u);
5748
5749 if (!u->cgroup_path)
5750 return 0;
5751
5752 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5753 }
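/* Note that signal 0 is passed to cg_kill_recursive() above: nothing is actually
 * killed, the call is merely used to enumerate the cgroup's processes and invoke
 * log_leftover() on each of them. */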
5754
5755 bool unit_needs_console(Unit *u) {
5756 ExecContext *ec;
5757 UnitActiveState state;
5758
5759 assert(u);
5760
5761 state = unit_active_state(u);
5762
5763 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5764 return false;
5765
5766 if (UNIT_VTABLE(u)->needs_console)
5767 return UNIT_VTABLE(u)->needs_console(u);
5768
5769 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5770 ec = unit_get_exec_context(u);
5771 if (!ec)
5772 return false;
5773
5774 return exec_context_may_touch_console(ec);
5775 }
5776
5777 const char *unit_label_path(Unit *u) {
5778 const char *p;
5779
5780 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5781 * when validating access checks. */
5782
5783 p = u->source_path ?: u->fragment_path;
5784 if (!p)
5785 return NULL;
5786
5787 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5788 if (path_equal(p, "/dev/null"))
5789 return NULL;
5790
5791 return p;
5792 }
5793
5794 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5795 int r;
5796
5797 assert(u);
5798
5799 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5800 * and not a kernel thread either */
5801
5802 /* First, a simple range check */
5803 if (!pid_is_valid(pid))
5804 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5805
5806 /* Some extra safety check */
5807 if (pid == 1 || pid == getpid_cached())
5808 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5809
5810 /* Don't even begin to bother with kernel threads */
5811 r = is_kernel_thread(pid);
5812 if (r == -ESRCH)
5813 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5814 if (r < 0)
5815 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5816 if (r > 0)
5817 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5818
5819 return 0;
5820 }
5821
5822 void unit_log_success(Unit *u) {
5823 assert(u);
5824
5825 log_struct(LOG_INFO,
5826 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5827 LOG_UNIT_ID(u),
5828 LOG_UNIT_INVOCATION_ID(u),
5829 LOG_UNIT_MESSAGE(u, "Succeeded."));
5830 }
5831
5832 void unit_log_failure(Unit *u, const char *result) {
5833 assert(u);
5834 assert(result);
5835
5836 log_struct(LOG_WARNING,
5837 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5838 LOG_UNIT_ID(u),
5839 LOG_UNIT_INVOCATION_ID(u),
5840 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5841 "UNIT_RESULT=%s", result);
5842 }
5843
5844 void unit_log_skip(Unit *u, const char *result) {
5845 assert(u);
5846 assert(result);
5847
5848 log_struct(LOG_INFO,
5849 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5850 LOG_UNIT_ID(u),
5851 LOG_UNIT_INVOCATION_ID(u),
5852 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5853 "UNIT_RESULT=%s", result);
5854 }
5855
5856 void unit_log_process_exit(
5857 Unit *u,
5858 const char *kind,
5859 const char *command,
5860 bool success,
5861 int code,
5862 int status) {
5863
5864 int level;
5865
5866 assert(u);
5867 assert(kind);
5868
5869 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5870 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5871 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5872 * WARNING. */
5873 if (success)
5874 level = LOG_DEBUG;
5875 else if (code == CLD_EXITED)
5876 level = LOG_NOTICE;
5877 else
5878 level = LOG_WARNING;
5879
5880 log_struct(level,
5881 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5882 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5883 kind,
5884 sigchld_code_to_string(code), status,
5885 strna(code == CLD_EXITED
5886 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5887 : signal_to_string(status))),
5888 "EXIT_CODE=%s", sigchld_code_to_string(code),
5889 "EXIT_STATUS=%i", status,
5890 "COMMAND=%s", strna(command),
5891 LOG_UNIT_ID(u),
5892 LOG_UNIT_INVOCATION_ID(u));
5893 }
5894
5895 int unit_exit_status(Unit *u) {
5896 assert(u);
5897
5898 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5899 * 0…255 if there's something to propagate, -EOPNOTSUPP if the concept does not apply to this unit type, -ENODATA
5900 * if no data is currently known (for example because the unit hasn't deactivated yet) and -EBADE if the main
5901 * service process has exited abnormally (signal/coredump). */
5902
5903 if (!UNIT_VTABLE(u)->exit_status)
5904 return -EOPNOTSUPP;
5905
5906 return UNIT_VTABLE(u)->exit_status(u);
5907 }
5908
5909 int unit_failure_action_exit_status(Unit *u) {
5910 int r;
5911
5912 assert(u);
5913
5914 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5915
5916 if (u->failure_action_exit_status >= 0)
5917 return u->failure_action_exit_status;
5918
5919 r = unit_exit_status(u);
5920 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5921 return 255;
5922
5923 return r;
5924 }
5925
5926 int unit_success_action_exit_status(Unit *u) {
5927 int r;
5928
5929 assert(u);
5930
5931 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5932
5933 if (u->success_action_exit_status >= 0)
5934 return u->success_action_exit_status;
5935
5936 r = unit_exit_status(u);
5937 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5938 return 255;
5939
5940 return r;
5941 }
5942
5943 int unit_test_trigger_loaded(Unit *u) {
5944 Unit *trigger;
5945
5946 /* Tests whether the unit to trigger is loaded */
5947
5948 trigger = UNIT_TRIGGER(u);
5949 if (!trigger)
5950 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5951 "Refusing to start, no unit to trigger.");
5952 if (trigger->load_state != UNIT_LOADED)
5953 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5954 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5955
5956 return 0;
5957 }
5958
5959 void unit_destroy_runtime_directory(Unit *u, const ExecContext *context) {
5960 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
5961 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
5962 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
5963 }
5964
5965 int unit_clean(Unit *u, ExecCleanMask mask) {
5966 UnitActiveState state;
5967
5968 assert(u);
5969
5970 /* Special return values:
5971 *
5972 * -EOPNOTSUPP → cleaning not supported for this unit type
5973 * -EUNATCH → cleaning not defined for this resource type
5974 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5975 * a job queued or similar
5976 */
5977
5978 if (!UNIT_VTABLE(u)->clean)
5979 return -EOPNOTSUPP;
5980
5981 if (mask == 0)
5982 return -EUNATCH;
5983
5984 if (u->load_state != UNIT_LOADED)
5985 return -EBUSY;
5986
5987 if (u->job)
5988 return -EBUSY;
5989
5990 state = unit_active_state(u);
5991 if (!IN_SET(state, UNIT_INACTIVE))
5992 return -EBUSY;
5993
5994 return UNIT_VTABLE(u)->clean(u, mask);
5995 }
5996
5997 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5998 assert(u);
5999
6000 if (!UNIT_VTABLE(u)->clean ||
6001 u->load_state != UNIT_LOADED) {
6002 *ret = 0;
6003 return 0;
6004 }
6005
6006 /* When the clean() method is set, can_clean() really should be set too */
6007 assert(UNIT_VTABLE(u)->can_clean);
6008
6009 return UNIT_VTABLE(u)->can_clean(u, ret);
6010 }
6011
6012 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6013 [COLLECT_INACTIVE] = "inactive",
6014 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6015 };
6016
6017 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
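/* DEFINE_STRING_TABLE_LOOKUP() expands to the usual helper pair,
 * collect_mode_to_string() and collect_mode_from_string(), backed by the table
 * above; e.g. collect_mode_from_string("inactive-or-failed") yields
 * COLLECT_INACTIVE_OR_FAILED. */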