/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdlib.h>
#include <sys/prctl.h>
#include <unistd.h>

#include "sd-id128.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bpf-firewall.h"
#include "bus-common-errors.h"
#include "bus-util.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dropin.h"
#include "escape.h"
#include "execute.h"
#include "fd-util.h"
#include "fileio-label.h"
#include "fileio.h"
#include "format-util.h"
#include "fs-util.h"
#include "id128-util.h"
#include "io-util.h"
#include "install.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "log.h"
#include "macro.h"
#include "missing_audit.h"
#include "mkdir.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "rm-rf.h"
#include "serialize.h"
#include "set.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "special.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "unit.h"
#include "user-util.h"
#include "virt.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
#define MENTIONWORTHY_IP_BYTES (0ULL)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
#define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL)  /* 10 MB */
#define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */

const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE] = &service_vtable,
        [UNIT_SOCKET] = &socket_vtable,
        [UNIT_TARGET] = &target_vtable,
        [UNIT_DEVICE] = &device_vtable,
        [UNIT_MOUNT] = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP] = &swap_vtable,
        [UNIT_TIMER] = &timer_vtable,
        [UNIT_PATH] = &path_vtable,
        [UNIT_SLICE] = &slice_vtable,
        [UNIT_SCOPE] = &scope_vtable,
};

static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);

Unit *unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->names = set_new(&string_hash_ops);
        if (!u->names)
                return mfree(u);

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}

int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);

        return r;
}
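
/* Illustrative usage sketch, not part of the original file: a type-specific
 * implementation would typically allocate its object via unit_new_for_name(),
 * passing the size of a structure that embeds Unit as its first member.
 * "Service" below stands in for any such type:
 *
 *     Unit *u = NULL;
 *     int r = unit_new_for_name(m, sizeof(Service), "foo.service", &u);
 *     if (r < 0)
 *             return r;   // -ENOMEM from unit_new(), or a unit_add_name() error
 */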

bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return set_contains(u->names, (char*) name);
}

static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy the manager defaults into the cgroup
                 * context, _before_ the rest of the settings
                 * have been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                        EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}

int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &s);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                s = strdup(text);
                if (!s)
                        return -ENOMEM;
        }

        if (set_contains(u->names, s))
                return 0;
        if (hashmap_contains(u->manager->units, s))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exists when adding name '%s': %m", text);

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", text);

        t = unit_name_to_type(s);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
239 "failed to to derive unit type from name '%s': %m", text);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, text);

        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", text);

        if (i && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", text);

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "instance is illegal: u->type(%d), u->instance(%s) and i(%s) for name '%s': %m",
                                            u->type, u->instance, i, text);

        if (!unit_type_may_alias(t) && !set_isempty(u->names))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST), "symlinks are not allowed for name '%s': %m", text);

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(E2BIG), "too many units: %m");

        r = set_put(u->names, s);
        if (r < 0)
                return r;
        assert(r > 0);

        r = hashmap_put(u->manager->units, s, u);
        if (r < 0) {
                (void) set_remove(u->names, s);
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);
        }

        if (u->type == _UNIT_TYPE_INVALID) {
                u->type = t;
                u->id = s;
                u->instance = TAKE_PTR(i);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

                unit_init(u);
        }

        s = NULL;

        unit_add_to_dbus_queue(u);
        return 0;
}
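
/* Illustrative example, not part of the original file: calling
 * unit_add_name(u, "getty@.service") on a unit whose instance is "tty1"
 * registers "getty@tty1.service", since unit_name_replace_instance() fills
 * in the instance before the name is validated and stored. */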

int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s, *i;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);
        if (!s)
                return -ENOENT;

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);
        if (r < 0)
                return r;

        u->id = s;

        free(u->instance);
        u->instance = i;

        unit_add_to_dbus_queue(u);

        return 0;
}

int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}

bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0)
                        return false;
        }

        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}

void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}

void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}

void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

static void bidi_set_free(Unit *u, Hashmap *h) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        /* Frees the hashmap and makes sure we are dropped from the inverse pointers */

        HASHMAP_FOREACH_KEY(v, other, h, i) {
                UnitDependency d;

                for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                        hashmap_remove(other->dependencies[d], u);

                unit_add_to_gc_queue(other);
        }

        hashmap_free(h);
}

static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}
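
/* Illustrative layout, assuming the usual transient lookup path of the
 * system manager: a transient unit "foo.service" would have its fragment at
 * /run/systemd/transient/foo.service and drop-ins under
 * /run/systemd/transient/foo.service.d/, which is exactly what the unlink()
 * and rmdir() calls above remove. */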

static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}

static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}

void unit_free(Unit *u) {
        UnitDependency d;
        Iterator i;
        char *t;

        if (!u)
                return;

        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
        }

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        set_free(u->ip_bpf_custom_ingress);
        set_free(u->ip_bpf_custom_egress);
        set_free(u->ip_bpf_custom_ingress_installed);
        set_free(u->ip_bpf_custom_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);

        free(u->reboot_arg);

        free(u);
}

UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}

static int set_complete_move(Set **s, Set **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return set_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}

static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
        assert(s);
        assert(other);

        if (!*other)
                return 0;

        if (*s)
                return hashmap_move(*s, *other);
        else
                *s = TAKE_PTR(*other);

        return 0;
}
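
/* A minimal sketch of the "complete move" semantics above (illustrative,
 * not part of the original file):
 *
 *     Hashmap *a = ..., *b = ...;
 *     int r = hashmap_complete_move(&a, &b);
 *     // on success 'a' holds the union of both maps; 'b' is either left
 *     // empty (entries moved) or set to NULL (pointer stolen when 'a'
 *     // was NULL)
 */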

static int merge_names(Unit *u, Unit *other) {
        char *t;
        Iterator i;
        int r;

        assert(u);
        assert(other);

        r = set_complete_move(&u->names, &other->names);
        if (r < 0)
                return r;

        set_free_free(other->names);
        other->names = NULL;
        other->id = NULL;

        SET_FOREACH(t, u->names, i)
                assert_se(hashmap_replace(u->manager->units, t, u) == 0);

        return 0;
}

static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
        unsigned n_reserve;

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /*
         * If u does not have this dependency set allocated, there is no need
         * to reserve anything. In that case other's set will be transferred
         * as a whole to u by complete_move().
         */
        if (!u->dependencies[d])
                return 0;

        /* merge_dependencies() will skip a u-on-u dependency */
        n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);

        return hashmap_reserve(u->dependencies[d], n_reserve);
}

static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Iterator i;
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
                UnitDependency k;

                /* Let's now iterate through the dependencies of that dependent unit, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */
842
843 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
844 if (back == u) {
845 /* Do not add dependencies between u and itself. */
846 if (hashmap_remove(back->dependencies[k], other))
847 maybe_warn_about_dependency(u, other_id, k);
848 } else {
849 UnitDependencyInfo di_u, di_other, di_merged;
850
851 /* Let's drop this dependency between "back" and "other", and let's create it between
852 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
853 * and any such dependency which might already exist */
854
855 di_other.data = hashmap_get(back->dependencies[k], other);
856 if (!di_other.data)
857 continue; /* dependency isn't set, let's try the next one */
858
859 di_u.data = hashmap_get(back->dependencies[k], u);
860
861 di_merged = (UnitDependencyInfo) {
862 .origin_mask = di_u.origin_mask | di_other.origin_mask,
863 .destination_mask = di_u.destination_mask | di_other.destination_mask,
864 };
865
866 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
867 if (r < 0)
868 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
869 assert(r >= 0);
870
871 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
872 }
873 }
874
875 }
876
877 /* Also do not move dependencies on u to itself */
878 back = hashmap_remove(other->dependencies[d], u);
879 if (back)
880 maybe_warn_about_dependency(u, other_id, d);
881
882 /* The move cannot fail. The caller must have performed a reservation. */
883 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
884
885 other->dependencies[d] = hashmap_free(other->dependencies[d]);
886 }
887
888 int unit_merge(Unit *u, Unit *other) {
889 UnitDependency d;
890 const char *other_id = NULL;
891 int r;
892
893 assert(u);
894 assert(other);
895 assert(u->manager == other->manager);
896 assert(u->type != _UNIT_TYPE_INVALID);
897
898 other = unit_follow_merge(other);
899
900 if (other == u)
901 return 0;
902
903 if (u->type != other->type)
904 return -EINVAL;
905
906 if (!u->instance != !other->instance)
907 return -EINVAL;
908
909 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
910 return -EEXIST;
911
912 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
913 return -EEXIST;
914
915 if (other->job)
916 return -EEXIST;
917
918 if (other->nop_job)
919 return -EEXIST;
920
921 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
922 return -EEXIST;
923
924 if (other->id)
925 other_id = strdupa(other->id);
926
927 /* Make reservations to ensure merge_dependencies() won't fail */
928 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
929 r = reserve_dependencies(u, other, d);
                /*
                 * We don't roll back reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */
                if (r < 0)
                        return r;
        }

        /* Merge names */
        r = merge_names(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}

int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}

Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}

int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;
        char **dp;
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = path_join(u->manager->prefix[dt], *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}

const char *unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}

const char *unit_status_string(Unit *u) {
        assert(u);

        if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
                return u->id;

        return unit_description(u);
}

static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        const struct {
                UnitDependencyMask mask;
                const char *name;
        } table[] = {
                { UNIT_DEPENDENCY_FILE,               "file"               },
                { UNIT_DEPENDENCY_IMPLICIT,           "implicit"           },
                { UNIT_DEPENDENCY_DEFAULT,            "default"            },
                { UNIT_DEPENDENCY_UDEV,               "udev"               },
                { UNIT_DEPENDENCY_PATH,               "path"               },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT,  "mountinfo-default"  },
                { UNIT_DEPENDENCY_PROC_SWAP,          "proc-swap"          },
        };
        size_t i;

        assert(f);
        assert(kind);
        assert(space);

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (mask == 0)
                        break;

                if (FLAGS_SET(mask, table[i].mask)) {
                        if (*space)
                                fputc(' ', f);
                        else
                                *space = true;

                        fputs(kind, f);
                        fputs("-", f);
                        fputs(table[i].name, f);

                        mask &= ~table[i].mask;
                }
        }

        assert(mask == 0);
}
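
/* Example of the format produced above: for kind "origin" and mask
 * UNIT_DEPENDENCY_FILE|UNIT_DEPENDENCY_UDEV this prints "origin-file
 * origin-udev", with *space tracking whether a separating blank is needed. */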

void unit_dump(Unit *u, FILE *f, const char *prefix) {
        char *t, **j;
        UnitDependency d;
        Iterator i;
        const char *prefix2;
        char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
        Unit *following;
        _cleanup_set_free_ Set *following_set = NULL;
        const char *n;
        CGroupMask m;
        int r;

        assert(u);
        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

        fprintf(f,
                "%s-> Unit %s:\n",
                prefix, u->id);

        SET_FOREACH(t, u->names, i)
                if (!streq(t, u->id))
                        fprintf(f, "%s\tAlias: %s\n", prefix, t);

        fprintf(f,
                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tMay GC: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tSlice: %s\n"
                "%s\tCGroup: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
        }

        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_own_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_members_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
        }

        m = unit_get_delegate_mask(u);
        if (m != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
        }

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
        if (following)
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
        if (r >= 0) {
                Unit *other;

                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
        }

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

        if (u->source_path)
                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->failure_action_exit_status >= 0)
                fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
        if (u->success_action_exit_status >= 0)
                fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                fprintf(f,
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                fprintf(f,
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;
                Unit *other;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
                        bool space = false;

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                        bool space = false;

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

                        fputs(")\n", f);
                }
        }

        if (u->load_state == UNIT_LOADED) {

                fprintf(f,
                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                fprintf(f,
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

        if (u->job)
                job_dump(u->job, f, prefix2);

        if (u->nop_job)
                job_dump(u->nop_job, f, prefix2);
}

/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which have not, at least without doing complicated
         * book-keeping, so let's always reread all drop-ins. */
        return unit_load_dropin(unit_follow_merge(u));
}

void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}

int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}

static int unit_add_slice_dependencies(Unit *u) {
        UnitDependencyMask mask;
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}

static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        Iterator i;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
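
/* Illustrative example, not part of the original file: for a unit carrying
 * RequiresMountsFor=/var/lib/foo, PATH_FOREACH_PREFIX_MORE above walks "/",
 * "/var", "/var/lib" and "/var/lib/foo", so dependencies end up on
 * "-.mount", "var.mount", "var-lib.mount" and "var-lib-foo.mount", assuming
 * the usual unit_name_from_path() escaping. */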

static int unit_add_startup_units(Unit *u) {
        CGroupContext *c;
        int r;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
            c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
            c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
                return 0;

        r = set_ensure_allocated(&u->manager->startup_units, NULL);
        if (r < 0)
                return r;

        return set_put(u->manager->startup_units, u);
}

int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        log_unit_error(u, "More than one OnFailure= dependency specified, but OnFailureJobMode=isolate set. Refusing.");
                        r = -ENOEXEC;
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
         * return ENOEXEC to ensure units are placed in this state after loading */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC ? UNIT_BAD_SETTING :
                                        UNIT_ERROR;
        u->load_error = r;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}

_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}

static bool unit_test_condition(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->condition_timestamp);
        u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        assert(u);

        dual_timestamp_get(&u->assert_timestamp);
        u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);

        unit_add_to_dbus_queue(u);

        return u->assert_result;
}

void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_status_string(u);
        if (log_get_show_color())
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}

int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}
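
/* Illustrative note: the rate limit consulted above is set up in unit_new()
 * from the manager's DefaultStartLimitIntervalSec=/DefaultStartLimitBurst=
 * settings (10 s and 5 attempts under stock configuration, an assumption
 * here), so e.g. a sixth start request within 10 seconds would return
 * -ECANCELED. */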

bool unit_shall_confirm_spawn(Unit *u) {
        assert(u);

        if (manager_is_confirm_spawn_disabled(u->manager))
                return false;

        /* For some reason units remaining in the same process group
         * as PID 1 fail to acquire the console even if it's not used
         * by any process. So skip the confirmation question for them. */
        return !unit_get_exec_context(u)->same_pgrp;
}

static bool unit_verify_deps(Unit *u) {
        Unit *other;
        Iterator j;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {

                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}

/* Errors that aren't really errors:
 *         -EALREADY: Unit is already started.
 *         -ECOMM: Condition failed
 *         -EAGAIN: An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR: This unit type does not support starting.
 *         -ECANCELED: Start limit hit, too many requests for now
 *         -EPROTO: Assert failed
 *         -EINVAL: Unit not loaded
 *         -EOPNOTSUPP: Unit type not supported
 *         -ENOLINK: The necessary dependencies are not fulfilled.
 *         -ESTALE: This unit has been started before and can't be started a second time
 *         -ENOENT: This is a triggering unit and unit to trigger is not loaded
 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->start(u);
}
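
/* Illustrative caller pattern, an assumption rather than code from this
 * file: per the error table above, some negative returns are benign:
 *
 *     r = unit_start(u);
 *     if (IN_SET(r, -EALREADY, -ECOMM))
 *             r = 0;    // already active, or condition failed: not fatal
 *     if (r < 0)
 *             return r; // a real error, e.g. -EOPNOTSUPP or -ENOLINK
 */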

bool unit_can_start(Unit *u) {
        assert(u);

        if (u->load_state != UNIT_LOADED)
                return false;

        if (!unit_type_supported(u->type))
                return false;

        /* Scope units may be started only once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
                return false;

        return !!UNIT_VTABLE(u)->start;
}

bool unit_can_isolate(Unit *u) {
        assert(u);

        return unit_can_start(u) &&
                u->allow_isolate;
}

/* Errors:
 *         -EBADR: This unit type does not support stopping.
 *         -EALREADY: Unit is already stopped.
 *         -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}

bool unit_can_stop(Unit *u) {
        assert(u);

        if (!unit_type_supported(u->type))
                return false;

        if (u->perpetual)
                return false;

        return !!UNIT_VTABLE(u)->stop;
}

/* Errors:
 *         -EBADR: This unit type does not support reloading.
 *         -ENOEXEC: Unit is not started.
 *         -EAGAIN: An operation is already in progress. Retry later.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        if (state != UNIT_ACTIVE) {
                log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
                return -ENOEXEC;
        }

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}

bool unit_can_reload(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_reload)
                return UNIT_VTABLE(u)->can_reload(u);

        if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
                return true;

        return UNIT_VTABLE(u)->reload;
}

bool unit_is_unneeded(Unit *u) {
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };
        size_t j;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}

static void check_unneeded_dependencies(Unit *u) {

        static const UnitDependency deps[] = {
                UNIT_REQUIRES,
                UNIT_REQUISITE,
                UNIT_WANTS,
                UNIT_BINDS_TO,
        };
        size_t j;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        for (j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                Iterator i;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
                        unit_submit_to_stop_when_unneeded_queue(other);
        }
}

static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        Iterator i;
        void *v;
        int r;

        assert(u);

        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                stop = true;
                break;
        }

        if (!stop)
                return;

2046 /* If stopping a unit fails continuously we might enter a stop
2047 * loop here, hence rate-limit how often we act on the unit
2048 * being unnecessary after a while. */
2049 assert(other);
2050 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2051 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2052 return;
2053 }
2054
2055 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2056
2057 /* A unit we need to run is gone. Sniff. Let's stop this. */
2058 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2059 if (r < 0)
2060 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2061 }
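/* Illustration (editor's sketch, not part of the original source): the
 * auto_stop_ratelimit guard above follows the generic RateLimit pattern used
 * throughout this codebase; the interval/burst values below are made up:
 *
 *     RateLimit rl = { .interval = 10 * USEC_PER_SEC, .burst = 16 };
 *
 *     if (ratelimit_below(&rl))
 *             act();           // permitted at most 16 times per 10 s window
 *     else
 *             ;                // over budget, skip until the window resets
 */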
2062
2063 static void retroactively_start_dependencies(Unit *u) {
2064 Iterator i;
2065 Unit *other;
2066 void *v;
2067
2068 assert(u);
2069 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2070
2071 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2072 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2073 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2074 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2075
2076 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2077 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2078 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2079 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2080
2081 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2082 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2083 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2084 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2085
2086 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2087 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2088 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2089
2090 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2091 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2092 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2093 }
2094
2095 static void retroactively_stop_dependencies(Unit *u) {
2096 Unit *other;
2097 Iterator i;
2098 void *v;
2099
2100 assert(u);
2101 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2102
2103 /* Pull down units which are bound to us recursively if enabled */
2104 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2106 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2107 }
2108
2109 void unit_start_on_failure(Unit *u) {
2110 Unit *other;
2111 Iterator i;
2112 void *v;
2113 int r;
2114
2115 assert(u);
2116
2117 if (hashmap_isempty(u->dependencies[UNIT_ON_FAILURE]))
2118 return;
2119
2120 log_unit_info(u, "Triggering OnFailure= dependencies.");
2121
2122 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2123 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2124
2125 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2126 if (r < 0)
2127 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2128 }
2129 }
2130
2131 void unit_trigger_notify(Unit *u) {
2132 Unit *other;
2133 Iterator i;
2134 void *v;
2135
2136 assert(u);
2137
2138 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2139 if (UNIT_VTABLE(other)->trigger_notify)
2140 UNIT_VTABLE(other)->trigger_notify(other, u);
2141 }
2142
2143 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2144 if (condition_notice && log_level > LOG_NOTICE)
2145 return LOG_NOTICE;
2146 if (condition_info && log_level > LOG_INFO)
2147 return LOG_INFO;
2148 return log_level;
2149 }
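/* Editor's worked example (not part of the original source), using the
 * MENTIONWORTHY and NOTICEWORTHY thresholds defined at the top of this file:
 *
 *     raise_level(LOG_DEBUG, nsec > MENTIONWORTHY_CPU_NSEC, nsec > NOTICEWORTHY_CPU_NSEC);
 *
 *     nsec = 500ms   -> neither threshold crossed -> stays LOG_DEBUG
 *     nsec = 2s      -> above 1s                  -> raised to LOG_INFO
 *     nsec = 15min   -> above 10min               -> raised to LOG_NOTICE
 *
 * I.e. the second argument is the INFO condition and the third the NOTICE
 * condition, which is the order the callers below pass them in.
 */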
2150
2151 static int unit_log_resources(Unit *u) {
2152 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2153 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2154 _cleanup_free_ char *ingress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2155 int log_level = LOG_DEBUG; /* May be raised if resource consumption crosses a threshold */
2156 size_t n_message_parts = 0, n_iovec = 0;
2157 char* message_parts[1 + 2 + 2 + 1], *t;
2158 nsec_t nsec = NSEC_INFINITY;
2159 CGroupIPAccountingMetric m;
2160 size_t i;
2161 int r;
2162 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2163 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2164 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2165 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2166 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2167 };
2168 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2169 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2170 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2171 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2172 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2173 };
2174
2175 assert(u);
2176
2177 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2178 * accounting was enabled for the unit. It does this in two ways: as a friendly, human-readable string with
2179 * reduced information, and as the complete data in structured fields. */
2180
2181 (void) unit_get_cpu_usage(u, &nsec);
2182 if (nsec != NSEC_INFINITY) {
2183 char buf[FORMAT_TIMESPAN_MAX] = "";
2184
2185 /* Format the CPU time for inclusion in the structured log message */
2186 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2187 r = log_oom();
2188 goto finish;
2189 }
2190 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2191
2192 /* Format the CPU time for inclusion in the human language message string */
2193 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2194 t = strjoin("consumed ", buf, " CPU time");
2195 if (!t) {
2196 r = log_oom();
2197 goto finish;
2198 }
2199
2200 message_parts[n_message_parts++] = t;
2201
2202 log_level = raise_level(log_level,
2203 nsec > MENTIONWORTHY_CPU_NSEC,
2204 nsec > NOTICEWORTHY_CPU_NSEC);
2205 }
2206
2207 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2208 char buf[FORMAT_BYTES_MAX] = "";
2209 uint64_t value = UINT64_MAX;
2210
2211 assert(io_fields[k]);
2212
2213 (void) unit_get_io_accounting(u, k, k > 0, &value);
2214 if (value == UINT64_MAX)
2215 continue;
2216
2217 have_io_accounting = true;
2218 if (value > 0)
2219 any_io = true;
2220
2221 /* Format IO accounting data for inclusion in the structured log message */
2222 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2223 r = log_oom();
2224 goto finish;
2225 }
2226 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2227
2228 /* Format the IO accounting data for inclusion in the human language message string, but only
2229 * for the bytes counters (and not for the operations counters) */
2230 if (k == CGROUP_IO_READ_BYTES) {
2231 assert(!rr);
2232 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2233 if (!rr) {
2234 r = log_oom();
2235 goto finish;
2236 }
2237 } else if (k == CGROUP_IO_WRITE_BYTES) {
2238 assert(!wr);
2239 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2240 if (!wr) {
2241 r = log_oom();
2242 goto finish;
2243 }
2244 }
2245
2246 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2247 log_level = raise_level(log_level,
2248 value > MENTIONWORTHY_IO_BYTES,
2249 value > NOTICEWORTHY_IO_BYTES);
2250 }
2251
2252 if (have_io_accounting) {
2253 if (any_io) {
2254 if (rr)
2255 message_parts[n_message_parts++] = TAKE_PTR(rr);
2256 if (wr)
2257 message_parts[n_message_parts++] = TAKE_PTR(wr);
2258
2259 } else {
2260 char *k;
2261
2262 k = strdup("no IO");
2263 if (!k) {
2264 r = log_oom();
2265 goto finish;
2266 }
2267
2268 message_parts[n_message_parts++] = k;
2269 }
2270 }
2271
2272 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2273 char buf[FORMAT_BYTES_MAX] = "";
2274 uint64_t value = UINT64_MAX;
2275
2276 assert(ip_fields[m]);
2277
2278 (void) unit_get_ip_accounting(u, m, &value);
2279 if (value == UINT64_MAX)
2280 continue;
2281
2282 have_ip_accounting = true;
2283 if (value > 0)
2284 any_traffic = true;
2285
2286 /* Format IP accounting data for inclusion in the structured log message */
2287 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2288 r = log_oom();
2289 goto finish;
2290 }
2291 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2292
2293 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2294 * bytes counters (and not for the packets counters) */
2295 if (m == CGROUP_IP_INGRESS_BYTES) {
2296 assert(!ingress);
2297 ingress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2298 if (!ingress) {
2299 r = log_oom();
2300 goto finish;
2301 }
2302 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2303 assert(!egress);
2304 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2305 if (!egress) {
2306 r = log_oom();
2307 goto finish;
2308 }
2309 }
2310
2311 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2312 log_level = raise_level(log_level,
2313 value > MENTIONWORTHY_IP_BYTES,
2314 value > NOTICEWORTHY_IP_BYTES);
2315 }
2316
2317 if (have_ip_accounting) {
2318 if (any_traffic) {
2319 if (ingress)
2320 message_parts[n_message_parts++] = TAKE_PTR(ingress);
2321 if (egress)
2322 message_parts[n_message_parts++] = TAKE_PTR(egress);
2323
2324 } else {
2325 char *k;
2326
2327 k = strdup("no IP traffic");
2328 if (!k) {
2329 r = log_oom();
2330 goto finish;
2331 }
2332
2333 message_parts[n_message_parts++] = k;
2334 }
2335 }
2336
2337 /* Is there any accounting data available at all? */
2338 if (n_iovec == 0) {
2339 r = 0;
2340 goto finish;
2341 }
2342
2343 if (n_message_parts == 0)
2344 t = strjoina("MESSAGE=", u->id, ": Completed.");
2345 else {
2346 _cleanup_free_ char *joined = NULL;
2347
2348 message_parts[n_message_parts] = NULL;
2349
2350 joined = strv_join(message_parts, ", ");
2351 if (!joined) {
2352 r = log_oom();
2353 goto finish;
2354 }
2355
2356 joined[0] = ascii_toupper(joined[0]);
2357 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2358 }
2359
2360 /* The following four fields are either allocated on the stack or are static strings; hence we don't want to
2361 * free them, and hence don't increase n_iovec for them. */
2362 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2363 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2364
2365 t = strjoina(u->manager->unit_log_field, u->id);
2366 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2367
2368 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2369 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2370
2371 log_struct_iovec(log_level, iovec, n_iovec + 4);
2372 r = 0;
2373
2374 finish:
2375 for (i = 0; i < n_message_parts; i++)
2376 free(message_parts[i]);
2377
2378 for (i = 0; i < n_iovec; i++)
2379 free(iovec[i].iov_base);
2380
2381 return r;
2382
2383 }
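/* Illustration (editor's sketch, not part of the original source): the
 * iovec-based structured logging used above, reduced to its core. The field
 * name below is illustrative, not one this function emits verbatim:
 *
 *     struct iovec iov[2];
 *     char *t = NULL;
 *
 *     if (asprintf(&t, "EXAMPLE_FIELD=%" PRIu64, (uint64_t) 42) < 0)
 *             return log_oom();
 *     iov[0] = IOVEC_MAKE_STRING(t);
 *     iov[1] = IOVEC_MAKE_STRING("MESSAGE=example message");
 *     log_struct_iovec(LOG_INFO, iov, 2);
 *     free(t);
 */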
2384
2385 static void unit_update_on_console(Unit *u) {
2386 bool b;
2387
2388 assert(u);
2389
2390 b = unit_needs_console(u);
2391 if (u->on_console == b)
2392 return;
2393
2394 u->on_console = b;
2395 if (b)
2396 manager_ref_console(u->manager);
2397 else
2398 manager_unref_console(u->manager);
2399 }
2400
2401 static void unit_emit_audit_start(Unit *u) {
2402 assert(u);
2403
2404 if (u->type != UNIT_SERVICE)
2405 return;
2406
2407 /* Write audit record if we have just finished starting up */
2408 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2409 u->in_audit = true;
2410 }
2411
2412 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2413 assert(u);
2414
2415 if (u->type != UNIT_SERVICE)
2416 return;
2417
2418 if (u->in_audit) {
2419 /* Write audit record if we have just finished shutting down */
2420 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2421 u->in_audit = false;
2422 } else {
2423 /* Hmm, if there was no start record written, write it now, so that we always have a nice pair */
2424 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2425
2426 if (state == UNIT_INACTIVE)
2427 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2428 }
2429 }
2430
2431 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2432 bool unexpected = false;
2433 JobResult result;
2434
2435 assert(j);
2436
2437 if (j->state == JOB_WAITING)
2438
2439 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2440 * due to EAGAIN. */
2441 job_add_to_run_queue(j);
2442
2443 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2444 * hence needs to invalidate jobs. */
2445
2446 switch (j->type) {
2447
2448 case JOB_START:
2449 case JOB_VERIFY_ACTIVE:
2450
2451 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2452 job_finish_and_invalidate(j, JOB_DONE, true, false);
2453 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2454 unexpected = true;
2455
2456 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2457 if (ns == UNIT_FAILED)
2458 result = JOB_FAILED;
2459 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2460 result = JOB_SKIPPED;
2461 else
2462 result = JOB_DONE;
2463
2464 job_finish_and_invalidate(j, result, true, false);
2465 }
2466 }
2467
2468 break;
2469
2470 case JOB_RELOAD:
2471 case JOB_RELOAD_OR_START:
2472 case JOB_TRY_RELOAD:
2473
2474 if (j->state == JOB_RUNNING) {
2475 if (ns == UNIT_ACTIVE)
2476 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2477 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2478 unexpected = true;
2479
2480 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2481 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2482 }
2483 }
2484
2485 break;
2486
2487 case JOB_STOP:
2488 case JOB_RESTART:
2489 case JOB_TRY_RESTART:
2490
2491 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2492 job_finish_and_invalidate(j, JOB_DONE, true, false);
2493 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2494 unexpected = true;
2495 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2496 }
2497
2498 break;
2499
2500 default:
2501 assert_not_reached("Job type unknown");
2502 }
2503
2504 return unexpected;
2505 }
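/* Editor's summary (not part of the original source): the job/state
 * interactions above reduce to roughly this mapping:
 *
 *     job type            unit reaches                 result
 *     -----------------   --------------------------   ---------------------
 *     START, VERIFY       active/reloading             JOB_DONE
 *     START, VERIFY       failed (while running)       JOB_FAILED
 *     START, VERIFY       inactive, condition skip     JOB_SKIPPED
 *     RELOAD variants     active                       JOB_DONE, or JOB_FAILED
 *                                                      on UNIT_NOTIFY_RELOAD_FAILURE
 *     STOP, RESTART       inactive/failed              JOB_DONE
 *     STOP, RESTART       unexpected state while       JOB_FAILED
 *                         running
 */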
2506
2507 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2508 const char *reason;
2509 Manager *m;
2510
2511 assert(u);
2512 assert(os < _UNIT_ACTIVE_STATE_MAX);
2513 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2514
2515 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2516 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2517 * remounted this function will be called too! */
2518
2519 m = u->manager;
2520
2521 /* Let's enqueue the change signal early. In case this unit has a job associated, we want this unit to be in
2522 * the bus queue, so that any queued job change signal will force out the unit change signal first. */
2523 unit_add_to_dbus_queue(u);
2524
2525 /* Update timestamps for state changes */
2526 if (!MANAGER_IS_RELOADING(m)) {
2527 dual_timestamp_get(&u->state_change_timestamp);
2528
2529 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2530 u->inactive_exit_timestamp = u->state_change_timestamp;
2531 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2532 u->inactive_enter_timestamp = u->state_change_timestamp;
2533
2534 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2535 u->active_enter_timestamp = u->state_change_timestamp;
2536 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2537 u->active_exit_timestamp = u->state_change_timestamp;
2538 }
2539
2540 /* Keep track of failed units */
2541 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2542
2543 /* Make sure the cgroup and state files are always removed when we become inactive */
2544 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2545 unit_prune_cgroup(u);
2546 unit_unlink_state_files(u);
2547 }
2548
2549 unit_update_on_console(u);
2550
2551 if (!MANAGER_IS_RELOADING(m)) {
2552 bool unexpected;
2553
2554 /* Let's propagate state changes to the job */
2555 if (u->job)
2556 unexpected = unit_process_job(u->job, ns, flags);
2557 else
2558 unexpected = true;
2559
2560 /* If this state change happened without being requested by a job, then let's retroactively start or
2561 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2562 * additional jobs just because something is already activated. */
2563
2564 if (unexpected) {
2565 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2566 retroactively_start_dependencies(u);
2567 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2568 retroactively_stop_dependencies(u);
2569 }
2570
2571 /* Stop unneeded units regardless of whether going down was expected or not */
2572 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2573 check_unneeded_dependencies(u);
2574
2575 if (ns != os && ns == UNIT_FAILED) {
2576 log_unit_debug(u, "Unit entered failed state.");
2577
2578 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2579 unit_start_on_failure(u);
2580 }
2581
2582 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2583 /* This unit just finished starting up */
2584
2585 unit_emit_audit_start(u);
2586 manager_send_unit_plymouth(m, u);
2587 }
2588
2589 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2590 /* This unit just stopped/failed. */
2591
2592 unit_emit_audit_stop(u, ns);
2593 unit_log_resources(u);
2594 }
2595 }
2596
2597 manager_recheck_journal(m);
2598 manager_recheck_dbus(m);
2599
2600 unit_trigger_notify(u);
2601
2602 if (!MANAGER_IS_RELOADING(m)) {
2603 /* Maybe we finished startup and are now ready to be stopped because we are no longer needed? */
2604 unit_submit_to_stop_when_unneeded_queue(u);
2605
2606 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2607 * something BindsTo= a Type=oneshot unit, as these units go directly from starting to inactive,
2608 * without ever entering started.) */
2609 unit_check_binds_to(u);
2610
2611 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2612 reason = strjoina("unit ", u->id, " failed");
2613 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2614 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2615 reason = strjoina("unit ", u->id, " succeeded");
2616 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2617 }
2618 }
2619
2620 unit_add_to_gc_queue(u);
2621 }
2622
2623 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2624 int r;
2625
2626 assert(u);
2627 assert(pid_is_valid(pid));
2628
2629 /* Watch a specific PID */
2630
2631 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2632 * opportunity to remove any stale references to this PID as they can be created
2633 * easily (when watching a process which is not our direct child). */
2634 if (exclusive)
2635 manager_unwatch_pid(u->manager, pid);
2636
2637 r = set_ensure_allocated(&u->pids, NULL);
2638 if (r < 0)
2639 return r;
2640
2641 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2642 if (r < 0)
2643 return r;
2644
2645 /* First try, let's add the unit keyed by "pid". */
2646 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2647 if (r == -EEXIST) {
2648 Unit **array;
2649 bool found = false;
2650 size_t n = 0;
2651
2652 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2653 * to an array of Units rather than just a Unit), lists us already. */
2654
2655 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2656 if (array)
2657 for (; array[n]; n++)
2658 if (array[n] == u)
2659 found = true;
2660
2661 if (found) /* Found it already? If so, do nothing. */
2662 r = 0;
2663 else {
2664 Unit **new_array;
2665
2666 /* Allocate a new array */
2667 new_array = new(Unit*, n + 2);
2668 if (!new_array)
2669 return -ENOMEM;
2670
2671 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2672 new_array[n] = u;
2673 new_array[n+1] = NULL;
2674
2675 /* Add or replace the old array */
2676 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2677 if (r < 0) {
2678 free(new_array);
2679 return r;
2680 }
2681
2682 free(array);
2683 }
2684 } else if (r < 0)
2685 return r;
2686
2687 r = set_put(u->pids, PID_TO_PTR(pid));
2688 if (r < 0)
2689 return r;
2690
2691 return 0;
2692 }
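/* Illustration (editor's sketch, not part of the original source): the
 * dual-key scheme above stores the first watcher directly under "pid" and any
 * additional watchers in a NULL-terminated array under "-pid", so a lookup has
 * to probe both keys. notify_unit() is a hypothetical callback:
 *
 *     Unit *first = hashmap_get(m->watch_pids, PID_TO_PTR(pid));
 *     Unit **others = hashmap_get(m->watch_pids, PID_TO_PTR(-pid));
 *
 *     if (first)
 *             notify_unit(first, pid);
 *     for (size_t n = 0; others && others[n]; n++)
 *             notify_unit(others[n], pid);
 */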
2693
2694 void unit_unwatch_pid(Unit *u, pid_t pid) {
2695 Unit **array;
2696
2697 assert(u);
2698 assert(pid_is_valid(pid));
2699
2700 /* First let's drop the unit in case it's keyed as "pid". */
2701 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2702
2703 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2704 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2705 if (array) {
2706 size_t n, m = 0;
2707
2708 /* Let's iterate through the array, dropping our own entry */
2709 for (n = 0; array[n]; n++)
2710 if (array[n] != u)
2711 array[m++] = array[n];
2712 array[m] = NULL;
2713
2714 if (m == 0) {
2715 /* The array is now empty, remove the entire entry */
2716 assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2717 free(array);
2718 }
2719 }
2720
2721 (void) set_remove(u->pids, PID_TO_PTR(pid));
2722 }
2723
2724 void unit_unwatch_all_pids(Unit *u) {
2725 assert(u);
2726
2727 while (!set_isempty(u->pids))
2728 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2729
2730 u->pids = set_free(u->pids);
2731 }
2732
2733 static void unit_tidy_watch_pids(Unit *u) {
2734 pid_t except1, except2;
2735 Iterator i;
2736 void *e;
2737
2738 assert(u);
2739
2740 /* Cleans dead PIDs from our list */
2741
2742 except1 = unit_main_pid(u);
2743 except2 = unit_control_pid(u);
2744
2745 SET_FOREACH(e, u->pids, i) {
2746 pid_t pid = PTR_TO_PID(e);
2747
2748 if (pid == except1 || pid == except2)
2749 continue;
2750
2751 if (!pid_is_unwaited(pid))
2752 unit_unwatch_pid(u, pid);
2753 }
2754 }
2755
2756 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2757 Unit *u = userdata;
2758
2759 assert(s);
2760 assert(u);
2761
2762 unit_tidy_watch_pids(u);
2763 unit_watch_all_pids(u);
2764
2765 /* If the PID set is empty now, then let's finish this off. */
2766 unit_synthesize_cgroup_empty_event(u);
2767
2768 return 0;
2769 }
2770
2771 int unit_enqueue_rewatch_pids(Unit *u) {
2772 int r;
2773
2774 assert(u);
2775
2776 if (!u->cgroup_path)
2777 return -ENOENT;
2778
2779 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2780 if (r < 0)
2781 return r;
2782 if (r > 0) /* On unified we can use proper notifications */
2783 return 0;
2784
2785 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2786 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2787 * involves issuing kill(pid, 0) on all processes we watch. */
2788
2789 if (!u->rewatch_pids_event_source) {
2790 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2791
2792 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2793 if (r < 0)
2794 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2795
2796 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2797 if (r < 0)
2798 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2799
2800 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2801
2802 u->rewatch_pids_event_source = TAKE_PTR(s);
2803 }
2804
2805 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2806 if (r < 0)
2807 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2808
2809 return 0;
2810 }
2811
2812 void unit_dequeue_rewatch_pids(Unit *u) {
2813 int r;
2814 assert(u);
2815
2816 if (!u->rewatch_pids_event_source)
2817 return;
2818
2819 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2820 if (r < 0)
2821 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2822
2823 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2824 }
2825
2826 bool unit_job_is_applicable(Unit *u, JobType j) {
2827 assert(u);
2828 assert(j >= 0 && j < _JOB_TYPE_MAX);
2829
2830 switch (j) {
2831
2832 case JOB_VERIFY_ACTIVE:
2833 case JOB_START:
2834 case JOB_NOP:
2835 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2836 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2837 * jobs for them. */
2838 return true;
2839
2840 case JOB_STOP:
2841 /* Similar to the above. However, perpetual units can never be stopped (neither explicitly nor due to
2842 * external events), hence it makes no sense to permit enqueuing such a request either. */
2843 return !u->perpetual;
2844
2845 case JOB_RESTART:
2846 case JOB_TRY_RESTART:
2847 return unit_can_stop(u) && unit_can_start(u);
2848
2849 case JOB_RELOAD:
2850 case JOB_TRY_RELOAD:
2851 return unit_can_reload(u);
2852
2853 case JOB_RELOAD_OR_START:
2854 return unit_can_reload(u) && unit_can_start(u);
2855
2856 default:
2857 assert_not_reached("Invalid job type");
2858 }
2859 }
2860
2861 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2862 assert(u);
2863
2864 /* Only warn about some unit types */
2865 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2866 return;
2867
2868 if (streq_ptr(u->id, other))
2869 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2870 else
2871 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2872 }
2873
2874 static int unit_add_dependency_hashmap(
2875 Hashmap **h,
2876 Unit *other,
2877 UnitDependencyMask origin_mask,
2878 UnitDependencyMask destination_mask) {
2879
2880 UnitDependencyInfo info;
2881 int r;
2882
2883 assert(h);
2884 assert(other);
2885 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2886 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2887 assert(origin_mask > 0 || destination_mask > 0);
2888
2889 r = hashmap_ensure_allocated(h, NULL);
2890 if (r < 0)
2891 return r;
2892
2893 assert_cc(sizeof(void*) == sizeof(info));
2894
2895 info.data = hashmap_get(*h, other);
2896 if (info.data) {
2897 /* Entry already exists. Add in our mask. */
2898
2899 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2900 FLAGS_SET(destination_mask, info.destination_mask))
2901 return 0; /* NOP */
2902
2903 info.origin_mask |= origin_mask;
2904 info.destination_mask |= destination_mask;
2905
2906 r = hashmap_update(*h, other, info.data);
2907 } else {
2908 info = (UnitDependencyInfo) {
2909 .origin_mask = origin_mask,
2910 .destination_mask = destination_mask,
2911 };
2912
2913 r = hashmap_put(*h, other, info.data);
2914 }
2915 if (r < 0)
2916 return r;
2917
2918 return 1;
2919 }
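/* Editor's note (not part of the original source): the assert_cc() above works
 * because UnitDependencyInfo is a union overlaying two 16-bit masks onto a
 * single pointer-sized value, so the dependency metadata lives directly in the
 * hashmap's value slot with no extra allocation. Schematically, as declared in
 * unit.h at the time of writing:
 *
 *     typedef union UnitDependencyInfo {
 *             void *data;
 *             struct {
 *                     UnitDependencyMask origin_mask:16;
 *                     UnitDependencyMask destination_mask:16;
 *             } _packed_;
 *     } UnitDependencyInfo;
 */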
2920
2921 int unit_add_dependency(
2922 Unit *u,
2923 UnitDependency d,
2924 Unit *other,
2925 bool add_reference,
2926 UnitDependencyMask mask) {
2927
2928 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2929 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2930 [UNIT_WANTS] = UNIT_WANTED_BY,
2931 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2932 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2933 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2934 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2935 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2936 [UNIT_WANTED_BY] = UNIT_WANTS,
2937 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2938 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2939 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2940 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2941 [UNIT_BEFORE] = UNIT_AFTER,
2942 [UNIT_AFTER] = UNIT_BEFORE,
2943 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2944 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2945 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2946 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2947 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2948 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2949 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2950 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2951 };
2952 Unit *original_u = u, *original_other = other;
2953 int r;
2954
2955 assert(u);
2956 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2957 assert(other);
2958
2959 u = unit_follow_merge(u);
2960 other = unit_follow_merge(other);
2961
2962 /* We won't allow dependencies on ourselves. We will not
2963 * consider them an error however. */
2964 if (u == other) {
2965 maybe_warn_about_dependency(original_u, original_other->id, d);
2966 return 0;
2967 }
2968
2969 if (d == UNIT_AFTER && UNIT_VTABLE(u)->refuse_after) {
2970 log_unit_warning(u, "Requested dependency After=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(u->type));
2971 return 0;
2972 }
2973
2974 if (d == UNIT_BEFORE && UNIT_VTABLE(other)->refuse_after) {
2975 log_unit_warning(u, "Requested dependency Before=%s ignored (%s units cannot be delayed).", other->id, unit_type_to_string(other->type));
2976 return 0;
2977 }
2978
2979 if (d == UNIT_ON_FAILURE && !UNIT_VTABLE(u)->can_fail) {
2980 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
2981 return 0;
2982 }
2983
2984 if (d == UNIT_TRIGGERS && !UNIT_VTABLE(u)->can_trigger)
2985 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2986 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
2987 if (d == UNIT_TRIGGERED_BY && !UNIT_VTABLE(other)->can_trigger)
2988 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2989 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
2990
2991 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2992 if (r < 0)
2993 return r;
2994
2995 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2996 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2997 if (r < 0)
2998 return r;
2999 }
3000
3001 if (add_reference) {
3002 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
3003 if (r < 0)
3004 return r;
3005
3006 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
3007 if (r < 0)
3008 return r;
3009 }
3010
3011 unit_add_to_dbus_queue(u);
3012 return 0;
3013 }
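/* Illustration (editor's sketch, not part of the original source): adding one
 * edge installs the inverse edge automatically per inverse_table, e.g.:
 *
 *     r = unit_add_dependency(a, UNIT_WANTS, b, true, UNIT_DEPENDENCY_FILE);
 *     // on success: a gains Wants=b, b gains WantedBy=a, and because
 *     // add_reference is true, References= and ReferencedBy= edges as well
 *
 * UNIT_DEPENDENCY_FILE is one of the UnitDependencyMask origin values; the mask
 * records where an edge came from so it can later be dropped selectively.
 */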
3014
3015 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3016 int r;
3017
3018 assert(u);
3019
3020 r = unit_add_dependency(u, d, other, add_reference, mask);
3021 if (r < 0)
3022 return r;
3023
3024 return unit_add_dependency(u, e, other, add_reference, mask);
3025 }
3026
3027 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3028 int r;
3029
3030 assert(u);
3031 assert(name);
3032 assert(buf);
3033 assert(ret);
3034
3035 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3036 *buf = NULL;
3037 *ret = name;
3038 return 0;
3039 }
3040
3041 if (u->instance)
3042 r = unit_name_replace_instance(name, u->instance, buf);
3043 else {
3044 _cleanup_free_ char *i = NULL;
3045
3046 r = unit_name_to_prefix(u->id, &i);
3047 if (r < 0)
3048 return r;
3049
3050 r = unit_name_replace_instance(name, i, buf);
3051 }
3052 if (r < 0)
3053 return r;
3054
3055 *ret = *buf;
3056 return 0;
3057 }
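/* Editor's worked example (not part of the original source), values
 * illustrative:
 *
 *     u->id = "getty@tty1.service", u->instance = "tty1"
 *
 *     name = "serial-getty@.service"   // a valid UNIT_NAME_TEMPLATE
 *       =>  *ret = "serial-getty@tty1.service", *buf owns the string
 *
 *     name = "dbus.service"            // not a template
 *       =>  *ret = name, *buf = NULL   // passed through untouched
 */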
3058
3059 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3060 _cleanup_free_ char *buf = NULL;
3061 Unit *other;
3062 int r;
3063
3064 assert(u);
3065 assert(name);
3066
3067 r = resolve_template(u, name, &buf, &name);
3068 if (r < 0)
3069 return r;
3070
3071 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3072 if (r < 0)
3073 return r;
3074
3075 return unit_add_dependency(u, d, other, add_reference, mask);
3076 }
3077
3078 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3079 _cleanup_free_ char *buf = NULL;
3080 Unit *other;
3081 int r;
3082
3083 assert(u);
3084 assert(name);
3085
3086 r = resolve_template(u, name, &buf, &name);
3087 if (r < 0)
3088 return r;
3089
3090 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3091 if (r < 0)
3092 return r;
3093
3094 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3095 }
3096
3097 int set_unit_path(const char *p) {
3098 /* This is mostly for debug purposes */
3099 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3100 return -errno;
3101
3102 return 0;
3103 }
3104
3105 char *unit_dbus_path(Unit *u) {
3106 assert(u);
3107
3108 if (!u->id)
3109 return NULL;
3110
3111 return unit_dbus_path_from_name(u->id);
3112 }
3113
3114 char *unit_dbus_path_invocation_id(Unit *u) {
3115 assert(u);
3116
3117 if (sd_id128_is_null(u->invocation_id))
3118 return NULL;
3119
3120 return unit_dbus_path_from_name(u->invocation_id_string);
3121 }
3122
3123 int unit_set_slice(Unit *u, Unit *slice) {
3124 assert(u);
3125 assert(slice);
3126
3127 /* Sets the unit slice if it has not been set before. Is extra
3128 * careful to only allow this for units that actually have a
3129 * cgroup context. Also, we don't allow setting this for slices
3130 * (since the parent slice is derived from the name). Make
3131 * sure the unit we set is actually a slice. */
3132
3133 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3134 return -EOPNOTSUPP;
3135
3136 if (u->type == UNIT_SLICE)
3137 return -EINVAL;
3138
3139 if (unit_active_state(u) != UNIT_INACTIVE)
3140 return -EBUSY;
3141
3142 if (slice->type != UNIT_SLICE)
3143 return -EINVAL;
3144
3145 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3146 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3147 return -EPERM;
3148
3149 if (UNIT_DEREF(u->slice) == slice)
3150 return 0;
3151
3152 /* Disallow slice changes if @u is already bound to cgroups */
3153 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3154 return -EBUSY;
3155
3156 unit_ref_set(&u->slice, u, slice);
3157 return 1;
3158 }
3159
3160 int unit_set_default_slice(Unit *u) {
3161 const char *slice_name;
3162 Unit *slice;
3163 int r;
3164
3165 assert(u);
3166
3167 if (UNIT_ISSET(u->slice))
3168 return 0;
3169
3170 if (u->instance) {
3171 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3172
3173 /* Implicitly place all instantiated units in their
3174 * own per-template slice */
3175
3176 r = unit_name_to_prefix(u->id, &prefix);
3177 if (r < 0)
3178 return r;
3179
3180 /* The prefix is already escaped, but it might include
3181 * "-" which has a special meaning for slice units,
3182 * hence escape it once more here. */
3183 escaped = unit_name_escape(prefix);
3184 if (!escaped)
3185 return -ENOMEM;
3186
3187 if (MANAGER_IS_SYSTEM(u->manager))
3188 slice_name = strjoina("system-", escaped, ".slice");
3189 else
3190 slice_name = strjoina(escaped, ".slice");
3191 } else
3192 slice_name =
3193 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3194 ? SPECIAL_SYSTEM_SLICE
3195 : SPECIAL_ROOT_SLICE;
3196
3197 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3198 if (r < 0)
3199 return r;
3200
3201 return unit_set_slice(u, slice);
3202 }
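/* Editor's worked example (not part of the original source): the per-template
 * slice placement above yields, in the system manager,
 *
 *     getty@tty1.service:    prefix "getty"   -> slice "system-getty.slice"
 *     foo-bar@baz.service:   prefix "foo-bar" -> escaped "foo\x2dbar"
 *                                             -> slice "system-foo\x2dbar.slice"
 *
 * while non-instantiated units default to system.slice in the system manager,
 * and to the root slice in user managers and for the init scope.
 */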
3203
3204 const char *unit_slice_name(Unit *u) {
3205 assert(u);
3206
3207 if (!UNIT_ISSET(u->slice))
3208 return NULL;
3209
3210 return UNIT_DEREF(u->slice)->id;
3211 }
3212
3213 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3214 _cleanup_free_ char *t = NULL;
3215 int r;
3216
3217 assert(u);
3218 assert(type);
3219 assert(_found);
3220
3221 r = unit_name_change_suffix(u->id, type, &t);
3222 if (r < 0)
3223 return r;
3224 if (unit_has_name(u, t))
3225 return -EINVAL;
3226
3227 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3228 assert(r < 0 || *_found != u);
3229 return r;
3230 }
3231
3232 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3233 const char *new_owner;
3234 Unit *u = userdata;
3235 int r;
3236
3237 assert(message);
3238 assert(u);
3239
3240 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3241 if (r < 0) {
3242 bus_log_parse_error(r);
3243 return 0;
3244 }
3245
3246 if (UNIT_VTABLE(u)->bus_name_owner_change)
3247 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3248
3249 return 0;
3250 }
3251
3252 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3253 const sd_bus_error *e;
3254 const char *new_owner;
3255 Unit *u = userdata;
3256 int r;
3257
3258 assert(message);
3259 assert(u);
3260
3261 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3262
3263 e = sd_bus_message_get_error(message);
3264 if (e) {
3265 if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3266 log_unit_error(u, "Unexpected error response from GetNameOwner(): %s", e->message);
3267
3268 new_owner = NULL;
3269 } else {
3270 r = sd_bus_message_read(message, "s", &new_owner);
3271 if (r < 0)
3272 return bus_log_parse_error(r);
3273
3274 assert(!isempty(new_owner));
3275 }
3276
3277 if (UNIT_VTABLE(u)->bus_name_owner_change)
3278 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3279
3280 return 0;
3281 }
3282
3283 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3284 const char *match;
3285 int r;
3286
3287 assert(u);
3288 assert(bus);
3289 assert(name);
3290
3291 if (u->match_bus_slot || u->get_name_owner_slot)
3292 return -EBUSY;
3293
3294 match = strjoina("type='signal',"
3295 "sender='org.freedesktop.DBus',"
3296 "path='/org/freedesktop/DBus',"
3297 "interface='org.freedesktop.DBus',"
3298 "member='NameOwnerChanged',"
3299 "arg0='", name, "'");
3300
3301 r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3302 if (r < 0)
3303 return r;
3304
3305 r = sd_bus_call_method_async(
3306 bus,
3307 &u->get_name_owner_slot,
3308 "org.freedesktop.DBus",
3309 "/org/freedesktop/DBus",
3310 "org.freedesktop.DBus",
3311 "GetNameOwner",
3312 get_name_owner_handler,
3313 u,
3314 "s", name);
3315 if (r < 0) {
3316 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3317 return r;
3318 }
3319
3320 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3321 return 0;
3322 }
3323
3324 int unit_watch_bus_name(Unit *u, const char *name) {
3325 int r;
3326
3327 assert(u);
3328 assert(name);
3329
3330 /* Watch a specific name on the bus. We only support one unit
3331 * watching each name for now. */
3332
3333 if (u->manager->api_bus) {
3334 /* If the bus is already available, install the match directly.
3335 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3336 r = unit_install_bus_match(u, u->manager->api_bus, name);
3337 if (r < 0)
3338 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3339 }
3340
3341 r = hashmap_put(u->manager->watch_bus, name, u);
3342 if (r < 0) {
3343 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3344 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3345 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3346 }
3347
3348 return 0;
3349 }
3350
3351 void unit_unwatch_bus_name(Unit *u, const char *name) {
3352 assert(u);
3353 assert(name);
3354
3355 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3356 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3357 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3358 }
3359
3360 bool unit_can_serialize(Unit *u) {
3361 assert(u);
3362
3363 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3364 }
3365
3366 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3367 _cleanup_free_ char *s = NULL;
3368 int r;
3369
3370 assert(f);
3371 assert(key);
3372
3373 if (mask == 0)
3374 return 0;
3375
3376 r = cg_mask_to_string(mask, &s);
3377 if (r < 0)
3378 return log_error_errno(r, "Failed to format cgroup mask: %m");
3379
3380 return serialize_item(f, key, s);
3381 }
3382
3383 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3384 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3385 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3386 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3387 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3388 };
3389
3390 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3391 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3392 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3393 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3394 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3395 };
3396
3397 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3398 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3399 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3400 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3401 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3402 };
3403
3404 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3405 CGroupIPAccountingMetric m;
3406 int r;
3407
3408 assert(u);
3409 assert(f);
3410 assert(fds);
3411
3412 if (unit_can_serialize(u)) {
3413 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3414 if (r < 0)
3415 return r;
3416 }
3417
3418 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3419
3420 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3421 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3422 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3423 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3424
3425 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3426 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3427
3428 if (dual_timestamp_is_set(&u->condition_timestamp))
3429 (void) serialize_bool(f, "condition-result", u->condition_result);
3430
3431 if (dual_timestamp_is_set(&u->assert_timestamp))
3432 (void) serialize_bool(f, "assert-result", u->assert_result);
3433
3434 (void) serialize_bool(f, "transient", u->transient);
3435 (void) serialize_bool(f, "in-audit", u->in_audit);
3436
3437 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3438 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3439 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3440 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_ratelimit_interval);
3441 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_ratelimit_burst);
3442
3443 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3444 if (u->cpu_usage_last != NSEC_INFINITY)
3445 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3446
3447 if (u->oom_kill_last > 0)
3448 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3449
3450 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3451 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3452
3453 if (u->io_accounting_last[im] != UINT64_MAX)
3454 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3455 }
3456
3457 if (u->cgroup_path)
3458 (void) serialize_item(f, "cgroup", u->cgroup_path);
3459
3460 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3461 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3462 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3463 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3464
3465 if (uid_is_valid(u->ref_uid))
3466 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3467 if (gid_is_valid(u->ref_gid))
3468 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3469
3470 if (!sd_id128_is_null(u->invocation_id))
3471 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3472
3473 bus_track_serialize(u->bus_track, f, "ref");
3474
3475 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3476 uint64_t v;
3477
3478 r = unit_get_ip_accounting(u, m, &v);
3479 if (r >= 0)
3480 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3481 }
3482
3483 if (serialize_jobs) {
3484 if (u->job) {
3485 fputs("job\n", f);
3486 job_serialize(u->job, f);
3487 }
3488
3489 if (u->nop_job) {
3490 fputs("job\n", f);
3491 job_serialize(u->nop_job, f);
3492 }
3493 }
3494
3495 /* End marker */
3496 fputc('\n', f);
3497 return 0;
3498 }
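/* Illustration (editor's sketch, not part of the original source): the result
 * is a flat key=value stream terminated by an empty line, which is what
 * unit_deserialize() below parses back. An abridged, hypothetical excerpt:
 *
 *     state-change-timestamp=1576800000000000 123456
 *     transient=no
 *     cpu-usage-base=0
 *     cgroup=/system.slice/example.service
 *     cgroup-realized=yes
 *                                      <- empty line as end marker
 */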
3499
3500 static int unit_deserialize_job(Unit *u, FILE *f) {
3501 _cleanup_(job_freep) Job *j = NULL;
3502 int r;
3503
3504 assert(u);
3505 assert(f);
3506
3507 j = job_new_raw(u);
3508 if (!j)
3509 return log_oom();
3510
3511 r = job_deserialize(j, f);
3512 if (r < 0)
3513 return r;
3514
3515 r = job_install_deserialized(j);
3516 if (r < 0)
3517 return r;
3518
3519 TAKE_PTR(j);
3520 return 0;
3521 }
3522
3523 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3524 int r;
3525
3526 assert(u);
3527 assert(f);
3528 assert(fds);
3529
3530 for (;;) {
3531 _cleanup_free_ char *line = NULL;
3532 char *l, *v;
3533 ssize_t m;
3534 size_t k;
3535
3536 r = read_line(f, LONG_LINE_MAX, &line);
3537 if (r < 0)
3538 return log_error_errno(r, "Failed to read serialization line: %m");
3539 if (r == 0) /* eof */
3540 break;
3541
3542 l = strstrip(line);
3543 if (isempty(l)) /* End marker */
3544 break;
3545
3546 k = strcspn(l, "=");
3547
3548 if (l[k] == '=') {
3549 l[k] = 0;
3550 v = l+k+1;
3551 } else
3552 v = l+k;
3553
3554 if (streq(l, "job")) {
3555 if (v[0] == '\0') {
3556 /* New-style serialized job */
3557 r = unit_deserialize_job(u, f);
3558 if (r < 0)
3559 return r;
3560 } else /* Legacy for pre-44 */
3561 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3562 continue;
3563 } else if (streq(l, "state-change-timestamp")) {
3564 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3565 continue;
3566 } else if (streq(l, "inactive-exit-timestamp")) {
3567 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3568 continue;
3569 } else if (streq(l, "active-enter-timestamp")) {
3570 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3571 continue;
3572 } else if (streq(l, "active-exit-timestamp")) {
3573 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3574 continue;
3575 } else if (streq(l, "inactive-enter-timestamp")) {
3576 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3577 continue;
3578 } else if (streq(l, "condition-timestamp")) {
3579 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3580 continue;
3581 } else if (streq(l, "assert-timestamp")) {
3582 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3583 continue;
3584 } else if (streq(l, "condition-result")) {
3585
3586 r = parse_boolean(v);
3587 if (r < 0)
3588 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3589 else
3590 u->condition_result = r;
3591
3592 continue;
3593
3594 } else if (streq(l, "assert-result")) {
3595
3596 r = parse_boolean(v);
3597 if (r < 0)
3598 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3599 else
3600 u->assert_result = r;
3601
3602 continue;
3603
3604 } else if (streq(l, "transient")) {
3605
3606 r = parse_boolean(v);
3607 if (r < 0)
3608 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3609 else
3610 u->transient = r;
3611
3612 continue;
3613
3614 } else if (streq(l, "in-audit")) {
3615
3616 r = parse_boolean(v);
3617 if (r < 0)
3618 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3619 else
3620 u->in_audit = r;
3621
3622 continue;
3623
3624 } else if (streq(l, "exported-invocation-id")) {
3625
3626 r = parse_boolean(v);
3627 if (r < 0)
3628 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3629 else
3630 u->exported_invocation_id = r;
3631
3632 continue;
3633
3634 } else if (streq(l, "exported-log-level-max")) {
3635
3636 r = parse_boolean(v);
3637 if (r < 0)
3638 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3639 else
3640 u->exported_log_level_max = r;
3641
3642 continue;
3643
3644 } else if (streq(l, "exported-log-extra-fields")) {
3645
3646 r = parse_boolean(v);
3647 if (r < 0)
3648 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3649 else
3650 u->exported_log_extra_fields = r;
3651
3652 continue;
3653
3654 } else if (streq(l, "exported-log-rate-limit-interval")) {
3655
3656 r = parse_boolean(v);
3657 if (r < 0)
3658 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3659 else
3660 u->exported_log_ratelimit_interval = r;
3661
3662 continue;
3663
3664 } else if (streq(l, "exported-log-rate-limit-burst")) {
3665
3666 r = parse_boolean(v);
3667 if (r < 0)
3668 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3669 else
3670 u->exported_log_ratelimit_burst = r;
3671
3672 continue;
3673
3674 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3675
3676 r = safe_atou64(v, &u->cpu_usage_base);
3677 if (r < 0)
3678 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3679
3680 continue;
3681
3682 } else if (streq(l, "cpu-usage-last")) {
3683
3684 r = safe_atou64(v, &u->cpu_usage_last);
3685 if (r < 0)
3686 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3687
3688 continue;
3689
3690 } else if (streq(l, "oom-kill-last")) {
3691
3692 r = safe_atou64(v, &u->oom_kill_last);
3693 if (r < 0)
3694 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3695
3696 continue;
3697
3698 } else if (streq(l, "cgroup")) {
3699
3700 r = unit_set_cgroup_path(u, v);
3701 if (r < 0)
3702 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3703
3704 (void) unit_watch_cgroup(u);
3705 (void) unit_watch_cgroup_memory(u);
3706
3707 continue;
3708 } else if (streq(l, "cgroup-realized")) {
3709 int b;
3710
3711 b = parse_boolean(v);
3712 if (b < 0)
3713 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3714 else
3715 u->cgroup_realized = b;
3716
3717 continue;
3718
3719 } else if (streq(l, "cgroup-realized-mask")) {
3720
3721 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3722 if (r < 0)
3723 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3724 continue;
3725
3726 } else if (streq(l, "cgroup-enabled-mask")) {
3727
3728 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3729 if (r < 0)
3730 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3731 continue;
3732
3733 } else if (streq(l, "cgroup-invalidated-mask")) {
3734
3735 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3736 if (r < 0)
3737 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3738 continue;
3739
3740 } else if (streq(l, "ref-uid")) {
3741 uid_t uid;
3742
3743 r = parse_uid(v, &uid);
3744 if (r < 0)
3745 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3746 else
3747 unit_ref_uid_gid(u, uid, GID_INVALID);
3748
3749 continue;
3750
3751 } else if (streq(l, "ref-gid")) {
3752 gid_t gid;
3753
3754 r = parse_gid(v, &gid);
3755 if (r < 0)
3756 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3757 else
3758 unit_ref_uid_gid(u, UID_INVALID, gid);
3759
3760 continue;
3761
3762 } else if (streq(l, "ref")) {
3763
3764 r = strv_extend(&u->deserialized_refs, v);
3765 if (r < 0)
3766 return log_oom();
3767
3768 continue;
3769 } else if (streq(l, "invocation-id")) {
3770 sd_id128_t id;
3771
3772 r = sd_id128_from_string(v, &id);
3773 if (r < 0)
3774 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3775 else {
3776 r = unit_set_invocation_id(u, id);
3777 if (r < 0)
3778 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3779 }
3780
3781 continue;
3782 }
3783
3784 /* Check if this is an IP accounting metric serialization field */
3785 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3786 if (m >= 0) {
3787 uint64_t c;
3788
3789 r = safe_atou64(v, &c);
3790 if (r < 0)
3791 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3792 else
3793 u->ip_accounting_extra[m] = c;
3794 continue;
3795 }
3796
3797 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3798 if (m >= 0) {
3799 uint64_t c;
3800
3801 r = safe_atou64(v, &c);
3802 if (r < 0)
3803 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3804 else
3805 u->io_accounting_base[m] = c;
3806 continue;
3807 }
3808
3809 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3810 if (m >= 0) {
3811 uint64_t c;
3812
3813 r = safe_atou64(v, &c);
3814 if (r < 0)
3815 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3816 else
3817 u->io_accounting_last[m] = c;
3818 continue;
3819 }
3820
3821 if (unit_can_serialize(u)) {
3822 r = exec_runtime_deserialize_compat(u, l, v, fds);
3823 if (r < 0) {
3824 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3825 continue;
3826 }
3827
3828 /* Returns positive if key was handled by the call */
3829 if (r > 0)
3830 continue;
3831
3832 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3833 if (r < 0)
3834 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3835 }
3836 }
3837
3838 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3839          * useful so that timeouts based on this timestamp don't trigger too early, and is in line with the logic from
3840 * before 228 where the base for timeouts was not persistent across reboots. */
3841
3842 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3843 dual_timestamp_get(&u->state_change_timestamp);
3844
3845 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3846 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3847 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3848 unit_invalidate_cgroup_bpf(u);
3849
3850 return 0;
3851 }
3852
3853 int unit_deserialize_skip(FILE *f) {
3854 int r;
3855 assert(f);
3856
3857 /* Skip serialized data for this unit. We don't know what it is. */
3858
3859 for (;;) {
3860 _cleanup_free_ char *line = NULL;
3861 char *l;
3862
3863 r = read_line(f, LONG_LINE_MAX, &line);
3864 if (r < 0)
3865 return log_error_errno(r, "Failed to read serialization line: %m");
3866 if (r == 0)
3867 return 0;
3868
3869 l = strstrip(line);
3870
3871 /* End marker */
3872 if (isempty(l))
3873 return 1;
3874 }
3875 }
3876
3877 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3878 _cleanup_free_ char *e = NULL;
3879 Unit *device;
3880 int r;
3881
3882 assert(u);
3883
3884 /* Adds in links to the device node that this unit is based on */
3885 if (isempty(what))
3886 return 0;
3887
3888 if (!is_device_path(what))
3889 return 0;
3890
3891 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3892 if (!unit_type_supported(UNIT_DEVICE))
3893 return 0;
3894
3895 r = unit_name_from_path(what, ".device", &e);
3896 if (r < 0)
3897 return r;
3898
3899 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3900 if (r < 0)
3901 return r;
3902
3903 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3904 dep = UNIT_BINDS_TO;
3905
3906 return unit_add_two_dependencies(u, UNIT_AFTER,
3907 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3908 device, true, mask);
3909 }
3910
3911 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3912 _cleanup_free_ char *escaped = NULL, *target = NULL;
3913 int r;
3914
3915 assert(u);
3916
3917 if (isempty(what))
3918 return 0;
3919
3920 if (!path_startswith(what, "/dev/"))
3921 return 0;
3922
3923 /* If we don't support devices, then also don't bother with blockdev@.target */
3924 if (!unit_type_supported(UNIT_DEVICE))
3925 return 0;
3926
3927 r = unit_name_path_escape(what, &escaped);
3928 if (r < 0)
3929 return r;
3930
3931 r = unit_name_build("blockdev", escaped, ".target", &target);
3932 if (r < 0)
3933 return r;
3934
3935 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3936 }
3937
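/* Restores a unit's runtime state after deserialization: re-adds the deserialized bus name tracking
 * references, invokes the unit type's coldplug() hook, and coldplugs any pending job. Collects the first
 * error encountered but keeps going. */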
3938 int unit_coldplug(Unit *u) {
3939 int r = 0, q;
3940 char **i;
3941 Job *uj;
3942
3943 assert(u);
3944
3945 /* Make sure we don't enter a loop, when coldplugging recursively. */
3946 if (u->coldplugged)
3947 return 0;
3948
3949 u->coldplugged = true;
3950
3951 STRV_FOREACH(i, u->deserialized_refs) {
3952 q = bus_unit_track_add_name(u, *i);
3953 if (q < 0 && r >= 0)
3954 r = q;
3955 }
3956 u->deserialized_refs = strv_free(u->deserialized_refs);
3957
3958 if (UNIT_VTABLE(u)->coldplug) {
3959 q = UNIT_VTABLE(u)->coldplug(u);
3960 if (q < 0 && r >= 0)
3961 r = q;
3962 }
3963
3964 uj = u->job ?: u->nop_job;
3965 if (uj) {
3966 q = job_coldplug(uj);
3967 if (q < 0 && r >= 0)
3968 r = q;
3969 }
3970
3971 return r;
3972 }
3973
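/* Lets the unit type catch up with external state that may have changed while we weren't watching, via the
 * catchup() vtable hook. */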
3974 void unit_catchup(Unit *u) {
3975 assert(u);
3976
3977 if (UNIT_VTABLE(u)->catchup)
3978 UNIT_VTABLE(u)->catchup(u);
3979 }
3980
3981 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3982 struct stat st;
3983
3984 if (!path)
3985 return false;
3986
3987 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3988 * are never out-of-date. */
3989 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3990 return false;
3991
3992 if (stat(path, &st) < 0)
3993 /* What, cannot access this anymore? */
3994 return true;
3995
3996         if (path_masked)
3997                 /* For masked files, check whether they are still masked */
3998                 return !null_or_empty(&st);
3999         else
4000                 /* For all other files, check the mtime */
4001                 return timespec_load(&st.st_mtim) > mtime;
4004 }
4005
4006 bool unit_need_daemon_reload(Unit *u) {
4007 _cleanup_strv_free_ char **t = NULL;
4008 char **path;
4009
4010 assert(u);
4011
4012 /* For unit files, we allow masking… */
4013 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
4014 u->load_state == UNIT_MASKED))
4015 return true;
4016
4017 /* Source paths should not be masked… */
4018 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
4019 return true;
4020
4021 if (u->load_state == UNIT_LOADED)
4022 (void) unit_find_dropin_paths(u, &t);
4023 if (!strv_equal(u->dropin_paths, t))
4024 return true;
4025
4026 /* … any drop-ins that are masked are simply omitted from the list. */
4027 STRV_FOREACH(path, u->dropin_paths)
4028 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4029 return true;
4030
4031 return false;
4032 }
4033
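/* Clears a unit's failure state, and resets its start rate limit counter. */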
4034 void unit_reset_failed(Unit *u) {
4035 assert(u);
4036
4037 if (UNIT_VTABLE(u)->reset_failed)
4038 UNIT_VTABLE(u)->reset_failed(u);
4039
4040 ratelimit_reset(&u->start_ratelimit);
4041 u->start_limit_hit = false;
4042 }
4043
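/* Returns the unit whose state this unit follows, if the unit type defines one, and NULL otherwise. */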
4044 Unit *unit_following(Unit *u) {
4045 assert(u);
4046
4047 if (UNIT_VTABLE(u)->following)
4048 return UNIT_VTABLE(u)->following(u);
4049
4050 return NULL;
4051 }
4052
4053 bool unit_stop_pending(Unit *u) {
4054 assert(u);
4055
4056         /* This call checks only the unit's current state. It is hence
4057          * useful to call from state-change handlers of the unit
4058          * itself, where the new state isn't recorded yet. This is
4059          * different from unit_inactive_or_pending(), which checks both
4060          * the current state and for a queued job. */
4061
4062 return unit_has_job_type(u, JOB_STOP);
4063 }
4064
4065 bool unit_inactive_or_pending(Unit *u) {
4066 assert(u);
4067
4068 /* Returns true if the unit is inactive or going down */
4069
4070 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4071 return true;
4072
4073 if (unit_stop_pending(u))
4074 return true;
4075
4076 return false;
4077 }
4078
4079 bool unit_active_or_pending(Unit *u) {
4080 assert(u);
4081
4082 /* Returns true if the unit is active or going up */
4083
4084 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4085 return true;
4086
4087 if (u->job &&
4088 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4089 return true;
4090
4091 return false;
4092 }
4093
4094 bool unit_will_restart_default(Unit *u) {
4095 assert(u);
4096
4097 return unit_has_job_type(u, JOB_START);
4098 }
4099
4100 bool unit_will_restart(Unit *u) {
4101 assert(u);
4102
4103 if (!UNIT_VTABLE(u)->will_restart)
4104 return false;
4105
4106 return UNIT_VTABLE(u)->will_restart(u);
4107 }
4108
4109 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4110 assert(u);
4111 assert(w >= 0 && w < _KILL_WHO_MAX);
4112 assert(SIGNAL_VALID(signo));
4113
4114 if (!UNIT_VTABLE(u)->kill)
4115 return -EOPNOTSUPP;
4116
4117 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4118 }
4119
4120 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4121 _cleanup_set_free_ Set *pid_set = NULL;
4122 int r;
4123
4124 pid_set = set_new(NULL);
4125 if (!pid_set)
4126 return NULL;
4127
4128 /* Exclude the main/control pids from being killed via the cgroup */
4129 if (main_pid > 0) {
4130 r = set_put(pid_set, PID_TO_PTR(main_pid));
4131 if (r < 0)
4132 return NULL;
4133 }
4134
4135 if (control_pid > 0) {
4136 r = set_put(pid_set, PID_TO_PTR(control_pid));
4137 if (r < 0)
4138 return NULL;
4139 }
4140
4141 return TAKE_PTR(pid_set);
4142 }
4143
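/* Generic kill logic shared by the unit types: validates that the requested processes actually exist, sends
 * the signal to the main and/or control process, and optionally to all other processes of the unit's cgroup.
 * Returns -ESRCH in the KILL_ALL_FAIL/KILL_CONTROL_FAIL modes if nothing was killed. */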
4144 int unit_kill_common(
4145 Unit *u,
4146 KillWho who,
4147 int signo,
4148 pid_t main_pid,
4149 pid_t control_pid,
4150 sd_bus_error *error) {
4151
4152 int r = 0;
4153 bool killed = false;
4154
4155 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4156 if (main_pid < 0)
4157 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4158 else if (main_pid == 0)
4159 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4160 }
4161
4162 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4163 if (control_pid < 0)
4164 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4165 else if (control_pid == 0)
4166 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4167 }
4168
4169 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4170 if (control_pid > 0) {
4171 if (kill(control_pid, signo) < 0)
4172 r = -errno;
4173 else
4174 killed = true;
4175 }
4176
4177 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4178 if (main_pid > 0) {
4179 if (kill(main_pid, signo) < 0)
4180 r = -errno;
4181 else
4182 killed = true;
4183 }
4184
4185 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4186 _cleanup_set_free_ Set *pid_set = NULL;
4187 int q;
4188
4189 /* Exclude the main/control pids from being killed via the cgroup */
4190 pid_set = unit_pid_set(main_pid, control_pid);
4191 if (!pid_set)
4192 return -ENOMEM;
4193
4194 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4195 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4196 r = q;
4197 else
4198 killed = true;
4199 }
4200
4201 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4202 return -ESRCH;
4203
4204 return r;
4205 }
4206
4207 int unit_following_set(Unit *u, Set **s) {
4208 assert(u);
4209 assert(s);
4210
4211 if (UNIT_VTABLE(u)->following_set)
4212 return UNIT_VTABLE(u)->following_set(u, s);
4213
4214 *s = NULL;
4215 return 0;
4216 }
4217
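/* Determines the enablement state (enabled, disabled, masked, …) of the unit file backing this unit, and
 * caches it; UNIT_FILE_BAD is recorded if the lookup fails. */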
4218 UnitFileState unit_get_unit_file_state(Unit *u) {
4219 int r;
4220
4221 assert(u);
4222
4223 if (u->unit_file_state < 0 && u->fragment_path) {
4224 r = unit_file_get_state(
4225 u->manager->unit_file_scope,
4226 NULL,
4227 u->id,
4228 &u->unit_file_state);
4229 if (r < 0)
4230 u->unit_file_state = UNIT_FILE_BAD;
4231 }
4232
4233 return u->unit_file_state;
4234 }
4235
4236 int unit_get_unit_file_preset(Unit *u) {
4237 assert(u);
4238
4239 if (u->unit_file_preset < 0 && u->fragment_path)
4240 u->unit_file_preset = unit_file_query_preset(
4241 u->manager->unit_file_scope,
4242 NULL,
4243 basename(u->fragment_path));
4244
4245 return u->unit_file_preset;
4246 }
4247
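/* Points 'ref' (owned by 'source') at 'target', registering it on the target's list of incoming references,
 * and dropping any previous target first. */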
4248 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4249 assert(ref);
4250 assert(source);
4251 assert(target);
4252
4253 if (ref->target)
4254 unit_ref_unset(ref);
4255
4256 ref->source = source;
4257 ref->target = target;
4258 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4259 return target;
4260 }
4261
4262 void unit_ref_unset(UnitRef *ref) {
4263 assert(ref);
4264
4265 if (!ref->target)
4266 return;
4267
4268 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4269 * be unreferenced now. */
4270 unit_add_to_gc_queue(ref->target);
4271
4272 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4273 ref->source = ref->target = NULL;
4274 }
4275
4276 static int user_from_unit_name(Unit *u, char **ret) {
4277
4278 static const uint8_t hash_key[] = {
4279 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4280 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4281 };
4282
4283 _cleanup_free_ char *n = NULL;
4284 int r;
4285
4286 r = unit_name_to_prefix(u->id, &n);
4287 if (r < 0)
4288 return r;
4289
4290 if (valid_user_group_name(n, 0)) {
4291 *ret = TAKE_PTR(n);
4292 return 0;
4293 }
4294
4295 /* If we can't use the unit name as a user name, then let's hash it and use that */
4296 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4297 return -ENOMEM;
4298
4299 return 0;
4300 }
4301
4302 int unit_patch_contexts(Unit *u) {
4303 CGroupContext *cc;
4304 ExecContext *ec;
4305 unsigned i;
4306 int r;
4307
4308 assert(u);
4309
4310 /* Patch in the manager defaults into the exec and cgroup
4311 * contexts, _after_ the rest of the settings have been
4312 * initialized */
4313
4314 ec = unit_get_exec_context(u);
4315 if (ec) {
4316 /* This only copies in the ones that need memory */
4317 for (i = 0; i < _RLIMIT_MAX; i++)
4318 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4319 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4320 if (!ec->rlimit[i])
4321 return -ENOMEM;
4322 }
4323
4324 if (MANAGER_IS_USER(u->manager) &&
4325 !ec->working_directory) {
4326
4327 r = get_home_dir(&ec->working_directory);
4328 if (r < 0)
4329 return r;
4330
4331 /* Allow user services to run, even if the
4332 * home directory is missing */
4333 ec->working_directory_missing_ok = true;
4334 }
4335
4336 if (ec->private_devices)
4337 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4338
4339 if (ec->protect_kernel_modules)
4340 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4341
4342 if (ec->protect_kernel_logs)
4343 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4344
4345 if (ec->protect_clock)
4346 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4347
4348 if (ec->dynamic_user) {
4349 if (!ec->user) {
4350 r = user_from_unit_name(u, &ec->user);
4351 if (r < 0)
4352 return r;
4353 }
4354
4355 if (!ec->group) {
4356 ec->group = strdup(ec->user);
4357 if (!ec->group)
4358 return -ENOMEM;
4359 }
4360
4361 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4362 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4363 * sandbox. */
4364
4365 ec->private_tmp = true;
4366 ec->remove_ipc = true;
4367 ec->protect_system = PROTECT_SYSTEM_STRICT;
4368 if (ec->protect_home == PROTECT_HOME_NO)
4369 ec->protect_home = PROTECT_HOME_READ_ONLY;
4370
4371 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4372 * them. */
4373 ec->no_new_privileges = true;
4374 ec->restrict_suid_sgid = true;
4375 }
4376 }
4377
4378 cc = unit_get_cgroup_context(u);
4379 if (cc && ec) {
4380
4381 if (ec->private_devices &&
4382 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4383 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4384
4385 if (ec->root_image &&
4386 (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
4387
4388 /* When RootImage= is specified, the following devices are touched. */
4389 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4390 if (r < 0)
4391 return r;
4392
4393 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4394 if (r < 0)
4395 return r;
4396
4397 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4398 if (r < 0)
4399 return r;
4400
4401 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices */
4402 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "modprobe@loop.service", true, UNIT_DEPENDENCY_FILE);
4403 if (r < 0)
4404 return r;
4405 }
4406
4407 if (ec->protect_clock) {
4408 r = cgroup_add_device_allow(cc, "char-rtc", "r");
4409 if (r < 0)
4410 return r;
4411 }
4412 }
4413
4414 return 0;
4415 }
4416
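/* The following accessors return pointers to the contexts embedded in the type-specific unit structures,
 * using the offsets published in the unit vtable; NULL is returned if the unit type has no such context. */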
4417 ExecContext *unit_get_exec_context(Unit *u) {
4418 size_t offset;
4419 assert(u);
4420
4421 if (u->type < 0)
4422 return NULL;
4423
4424 offset = UNIT_VTABLE(u)->exec_context_offset;
4425 if (offset <= 0)
4426 return NULL;
4427
4428 return (ExecContext*) ((uint8_t*) u + offset);
4429 }
4430
4431 KillContext *unit_get_kill_context(Unit *u) {
4432 size_t offset;
4433 assert(u);
4434
4435 if (u->type < 0)
4436 return NULL;
4437
4438 offset = UNIT_VTABLE(u)->kill_context_offset;
4439 if (offset <= 0)
4440 return NULL;
4441
4442 return (KillContext*) ((uint8_t*) u + offset);
4443 }
4444
4445 CGroupContext *unit_get_cgroup_context(Unit *u) {
4446 size_t offset;
4447
4448 if (u->type < 0)
4449 return NULL;
4450
4451 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4452 if (offset <= 0)
4453 return NULL;
4454
4455 return (CGroupContext*) ((uint8_t*) u + offset);
4456 }
4457
4458 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4459 size_t offset;
4460
4461 if (u->type < 0)
4462 return NULL;
4463
4464 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4465 if (offset <= 0)
4466 return NULL;
4467
4468 return *(ExecRuntime**) ((uint8_t*) u + offset);
4469 }
4470
4471 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4472 assert(u);
4473
4474 if (UNIT_WRITE_FLAGS_NOOP(flags))
4475 return NULL;
4476
4477 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4478 return u->manager->lookup_paths.transient;
4479
4480 if (flags & UNIT_PERSISTENT)
4481 return u->manager->lookup_paths.persistent_control;
4482
4483 if (flags & UNIT_RUNTIME)
4484 return u->manager->lookup_paths.runtime_control;
4485
4486 return NULL;
4487 }
4488
4489 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4490 char *ret = NULL;
4491
4492 if (!s)
4493 return NULL;
4494
4495 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4496 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4497 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4498          * escaped version, but *buf, when passed, only contains a pointer if an allocation was necessary. If *buf is
4499 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4500 * allocations. */
4501
4502 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4503 ret = specifier_escape(s);
4504 if (!ret)
4505 return NULL;
4506
4507 s = ret;
4508 }
4509
4510 if (flags & UNIT_ESCAPE_C) {
4511 char *a;
4512
4513 a = cescape(s);
4514 free(ret);
4515 if (!a)
4516 return NULL;
4517
4518 ret = a;
4519 }
4520
4521 if (buf) {
4522 *buf = ret;
4523 return ret ?: (char*) s;
4524 }
4525
4526 return ret ?: strdup(s);
4527 }
4528
4529 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4530 _cleanup_free_ char *result = NULL;
4531 size_t n = 0, allocated = 0;
4532 char **i;
4533
4534 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4535 * way suitable for ExecStart= stanzas */
4536
4537 STRV_FOREACH(i, l) {
4538 _cleanup_free_ char *buf = NULL;
4539 const char *p;
4540 size_t a;
4541 char *q;
4542
4543 p = unit_escape_setting(*i, flags, &buf);
4544 if (!p)
4545 return NULL;
4546
4547 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4548 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4549 return NULL;
4550
4551 q = result + n;
4552 if (n > 0)
4553 *(q++) = ' ';
4554
4555 *(q++) = '"';
4556 q = stpcpy(q, p);
4557 *(q++) = '"';
4558
4559 n += a;
4560 }
4561
4562 if (!GREEDY_REALLOC(result, allocated, n + 1))
4563 return NULL;
4564
4565 result[n] = 0;
4566
4567 return TAKE_PTR(result);
4568 }
4569
4570 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4571 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4572 const char *dir, *wrapped;
4573 int r;
4574
4575 assert(u);
4576 assert(name);
4577 assert(data);
4578
4579 if (UNIT_WRITE_FLAGS_NOOP(flags))
4580 return 0;
4581
4582 data = unit_escape_setting(data, flags, &escaped);
4583 if (!data)
4584 return -ENOMEM;
4585
4586 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4587 * previous section header is the same */
4588
4589 if (flags & UNIT_PRIVATE) {
4590 if (!UNIT_VTABLE(u)->private_section)
4591 return -EINVAL;
4592
4593 if (!u->transient_file || u->last_section_private < 0)
4594 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4595 else if (u->last_section_private == 0)
4596 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4597 } else {
4598 if (!u->transient_file || u->last_section_private < 0)
4599 data = strjoina("[Unit]\n", data);
4600 else if (u->last_section_private > 0)
4601 data = strjoina("\n[Unit]\n", data);
4602 }
4603
4604 if (u->transient_file) {
4605                 /* When this is a transient unit file still being created, then let's not create a new drop-in but instead
4606 * write to the transient unit file. */
4607 fputs(data, u->transient_file);
4608
4609 if (!endswith(data, "\n"))
4610 fputc('\n', u->transient_file);
4611
4612 /* Remember which section we wrote this entry to */
4613 u->last_section_private = !!(flags & UNIT_PRIVATE);
4614 return 0;
4615 }
4616
4617 dir = unit_drop_in_dir(u, flags);
4618 if (!dir)
4619 return -EINVAL;
4620
4621 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4622 "# or an equivalent operation. Do not edit.\n",
4623 data,
4624 "\n");
4625
4626 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4627 if (r < 0)
4628 return r;
4629
4630 (void) mkdir_p_label(p, 0755);
4631
4632 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4633 * recreate the cache after every drop-in we write. */
4634 if (u->manager->unit_path_cache) {
4635 r = set_put_strdup(u->manager->unit_path_cache, p);
4636 if (r < 0)
4637 return r;
4638 }
4639
4640 r = write_string_file_atomic_label(q, wrapped);
4641 if (r < 0)
4642 return r;
4643
4644 r = strv_push(&u->dropin_paths, q);
4645 if (r < 0)
4646 return r;
4647 q = NULL;
4648
4649 strv_uniq(u->dropin_paths);
4650
4651 u->dropin_mtime = now(CLOCK_REALTIME);
4652
4653 return 0;
4654 }
4655
4656 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4657 _cleanup_free_ char *p = NULL;
4658 va_list ap;
4659 int r;
4660
4661 assert(u);
4662 assert(name);
4663 assert(format);
4664
4665 if (UNIT_WRITE_FLAGS_NOOP(flags))
4666 return 0;
4667
4668 va_start(ap, format);
4669 r = vasprintf(&p, format, ap);
4670 va_end(ap);
4671
4672 if (r < 0)
4673 return -ENOMEM;
4674
4675 return unit_write_setting(u, flags, name, p);
4676 }
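/* A hypothetical usage sketch (not code from this file): a property handler applying a transient setting
 * might call e.g.
 *
 *         r = unit_write_settingf(u, flags | UNIT_ESCAPE_SPECIFIERS, "Description",
 *                                 "Description=%s", text);
 *
 * which formats the assignment, escapes it, and lands it either in the transient unit file or in a
 * 50-Description.conf drop-in, depending on the flags. */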
4677
4678 int unit_make_transient(Unit *u) {
4679 _cleanup_free_ char *path = NULL;
4680 FILE *f;
4681
4682 assert(u);
4683
4684 if (!UNIT_VTABLE(u)->can_transient)
4685 return -EOPNOTSUPP;
4686
4687 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4688
4689 path = path_join(u->manager->lookup_paths.transient, u->id);
4690 if (!path)
4691 return -ENOMEM;
4692
4693 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4694 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4695
4696 RUN_WITH_UMASK(0022) {
4697 f = fopen(path, "we");
4698 if (!f)
4699 return -errno;
4700 }
4701
4702 safe_fclose(u->transient_file);
4703 u->transient_file = f;
4704
4705 free_and_replace(u->fragment_path, path);
4706
4707 u->source_path = mfree(u->source_path);
4708 u->dropin_paths = strv_free(u->dropin_paths);
4709 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4710
4711 u->load_state = UNIT_STUB;
4712 u->load_error = 0;
4713 u->transient = true;
4714
4715 unit_add_to_dbus_queue(u);
4716 unit_add_to_gc_queue(u);
4717
4718 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4719 u->transient_file);
4720
4721 return 0;
4722 }
4723
4724 static int log_kill(pid_t pid, int sig, void *userdata) {
4725 _cleanup_free_ char *comm = NULL;
4726
4727 (void) get_process_comm(pid, &comm);
4728
4729 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4730 only, like for example systemd's own PAM stub process. */
4731 if (comm && comm[0] == '(')
4732 return 0;
4733
4734 log_unit_notice(userdata,
4735 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4736 pid,
4737 strna(comm),
4738 signal_to_string(sig));
4739
4740 return 1;
4741 }
4742
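/* Maps a kill operation to the signal configured for it, and reports via 'noteworthy' whether sending the
 * signal should be logged. */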
4743 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4744 assert(c);
4745
4746 switch (k) {
4747
4748 case KILL_TERMINATE:
4749 case KILL_TERMINATE_AND_LOG:
4750 *noteworthy = false;
4751 return c->kill_signal;
4752
4753 case KILL_RESTART:
4754 *noteworthy = false;
4755 return restart_kill_signal(c);
4756
4757 case KILL_KILL:
4758 *noteworthy = true;
4759 return c->final_kill_signal;
4760
4761 case KILL_WATCHDOG:
4762 *noteworthy = true;
4763 return c->watchdog_signal;
4764
4765 default:
4766 assert_not_reached("KillOperation unknown");
4767 }
4768 }
4769
4770 int unit_kill_context(
4771 Unit *u,
4772 KillContext *c,
4773 KillOperation k,
4774 pid_t main_pid,
4775 pid_t control_pid,
4776 bool main_pid_alien) {
4777
4778 bool wait_for_exit = false, send_sighup;
4779 cg_kill_log_func_t log_func = NULL;
4780 int sig, r;
4781
4782 assert(u);
4783 assert(c);
4784
4785 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4786 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4787
4788 if (c->kill_mode == KILL_NONE)
4789 return 0;
4790
4791 bool noteworthy;
4792 sig = operation_to_signal(c, k, &noteworthy);
4793 if (noteworthy)
4794 log_func = log_kill;
4795
4796 send_sighup =
4797 c->send_sighup &&
4798 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4799 sig != SIGHUP;
4800
4801 if (main_pid > 0) {
4802 if (log_func)
4803 log_func(main_pid, sig, u);
4804
4805 r = kill_and_sigcont(main_pid, sig);
4806 if (r < 0 && r != -ESRCH) {
4807 _cleanup_free_ char *comm = NULL;
4808 (void) get_process_comm(main_pid, &comm);
4809
4810 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4811 } else {
4812 if (!main_pid_alien)
4813 wait_for_exit = true;
4814
4815 if (r != -ESRCH && send_sighup)
4816 (void) kill(main_pid, SIGHUP);
4817 }
4818 }
4819
4820 if (control_pid > 0) {
4821 if (log_func)
4822 log_func(control_pid, sig, u);
4823
4824 r = kill_and_sigcont(control_pid, sig);
4825 if (r < 0 && r != -ESRCH) {
4826 _cleanup_free_ char *comm = NULL;
4827 (void) get_process_comm(control_pid, &comm);
4828
4829 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4830 } else {
4831 wait_for_exit = true;
4832
4833 if (r != -ESRCH && send_sighup)
4834 (void) kill(control_pid, SIGHUP);
4835 }
4836 }
4837
4838 if (u->cgroup_path &&
4839 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4840 _cleanup_set_free_ Set *pid_set = NULL;
4841
4842 /* Exclude the main/control pids from being killed via the cgroup */
4843 pid_set = unit_pid_set(main_pid, control_pid);
4844 if (!pid_set)
4845 return -ENOMEM;
4846
4847 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4848 sig,
4849 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4850 pid_set,
4851 log_func, u);
4852 if (r < 0) {
4853 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4854 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4855
4856 } else if (r > 0) {
4857
4858 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4859 * we are running in a container or if this is a delegation unit, simply because cgroup
4860 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4861 * of containers it can be confused easily by left-over directories in the cgroup — which
4862 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4863 * there we get proper events. Hence rely on them. */
4864
4865 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4866 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4867 wait_for_exit = true;
4868
4869 if (send_sighup) {
4870 set_free(pid_set);
4871
4872 pid_set = unit_pid_set(main_pid, control_pid);
4873 if (!pid_set)
4874 return -ENOMEM;
4875
4876 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4877 SIGHUP,
4878 CGROUP_IGNORE_SELF,
4879 pid_set,
4880 NULL, NULL);
4881 }
4882 }
4883 }
4884
4885 return wait_for_exit;
4886 }
4887
4888 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4889 _cleanup_free_ char *p = NULL;
4890 UnitDependencyInfo di;
4891 int r;
4892
4893 assert(u);
4894 assert(path);
4895
4896         /* Registers a unit as requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4897          * the unit (mapping each path to the UnitDependencyInfo structure indicating how the dependency came to
4898          * be). In addition, we build a prefix table of all possible prefixes, so that newly appearing mount units
4899          * can easily determine which units to make themselves a dependency of. */
4900
4901 if (!path_is_absolute(path))
4902 return -EINVAL;
4903
4904 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4905 if (r < 0)
4906 return r;
4907
4908 p = strdup(path);
4909 if (!p)
4910 return -ENOMEM;
4911
4912 path = path_simplify(p, true);
4913
4914 if (!path_is_normalized(path))
4915 return -EPERM;
4916
4917 if (hashmap_contains(u->requires_mounts_for, path))
4918 return 0;
4919
4920 di = (UnitDependencyInfo) {
4921 .origin_mask = mask
4922 };
4923
4924 r = hashmap_put(u->requires_mounts_for, path, di.data);
4925 if (r < 0)
4926 return r;
4927 p = NULL;
4928
4929 char prefix[strlen(path) + 1];
4930 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4931 Set *x;
4932
4933 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4934 if (!x) {
4935 _cleanup_free_ char *q = NULL;
4936
4937 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4938 if (r < 0)
4939 return r;
4940
4941 q = strdup(prefix);
4942 if (!q)
4943 return -ENOMEM;
4944
4945 x = set_new(NULL);
4946 if (!x)
4947 return -ENOMEM;
4948
4949 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4950 if (r < 0) {
4951 set_free(x);
4952 return r;
4953 }
4954 q = NULL;
4955 }
4956
4957 r = set_put(x, u);
4958 if (r < 0)
4959 return r;
4960 }
4961
4962 return 0;
4963 }
4964
4965 int unit_setup_exec_runtime(Unit *u) {
4966 ExecRuntime **rt;
4967 size_t offset;
4968 Unit *other;
4969 Iterator i;
4970 void *v;
4971 int r;
4972
4973 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4974 assert(offset > 0);
4975
4976         /* Check whether there already is an ExecRuntime for this unit */
4977 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4978 if (*rt)
4979 return 0;
4980
4981 /* Try to get it from somebody else */
4982 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4983 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4984 if (r == 1)
4985 return 1;
4986 }
4987
4988 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4989 }
4990
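/* Acquires the DynamicUser= credentials for this unit, allocating or referencing the dynamic UID/GID as
 * necessary. A NOP if DynamicUser= is off. */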
4991 int unit_setup_dynamic_creds(Unit *u) {
4992 ExecContext *ec;
4993 DynamicCreds *dcreds;
4994 size_t offset;
4995
4996 assert(u);
4997
4998 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4999 assert(offset > 0);
5000 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
5001
5002 ec = unit_get_exec_context(u);
5003 assert(ec);
5004
5005 if (!ec->dynamic_user)
5006 return 0;
5007
5008 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
5009 }
5010
5011 bool unit_type_supported(UnitType t) {
5012 if (_unlikely_(t < 0))
5013 return false;
5014 if (_unlikely_(t >= _UNIT_TYPE_MAX))
5015 return false;
5016
5017 if (!unit_vtable[t]->supported)
5018 return true;
5019
5020 return unit_vtable[t]->supported();
5021 }
5022
5023 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5024 int r;
5025
5026 assert(u);
5027 assert(where);
5028
5029 r = dir_is_empty(where);
5030 if (r > 0 || r == -ENOTDIR)
5031 return;
5032 if (r < 0) {
5033 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5034 return;
5035 }
5036
5037 log_struct(LOG_NOTICE,
5038 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5039 LOG_UNIT_ID(u),
5040 LOG_UNIT_INVOCATION_ID(u),
5041 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5042 "WHERE=%s", where);
5043 }
5044
5045 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5046 _cleanup_free_ char *canonical_where = NULL;
5047 int r;
5048
5049 assert(u);
5050 assert(where);
5051
5052 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5053 if (r < 0) {
5054 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5055 return 0;
5056 }
5057
5058 /* We will happily ignore a trailing slash (or any redundant slashes) */
5059 if (path_equal(where, canonical_where))
5060 return 0;
5061
5062 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5063 log_struct(LOG_ERR,
5064 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5065 LOG_UNIT_ID(u),
5066 LOG_UNIT_INVOCATION_ID(u),
5067 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5068 "WHERE=%s", where);
5069
5070 return -ELOOP;
5071 }
5072
5073 bool unit_is_pristine(Unit *u) {
5074 assert(u);
5075
5076 /* Check if the unit already exists or is already around,
5077 * in a number of different ways. Note that to cater for unit
5078 * types such as slice, we are generally fine with units that
5079 * are marked UNIT_LOADED even though nothing was actually
5080 * loaded, as those unit types don't require a file on disk. */
5081
5082         return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
5083                !u->fragment_path &&
5084                !u->source_path &&
5085                strv_isempty(u->dropin_paths) &&
5086                !u->job &&
5087                !u->merged_into;
5088 }
5089
5090 pid_t unit_control_pid(Unit *u) {
5091 assert(u);
5092
5093 if (UNIT_VTABLE(u)->control_pid)
5094 return UNIT_VTABLE(u)->control_pid(u);
5095
5096 return 0;
5097 }
5098
5099 pid_t unit_main_pid(Unit *u) {
5100 assert(u);
5101
5102 if (UNIT_VTABLE(u)->main_pid)
5103 return UNIT_VTABLE(u)->main_pid(u);
5104
5105 return 0;
5106 }
5107
5108 static void unit_unref_uid_internal(
5109 Unit *u,
5110 uid_t *ref_uid,
5111 bool destroy_now,
5112 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5113
5114 assert(u);
5115 assert(ref_uid);
5116 assert(_manager_unref_uid);
5117
5118 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5119          * gid_t are actually the same type, with the same validity rules.
5120 *
5121 * Drops a reference to UID/GID from a unit. */
5122
5123 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5124 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5125
5126 if (!uid_is_valid(*ref_uid))
5127 return;
5128
5129 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5130 *ref_uid = UID_INVALID;
5131 }
5132
5133 static void unit_unref_uid(Unit *u, bool destroy_now) {
5134 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5135 }
5136
5137 static void unit_unref_gid(Unit *u, bool destroy_now) {
5138 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5139 }
5140
5141 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5142 assert(u);
5143
5144 unit_unref_uid(u, destroy_now);
5145 unit_unref_gid(u, destroy_now);
5146 }
5147
5148 static int unit_ref_uid_internal(
5149 Unit *u,
5150 uid_t *ref_uid,
5151 uid_t uid,
5152 bool clean_ipc,
5153 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5154
5155 int r;
5156
5157 assert(u);
5158 assert(ref_uid);
5159 assert(uid_is_valid(uid));
5160 assert(_manager_ref_uid);
5161
5162         /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
5163 * are actually the same type, and have the same validity rules.
5164 *
5165 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5166 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5167 * drops to zero. */
5168
5169 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5170 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5171
5172 if (*ref_uid == uid)
5173 return 0;
5174
5175 if (uid_is_valid(*ref_uid)) /* Already set? */
5176 return -EBUSY;
5177
5178 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5179 if (r < 0)
5180 return r;
5181
5182 *ref_uid = uid;
5183 return 1;
5184 }
5185
5186 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5187 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5188 }
5189
5190 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5191 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5192 }
5193
5194 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5195 int r = 0, q = 0;
5196
5197 assert(u);
5198
5199 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5200
5201 if (uid_is_valid(uid)) {
5202 r = unit_ref_uid(u, uid, clean_ipc);
5203 if (r < 0)
5204 return r;
5205 }
5206
5207 if (gid_is_valid(gid)) {
5208 q = unit_ref_gid(u, gid, clean_ipc);
5209 if (q < 0) {
5210 if (r > 0)
5211 unit_unref_uid(u, false);
5212
5213 return q;
5214 }
5215 }
5216
5217 return r > 0 || q > 0;
5218 }
5219
5220 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5221 ExecContext *c;
5222 int r;
5223
5224 assert(u);
5225
5226 c = unit_get_exec_context(u);
5227
5228 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5229 if (r < 0)
5230 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5231
5232 return r;
5233 }
5234
5235 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5236 int r;
5237
5238 assert(u);
5239
5240         /* This is invoked whenever one of the forked-off processes lets us know the UID/GID its user name/group names
5241 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5242 * objects when no service references the UID/GID anymore. */
5243
5244 r = unit_ref_uid_gid(u, uid, gid);
5245 if (r > 0)
5246 unit_add_to_dbus_queue(u);
5247 }
5248
5249 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5250 int r;
5251
5252 assert(u);
5253
5254 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5255
5256 if (sd_id128_equal(u->invocation_id, id))
5257 return 0;
5258
5259 if (!sd_id128_is_null(u->invocation_id))
5260 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5261
5262 if (sd_id128_is_null(id)) {
5263 r = 0;
5264 goto reset;
5265 }
5266
5267 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5268 if (r < 0)
5269 goto reset;
5270
5271 u->invocation_id = id;
5272 sd_id128_to_string(id, u->invocation_id_string);
5273
5274 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5275 if (r < 0)
5276 goto reset;
5277
5278 return 0;
5279
5280 reset:
5281 u->invocation_id = SD_ID128_NULL;
5282 u->invocation_id_string[0] = 0;
5283 return r;
5284 }
5285
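/* Generates a fresh, random invocation ID for this unit and registers it. */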
5286 int unit_acquire_invocation_id(Unit *u) {
5287 sd_id128_t id;
5288 int r;
5289
5290 assert(u);
5291
5292 r = sd_id128_randomize(&id);
5293 if (r < 0)
5294 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5295
5296 r = unit_set_invocation_id(u, id);
5297 if (r < 0)
5298 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5299
5300 unit_add_to_dbus_queue(u);
5301 return 0;
5302 }
5303
5304 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5305 int r;
5306
5307 assert(u);
5308 assert(p);
5309
5310 /* Copy parameters from manager */
5311 r = manager_get_effective_environment(u->manager, &p->environment);
5312 if (r < 0)
5313 return r;
5314
5315 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5316 p->cgroup_supported = u->manager->cgroup_supported;
5317 p->prefix = u->manager->prefix;
5318 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5319
5320 /* Copy parameters from unit */
5321 p->cgroup_path = u->cgroup_path;
5322 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5323
5324 return 0;
5325 }
5326
5327 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5328 int r;
5329
5330 assert(u);
5331 assert(ret);
5332
5333 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5334 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5335
5336 (void) unit_realize_cgroup(u);
5337
5338 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5339 if (r != 0)
5340 return r;
5341
5342 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5343 (void) ignore_signals(SIGPIPE, -1);
5344
5345 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5346
5347 if (u->cgroup_path) {
5348 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5349 if (r < 0) {
5350 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5351 _exit(EXIT_CGROUP);
5352 }
5353 }
5354
5355 return 0;
5356 }
5357
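/* Forks off a helper process, joined to the unit's cgroup, that recursively removes the specified paths, and
 * registers it with unit_watch_pid() so that we are notified when it exits. */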
5358 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5359 pid_t pid;
5360 int r;
5361
5362 assert(u);
5363 assert(ret_pid);
5364
5365 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5366 if (r < 0)
5367 return r;
5368 if (r == 0) {
5369 int ret = EXIT_SUCCESS;
5370 char **i;
5371
5372 STRV_FOREACH(i, paths) {
5373 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5374 if (r < 0) {
5375 log_error_errno(r, "Failed to remove '%s': %m", *i);
5376 ret = EXIT_FAILURE;
5377 }
5378 }
5379
5380 _exit(ret);
5381 }
5382
5383 r = unit_watch_pid(u, pid, true);
5384 if (r < 0)
5385 return r;
5386
5387 *ret_pid = pid;
5388 return 0;
5389 }
5390
5391 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5392 assert(u);
5393 assert(d >= 0);
5394 assert(d < _UNIT_DEPENDENCY_MAX);
5395 assert(other);
5396
5397 if (di.origin_mask == 0 && di.destination_mask == 0) {
5398 /* No bit set anymore, let's drop the whole entry */
5399 assert_se(hashmap_remove(u->dependencies[d], other));
5400 log_unit_debug(u, "lost dependency %s=%s", unit_dependency_to_string(d), other->id);
5401 } else
5402 /* Mask was reduced, let's update the entry */
5403 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5404 }
5405
5406 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5407 UnitDependency d;
5408
5409 assert(u);
5410
5411 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5412
5413 if (mask == 0)
5414 return;
5415
5416 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5417 bool done;
5418
5419 do {
5420 UnitDependencyInfo di;
5421 Unit *other;
5422 Iterator i;
5423
5424 done = true;
5425
5426 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5427 UnitDependency q;
5428
5429 if ((di.origin_mask & ~mask) == di.origin_mask)
5430 continue;
5431 di.origin_mask &= ~mask;
5432 unit_update_dependency_mask(u, d, other, di);
5433
5434 /* We updated the dependency from our unit to the other unit now. But most dependencies
5435 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5436 * all dependency types on the other unit and delete all those which point to us and
5437 * have the right mask set. */
5438
5439 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5440 UnitDependencyInfo dj;
5441
5442 dj.data = hashmap_get(other->dependencies[q], u);
5443 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5444 continue;
5445 dj.destination_mask &= ~mask;
5446
5447 unit_update_dependency_mask(other, q, u, dj);
5448 }
5449
5450 unit_add_to_gc_queue(other);
5451
5452 done = false;
5453 break;
5454 }
5455
5456 } while (!done);
5457 }
5458 }
5459
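/* Returns the path of the per-unit invocation ID symlink: below /run/systemd/units/ for the system manager,
 * and below the user's runtime directory for user managers. */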
5460 static int unit_get_invocation_path(Unit *u, char **ret) {
5461 char *p;
5462 int r;
5463
5464 assert(u);
5465 assert(ret);
5466
5467 if (MANAGER_IS_SYSTEM(u->manager))
5468 p = strjoin("/run/systemd/units/invocation:", u->id);
5469 else {
5470 _cleanup_free_ char *user_path = NULL;
5471 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5472 if (r < 0)
5473 return r;
5474 p = strjoin(user_path, u->id);
5475 }
5476
5477 if (!p)
5478 return -ENOMEM;
5479
5480 *ret = p;
5481 return 0;
5482 }
5483
5484 static int unit_export_invocation_id(Unit *u) {
5485 _cleanup_free_ char *p = NULL;
5486 int r;
5487
5488 assert(u);
5489
5490 if (u->exported_invocation_id)
5491 return 0;
5492
5493 if (sd_id128_is_null(u->invocation_id))
5494 return 0;
5495
5496 r = unit_get_invocation_path(u, &p);
5497 if (r < 0)
5498 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5499
5500 r = symlink_atomic(u->invocation_id_string, p);
5501 if (r < 0)
5502 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5503
5504 u->exported_invocation_id = true;
5505 return 0;
5506 }
5507
5508 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5509 const char *p;
5510 char buf[2];
5511 int r;
5512
5513 assert(u);
5514 assert(c);
5515
5516 if (u->exported_log_level_max)
5517 return 0;
5518
5519 if (c->log_level_max < 0)
5520 return 0;
5521
5522 assert(c->log_level_max <= 7);
5523
5524 buf[0] = '0' + c->log_level_max;
5525 buf[1] = 0;
5526
5527 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5528 r = symlink_atomic(buf, p);
5529 if (r < 0)
5530 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5531
5532 u->exported_log_level_max = true;
5533 return 0;
5534 }
5535
5536 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5537 _cleanup_close_ int fd = -1;
5538 struct iovec *iovec;
5539 const char *p;
5540 char *pattern;
5541 le64_t *sizes;
5542 ssize_t n;
5543 size_t i;
5544 int r;
5545
5546 if (u->exported_log_extra_fields)
5547 return 0;
5548
5549 if (c->n_log_extra_fields <= 0)
5550 return 0;
5551
5552 sizes = newa(le64_t, c->n_log_extra_fields);
5553 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5554
5555 for (i = 0; i < c->n_log_extra_fields; i++) {
5556 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5557
5558 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5559 iovec[i*2+1] = c->log_extra_fields[i];
5560 }
5561
5562 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5563 pattern = strjoina(p, ".XXXXXX");
5564
5565 fd = mkostemp_safe(pattern);
5566 if (fd < 0)
5567 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5568
5569 n = writev(fd, iovec, c->n_log_extra_fields*2);
5570 if (n < 0) {
5571 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5572 goto fail;
5573 }
5574
5575 (void) fchmod(fd, 0644);
5576
5577 if (rename(pattern, p) < 0) {
5578 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5579 goto fail;
5580 }
5581
5582 u->exported_log_extra_fields = true;
5583 return 0;
5584
5585 fail:
5586 (void) unlink(pattern);
5587 return r;
5588 }
5589
5590 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5591 _cleanup_free_ char *buf = NULL;
5592 const char *p;
5593 int r;
5594
5595 assert(u);
5596 assert(c);
5597
5598 if (u->exported_log_ratelimit_interval)
5599 return 0;
5600
5601 if (c->log_ratelimit_interval_usec == 0)
5602 return 0;
5603
5604 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5605
5606 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5607 return log_oom();
5608
5609 r = symlink_atomic(buf, p);
5610 if (r < 0)
5611 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5612
5613 u->exported_log_ratelimit_interval = true;
5614 return 0;
5615 }
5616
5617 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5618 _cleanup_free_ char *buf = NULL;
5619 const char *p;
5620 int r;
5621
5622 assert(u);
5623 assert(c);
5624
5625 if (u->exported_log_ratelimit_burst)
5626 return 0;
5627
5628 if (c->log_ratelimit_burst == 0)
5629 return 0;
5630
5631 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5632
5633 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5634 return log_oom();
5635
5636 r = symlink_atomic(buf, p);
5637 if (r < 0)
5638 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5639
5640 u->exported_log_ratelimit_burst = true;
5641 return 0;
5642 }
5643
5644 void unit_export_state_files(Unit *u) {
5645 const ExecContext *c;
5646
5647 assert(u);
5648
5649 if (!u->id)
5650 return;
5651
5652 if (MANAGER_IS_TEST_RUN(u->manager))
5653 return;
5654
5655 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5656 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5657 * the IPC system itself and PID 1 also log to the journal.
5658 *
5659          * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system
5660          * as an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5661 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5662 * namespace at least.
5663 *
5664 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5665 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5666 * them with one. */
5667
5668 (void) unit_export_invocation_id(u);
5669
5670 if (!MANAGER_IS_SYSTEM(u->manager))
5671 return;
5672
5673 c = unit_get_exec_context(u);
5674 if (c) {
5675 (void) unit_export_log_level_max(u, c);
5676 (void) unit_export_log_extra_fields(u, c);
5677 (void) unit_export_log_ratelimit_interval(u, c);
5678 (void) unit_export_log_ratelimit_burst(u, c);
5679 }
5680 }
5681
5682 void unit_unlink_state_files(Unit *u) {
5683 const char *p;
5684
5685 assert(u);
5686
5687 if (!u->id)
5688 return;
5689
5690 /* Undoes the effect of unit_export_state() */
5691
5692 if (u->exported_invocation_id) {
5693 _cleanup_free_ char *invocation_path = NULL;
5694 int r = unit_get_invocation_path(u, &invocation_path);
5695 if (r >= 0) {
5696 (void) unlink(invocation_path);
5697 u->exported_invocation_id = false;
5698 }
5699 }
5700
5701 if (!MANAGER_IS_SYSTEM(u->manager))
5702 return;
5703
5704 if (u->exported_log_level_max) {
5705 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5706 (void) unlink(p);
5707
5708 u->exported_log_level_max = false;
5709 }
5710
5711 if (u->exported_log_extra_fields) {
5712 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5713 (void) unlink(p);
5714
5715 u->exported_log_extra_fields = false;
5716 }
5717
5718 if (u->exported_log_ratelimit_interval) {
5719 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5720 (void) unlink(p);
5721
5722 u->exported_log_ratelimit_interval = false;
5723 }
5724
5725 if (u->exported_log_ratelimit_burst) {
5726 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5727 (void) unlink(p);
5728
5729 u->exported_log_ratelimit_burst = false;
5730 }
5731 }
5732
5733 int unit_prepare_exec(Unit *u) {
5734 int r;
5735
5736 assert(u);
5737
5738         /* Load any custom firewall BPF programs here once, to test whether they exist and are actually loadable.
5739          * Fail early here, since later errors in the call chain from unit_realize_cgroup() to cgroup_context_apply() are ignored. */
5740 r = bpf_firewall_load_custom(u);
5741 if (r < 0)
5742 return r;
5743
5744         /* Prepares everything so that we can fork off a process for this unit */
5745
5746 (void) unit_realize_cgroup(u);
5747
5748 if (u->reset_accounting) {
5749 (void) unit_reset_accounting(u);
5750 u->reset_accounting = false;
5751 }
5752
5753 unit_export_state_files(u);
5754
5755 r = unit_setup_exec_runtime(u);
5756 if (r < 0)
5757 return r;
5758
5759 r = unit_setup_dynamic_creds(u);
5760 if (r < 0)
5761 return r;
5762
5763 return 0;
5764 }
5765
5766 static int log_leftover(pid_t pid, int sig, void *userdata) {
5767 _cleanup_free_ char *comm = NULL;
5768
5769 (void) get_process_comm(pid, &comm);
5770
5771 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5772 return 0;
5773
5774 log_unit_warning(userdata,
5775 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5776 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5777 pid, strna(comm));
5778
5779 return 1;
5780 }
5781
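/* Warns about processes already present in the unit's cgroup before we start it. Note that passing signal 0
 * to cg_kill_recursive() means nothing is actually killed; log_leftover() is merely invoked for each PID. */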
5782 int unit_warn_leftover_processes(Unit *u) {
5783 assert(u);
5784
5785 (void) unit_pick_cgroup_path(u);
5786
5787 if (!u->cgroup_path)
5788 return 0;
5789
5790 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5791 }
5792
5793 bool unit_needs_console(Unit *u) {
5794 ExecContext *ec;
5795 UnitActiveState state;
5796
5797 assert(u);
5798
5799 state = unit_active_state(u);
5800
5801 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5802 return false;
5803
5804 if (UNIT_VTABLE(u)->needs_console)
5805 return UNIT_VTABLE(u)->needs_console(u);
5806
5807 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5808 ec = unit_get_exec_context(u);
5809 if (!ec)
5810 return false;
5811
5812 return exec_context_may_touch_console(ec);
5813 }
5814
5815 const char *unit_label_path(const Unit *u) {
5816 const char *p;
5817
5818 assert(u);
5819
5820 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5821 * when validating access checks. */
5822
5823 p = u->source_path ?: u->fragment_path;
5824 if (!p)
5825 return NULL;
5826
5827 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5828 if (path_equal(p, "/dev/null"))
5829 return NULL;
5830
5831 return p;
5832 }
5833
5834 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5835 int r;
5836
5837 assert(u);
5838
5839 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5840 * and not a kernel thread either */
5841
5842 /* First, a simple range check */
5843 if (!pid_is_valid(pid))
5844 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5845
5846 /* Some extra safety check */
5847 if (pid == 1 || pid == getpid_cached())
5848 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5849
5850 /* Don't even begin to bother with kernel threads */
5851 r = is_kernel_thread(pid);
5852 if (r == -ESRCH)
5853 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5854 if (r < 0)
5855 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5856 if (r > 0)
5857 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5858
5859 return 0;
5860 }
5861
5862 void unit_log_success(Unit *u) {
5863 assert(u);
5864
5865 log_struct(LOG_INFO,
5866 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5867 LOG_UNIT_ID(u),
5868 LOG_UNIT_INVOCATION_ID(u),
5869 LOG_UNIT_MESSAGE(u, "Succeeded."));
5870 }
5871
5872 void unit_log_failure(Unit *u, const char *result) {
5873 assert(u);
5874 assert(result);
5875
5876 log_struct(LOG_WARNING,
5877 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5878 LOG_UNIT_ID(u),
5879 LOG_UNIT_INVOCATION_ID(u),
5880 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5881 "UNIT_RESULT=%s", result);
5882 }
5883
5884 void unit_log_skip(Unit *u, const char *result) {
5885 assert(u);
5886 assert(result);
5887
5888 log_struct(LOG_INFO,
5889 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5890 LOG_UNIT_ID(u),
5891 LOG_UNIT_INVOCATION_ID(u),
5892 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5893 "UNIT_RESULT=%s", result);
5894 }
5895
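/* Editor's note (illustrative sketch, not part of the original unit.c): the three helpers
 * above attach machine-readable fields (MESSAGE_ID=, UNIT_RESULT=, ...) to each journal
 * entry, so the records can be queried later, e.g. with "journalctl MESSAGE_ID=...".
 * Outside of PID 1, the public libsystemd equivalent of log_struct() is sd_journal_send();
 * a minimal sketch follows (link with -lsystemd; the MESSAGE_ID below is a made-up
 * placeholder, not a real systemd message ID). */
#if 0
#include <syslog.h>
#include <systemd/sd-journal.h>

static void log_unit_result(const char *unit, const char *result) {
        /* Each argument is one "FIELD=value" entry, printf-formatted; the list is
         * NULL-terminated. */
        sd_journal_send("MESSAGE=%s: failed with result '%s'.", unit, result,
                        "MESSAGE_ID=00112233445566778899aabbccddeeff",
                        "UNIT=%s", unit,
                        "UNIT_RESULT=%s", result,
                        "PRIORITY=%i", LOG_WARNING,
                        NULL);
}
#endif
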
5896 void unit_log_process_exit(
5897 Unit *u,
5898 const char *kind,
5899 const char *command,
5900 bool success,
5901 int code,
5902 int status) {
5903
5904 int level;
5905
5906 assert(u);
5907 assert(kind);
5908
5909 /* If this is a successful exit, log the exit status at DEBUG level. If this is a failure
5910 * and the process exited on its own via exit(), make it a NOTICE, on the assumption that
5911 * the service already logged the reason at a higher log level on its own. Otherwise, make
5912 * it a WARNING. */
5913 if (success)
5914 level = LOG_DEBUG;
5915 else if (code == CLD_EXITED)
5916 level = LOG_NOTICE;
5917 else
5918 level = LOG_WARNING;
5919
5920 log_struct(level,
5921 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5922 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5923 kind,
5924 sigchld_code_to_string(code), status,
5925 strna(code == CLD_EXITED
5926 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5927 : signal_to_string(status))),
5928 "EXIT_CODE=%s", sigchld_code_to_string(code),
5929 "EXIT_STATUS=%i", status,
5930 "COMMAND=%s", strna(command),
5931 LOG_UNIT_ID(u),
5932 LOG_UNIT_INVOCATION_ID(u));
5933 }
5934
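/* Editor's note (illustrative sketch, not part of the original unit.c): the (code, status)
 * pair logged above follows the waitid(2)/siginfo_t convention: when code is CLD_EXITED,
 * status is the exit(3) value, and for CLD_KILLED or CLD_DUMPED it is the terminating
 * signal. A standalone decoder using only libc; reap_and_report() is a hypothetical name. */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>

static int reap_and_report(pid_t pid) {
        siginfo_t si = {};

        if (waitid(P_PID, (id_t) pid, &si, WEXITED) < 0)
                return -1;

        if (si.si_code == CLD_EXITED)
                printf("exited, status=%i\n", si.si_status);
        else
                /* CLD_KILLED or CLD_DUMPED: si_status is a signal number. */
                printf("terminated by signal %s\n", strsignal(si.si_status));

        return 0;
}
#endif
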
5935 int unit_exit_status(Unit *u) {
5936 assert(u);
5937
5938 /* Returns the exit status to propagate for the most recent cycle of this unit: a value in the range
5939 * 0…255 if there's something to propagate, -EOPNOTSUPP if the concept does not apply to this unit
5940 * type, -ENODATA if no data is currently known (for example because the unit hasn't deactivated yet),
5941 * and -EBADE if the main service process exited abnormally (i.e. by signal or with a coredump). */
5942
5943 if (!UNIT_VTABLE(u)->exit_status)
5944 return -EOPNOTSUPP;
5945
5946 return UNIT_VTABLE(u)->exit_status(u);
5947 }
5948
5949 int unit_failure_action_exit_status(Unit *u) {
5950 int r;
5951
5952 assert(u);
5953
5954 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5955
5956 if (u->failure_action_exit_status >= 0)
5957 return u->failure_action_exit_status;
5958
5959 r = unit_exit_status(u);
5960 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5961 return 255;
5962
5963 return r;
5964 }
5965
5966 int unit_success_action_exit_status(Unit *u) {
5967 int r;
5968
5969 assert(u);
5970
5971 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5972
5973 if (u->success_action_exit_status >= 0)
5974 return u->success_action_exit_status;
5975
5976 r = unit_exit_status(u);
5977 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5978 return 255;
5979
5980 return r;
5981 }
5982
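/* Editor's note (illustrative sketch, not part of the original unit.c): both helpers above
 * prefer an explicitly configured value (FailureActionExitStatus=/SuccessActionExitStatus=),
 * then fall back to the unit's own exit status, translating an abnormal death (-EBADE) into
 * the conventional catch-all 255. Other negative codes (-EOPNOTSUPP, -ENODATA) still reach
 * the caller. A hypothetical caller-side use, compiled out with #if 0: */
#if 0
#include <stdlib.h>

/* r is assumed to be the result of one of the helpers above: either a ready-to-use
 * exit code in the range 0…255 or a negative errno-style value. */
static int pick_manager_exit_code(int r) {
        if (r >= 0)
                return r;

        return EXIT_FAILURE;    /* -EOPNOTSUPP/-ENODATA: nothing to propagate */
}
#endif
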
5983 int unit_test_trigger_loaded(Unit *u) {
5984 Unit *trigger;
5985
5986 /* Tests whether the unit to trigger is loaded */
5987
5988 trigger = UNIT_TRIGGER(u);
5989 if (!trigger)
5990 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5991 "Refusing to start, no unit to trigger.");
5992 if (trigger->load_state != UNIT_LOADED)
5993 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5994 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5995
5996 return 0;
5997 }
5998
5999 void unit_destroy_runtime_directory(Unit *u, const ExecContext *context) {
6000 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
6001 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
6002 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6003 }
6004
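/* Editor's note (illustrative sketch, not part of the original unit.c): the condition above
 * mirrors RuntimeDirectoryPreserve=, which takes "no", "yes" or "restart": the runtime
 * directory is destroyed when the mode is "no", or when it is "restart" and no restart is
 * pending. A standalone restatement of that decision; the names below are hypothetical
 * stand-ins for the ExecPreserveMode values. */
#if 0
#include <stdbool.h>

typedef enum PreserveMode {
        PRESERVE_NO,
        PRESERVE_YES,
        PRESERVE_RESTART,
} PreserveMode;

static bool should_destroy_runtime_dir(PreserveMode m, bool will_restart) {
        return m == PRESERVE_NO ||
                (m == PRESERVE_RESTART && !will_restart);
}
#endif
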
6005 int unit_clean(Unit *u, ExecCleanMask mask) {
6006 UnitActiveState state;
6007
6008 assert(u);
6009
6010 /* Special return values:
6011 *
6012 * -EOPNOTSUPP → cleaning not supported for this unit type
6013 * -EUNATCH → cleaning not defined for this resource type
6014 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6015 * a job queued or similar
6016 */
6017
6018 if (!UNIT_VTABLE(u)->clean)
6019 return -EOPNOTSUPP;
6020
6021 if (mask == 0)
6022 return -EUNATCH;
6023
6024 if (u->load_state != UNIT_LOADED)
6025 return -EBUSY;
6026
6027 if (u->job)
6028 return -EBUSY;
6029
6030 state = unit_active_state(u);
6031 if (state != UNIT_INACTIVE)
6032 return -EBUSY;
6033
6034 return UNIT_VTABLE(u)->clean(u, mask);
6035 }
6036
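/* Editor's note (illustrative sketch, not part of the original unit.c): the guard ladder in
 * unit_clean() deliberately uses distinct errno codes so that callers, e.g. the D-Bus method
 * behind "systemctl clean", can report the exact reason for a refusal. A hypothetical
 * caller-side mapping: */
#if 0
#include <errno.h>

static const char *clean_error_to_string(int r) {
        switch (r) {
        case -EOPNOTSUPP:
                return "Unit type does not support cleaning";
        case -EUNATCH:
                return "No matching resources configured for this unit";
        case -EBUSY:
                return "Unit is running, not properly loaded, or has a job queued";
        default:
                return r < 0 ? "Failed to clean unit" : "OK";
        }
}
#endif
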
6037 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6038 assert(u);
6039
6040 if (!UNIT_VTABLE(u)->clean ||
6041 u->load_state != UNIT_LOADED) {
6042 *ret = 0;
6043 return 0;
6044 }
6045
6046 /* When the clean() method is set, can_clean() really should be set too */
6047 assert(UNIT_VTABLE(u)->can_clean);
6048
6049 return UNIT_VTABLE(u)->can_clean(u, ret);
6050 }
6051
6052 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6053 [COLLECT_INACTIVE] = "inactive",
6054 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6055 };
6056
6057 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
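/* Editor's note (illustrative sketch, not part of the original unit.c): the macro above,
 * defined in src/basic/string-table.h, generates collect_mode_to_string() and
 * collect_mode_from_string() from collect_mode_table[]. Roughly, hand-written equivalents
 * would look like the sketch below (the generated code is more general; the _sketch suffix
 * marks these as hypothetical): */
#if 0
#include <string.h>

static const char *collect_mode_to_string_sketch(CollectMode m) {
        if (m < 0 || m >= _COLLECT_MODE_MAX)
                return NULL;
        return collect_mode_table[m];
}

static CollectMode collect_mode_from_string_sketch(const char *s) {
        for (CollectMode m = 0; m < _COLLECT_MODE_MAX; m++)
                if (collect_mode_table[m] && strcmp(collect_mode_table[m], s) == 0)
                        return m;
        return _COLLECT_MODE_INVALID;
}
#endif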