/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <linux/capability.h>

#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "ansi-color.h"
#include "bpf-firewall.h"
#include "bpf-restrict-fs.h"
#include "bus-common-errors.h"
#include "bus-internal.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "chattr-util.h"
#include "condition.h"
#include "dbus-unit.h"
#include "dynamic-user.h"
#include "exec-credential.h"
#include "format-util.h"
#include "id128-util.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "logarithm.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "mountpoint-util.h"
#include "path-util.h"
#include "process-util.h"
#include "quota-util.h"
#include "serialize.h"
#include "signal-util.h"
#include "siphash24.h"
#include "sparse-endian.h"
#include "specifier.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"

/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC     (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES     (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES     UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC      (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES  (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES      (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES      (128 * U64_MB)
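/* These thresholds are compared against the per-unit resource counters collected from the unit's
 * cgroup when the unit enters a dead or failed state (see unit_log_resources() below): consumption
 * above the "mentionworthy" values raises the log level of the resource summary to INFO, and
 * consumption above the "noticeworthy" values raises it to NOTICE. */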
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE]   = &service_vtable,
        [UNIT_SOCKET]    = &socket_vtable,
        [UNIT_TARGET]    = &target_vtable,
        [UNIT_DEVICE]    = &device_vtable,
        [UNIT_MOUNT]     = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP]      = &swap_vtable,
        [UNIT_TIMER]     = &timer_vtable,
        [UNIT_PATH]      = &path_vtable,
        [UNIT_SLICE]     = &slice_vtable,
        [UNIT_SCOPE]     = &scope_vtable,
};
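/* Type-specific behaviour is dispatched through this table via the UNIT_VTABLE() macro, which
 * resolves to unit_vtable[u->type]. A minimal sketch of the pattern used throughout this file:
 *
 *         if (UNIT_VTABLE(u)->start)
 *                 r = UNIT_VTABLE(u)->start(u);
 *
 * Vtable members are always tested for NULL before use, since not every unit type implements every
 * method. */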
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = _PRESET_ACTION_INVALID;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;

        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->last_section_private = -1;

        u->start_ratelimit = m->defaults.start_limit;

        u->auto_start_stop_ratelimit = (const RateLimit) {
                .interval = 10 * USEC_PER_SEC,
        };

        return u;
}
int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);
        return 0;
}
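/* A minimal usage sketch (hypothetical caller): allocate a concrete unit type by passing the size
 * of its structure, e.g.
 *
 *         Unit *u;
 *         r = unit_new_for_name(m, sizeof(Service), "foo.service", &u);
 *         if (r < 0)
 *                 return r;
 *
 * On failure the half-constructed unit is released automatically via the unit_freep cleanup
 * handler. */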
bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup context, _before_ the rest of the
                 * settings have been initialized */

                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                ec->restrict_suid_sgid = u->manager->defaults.restrict_suid_sgid;

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops_free, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}
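/* Note the "donated_name" convention: on success the aliases set takes ownership of the string, so
 * a caller must not free it afterwards. A minimal sketch (hypothetical caller):
 *
 *         char *n = strdup("alias.service");
 *         if (!n)
 *                 return -ENOMEM;
 *         r = unit_add_alias(u, n);
 *         if (r < 0)
 *                 free(n);     // on failure the string is still ours
 */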
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "Instance is not set when adding name '%s'.", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "Failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Unit already exists when adding name '%s'.", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Name '%s' is invalid.", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Failed to derive unit type from name '%s'.", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Unit type is illegal: u->type(%d) and t(%d) for name '%s'.",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Templates are not allowed for name '%s'.", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "Cannot add name, manager has too many units.");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);
        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
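/* Naming data structures, in summary: u->id is the canonical name, u->aliases holds all additional
 * names, and every name (canonical or alias) is also a key in u->manager->units pointing back at
 * the unit. Lookup by any name is therefore a single hashmap access, e.g.:
 *
 *         Unit *found = hashmap_get(m->units, "some-alias.service");
 */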
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        state = unit_active_state(u);
        if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops. */

        if (u->job || u->nop_job)
                return false;

        /* If we saw a cgroup empty event for this unit, stay around until we have processed it, so that we
         * remove the empty cgroup if possible. Similarly, process any pending OOM events if they are
         * already queued before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;
                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                        return false;
                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
         * around. Units with active processes should never be collected. */
        r = unit_cgroup_is_empty(u);
        if (r <= 0 && !IN_SET(r, -ENXIO, -EOWNERDEAD))
                return false; /* ENXIO/EOWNERDEAD means: currently not realized */

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}

void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}

void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}
void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}
void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}

void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}

void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}
static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}

void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}
void unit_add_to_stop_notify_queue(Unit *u) {
        assert(u);

        if (u->in_stop_notify_queue)
                return;

        assert(UNIT_VTABLE(u)->stop_notify);

        LIST_PREPEND(stop_notify_queue, u->manager->stop_notify_queue, u);
        u->in_stop_notify_queue = true;
}

void unit_remove_from_stop_notify_queue(Unit *u) {
        assert(u);

        if (!u->in_stop_notify_queue)
                return;

        LIST_REMOVE(stop_notify_queue, u->manager->stop_notify_queue, u);
        u->in_stop_notify_queue = false;
}
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                        other->dependency_generation++;
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
        u->dependency_generation++;
}
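/* Layout of the dependency table, as manipulated above and in unit_merge_dependencies() below:
 *
 *         u->dependencies: Hashmap, keyed by dependency type (UnitDependency cast to void*),
 *                          value: Hashmap, keyed by Unit*,
 *                                 value: UnitDependencyInfo packed into the value pointer
 *
 * i.e. a two-level hashmap: looking up "all units u Requires=" is one lookup by type, and the
 * per-type map then carries the mask metadata for each target unit. */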
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }

        if (u->fragment_path) {
                (void) unlink(u->fragment_path);
                (void) unit_file_remove_from_name_map(
                                &u->manager->lookup_paths,
                                &u->manager->unit_cache_timestamp_hash,
                                &u->manager->unit_id_map,
                                &u->manager->unit_name_map,
                                &u->manager->unit_path_cache,
                                u->fragment_path);
        }
}
static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}
static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u, /* drop_cgroup_runtime = */ true);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        unit_remove_from_stop_notify_queue(u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);

        free(u->job_timeout_reboot_arg);

        free(u->access_selinux_context);

        set_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly loaded but still has a process
         * around. That's why we won't shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}

const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id);
        other->aliases = set_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap *deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));

        r = hashmap_ensure_allocated(&u->dependencies, NULL);
        if (r < 0)
                return r;

        r = hashmap_reserve(u->dependencies, n_reserve);
        if (r < 0)
                return r;

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for it. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some dependency types */
        return IN_SET(dependency, ...);
}
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(info.origin_mask, origin_mask) &&
                    FLAGS_SET(info.destination_mask, destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
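/* UnitDependencyInfo packs the two mask bitfields into a single void*-sized value (enforced by the
 * assert_cc() above), so it can be stored directly as a hashmap value without a separate
 * allocation. A sketch of the declaration (field widths are an assumption here, see unit.h for the
 * authoritative definition):
 *
 *         typedef union UnitDependencyInfo {
 *                 void *data;
 *                 struct {
 *                         UnitDependencyMask origin_mask:16;
 *                         UnitDependencyMask destination_mask:16;
 *                 } _packed_;
 *         } UnitDependencyInfo;
 */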
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break;

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);

        u->dependency_generation++;
        other->dependency_generation++;
}
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}
Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}
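/* A unit object may linger in UNIT_MERGED state until the cleanup queue disposes of it. Code that
 * still holds such a pointer resolves it to the surviving object first, as unit_active_state()
 * does above:
 *
 *         u = unit_follow_merge(u);
 */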
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], i->path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* This must be already set in unit_patch_contexts(). */
        assert(c->private_var_tmp >= 0 && c->private_var_tmp < _PRIVATE_TMP_MAX);

        if (c->private_tmp == PRIVATE_TMP_CONNECTED) {
                assert(c->private_var_tmp == PRIVATE_TMP_CONNECTED);

                r = unit_add_mounts_for(u, "/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

        } else if (c->private_var_tmp == PRIVATE_TMP_DISCONNECTED && !exec_context_with_rootfs(c)) {
                /* Even if PrivateTmp=disconnected, we still require /var/tmp/ mountpoint to be present,
                 * i.e. /var/ needs to be mounted. See comments in unit_patch_contexts(). */
                r = unit_add_mounts_for(u, "/var/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging
         * daemon is run first. */
        if (c->log_namespace) {
                static const struct {
                        const char *template;
                        UnitType type;
                } deps[] = {
                        { "systemd-journald",         UNIT_SOCKET,  },
                        { "systemd-journald-varlink", UNIT_SOCKET,  },
                        { "systemd-journald-sync",    UNIT_SERVICE, },
                };

                FOREACH_ELEMENT(i, deps) {
                        _cleanup_free_ char *unit = NULL;

                        r = unit_name_build_from_type(i->template, c->log_namespace, i->type, &unit);
                        if (r < 0)
                                return r;

                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, unit, true, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        } else if (IN_SET(c->std_output, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                          EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) ||
                   IN_SET(c->std_error, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                          EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE)) {

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        return 0;
}
const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}
const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * parameter.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
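/* For illustration, given id "foo.service" and description "Foo Daemon", the three status formats
 * would yield (assuming the caller passed ret_combined_buffer):
 *
 *         STATUS_UNIT_FORMAT_NAME:        "foo.service"
 *         STATUS_UNIT_FORMAT_DESCRIPTION: "Foo Daemon"
 *         STATUS_UNIT_FORMAT_COMBINED:    "foo.service - Foo Daemon"
 */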
/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_MASKED)
                return 0;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        u = unit_follow_merge(u);

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(u);
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}
int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}
static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded
         * in the name), while all other units are ordered based on configuration (as in their case Slice=
         * configures the relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice) {
                if (!IN_SET(slice->freezer_state, FREEZER_RUNNING, FREEZER_THAWING))
                        u->freezer_state = FREEZER_FROZEN_BY_PARENT;

                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
        }

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}
static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(u->manager, p, /* path= */ NULL, /* e= */ NULL, &m);
                                        continue;
                                }

                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                UNIT_DEPENDENCY_PATH);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        UNIT_DEPENDENCY_PATH);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}
static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}
static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}

static const struct {
        UnitDependencyAtom atom;
        size_t job_mode_offset;
        const char *dependency_name;
        const char *job_mode_setting_name;
} on_termination_settings[] = {
        { UNIT_ATOM_ON_SUCCESS, offsetof(Unit, on_success_job_mode), "OnSuccess=", "OnSuccessJobMode=" },
        { UNIT_ATOM_ON_FAILURE, offsetof(Unit, on_failure_job_mode), "OnFailure=", "OnFailureJobMode=" },
};
static int unit_validate_on_termination_job_modes(Unit *u) {
        assert(u);

        /* Verify that if On{Success,Failure}JobMode=isolate, only one unit gets specified. */

        FOREACH_ELEMENT(setting, on_termination_settings) {
                JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);

                if (job_mode != JOB_ISOLATE)
                        continue;

                Unit *other, *found = NULL;
                UNIT_FOREACH_DEPENDENCY(other, u, setting->atom) {
                        if (!found)
                                found = other;
                        else if (found != other)
                                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC),
                                                            "More than one %s dependency specified but %sisolate set. Refusing.",
                                                            setting->dependency_name, setting->job_mode_setting_name);
                }
        }

        return 0;
}
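/* For example, a unit file with
 *
 *         [Unit]
 *         OnFailure=rescue.target cleanup.service
 *         OnFailureJobMode=isolate
 *
 * is refused here, since an isolate job replaces the entire transaction and therefore only makes
 * sense with a single OnFailure= unit. (Unit names above are illustrative.) */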
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load()
                 * the setup is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_termination_job_modes(u);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC              ? UNIT_BAD_SETTING :
                                                     UNIT_ERROR;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         unit_log_field(u),
                                         u->id,
                                         unit_invocation_log_field(u),
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}

static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}
void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}
int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(
                        u->manager,
                        u->start_limit_action,
                        EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S,
                        u->reboot_arg,
                        /* exit_status= */ -1,
                        reason);

        return -ECANCELED;
}
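/* The rate limit itself comes from m->defaults.start_limit (see unit_new() above), which users
 * configure via StartLimitIntervalSec= and StartLimitBurst=, e.g. allowing five start attempts per
 * 10 s window (illustrative values):
 *
 *         [Unit]
 *         StartLimitIntervalSec=10
 *         StartLimitBurst=5
 *
 * Once ratelimit_below() fails, the unit is marked start_limit_hit and the configured
 * StartLimitAction= is taken. */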
static bool unit_verify_deps(Unit *u) {
        Unit *other;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
         * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
         * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
         * that are not used in conjunction with After= as for them any such check would make things entirely
         * racy. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {

                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
/* Errors that aren't really errors:
 *         -EALREADY:     Unit is already started.
 *         -ECOMM:        Condition failed
 *         -EAGAIN:       An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:        This unit type does not support starting.
 *         -ECANCELED:    Start limit hit, too many requests for now
 *         -EPROTO:       Assert failed
 *         -EINVAL:       Unit not loaded
 *         -EOPNOTSUPP:   Unit type not supported
 *         -ENOLINK:      The necessary dependencies are not fulfilled.
 *         -ESTALE:       This unit has been started before and can't be started a second time
 *         -EDEADLK:      This unit is frozen
 *         -ENOENT:       This is a triggering unit and unit to trigger is not loaded
 *         -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached
 */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure). */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        if (UNIT_IS_INACTIVE_OR_FAILED(state)) {
                Slice *slice = SLICE(UNIT_GET_SLICE(u));

                if (slice) {
                        /* Check hard concurrency limit. Note this is partially redundant, we already checked
                         * this when enqueuing jobs. However, between the time when we enqueued this and the
                         * time we are dispatching the queue the configuration might have changed, hence
                         * check here again */
                        if (slice_concurrency_hard_max_reached(slice, u))
                                return -ETOOMANYREFS;

                        /* Also check soft concurrency limit, and return EAGAIN so that the job is kept in
                         * the queue */
                        if (slice_concurrency_soft_max_reached(slice, u))
                                return -EAGAIN; /* Try again, keep in queue */
                }
        }

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
bool unit_can_start(Unit *u) {
        assert(u);

        if (u->load_state != UNIT_LOADED)
                return false;

        if (!unit_type_supported(u->type))
                return false;

        /* Scope units may be started only once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
                return false;

        return !!UNIT_VTABLE(u)->start;
}

bool unit_can_isolate(Unit *u) {
        assert(u);

        return unit_can_start(u) &&
               u->allow_isolate;
}
/* Errors:
 *         -EBADR:    This unit type does not support stopping.
 *         -EALREADY: Unit is already stopped.
 *         -EAGAIN:   An operation is already in progress. Retry later.
 *         -EDEADLK:  Unit is frozen
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}

bool unit_can_stop(Unit *u) {
        assert(u);

        /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
         * Extrinsic units follow external state and they may stop following external state changes
         * (hence we return true here), but an attempt to do this through the manager will fail. */

        if (!unit_type_supported(u->type))
                return false;

        return !!UNIT_VTABLE(u)->stop;
}
/* Errors:
 *         -EBADR:   This unit type does not support reloading.
 *         -ENOEXEC: Unit is not started.
 *         -EAGAIN:  An operation is already in progress. Retry later.
 *         -EDEADLK: Unit is frozen.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (IN_SET(state, UNIT_RELOADING, UNIT_REFRESHING))
                /* "refreshing" means some resources in the unit namespace are being updated. Unlike reload,
                 * the unit processes aren't made aware of refresh. Let's put the job back to the queue
                 * in both cases, as refresh typically takes place before reload and it's better to wait
                 * for it rather than failing. */
                return -EAGAIN;

        if (state != UNIT_ACTIVE)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}

bool unit_can_reload(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_reload)
                return UNIT_VTABLE(u)->can_reload(u);

        if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
                return true;

        return UNIT_VTABLE(u)->reload;
}
bool unit_is_unneeded(Unit *u) {
        Unit *other;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (unit_active_state(u) != UNIT_ACTIVE)
                return false;

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        return false;

                if (unit_will_restart(other))
                        return false;
        }

        return true;
}
bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks if the unit needs to be started because it currently is not running, but some other unit
         * that is active declared an Uphold= dependency on it */

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
                if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;
                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}

bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
         * because the other unit is down. */

        if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
                /* Don't clean up while the unit is transitioning or is even inactive. */
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;
                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}
static void check_unneeded_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
                unit_submit_to_stop_when_unneeded_queue(other);
}

static void check_uphold_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
                unit_submit_to_start_when_upheld_queue(other);
}

static void check_bound_by_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
                unit_submit_to_stop_when_bound_queue(other);
}
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);

        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_START, other, JOB_FAIL, /* error = */ NULL, /* ret = */ NULL);

        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
}

static void retroactively_stop_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
}
void unit_start_on_termination_deps(Unit *u, UnitDependencyAtom atom) {
        const char *dependency_name = NULL;
        JobMode job_mode;
        unsigned n_jobs = 0;
        int r;

        /* Act on OnFailure= and OnSuccess= dependencies */

        assert(u);
        assert(u->manager);
        assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));

        FOREACH_ELEMENT(setting, on_termination_settings)
                if (atom == setting->atom) {
                        job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);
                        dependency_name = setting->dependency_name;
                        break;
                }

        assert(dependency_name);

        Unit *other;
        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, atom) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                if (n_jobs == 0)
                        log_unit_info(u, "Triggering %s dependencies.", dependency_name);

                r = manager_add_job(u->manager, JOB_START, other, job_mode, &error, /* ret = */ NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue %s%s job, ignoring: %s",
                                               dependency_name, other->id, bus_error_message(&error, r));
                n_jobs++;
        }

        if (n_jobs > 0)
                log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
                               dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
}
void unit_trigger_notify(Unit *u) {
        Unit *other;

        assert(u);

        UNIT_FOREACH_DEPENDENCY_SAFE(other, u, UNIT_ATOM_TRIGGERED_BY)
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}

static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice && log_level > LOG_NOTICE)
                return LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                return LOG_INFO;
        return log_level;
}

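/* A worked example with the thresholds defined at the top of this file (illustrative): a unit that
 * consumed 2 seconds of CPU time (above MENTIONWORTHY_CPU_NSEC = 1s, below NOTICEWORTHY_CPU_NSEC = 10min)
 * raises LOG_DEBUG to LOG_INFO; if it also wrote 20M to disk (above NOTICEWORTHY_IO_BYTES = 10M) it ends
 * up at LOG_NOTICE. Smaller syslog levels are more severe, hence the '>' comparisons only ever lower the
 * numeric level. */
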
static int unit_log_resources(Unit *u) {

        static const struct {
                const char *journal_field;
                const char *message_suffix;
        } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
                [CGROUP_MEMORY_PEAK]        = { "MEMORY_PEAK",      "memory peak"      },
                [CGROUP_MEMORY_SWAP_PEAK]   = { "MEMORY_SWAP_PEAK", "memory swap peak" },
        }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = { "IP_METRIC_INGRESS_BYTES",   "incoming IP traffic" },
                [CGROUP_IP_EGRESS_BYTES]    = { "IP_METRIC_EGRESS_BYTES",    "outgoing IP traffic" },
                [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL                  },
                [CGROUP_IP_EGRESS_PACKETS]  = { "IP_METRIC_EGRESS_PACKETS",  NULL                  },
        }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES]       = { "IO_METRIC_READ_BYTES",       "read from disk"  },
                [CGROUP_IO_WRITE_BYTES]      = { "IO_METRIC_WRITE_BYTES",      "written to disk" },
                [CGROUP_IO_READ_OPERATIONS]  = { "IO_METRIC_READ_OPERATIONS",  NULL              },
                [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL              },
        };

        struct iovec *iovec = NULL;
        size_t n_iovec = 0;
        _cleanup_free_ char *message = NULL, *t = NULL;
        nsec_t cpu_nsec = NSEC_INFINITY;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */

        assert(u);

        CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);

        iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
                        _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
        if (!iovec)
                return log_oom();

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human-readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &cpu_nsec);
        if (cpu_nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
                        return log_oom();

                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* Format the CPU time for inclusion in the human language message string */
                if (dual_timestamp_is_set(&u->inactive_exit_timestamp) &&
                    dual_timestamp_is_set(&u->inactive_enter_timestamp)) {
                        usec_t wall_clock_usec = usec_sub_unsigned(u->inactive_enter_timestamp.monotonic, u->inactive_exit_timestamp.monotonic);
                        if (strextendf_with_separator(&message, ", ",
                                                      "Consumed %s CPU time over %s wall clock time",
                                                      FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC),
                                                      FORMAT_TIMESPAN(wall_clock_usec, USEC_PER_MSEC)) < 0)
                                return log_oom();
                } else
                        if (strextendf_with_separator(&message, ", ",
                                                      "Consumed %s CPU time",
                                                      FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
                                return log_oom();

                log_level = raise_level(log_level,
                                        cpu_nsec > MENTIONWORTHY_CPU_NSEC,
                                        cpu_nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
                uint64_t value = UINT64_MAX;

                assert(memory_fields[metric].journal_field);
                assert(memory_fields[metric].message_suffix);

                (void) unit_get_memory_accounting(u, metric, &value);
                if (value == UINT64_MAX)
                        continue;

                if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
                        return log_oom();

                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                if (strextendf_with_separator(&message, ", ", "%s %s",
                                              FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        value > MENTIONWORTHY_MEMORY_BYTES,
                                        value > NOTICEWORTHY_MEMORY_BYTES);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k].journal_field);

                (void) unit_get_io_accounting(u, k, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
                        return log_oom();

                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (io_fields[k].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m].journal_field);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
                        return log_oom();

                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IP accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the packets counters) */
                if (ip_fields[m].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
                }
        }

        /* This check is here because it is the earliest point following all possible log_level assignments.
         * (If log_level is assigned anywhere after this point, move this check.) */
        if (!unit_log_level_test(u, log_level))
                return 0;

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                assert(!message);
                return 0;
        }

        t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
        if (!t)
                return log_oom();

        iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

        if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, unit_log_field(u), u->id))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, unit_invocation_log_field(u), u->invocation_id_string))
                return log_oom();

        log_unit_struct_iovec(u, log_level, iovec, n_iovec);
        return 0;
}

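/* Illustrative output (unit name and values hypothetical): for a stopped foo.service with CPU and memory
 * accounting enabled, the function above emits one journal entry along these lines:
 *
 *   MESSAGE=foo.service: Consumed 2.5s CPU time over 1min 3s wall clock time, 64.2M memory peak.
 *   CPU_USAGE_NSEC=2500000000
 *   MEMORY_PEAK=67319808
 *   MESSAGE_ID=<SD_MESSAGE_UNIT_RESOURCES_STR>
 *
 * plus the unit and invocation ID fields, at a log level raised per raise_level() above. */
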
static void unit_update_on_console(Unit *u) {
        bool b;

        assert(u);

        b = unit_needs_console(u);
        if (u->on_console == b)
                return;

        u->on_console = b;
        if (b)
                manager_ref_console(u->manager);
        else
                manager_unref_console(u->manager);
}

static void unit_emit_audit_start(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        /* Write audit record if we have just finished starting up */
        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
}

static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}

static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed
                 * previously due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */
        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING, UNIT_REFRESHING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}

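/* A compact summary of the above (illustrative): a running start job finishes JOB_DONE as soon as the
 * unit reports active/reloading; if the unit instead lands in an inactive or failed state the job
 * finishes JOB_FAILED or JOB_DONE accordingly, and the change counts as "unexpected", which
 * unit_notify() below uses to decide whether to retroactively start or stop dependencies. */
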
static void unit_recursive_add_to_run_queue(Unit *u) {
        assert(u);

        if (u->job)
                job_add_to_run_queue(u->job);

        Unit *child;
        UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) {
                if (!child->job)
                        continue;

                unit_recursive_add_to_run_queue(child);
        }
}

static void unit_check_concurrency_limit(Unit *u) {
        assert(u);

        Unit *slice = UNIT_GET_SLICE(u);
        if (!slice)
                return;

        /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */

        if (SLICE(slice)->concurrency_soft_max != UINT_MAX) {
                Unit *sibling;
                UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) {
                        if (sibling == u)
                                continue;

                        unit_recursive_add_to_run_queue(sibling);
                }
        }

        /* Also go up the tree. */
        unit_check_concurrency_limit(slice);
}

void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        Manager *m = ASSERT_PTR(u->manager);

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change.
         *
         * Always send an update if the unit is going into an inactive state so systemd-oomd knows to
         * stop monitoring.
         * Also send an update whenever the unit goes active; this is to handle a case where an override file
         * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
         * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
         * have the information on the property. Thus, indiscriminately send an update. */
        if (os != ns && (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns)))
                (void) manager_varlink_send_managed_oom_update(u);

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_now(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */
                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                        manager_send_unit_supervisor(m, u, /* active= */ true);

                } else if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        manager_send_unit_supervisor(m, u, /* active= */ false);
                        unit_log_resources(u);
                }

                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_termination_deps(u, UNIT_ATOM_ON_SUCCESS);
                else if (ns != os && ns == UNIT_FAILED)
                        unit_start_on_termination_deps(u, UNIT_ATOM_ON_FAILURE);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                const char *reason;

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, /* flags= */ 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);

                /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */
                unit_check_concurrency_limit(u);

                /* Maybe someone else has been waiting for us to stop? */
                m->may_dispatch_stop_notify_queue = true;

        } else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}

int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        /* Adds a specific PID to the set of PIDs this unit watches. */

        assert(u);
        assert(pidref_is_set(pid));

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) { /* early exit if already being watched */
                assert(!exclusive);
                return 0;
        }

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u);
        if (r != -EEXIST)
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                ;

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Add or replace the old array */
        r = hashmap_ensure_replace(&u->manager->watch_pids_more, &pidref_hash_ops, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */

        return 0;
}

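/* A sketch of the two-level bookkeeping above (PID values hypothetical):
 *
 *   manager->watch_pids:      { 123 -> unitA, 456 -> unitB }        (first watcher of each PID)
 *   manager->watch_pids_more: { 123 -> [ unitC, unitD, NULL ] }     (further watchers, NULL-terminated)
 *
 * Both hashmaps are keyed by PidRef objects owned by the respective units' u->pids sets, which is why
 * unit_unwatch_pidref() below has to re-key an array entry when the owning unit stops watching. */
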
void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
                        free(array);
                } else if (pid1 == pid3) {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */
                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}

void unit_unwatch_all_pids(Unit *u) {
        assert(u);

        while (!set_isempty(u->pids))
                unit_unwatch_pidref(u, set_first(u->pids));

        u->pids = set_free(u->pids);
}

void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
        assert(u);

        if (!pidref_is_set(pidref))
                return;

        unit_unwatch_pidref(u, pidref);
        pidref_done(pidref);
}

bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueuing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached();
        }
}

static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                deps = TAKE_PTR(h);
        }

        return deps;
}

{
3059 NOTIFY_DEPENDENCY_UPDATE_FROM
= 1 << 0,
3060 NOTIFY_DEPENDENCY_UPDATE_TO
= 1 << 1,
3061 } NotifyDependencyFlags
;
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES]               = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE]              = UNIT_REQUISITE_OF,
                [UNIT_WANTS]                  = UNIT_WANTED_BY,
                [UNIT_BINDS_TO]               = UNIT_BOUND_BY,
                [UNIT_PART_OF]                = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS]                = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY]            = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF]           = UNIT_REQUISITE,
                [UNIT_WANTED_BY]              = UNIT_WANTS,
                [UNIT_BOUND_BY]               = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF]            = UNIT_PART_OF,
                [UNIT_UPHELD_BY]              = UNIT_UPHOLDS,
                [UNIT_CONFLICTS]              = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY]          = UNIT_CONFLICTS,
                [UNIT_BEFORE]                 = UNIT_AFTER,
                [UNIT_AFTER]                  = UNIT_BEFORE,
                [UNIT_ON_SUCCESS]             = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF]          = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE]             = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF]          = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS]               = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY]           = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO]   = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO]     = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM]   = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF]     = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES]             = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY]          = UNIT_REFERENCES,
                [UNIT_IN_SLICE]               = UNIT_SLICE_OF,
                [UNIT_SLICE_OF]               = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
                u->dependency_generation++;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
                other->dependency_generation++;
        }

        return flags;
}

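/* Illustrative example (unit names hypothetical): recording UNIT_WANTS from a.service on b.service stores
 * { b -> UnitDependencyInfo with origin_mask |= mask } in a's UNIT_WANTS hashmap, and
 * { a -> UnitDependencyInfo with destination_mask |= mask } in b's UNIT_WANTED_BY hashmap, per
 * inverse_table above. The returned NotifyDependencyFlags tell the caller which of the two units actually
 * changed and hence needs a D-Bus change notification. */
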
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows its job running
         * timeout to be started at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}

int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
        int r = 0, s = 0;

        assert(u);
        assert(d >= 0 || e >= 0);

        if (d >= 0) {
                r = unit_add_dependency(u, d, other, add_reference, mask);
                if (r < 0)
                        return r;
        }

        if (e >= 0) {
                s = unit_add_dependency(u, e, other, add_reference, mask);
                if (s < 0)
                        return s;
        }

        return r > 0 || s > 0;
}

static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name);
        assert(buf);
        assert(ret);

        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}

int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}

int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}

int setenv_unit_path(const char *p) {
        assert(p);

        /* This is mostly for debug purposes */
        return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ true));
}

char* unit_dbus_path(Unit *u) {
        assert(u);

        if (!u->id)
                return NULL;

        return unit_dbus_path_from_name(u->id);
}

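/* Illustrative example: for u->id == "dbus.service" this yields
 * "/org/freedesktop/systemd1/unit/dbus_2eservice", i.e. the usual bus path label escaping where every
 * byte outside [A-Za-z0-9] is encoded as _xx hex. */
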
char* unit_dbus_path_invocation_id(Unit *u) {
        assert(u);

        if (sd_id128_is_null(u->invocation_id))
                return NULL;

        return unit_dbus_path_from_name(u->invocation_id_string);
}

int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}

int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u)) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (crt && crt->cgroup_path)
                        return -EBUSY;
        }

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}

int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}

const char* unit_slice_name(Unit *u) {
        Unit *slice;

        assert(u);

        slice = UNIT_GET_SLICE(u);
        if (!slice)
                return NULL;

        return slice->id;
}

int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        assert(r < 0 || *_found != u);
        return r;
}

static int signal_name_owner_changed_install_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        Unit *u = ASSERT_PTR(userdata);
        const sd_bus_error *e;
        int r;

        e = sd_bus_message_get_error(message);
        if (!e) {
                log_unit_trace(u, "Successfully installed NameOwnerChanged signal match.");
                return 0;
        }

        r = sd_bus_error_get_errno(e);
        log_unit_error_errno(u, r,
                             "Unexpected error response on installing NameOwnerChanged signal match: %s",
                             bus_error_message(e, r));

        /* If we failed to install NameOwnerChanged signal, also unref the bus slot of GetNameOwner(). */
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, NULL);

        return 0;
}

static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));

        return 0;
}

static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}

int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        usec_t timeout_usec = 0;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        /* asynchronous = */ true,
                        match,
                        signal_name_owner_changed,
                        signal_name_owner_changed_install_handler,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);
        if (r < 0) {
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}

int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}

void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}

int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
        _cleanup_free_ char *e = NULL;
        Unit *device;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        return unit_add_two_dependencies(u, UNIT_AFTER,
                                         MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                         device, true, mask);
}

int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
        _cleanup_free_ char *escaped = NULL, *target = NULL;
        int r;

        assert(u);

        if (isempty(what))
                return 0;

        if (!path_startswith(what, "/dev/"))
                return 0;

        /* If we don't support devices, then also don't bother with blockdev@.target */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_path_escape(what, &escaped);
        if (r < 0)
                return r;

        r = unit_name_build("blockdev", escaped, ".target", &target);
        if (r < 0)
                return r;

        return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
}

int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}

void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}

static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
        struct stat st;

        if (!path)
                return false;

        /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
         * are never out-of-date. */
        if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
                return false;

        if (stat(path, &st) < 0)
                /* What, cannot access this anymore? */
                return true;

        if (path_masked)
                /* For masked files check if they are still so */
                return !null_or_empty(&st);

        /* For non-empty files check the mtime */
        return timespec_load(&st.st_mtim) > mtime;
}

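/* Illustrative example: a unit masked via "systemctl mask foo.service" has a /dev/null symlink (or an
 * empty file) as its fragment. With path_masked=true the function above reports "newer" only if that
 * placeholder was replaced by real content, while for a regular fragment a plain mtime comparison against
 * the timestamp recorded at load time is used. */
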
bool unit_need_daemon_reload(Unit *u) {
        assert(u);
        assert(u->manager);

        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        if (u->load_state == UNIT_LOADED) {
                _cleanup_strv_free_ char **dropins = NULL;

                (void) unit_find_dropin_paths(u, /* use_unit_path_cache = */ false, &dropins);

                if (!strv_equal(u->dropin_paths, dropins))
                        return true;

                /* … any drop-ins that are masked are simply omitted from the list. */
                STRV_FOREACH(path, u->dropin_paths)
                        if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                                return true;
        }

        return false;
}

void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;

        (void) unit_set_debug_invocation(u, /* enable= */ false);
}

Unit *unit_following(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->following)
                return UNIT_VTABLE(u)->following(u);

        return NULL;
}

bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}

bool unit_inactive_or_pending(Unit *u) {
        assert(u);

        /* Returns true if the unit is inactive or going down */

        if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
                return true;

        if (unit_stop_pending(u))
                return true;

        return false;
}

bool unit_active_or_pending(Unit *u) {
        assert(u);

        /* Returns true if the unit is active or going up */

        if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return true;

        if (u->job &&
            IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
                return true;

        return false;
}

bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}

bool unit_will_restart(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->will_restart)
                return false;

        return UNIT_VTABLE(u)->will_restart(u);
}

void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}

static int unit_pid_set(Unit *u, Set **pid_set) {
        int r;

        assert(u);
        assert(pid_set);

        set_clear(*pid_set); /* This updates input. */

        /* Exclude the main/control pids from being killed via the cgroup */

        FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
                if (pidref_is_set(pid)) {
                        r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
                        if (r < 0)
                                return r;
                }

        return 0;
}

static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = ASSERT_PTR(userdata);

        (void) pidref_get_comm(pid, &comm);

        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid->pid, strna(comm));

        return 1;
}

static int kill_or_sigqueue(PidRef *pidref, int signo, int code, int value) {
        assert(pidref_is_set(pidref));
        assert(SIGNAL_VALID(signo));

        switch (code) {

        case SI_USER:
                log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
                return pidref_kill(pidref, signo);

        case SI_QUEUE:
                log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
                return pidref_sigqueue(pidref, signo, value);

        default:
                assert_not_reached();
        }
}

static int unit_kill_one(
                Unit *u,
                PidRef *pidref,
                const char *type,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        int r;

        assert(u);
        assert(type);

        if (!pidref_is_set(pidref))
                return 0;

        _cleanup_free_ char *comm = NULL;
        (void) pidref_get_comm(pidref, &comm);

        r = kill_or_sigqueue(pidref, signo, code, value);
        if (r < 0) {
                /* Report this failure both to the logs and to the client */
                if (ret_error)
                        sd_bus_error_set_errnof(
                                        ret_error, r,
                                        "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), type, pidref->pid, strna(comm));

                return log_unit_warning_errno(
                                u, r,
                                "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
                                signal_to_string(signo), type, pidref->pid, strna(comm));
        }

        log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), type, pidref->pid, strna(comm));
        return 1; /* killed */
}

int unit_kill(
                Unit *u,
                KillWhom whom,
                const char *subgroup,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;
        int ret = 0, r;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(whom >= 0);
        assert(whom < _KILL_WHOM_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        if (!empty_or_root(subgroup)) {
                if (!IN_SET(whom, KILL_CGROUP, KILL_CGROUP_FAIL))
                        return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED,
                                                "Killing by subgroup is only supported for 'cgroup' or 'cgroup-kill' modes.");

                if (!unit_cgroup_delegate(u))
                        return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED,
                                                "Killing by subgroup is only available for units with control group delegation enabled.");
        }

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_set(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL, KILL_CGROUP, KILL_CGROUP_FAIL) && code == SI_USER) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (crt && crt->cgroup_path) {
                        _cleanup_set_free_ Set *pid_set = NULL;
                        _cleanup_free_ char *joined = NULL;
                        const char *p;

                        if (empty_or_root(subgroup))
                                p = crt->cgroup_path;
                        else {
                                joined = path_join(crt->cgroup_path, subgroup);
                                if (!joined)
                                        return -ENOMEM;

                                p = joined;
                        }

                        if (signo == SIGKILL) {
                                r = cg_kill_kernel_sigkill(p);
                                if (r >= 0) {
                                        killed = true;
                                        log_unit_info(u, "Killed unit cgroup '%s' with SIGKILL on client request.", p);
                                        goto finish;
                                }
                                if (r != -EOPNOTSUPP) {
                                        if (ret >= 0)
                                                sd_bus_error_set_errnof(ret_error, r,
                                                                        "Failed to kill unit cgroup: %m");
                                        RET_GATHER(ret, log_unit_warning_errno(u, r, "Failed to kill unit cgroup '%s': %m", p));
                                        goto finish;
                                }

                                /* Fall back to manual enumeration */
                        } else if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL)) {
                                /* Exclude the main/control pids from being killed via the cgroup if not
                                 * taking down the whole control group. */
                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return log_oom();
                        }

                        r = cg_kill_recursive(p, signo, /* flags= */ 0, pid_set, kill_common_log, u);
                        if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
                                if (ret >= 0)
                                        sd_bus_error_set_errnof(
                                                        ret_error, r,
                                                        "Failed to send signal SIG%s to processes in unit cgroup '%s': %m",
                                                        signal_to_string(signo), p);

                                RET_GATHER(ret, log_unit_warning_errno(
                                                        u, r,
                                                        "Failed to send signal SIG%s to processes in unit cgroup '%s' on client request: %m",
                                                        signal_to_string(signo), p));
                        }

                        killed = killed || r > 0;
                }
        }

finish:
        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret >= 0 && !killed && IN_SET(whom, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL, KILL_CGROUP_FAIL))
                return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}

int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (UNIT_VTABLE(u)->following_set)
                return UNIT_VTABLE(u)->following_set(u, s);

        *s = NULL;
        return 0;
}

UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_state >= 0 || !u->fragment_path)
                return u->unit_file_state;

        /* If we know this is a transient unit no need to ask the unit file state for details. Let's bypass
         * the more expensive on-disk check. */
        if (u->transient)
                return (u->unit_file_state = UNIT_FILE_TRANSIENT);

        r = unit_file_get_state(
                        u->manager->runtime_scope,
                        /* root_dir= */ NULL,
                        u->id,
                        &u->unit_file_state);
        if (r < 0)
                u->unit_file_state = UNIT_FILE_BAD;

        return u->unit_file_state;
}

PresetAction unit_get_unit_file_preset(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_preset >= 0)
                return u->unit_file_preset;

        /* If this is a transient or perpetual unit file it doesn't make much sense to ask the preset
         * database about this, because enabling/disabling makes no sense for either. Hence don't. */
        if (!u->fragment_path || u->transient || u->perpetual)
                return (u->unit_file_preset = -ENOEXEC);

        _cleanup_free_ char *bn = NULL;
        r = path_extract_filename(u->fragment_path, &bn);
        if (r < 0)
                return (u->unit_file_preset = r);
        if (r == O_DIRECTORY)
                return (u->unit_file_preset = -EISDIR);

        return (u->unit_file_preset = unit_file_query_preset(
                        u->manager->runtime_scope,
                        /* root_dir= */ NULL,
                        bn,
                        /* cached= */ NULL));
}

Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}

void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}

static int user_from_unit_name(Unit *u, char **ret) {

        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n, 0)) {
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}

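/* Illustrative example (hash value hypothetical): for "foobar.service" the prefix "foobar" is a valid
 * user name and is used as-is; for a prefix that cannot serve as a user name (say, one that is overlong)
 * a name of the form "_du89aa39c363e94d29" is synthesized from the siphash24() of the prefix instead. */
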
static int unit_verify_contexts(const Unit *u) {
        assert(u);

        const ExecContext *ec = unit_get_exec_context(u);
        if (!ec)
                return 0;

        if (MANAGER_IS_USER(u->manager) && ec->dynamic_user)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "DynamicUser= enabled for user unit, which is not supported. Refusing.");

        if (ec->dynamic_user && ec->working_directory_home)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory=~ is not allowed under DynamicUser=yes. Refusing.");

        if (ec->working_directory && path_below_api_vfs(ec->working_directory) &&
            exec_needs_mount_namespace(ec, /* params = */ NULL, /* runtime = */ NULL))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory= may not be below /proc/, /sys/ or /dev/ when using mount namespacing. Refusing.");

        if (exec_needs_pid_namespace(ec, /* params= */ NULL) && !UNIT_VTABLE(u)->notify_pidref)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "PrivatePIDs= setting is only supported for service units. Refusing.");

        const KillContext *kc = unit_get_kill_context(u);

        if (ec->pam_name && kc && !IN_SET(kc->kill_mode, KILL_CONTROL_GROUP, KILL_MIXED))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit has PAM enabled. Kill mode must be set to 'control-group' or 'mixed'. Refusing.");

        return 0;
}

static PrivateTmp unit_get_private_var_tmp(const Unit *u, const ExecContext *c) {
        assert(u);
        assert(c);
        assert(c->private_tmp >= 0 && c->private_tmp < _PRIVATE_TMP_MAX);

        /* Disable disconnected private tmpfs on /var/tmp/ when DefaultDependencies=no and
         * RootImage=/RootDirectory= are not set, as /var/ may be a separated partition.
         * See issue #37258. */

        /* PrivateTmp=yes/no also enables/disables private tmpfs on /var/tmp/. */
        if (c->private_tmp != PRIVATE_TMP_DISCONNECTED)
                return c->private_tmp;

        /* When DefaultDependencies=yes, disconnected tmpfs is also enabled on /var/tmp/, and an explicit
         * dependency to the mount on /var/ will be added in unit_add_exec_dependencies(). */
        if (u->default_dependencies)
                return PRIVATE_TMP_DISCONNECTED;

        /* When RootImage=/RootDirectory= is enabled, /var/ should be prepared by the image or directory,
         * hence we can mount a disconnected tmpfs on /var/tmp/. */
        if (exec_context_with_rootfs(c))
                return PRIVATE_TMP_DISCONNECTED;

        /* Even if DefaultDependencies=no, enable disconnected tmpfs when
         * RequiresMountsFor=/WantsMountsFor=/var/ is explicitly set. */
        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; t++)
                if (hashmap_contains(u->mounts_for[t], "/var/"))
                        return PRIVATE_TMP_DISCONNECTED;

        /* Check the same but for After= with Requires=/Requisite=/Wants= or friends. */
        Unit *m = manager_get_unit(u->manager, "var.mount");
        if (!m)
                return PRIVATE_TMP_NO;

        if (!unit_has_dependency(u, UNIT_ATOM_AFTER, m))
                return PRIVATE_TMP_NO;

        if (unit_has_dependency(u, UNIT_ATOM_PULL_IN_START, m) ||
            unit_has_dependency(u, UNIT_ATOM_PULL_IN_VERIFY, m) ||
            unit_has_dependency(u, UNIT_ATOM_PULL_IN_START_IGNORED, m))
                return PRIVATE_TMP_DISCONNECTED;

        return PRIVATE_TMP_NO;
}

int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized. */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) && !ec->working_directory) {
                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        if (!ec->working_directory_home)
                                /* If home directory is implied by us, allow it to be missing. */
                                ec->working_directory_missing_ok = true;
                }

                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        /* With DynamicUser= we want private directories, so if the user hasn't manually
                         * selected PrivateTmp=, enable it, but to a fully private (disconnected) tmpfs
                         * instance. */
                        if (ec->private_tmp == PRIVATE_TMP_NO)
                                ec->private_tmp = PRIVATE_TMP_DISCONNECTED;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                ec->private_var_tmp = unit_get_private_var_tmp(u, ec);

                FOREACH_ARRAY(d, ec->directories, _EXEC_DIRECTORY_TYPE_MAX)
                        exec_directory_sort(d);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cgroup_context_has_device_policy(cc)) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }

                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return unit_verify_contexts(u);
}
ExecContext* unit_get_exec_context(const Unit *u) {
        size_t offset;

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->exec_context_offset;
        if (offset <= 0)
                return NULL;

        return (ExecContext*) ((uint8_t*) u + offset);
}

KillContext* unit_get_kill_context(const Unit *u) {
        size_t offset;

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->kill_context_offset;
        if (offset <= 0)
                return NULL;

        return (KillContext*) ((uint8_t*) u + offset);
}

CGroupContext* unit_get_cgroup_context(const Unit *u) {
        size_t offset;

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->cgroup_context_offset;
        if (offset <= 0)
                return NULL;

        return (CGroupContext*) ((uint8_t*) u + offset);
}

ExecRuntime* unit_get_exec_runtime(const Unit *u) {
        size_t offset;

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        if (offset <= 0)
                return NULL;

        return *(ExecRuntime**) ((uint8_t*) u + offset);
}

CGroupRuntime* unit_get_cgroup_runtime(const Unit *u) {
        size_t offset;

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
        if (offset <= 0)
                return NULL;

        return *(CGroupRuntime**) ((uint8_t*) u + offset);
}
static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
        assert(u);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return NULL;

        if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
                return u->manager->lookup_paths.transient;

        if (flags & UNIT_PERSISTENT)
                return u->manager->lookup_paths.persistent_control;

        if (flags & UNIT_RUNTIME)
                return u->manager->lookup_paths.runtime_control;

        return NULL;
}
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL;

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        free_and_replace(t, t2);
                }

                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

                s = t;
        }

        *buf = TAKE_PTR(t);
        return s;
}
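
/* Illustrative example (hypothetical input strings, not taken from the source): calling
 * unit_escape_setting("echo $HOME", UNIT_ESCAPE_EXEC_SYNTAX_ENV, &buf) first doubles the '$',
 * yielding "echo $$HOME" with *buf owning the new allocation, and then escapes any embedded
 * double quotes (none here). By contrast, unit_escape_setting("plain", 0, &buf) returns the
 * input pointer untouched and leaves *buf set to NULL, so no allocation happens at all. */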
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
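
/* Illustrative example (hypothetical argv): unit_concat_strv(STRV_MAKE("/bin/echo", "hello world"), 0)
 * produces the single string
 *
 *         "/bin/echo" "hello world"
 *
 * i.e. each entry is wrapped in double quotes and entries are separated by one space, which is
 * exactly the quoting the ExecStart= parser undoes again when the line is loaded back. */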
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in,
                 * but instead write to the transient unit file. */
                fputs_with_newline(u->transient_file, data);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup_full(&u->manager->unit_path_cache, &path_hash_ops_free, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file(q, wrapped, WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_ATOMIC|WRITE_STRING_FILE_LABEL);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
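
/* Illustrative example (hypothetical unit name and setting): for "foo.service" with UNIT_PERSISTENT
 * set and name "Description", the drop-in ends up at something like
 * /etc/systemd/system.control/foo.service.d/50-Description.conf (the exact directory comes from
 * lookup_paths.persistent_control), carrying the "Do not edit" header followed by the "[Unit]"
 * section header and the setting itself. */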
int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_setting(u, flags, name, p);
}
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
static bool ignore_leftover_process(const char *comm) {
        return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
}
static int log_kill(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
                 * here to let the manager know that a process was killed. */
                return 1;

        log_unit_notice(u,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid->pid,
                        strna(comm),
                        signal_to_string(sig));

        return 1;
}
static int operation_to_signal(
                const KillContext *c,
                KillOperation k,
                bool *ret_noteworthy) {

        assert(c);
        assert(ret_noteworthy);

        switch (k) {

        case KILL_TERMINATE:
        case KILL_TERMINATE_AND_LOG:
                *ret_noteworthy = k == KILL_TERMINATE_AND_LOG;
                return c->kill_signal;

        case KILL_RESTART:
                *ret_noteworthy = false;
                return restart_kill_signal(c);

        case KILL_KILL:
                *ret_noteworthy = true;
                return c->final_kill_signal;

        case KILL_WATCHDOG:
                *ret_noteworthy = true;
                return c->watchdog_signal;

        default:
                assert_not_reached();
        }
}
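
/* Summary of the mapping above: KILL_TERMINATE/KILL_TERMINATE_AND_LOG use KillSignal= (SIGTERM by
 * default), KILL_RESTART uses RestartKillSignal= (falling back to KillSignal=), KILL_KILL uses
 * FinalKillSignal= (SIGKILL by default) and KILL_WATCHDOG uses WatchdogSignal= (SIGABRT by
 * default). Only the "noteworthy" cases get logged via log_kill() in unit_kill_context() below. */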
static int unit_kill_context_one(
                Unit *u,
                const PidRef *pidref,
                const char *type,
                bool is_alien,
                int sig,
                bool send_sighup,
                cg_kill_log_func_t log_func) {

        int r;

        assert(u);
        assert(type);

        /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */

        if (!pidref_is_set(pidref))
                return 0;

        if (log_func)
                log_func(pidref, sig, u);

        r = pidref_kill_and_sigcont(pidref, sig);
        if (r == -ESRCH)
                return !is_alien;
        if (r < 0) {
                _cleanup_free_ char *comm = NULL;

                (void) pidref_get_comm(pidref, &comm);
                return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
        }

        if (send_sighup)
                (void) pidref_kill(pidref, SIGHUP);

        return !is_alien;
}
int unit_kill_context(Unit *u, KillOperation k) {
        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        KillContext *c = unit_get_kill_context(u);
        if (!c || c->kill_mode == KILL_NONE)
                return 0;

        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        bool is_alien;
        PidRef *main_pid = unit_main_pid_full(u, &is_alien);
        r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                r = unit_pid_set(u, &pid_set);
                if (r < 0)
                        return r;

                r = cg_kill_recursive(
                                crt->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func,
                                u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                } else if (r > 0) {
                        wait_for_exit = true;

                        if (send_sighup) {
                                pid_set = set_free(pid_set);

                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return r;

                                (void) cg_kill_recursive(
                                                crt->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* log_kill= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
        Hashmap **unit_map, **manager_map;
        int r;

        assert(u);
        assert(path);
        assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);

        unit_map = &u->mounts_for[type];
        manager_map = &u->manager->units_needing_mounts_for[type];

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask,
        };

        r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(*manager_map, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(*manager_map, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
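
/* Illustrative example (hypothetical path): registering "/var/lib/foo" files the unit under the
 * prefix-table entries "/", "/var", "/var/lib" and "/var/lib/foo" (that's what
 * PATH_FOREACH_PREFIX_MORE iterates over), so that a mount unit appearing for any of these paths
 * can cheaply look up all units that need to order themselves against it. */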
int unit_setup_exec_runtime(Unit *u) {
        _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
        _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
        _cleanup_set_free_ Set *units = NULL;
        ExecRuntime **rt;
        ExecContext *ec;
        size_t offset;
        Unit *other;
        int r;

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        ec = ASSERT_PTR(unit_get_exec_context(u));

        r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
        if (r < 0)
                return r;

        /* Try to get it from somebody else */
        SET_FOREACH(other, units) {
                r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
                if (r < 0)
                        return r;
                if (r > 0)
                        break;
        }

        if (!esr) {
                r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
                if (r < 0)
                        return r;
        }

        if (ec->dynamic_user) {
                r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
                if (r < 0)
                        return r;
        }

        r = exec_runtime_make(u, ec, esr, dcreds, rt);
        if (r < 0)
                return r;

        TAKE_PTR(esr);
        TAKE_PTR(dcreds);

        return r;
}
*unit_setup_cgroup_runtime(Unit
*u
) {
5147 offset
= UNIT_VTABLE(u
)->cgroup_runtime_offset
;
5150 CGroupRuntime
**rt
= (CGroupRuntime
**) ((uint8_t*) u
+ offset
);
5154 return (*rt
= cgroup_runtime_new());
bool unit_type_supported(UnitType t) {
        static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
        int r;

        assert(t >= 0 && t < _UNIT_TYPE_MAX);

        if (cache[t] == 0) {
                char *e;

                e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));

                r = getenv_bool(ascii_strupper(e));
                if (r < 0 && r != -ENXIO)
                        log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);

                cache[t] = r == 0 ? -1 : 1;
        }

        if (cache[t] < 0)
                return false;

        if (!unit_vtable[t]->supported)
                return true;

        return unit_vtable[t]->supported();
}
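
/* Example: exporting SYSTEMD_SUPPORT_SERVICE=0 in the manager's environment makes this return
 * false for all service units. Note the result is cached for the lifetime of the process, so
 * flipping the variable later has no effect until the manager is restarted. */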
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        if (!unit_log_level_test(u, LOG_NOTICE))
                return;

        r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
        if (r > 0 || r == -ENOTDIR)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_unit_struct(u, LOG_NOTICE,
                        LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                        LOG_ITEM("WHERE=%s", where));
}
int unit_log_noncanonical_mount_path(Unit *u, const char *where) {
        assert(u);
        assert(where);

        /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
        log_unit_struct(u, LOG_ERR,
                        LOG_MESSAGE_ID(SD_MESSAGE_NON_CANONICAL_MOUNT_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
                        LOG_ITEM("WHERE=%s", where));

        return -ELOOP;
}
int unit_fail_if_noncanonical_mount_path(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        _cleanup_free_ char *canonical_where = NULL;
        r = chase(where, /* root= */ NULL, CHASE_NONEXISTENT, &canonical_where, /* ret_fd= */ NULL);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
                return 0;
        }

        /* We will happily ignore a trailing slash (or any redundant slashes) */
        if (path_equal(where, canonical_where))
                return 0;

        return unit_log_noncanonical_mount_path(u, where);
}
bool unit_is_pristine(Unit *u) {
        assert(u);

        /* Check if the unit already exists or is already around, in a number of different ways. Note that to
         * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
         * even though nothing was actually loaded, as those unit types don't require a file on disk.
         *
         * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
         * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
         * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf. */

        return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
               !u->fragment_path &&
               !u->source_path &&
               !u->job &&
               !u->merged_into;
}
PidRef* unit_control_pid(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->control_pid)
                return UNIT_VTABLE(u)->control_pid(u);

        return NULL;
}

PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
        assert(u);

        if (UNIT_VTABLE(u)->main_pid)
                return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);

        if (ret_is_alien)
                *ret_is_alien = false;
        return NULL;
}
static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        CGroupContext *c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (!u->manager->fw_ctx) {
                r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
                if (r < 0)
                        return;

                assert(u->manager->fw_ctx);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
                if (nft_set->source != source)
                        continue;

                r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
                if (r < 0)
                        log_warning_errno(r, "Failed to %s NFT set entry: family %s, table %s, set %s, ID %u, ignoring: %m",
                                          add ? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
                else
                        log_debug("%s NFT set entry: family %s, table %s, set %s, ID %u",
                                  add ? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
        }
}
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}
static void unit_unref_uid(Unit *u, bool destroy_now) {
        assert(u);

        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);

        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}

static void unit_unref_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);

        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}

void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}
static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}

static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
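
/* Note on the transactional behaviour above: if the GID reference fails after the UID reference
 * already succeeded, the fresh UID reference is dropped again (without destroying IPC objects), so
 * callers always end up with either both references or neither. The return value is 1 if at least
 * one new reference was actually taken, 0 if both were already set to these values. */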
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);

        return r;
}
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                unit_add_to_dbus_queue(u);
}
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        unit_add_to_dbus_queue(u);
        return 0;
}
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager));
        if (r < 0)
                return r;

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        p->cgroup_path = crt ? crt->cgroup_path : NULL;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
                int fd = bpf_restrict_fs_map_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_restrict_fs_map_fd = fd;
        }

        p->user_lookup_fd = u->manager->user_lookup_fds[1];
        p->handoff_timestamp_fd = u->manager->handoff_timestamp_fds[1];
        if (UNIT_VTABLE(u)->notify_pidref)
                p->pidref_transport_fd = u->manager->pidref_transport_fds[1];

        p->cgroup_id = crt ? crt->cgroup_id : 0;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        p->debug_invocation = u->debug_invocation;

        return 0;
}
int unit_fork_helper_process(Unit *u, const char *name, bool into_cgroup, PidRef *ret) {
        CGroupRuntime *crt = NULL;
        pid_t pid;
        int r, q;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup, if configured to
         * do so. Returns == 0 in the child, and > 0 in the parent. The pid parameter is always filled in
         * with the child's PID. */

        if (into_cgroup) {
                (void) unit_realize_cgroup(u);

                crt = unit_setup_cgroup_runtime(u);
                if (!crt)
                        return -ENOMEM;
        }

        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (crt && crt->cgroup_path) {
                r = cg_attach(crt->cgroup_path, 0);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", /* into_cgroup= */ true, &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                int ret = EXIT_SUCCESS;

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
        assert(deps);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0)
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(deps, other));
        else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(deps, other, di.data) == 0);
}
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                u->dependency_generation++;
                                other->dependency_generation++;

                                done = false;
                                break;
                        }
                } while (!done);
        }
}
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;

                r = xdg_user_runtime_dir("/systemd/units/invocation:", &user_path);
                if (r < 0)
                        return r;

                p = strjoin(user_path, u->id);
        }
        if (!p)
                return -ENOMEM;

        *ret = p;
        return 0;
}
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlinkat_atomic_full(u->invocation_id_string, AT_FDCWD, p, SYMLINK_LABEL);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
static int unit_export_log_level_max(Unit *u, int log_level_max, bool overwrite) {
        const char *p;
        char buf[2];
        int r;

        assert(u);

        /* When the debug_invocation logic runs, overwrite will be true as we always want to switch the max
         * log level that the journal applies, and we want to always restore the previous level once done */

        if (!overwrite && u->exported_log_level_max)
                return 0;

        if (log_level_max < 0)
                return 0;

        assert(log_level_max <= 7);

        buf[0] = '0' + log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -EBADF;
        struct iovec *iovec;
        const char *p, *pattern;
        le64_t *sizes;
        ssize_t n;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (size_t i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
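
/* On-disk format written above: a flat sequence of (little-endian 64-bit length, payload) pairs,
 * one pair per LogExtraFields= entry. E.g. a field "FOO=bar" is stored as the le64 value 7 followed
 * by the seven bytes "FOO=bar", so journald can parse the file without any framing beyond the
 * length words. */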
static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_interval)
                return 0;

        if (c->log_ratelimit.interval == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_ratelimit.interval) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_ratelimit_interval = true;
        return 0;
}

static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_burst)
                return 0;

        if (c->log_ratelimit.burst == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_ratelimit.burst) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_ratelimit_burst = true;
        return 0;
}
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c->log_level_max, /* overwrite= */ false);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
void unit_unlink_state_files(Unit *u) {
        const char *p;

        assert(u);

        if (!u->id)
                return;

        /* Undoes the effect of unit_export_state_files() */

        if (u->exported_invocation_id) {
                _cleanup_free_ char *invocation_path = NULL;
                int r = unit_get_invocation_path(u, &invocation_path);
                if (r >= 0) {
                        (void) unlink(invocation_path);
                        u->exported_invocation_id = false;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (u->exported_log_level_max) {
                p = strjoina("/run/systemd/units/log-level-max:", u->id);
                (void) unlink(p);

                u->exported_log_level_max = false;
        }

        if (u->exported_log_extra_fields) {
                p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
                (void) unlink(p);

                u->exported_log_extra_fields = false;
        }

        if (u->exported_log_ratelimit_interval) {
                p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
                (void) unlink(p);

                u->exported_log_ratelimit_interval = false;
        }

        if (u->exported_log_ratelimit_burst) {
                p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
                (void) unlink(p);

                u->exported_log_ratelimit_burst = false;
        }
}
int unit_set_debug_invocation(Unit *u, bool enable) {
        int r;

        assert(u);

        if (u->debug_invocation == enable)
                return 0; /* Nothing to do */

        u->debug_invocation = enable;

        /* Ensure that the new log level is exported for the journal, in place of the previous one */
        if (u->exported_log_level_max) {
                const ExecContext *ec = unit_get_exec_context(u);
                if (ec) {
                        r = unit_export_log_level_max(u, enable ? LOG_PRI(LOG_DEBUG) : ec->log_level_max, /* overwrite= */ true);
                        if (r < 0)
                                return r;
                }
        }

        return 1;
}
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->reset_accounting) {
                (void) unit_reset_accounting(u);
                crt->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
static int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(u,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid->pid, strna(comm));

        return 1;
}

static int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(u,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid->pid, strna(comm));

        return 1;
}
int unit_warn_leftover_processes(Unit *u, bool start) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(u);

        r = unit_get_cgroup_path_with_fallback(u, &cgroup);
        if (r < 0)
                return r;

        return cg_kill_recursive(
                        cgroup,
                        /* sig= */ 0,
                        /* flags= */ 0,
                        /* killed_pids= */ NULL,
                        start ? unit_log_leftover_process_start : unit_log_leftover_process_stop,
                        u);
}
bool unit_needs_console(Unit *u) {
        ExecContext *ec;
        UnitActiveState state;

        assert(u);

        state = unit_active_state(u);

        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return false;

        if (UNIT_VTABLE(u)->needs_console)
                return UNIT_VTABLE(u)->needs_console(u);

        /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
        ec = unit_get_exec_context(u);
        if (!ec)
                return false;

        return exec_context_may_touch_console(ec);
}
int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
int unit_get_log_level_max(const Unit *u) {
        if (u) {
                if (u->debug_invocation)
                        return LOG_DEBUG;

                ExecContext *ec = unit_get_exec_context(u);
                if (ec && ec->log_level_max >= 0)
                        return ec->log_level_max;
        }

        return log_get_max_level();
}
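
/* Precedence implemented above: a debug invocation (see unit_set_debug_invocation()) always wins
 * with LOG_DEBUG, then a per-unit LogLevelMax= setting if one is configured, and only as a last
 * resort the manager's global maximum log level. */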
bool unit_log_level_test(const Unit *u, int level) {
        assert(u);
        return LOG_PRI(level) <= unit_get_log_level_max(u);
}

void unit_log_success(Unit *u) {
        assert(u);

        /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */

        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SUCCESS_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}

void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_WARNING,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_FAILURE_RESULT_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        LOG_ITEM("UNIT_RESULT=%s", result));
}

void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_INFO,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SKIPPED_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        LOG_ITEM("UNIT_RESULT=%s", result));
}
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_PROCESS_EXIT_STR),
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        LOG_ITEM("EXIT_CODE=%s", sigchld_code_to_string(code)),
                        LOG_ITEM("EXIT_STATUS=%i", status),
                        LOG_ITEM("COMMAND=%s", strna(command)),
                        LOG_UNIT_INVOCATION_ID(u));
}
int unit_exit_status(Unit *u) {
        assert(u);

        /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
         * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
         * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
         * service process has exited abnormally (signal/coredump). */

        if (!UNIT_VTABLE(u)->exit_status)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->exit_status(u);
}

int unit_failure_action_exit_status(Unit *u) {
        int r;

        assert(u);

        /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */

        if (u->failure_action_exit_status >= 0)
                return u->failure_action_exit_status;

        r = unit_exit_status(u);
        if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
                return 255;

        return r;
}

int unit_success_action_exit_status(Unit *u) {
        int r;

        assert(u);

        /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */

        if (u->success_action_exit_status >= 0)
                return u->success_action_exit_status;

        r = unit_exit_status(u);
        if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
                return 255;

        return r;
}
int unit_test_trigger_loaded(Unit *u) {
        Unit *trigger;

        /* Tests whether the unit to trigger is loaded */

        trigger = UNIT_TRIGGER(u);
        if (!trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
                                            "Refusing to start, no unit to trigger.");
        if (trigger->load_state != UNIT_LOADED)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
                                            "Refusing to start, unit %s to trigger not loaded.", trigger->id);

        return 0;
}
void unit_destroy_runtime_data(Unit *u, const ExecContext *context, bool destroy_runtime_dir) {
        assert(u);
        assert(context);

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (destroy_runtime_dir && context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
        exec_context_destroy_mount_ns_dir(u);
}
int unit_clean(Unit *u, ExecCleanMask mask) {
        UnitActiveState state;

        assert(u);

        /* Special return values:
         *
         *   -EOPNOTSUPP → cleaning not supported for this unit type
         *   -EUNATCH    → cleaning not defined for this resource type
         *   -EBUSY      → unit currently can't be cleaned since it's running or not properly loaded, or has
         *                 a job queued or similar
         */

        if (!UNIT_VTABLE(u)->clean)
                return -EOPNOTSUPP;

        if (mask == 0)
                return -EUNATCH;

        if (u->load_state != UNIT_LOADED)
                return -EBUSY;

        if (u->job)
                return -EBUSY;

        state = unit_active_state(u);
        if (state != UNIT_INACTIVE)
                return -EBUSY;

        return UNIT_VTABLE(u)->clean(u, mask);
}
int unit_can_clean(Unit *u, ExecCleanMask *ret) {
        assert(u);

        if (!UNIT_VTABLE(u)->clean ||
            u->load_state != UNIT_LOADED) {
                *ret = 0;
                return 0;
        }

        /* When the clean() method is set, can_clean() really should be set too */
        assert(UNIT_VTABLE(u)->can_clean);

        return UNIT_VTABLE(u)->can_clean(u, ret);
}
bool unit_can_start_refuse_manual(Unit *u) {
        return unit_can_start(u) && !u->refuse_manual_start;
}

bool unit_can_stop_refuse_manual(Unit *u) {
        return unit_can_stop(u) && !u->refuse_manual_stop;
}

bool unit_can_isolate_refuse_manual(Unit *u) {
        return unit_can_isolate(u) && !u->refuse_manual_start;
}
void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret_next, FreezerState *ret_objective) {
        FreezerState current, parent, next, objective;

        assert(u);
        assert(action >= 0);
        assert(action < _FREEZER_ACTION_MAX);
        assert(ret_next);
        assert(ret_objective);

        /* This function determines the correct freezer state transitions for a unit
         * given the action being requested. It returns the next state, and also the "objective",
         * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
         * ultimately want to achieve. */

        current = u->freezer_state;

        Unit *slice = UNIT_GET_SLICE(u);
        if (slice)
                parent = slice->freezer_state;
        else
                parent = FREEZER_RUNNING;

        switch (action) {

        case FREEZER_FREEZE:
                /* We always "promote" a freeze initiated by parent into a normal freeze */
                if (IN_SET(current, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = FREEZER_FROZEN;
                else
                        next = FREEZER_FREEZING;
                break;

        case FREEZER_THAW:
                /* Thawing is the most complicated operation here, because we can't thaw a unit
                 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
                 * initiated by parent if the parent is frozen */
                if (IN_SET(current, FREEZER_RUNNING, FREEZER_THAWING,
                           FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) /* Should usually be refused by unit_freezer_action */
                        next = current;
                else if (current == FREEZER_FREEZING) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FREEZING_BY_PARENT;
                } else if (current == FREEZER_FROZEN) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FROZEN_BY_PARENT;
                } else
                        assert_not_reached();
                break;

        case FREEZER_PARENT_FREEZE:
                /* We need to avoid accidentally demoting units frozen manually */
                if (IN_SET(current, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = current;
                else
                        next = FREEZER_FREEZING_BY_PARENT;
                break;

        case FREEZER_PARENT_THAW:
                /* We don't want to thaw units from a parent if they were frozen
                 * manually, so for such units this action is a no-op */
                if (IN_SET(current, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
                        next = current;
                else
                        next = FREEZER_THAWING;
                break;

        default:
                assert_not_reached();
        }

        objective = freezer_state_finish(next);
        if (objective == FREEZER_FROZEN_BY_PARENT)
                objective = FREEZER_FROZEN;
        assert(IN_SET(objective, FREEZER_RUNNING, FREEZER_FROZEN));

        *ret_next = next;
        *ret_objective = objective;
}
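
/* Rough summary of the state machine above: FREEZER_FREEZE promotes any parent-initiated freeze
 * into a manual one (FREEZING_BY_PARENT -> FREEZING, FROZEN_BY_PARENT -> FROZEN); FREEZER_THAW
 * demotes a manual freeze into a parent-initiated one while the parent slice is itself still
 * freezing or frozen (FREEZING -> FREEZING_BY_PARENT, FROZEN -> FROZEN_BY_PARENT) and only
 * actually thaws once the parent is running or thawing; the PARENT_* actions never override a
 * manual freeze. */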
bool unit_can_freeze(const Unit *u) {
        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
                return false;

        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freezer_action;
}
void unit_set_freezer_state(Unit *u, FreezerState state) {
        assert(u);
        assert(state >= 0);
        assert(state < _FREEZER_STATE_MAX);

        if (u->freezer_state == state)
                return;

        log_unit_debug(u, "Freezer state changed %s -> %s",
                       freezer_state_to_string(u->freezer_state), freezer_state_to_string(state));

        u->freezer_state = state;

        unit_add_to_dbus_queue(u);
}
void unit_freezer_complete(Unit *u, FreezerState kernel_state) {
        bool expected;

        assert(u);
        assert(IN_SET(kernel_state, FREEZER_RUNNING, FREEZER_FROZEN));

        expected = IN_SET(u->freezer_state, FREEZER_RUNNING, FREEZER_THAWING) == (kernel_state == FREEZER_RUNNING);

        unit_set_freezer_state(u, expected ? freezer_state_finish(u->freezer_state) : kernel_state);
        log_unit_info(u, "Unit now %s.", u->freezer_state == FREEZER_RUNNING ? "thawed" :
                      freezer_state_to_string(u->freezer_state));

        /* If the cgroup's final state is against what's requested by us, report as canceled. */
        bus_unit_send_pending_freezer_message(u, /* canceled = */ !expected);
}
int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        if (!unit_can_freeze(u))
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
                return -EALREADY;
        if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
                return -EALREADY;
        if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                return -ECHILD;

        r = UNIT_VTABLE(u)->freezer_action(u, action);
        if (r <= 0)
                return r;

        assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
        return 1;
}
Condition* unit_find_failed_condition(Unit *u) {
        Condition *failed_trigger = NULL;
        bool has_succeeded_trigger = false;

        if (u->condition_result)
                return NULL;

        LIST_FOREACH(conditions, c, u->conditions)
                if (c->trigger) {
                        if (c->result == CONDITION_SUCCEEDED)
                                has_succeeded_trigger = true;
                        else if (!failed_trigger)
                                failed_trigger = c;
                } else if (c->result != CONDITION_SUCCEEDED)
                        return c;

        return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
}
int unit_can_live_mount(Unit *u, sd_bus_error *error) {
        assert(u);

        if (!UNIT_VTABLE(u)->live_mount)
                return sd_bus_error_setf(
                                error,
                                SD_BUS_ERROR_NOT_SUPPORTED,
                                "Live mounting not supported by unit type '%s'",
                                unit_type_to_string(u->type));

        if (u->load_state != UNIT_LOADED)
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_NO_SUCH_UNIT,
                                "Unit '%s' not loaded, cannot live mount",
                                u->id);

        if (!UNIT_VTABLE(u)->can_live_mount)
                return 0;

        return UNIT_VTABLE(u)->can_live_mount(u, error);
}
int unit_live_mount(
                Unit *u,
                const char *src,
                const char *dst,
                sd_bus_message *message,
                MountInNamespaceFlags flags,
                const MountOptions *options,
                sd_bus_error *error) {

        assert(u);
        assert(UNIT_VTABLE(u)->live_mount);

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
                log_unit_debug(u, "Unit not active, cannot perform live mount.");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_INACTIVE,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: unit not active",
                                src, dst, u->id);
        }

        if (unit_active_state(u) == UNIT_REFRESHING) {
                log_unit_debug(u, "Unit already live mounting, refusing further requests.");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_BUSY,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another live mount in progress",
                                src, dst, u->id);
        }

        if (u->job) {
                log_unit_debug(u, "Unit already has a job in progress, cannot live mount");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_BUSY,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another operation in progress",
                                src, dst, u->id);
        }

        return UNIT_VTABLE(u)->live_mount(u, src, dst, message, flags, options, error);
}
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
        assert(u);

        /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
         * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
         * is NULL the first entry found), or NULL if not found. */

        UNIT_FOREACH_DEPENDENCY(i, u, atom)
                if (!other || other == i)
                        return i;

        return NULL;
}
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;
                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
int unit_arm_timer(
                Unit *u,
                sd_event_source **source,
                bool relative,
                usec_t usec,
                sd_event_time_handler_t handler) {

        int r;

        assert(u);
        assert(source);
        assert(handler);

        if (*source) {
                if (usec == USEC_INFINITY)
                        return sd_event_source_set_enabled(*source, SD_EVENT_OFF);

                r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
                if (r < 0)
                        return r;

                return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
        }

        if (usec == USEC_INFINITY)
                return 0;

        r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
                        u->manager->event,
                        source,
                        CLOCK_MONOTONIC,
                        usec, 0,
                        handler,
                        u);
        if (r < 0)
                return r;

        const char *d = strjoina(unit_type_to_string(u->type), "-timer");
        (void) sd_event_source_set_description(*source, d);

        return 0;
}
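
/* Typical usage (hypothetical call site): a unit type arms its per-unit timeout with something like
 *
 *         r = unit_arm_timer(UNIT(s), &s->timer_event_source, true, s->timeout_usec,
 *                            service_dispatch_timer);
 *
 * Passing usec == USEC_INFINITY disarms an already set up timer, or is a no-op if none exists. */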
bool unit_passes_filter(Unit *u, char * const *states, char * const *patterns) {
        assert(u);

        if (!strv_isempty(states)) {
                char * const *unit_states = STRV_MAKE(
                                unit_load_state_to_string(u->load_state),
                                unit_active_state_to_string(unit_active_state(u)),
                                unit_sub_state_to_string(u));

                if (!strv_overlap(states, unit_states))
                        return false;
        }

        return strv_fnmatch_or_empty(patterns, u->id, FNM_NOESCAPE);
}
static int unit_get_nice(Unit *u) {
        ExecContext *ec;

        ec = unit_get_exec_context(u);
        return ec ? ec->nice : 0;
}

static uint64_t unit_get_cpu_weight(Unit *u) {
        CGroupContext *cc;

        cc = unit_get_cgroup_context(u);
        return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
}
int unit_get_exec_quota_stats(Unit *u, ExecContext *c, ExecDirectoryType dt, uint64_t *ret_usage, uint64_t *ret_limit) {
        _cleanup_close_ int fd = -EBADF;
        _cleanup_free_ char *p = NULL, *pp = NULL;
        uint32_t proj_id;
        struct dqblk req;
        int r;

        assert(u);
        assert(c);
        assert(ret_usage);
        assert(ret_limit);

        if (c->directories[dt].n_items == 0) {
                *ret_usage = UINT64_MAX;
                *ret_limit = UINT64_MAX;
                return 0;
        }

        ExecDirectoryItem *i = &c->directories[dt].items[0];
        p = path_join(u->manager->prefix[dt], i->path);
        if (!p)
                return log_oom_debug();

        if (exec_directory_is_private(c, dt)) {
                pp = path_join(u->manager->prefix[dt], "private", i->path);
                if (!pp)
                        return log_oom_debug();
        }

        const char *target_dir = pp ?: p;
        fd = open(target_dir, O_PATH | O_CLOEXEC | O_DIRECTORY);
        if (fd < 0)
                return log_unit_debug_errno(u, errno, "Failed to get exec quota stats: %m");

        r = read_fs_xattr_fd(fd, /* ret_xflags = */ NULL, &proj_id);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get project ID for exec quota stats: %m");

        r = quota_query_proj_id(fd, proj_id, &req);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to query project ID for exec quota stats: %m");

        *ret_usage = req.dqb_curspace;
        *ret_limit = req.dqb_bhardlimit * QIF_DQBLKSIZE;

        return 0;
}
int unit_compare_priority(Unit *a, Unit *b) {
        int ret;

        ret = CMP(a->type, b->type);
        if (ret != 0)
                return -ret;

        ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
        if (ret != 0)
                return -ret;

        ret = CMP(unit_get_nice(a), unit_get_nice(b));
        if (ret != 0)
                return ret;

        return strcmp(a->id, b->id);
}

const char* unit_log_field(const Unit *u) {
        return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "UNIT=" : "USER_UNIT=";
}

const char* unit_invocation_log_field(const Unit *u) {
        return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "INVOCATION_ID=" : "USER_INVOCATION_ID=";
}

const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};

ActivationDetails *activation_details_new(Unit *trigger_unit) {
        _cleanup_free_ ActivationDetails *details = NULL;

        assert(trigger_unit);
        assert(trigger_unit->type != _UNIT_TYPE_INVALID);
        assert(trigger_unit->id);

        details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
        if (!details)
                return NULL;

        *details = (ActivationDetails) {
                .n_ref = 1,
                .trigger_unit_type = trigger_unit->type,
        };

        details->trigger_unit_name = strdup(trigger_unit->id);
        if (!details->trigger_unit_name)
                return NULL;

        if (ACTIVATION_DETAILS_VTABLE(details)->init)
                ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);

        return TAKE_PTR(details);
}
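
/* The allocation above uses the per-type vtable's object_size, so the type-specific objects (for path
 * and timer units, per the vtable table above) get their additional fields zero-initialized, ready
 * for the init() hook to fill in details from the triggering unit. */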

static ActivationDetails *activation_details_free(ActivationDetails *details) {
        if (!details)
                return NULL;

        if (ACTIVATION_DETAILS_VTABLE(details)->done)
                ACTIVATION_DETAILS_VTABLE(details)->done(details);

        free(details->trigger_unit_name);

        return mfree(details);
}

void activation_details_serialize(const ActivationDetails *details, FILE *f) {
        if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
                return;

        (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));

        if (details->trigger_unit_name)
                (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);

        if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
                ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
}
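
/* Serialized form (illustrative values), as parsed again by activation_details_deserialize() below:
 *
 *         activation-details-unit-type=timer
 *         activation-details-unit-name=foo.timer
 *         ...plus any additional key/value pairs the type-specific serialize() hook writes...
 */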

int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
        int r;

        assert(key);
        assert(value);
        assert(details);

        if (!*details) {
                UnitType t;

                if (!streq(key, "activation-details-unit-type"))
                        return -EINVAL;

                t = unit_type_from_string(value);
                if (t < 0)
                        return t;

                /* The activation details vtable has defined ops only for path and timer units */
                if (!activation_details_vtable[t])
                        return -EINVAL;

                *details = malloc0(activation_details_vtable[t]->object_size);
                if (!*details)
                        return -ENOMEM;

                **details = (ActivationDetails) {
                        .n_ref = 1,
                        .trigger_unit_type = t,
                };

                return 0;
        }

        if (streq(key, "activation-details-unit-name")) {
                r = free_and_strdup(&(*details)->trigger_unit_name, value);
                if (r < 0)
                        return r;

                return 0;
        }

        if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
                return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);

        return -EINVAL;
}
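
/* The deserialization protocol mirrors the serialization above: the first key must be
 * "activation-details-unit-type", which allocates an object of the right size for the unit type;
 * every later key (the trigger unit name, plus whatever the type-specific deserialize() hook
 * understands) is applied to that existing object, and unknown keys are rejected with -EINVAL. */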

int activation_details_append_env(const ActivationDetails *details, char ***strv) {
        int r = 0;

        assert(strv);

        if (!details)
                return 0;

        if (!isempty(details->trigger_unit_name)) {
                char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
                if (!s)
                        return -ENOMEM;

                r = strv_consume(strv, TAKE_PTR(s));
                if (r < 0)
                        return r;
        }

        if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
                r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
                if (r < 0)
                        return r;
        }

        return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
}
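
/* For a unit triggered by foo.timer this would yield e.g. "TRIGGER_UNIT=foo.timer" in the environment
 * block, plus whatever variables the type-specific append_env() hook contributes; the return value is
 * the total number of variables appended. */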

int activation_details_append_pair(const ActivationDetails *details, char ***strv) {
        int r = 0;

        assert(strv);

        if (!details)
                return 0;

        if (!isempty(details->trigger_unit_name)) {
                r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
                if (r < 0)
                        return r;
        }

        if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
                r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
                if (r < 0)
                        return r;
        }

        return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);

static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
        [UNIT_MOUNT_WANTS] = "WantsMountsFor",
        [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
};

DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);

static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
        [OOM_CONTINUE] = "continue",
        [OOM_STOP] = "stop",
        [OOM_KILL] = "kill",
};

DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);
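
/* DEFINE_STRING_TABLE_LOOKUP() generates oom_policy_to_string() and oom_policy_from_string(), which
 * back parsing of the OOMPolicy= unit file setting and formatting of the corresponding property. */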

UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
        switch (t) {

        case UNIT_MOUNT_WANTS:
                return UNIT_WANTS;

        case UNIT_MOUNT_REQUIRES:
                return UNIT_REQUIRES;

        default:
                assert_not_reached();
        }
}