/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <linux/capability.h>

#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "ansi-color.h"
#include "bpf-firewall.h"
#include "bpf-restrict-fs.h"
#include "bus-common-errors.h"
#include "bus-internal.h"
#include "cgroup-setup.h"
#include "cgroup-util.h"
#include "condition.h"
#include "dbus-unit.h"
#include "dynamic-user.h"
#include "exec-credential.h"
#include "format-util.h"
#include "id128-util.h"
#include "iovec-util.h"
#include "label-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "logarithm.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "mountpoint-util.h"
#include "path-util.h"
#include "process-util.h"
#include "serialize.h"
#include "signal-util.h"
#include "siphash24.h"
#include "sparse-endian.h"
#include "specifier.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC    (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES    (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES    UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC     (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES     (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES     (128 * U64_MB)
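
/* Maps each unit type to the vtable implementing that type's behaviour. */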
const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_SERVICE]   = &service_vtable,
        [UNIT_SOCKET]    = &socket_vtable,
        [UNIT_TARGET]    = &target_vtable,
        [UNIT_DEVICE]    = &device_vtable,
        [UNIT_MOUNT]     = &mount_vtable,
        [UNIT_AUTOMOUNT] = &automount_vtable,
        [UNIT_SWAP]      = &swap_vtable,
        [UNIT_TIMER]     = &timer_vtable,
        [UNIT_PATH]      = &path_vtable,
        [UNIT_SLICE]     = &slice_vtable,
        [UNIT_SCOPE]     = &scope_vtable,
};
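
/* Allocates a new unit of the given concrete size and presets all fields to their defaults;
 * returns NULL on allocation failure. */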
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = _PRESET_ACTION_INVALID;
        u->on_failure_job_mode = JOB_REPLACE;
        u->on_success_job_mode = JOB_FAIL;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;

        u->failure_action_exit_status = u->success_action_exit_status = -1;

        u->last_section_private = -1;

        u->start_ratelimit = m->defaults.start_limit;

        u->auto_start_stop_ratelimit = (const RateLimit) {
                .interval = 10 * USEC_PER_SEC,
                .burst = 16,
        };

        return u;
}
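
/* Convenience wrapper around unit_new() that also registers the new unit under the given name. */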
int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;
        int r;

        u = unit_new(m, size);
        if (!u)
                return -ENOMEM;

        r = unit_add_name(u, name);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(u);
        return 0;
}
bool unit_has_name(const Unit *u, const char *name) {
        assert(u);
        assert(name);

        return streq_ptr(name, u->id) ||
               set_contains(u->aliases, name);
}
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->io_accounting = u->manager->defaults.io_accounting;
                cc->memory_accounting = u->manager->defaults.memory_accounting;
                cc->tasks_accounting = u->manager->defaults.tasks_accounting;
                cc->ip_accounting = u->manager->defaults.ip_accounting;

                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->defaults.tasks_max;

                cc->memory_pressure_watch = u->manager->defaults.memory_pressure_watch;
                cc->memory_pressure_threshold_usec = u->manager->defaults.memory_pressure_threshold_usec;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (u->manager->defaults.oom_score_adjust_set) {
                        ec->oom_score_adjust = u->manager->defaults.oom_score_adjust;
                        ec->oom_score_adjust_set = true;
                }

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(0, &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->names is allocated. We may leave u->names
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops_free, donated_name);
        if (r < 0)
                return r;
        assert(r > 0);

        return 0;
}
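
/* Registers an additional name for the unit: the first name added becomes u->id, any further
 * names become aliases. */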
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "Instance is not set when adding name '%s'.", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "Failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Unit already exists when adding name '%s'.", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Name '%s' is invalid.", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Failed to derive unit type from name '%s'.", name);

        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Unit type is illegal: u->type(%d) and t(%d) for name '%s'.",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "Templates are not allowed for name '%s'.", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "Cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "Cannot add name, manager has too many units.");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name);

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
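
/* Makes one of the unit's existing names (or, for a template name, its instantiated form) the
 * new primary id, swapping the old id into the alias set. */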
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
int unit_set_description(Unit *u, const char *description) {
        int r;

        assert(u);

        r = free_and_strdup(&u->description, empty_to_null(description));
        if (r < 0)
                return r;
        if (r > 0)
                unit_add_to_dbus_queue(u);

        return 0;
}
static bool unit_success_failure_handler_has_jobs(Unit *unit) {
        Unit *other;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_SUCCESS)
                if (other->job || other->nop_job)
                        return true;

        UNIT_FOREACH_DEPENDENCY(other, unit, UNIT_ATOM_ON_FAILURE)
                if (other->job || other->nop_job)
                        return true;

        return false;
}
void unit_release_resources(Unit *u) {
        UnitActiveState state;
        ExecContext *ec;

        assert(u);

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        state = unit_active_state(u);
        if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                return;

        if (unit_will_restart(u))
                return;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                exec_context_destroy_runtime_directory(ec, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        if (UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);
}
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
         * unit may be collected, and false if there's some reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
         * using markers to properly collect dependency loops.
         */

        if (u->job || u->nop_job)
                return false;

        if (u->perpetual)
                return false;

        /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
         * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
         * before we release the unit. */
        if (u->in_cgroup_empty_queue || u->in_cgroup_oom_queue)
                return false;

        /* Make sure to send out D-Bus events before we unload the unit */
        if (u->in_dbus_queue)
                return false;

        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        state = unit_active_state(u);

        /* But we keep the unit object around for longer when it is referenced or configured to not be
         * collected. */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!UNIT_IS_INACTIVE_OR_FAILED(state))
                        return false;

                break;

        default:
                assert_not_reached();
        }

        /* Check if any OnFailure= or OnSuccess= jobs may be pending */
        if (unit_success_failure_handler_has_jobs(u))
                return false;

        /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
         * around. Units with active processes should never be collected. */
        r = unit_cgroup_is_empty(u);
        if (r <= 0 && !IN_SET(r, -ENXIO, -EOWNERDEAD))
                return false; /* ENXIO/EOWNERDEAD means: currently not realized */

        if (!UNIT_VTABLE(u)->may_gc)
                return true;

        return UNIT_VTABLE(u)->may_gc(u);
}
void unit_add_to_load_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state != UNIT_STUB || u->in_load_queue)
                return;

        LIST_PREPEND(load_queue, u->manager->load_queue, u);
        u->in_load_queue = true;
}
void unit_add_to_cleanup_queue(Unit *u) {
        assert(u);

        if (u->in_cleanup_queue)
                return;

        LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
        u->in_cleanup_queue = true;
}
void unit_add_to_gc_queue(Unit *u) {
        assert(u);

        if (u->in_gc_queue || u->in_cleanup_queue)
                return;

        if (!unit_may_gc(u))
                return;

        LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
        u->in_gc_queue = true;
}
void unit_add_to_dbus_queue(Unit *u) {
        assert(u);
        assert(u->type != _UNIT_TYPE_INVALID);

        if (u->load_state == UNIT_STUB || u->in_dbus_queue)
                return;

        /* Shortcut things if nobody cares */
        if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
            sd_bus_track_count(u->bus_track) <= 0 &&
            set_isempty(u->manager->private_buses)) {
                u->sent_dbus_new_signal = true;
                return;
        }

        LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
        u->in_dbus_queue = true;
}
void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_unneeded_queue)
                return;

        if (!u->stop_when_unneeded)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
        u->in_stop_when_unneeded_queue = true;
}
void unit_submit_to_start_when_upheld_queue(Unit *u) {
        assert(u);

        if (u->in_start_when_upheld_queue)
                return;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_START_STEADILY, NULL))
                return;

        LIST_PREPEND(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);
        u->in_start_when_upheld_queue = true;
}
void unit_submit_to_stop_when_bound_queue(Unit *u) {
        assert(u);

        if (u->in_stop_when_bound_queue)
                return;

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return;

        if (!unit_has_dependency(u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT, NULL))
                return;

        LIST_PREPEND(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);
        u->in_stop_when_bound_queue = true;
}
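
/* Returns true if the unit (or its type) has any resources that could be released once it is done. */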
static bool unit_can_release_resources(Unit *u) {
        ExecContext *ec;

        assert(u);

        if (UNIT_VTABLE(u)->release_resources)
                return true;

        ec = unit_get_exec_context(u);
        if (ec && ec->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART)
                return true;

        return false;
}
void unit_submit_to_release_resources_queue(Unit *u) {
        assert(u);

        if (u->in_release_resources_queue)
                return;

        if (u->job || u->nop_job)
                return;

        if (u->perpetual)
                return;

        if (!unit_can_release_resources(u))
                return;

        LIST_PREPEND(release_resources_queue, u->manager->release_resources_queue, u);
        u->in_release_resources_queue = true;
}
static void unit_clear_dependencies(Unit *u) {
        assert(u);

        /* Removes all dependencies configured on u and their reverse dependencies. */

        for (Hashmap *deps; (deps = hashmap_steal_first(u->dependencies));) {

                for (Unit *other; (other = hashmap_steal_first_key(deps));) {
                        Hashmap *other_deps;

                        HASHMAP_FOREACH(other_deps, other->dependencies)
                                hashmap_remove(other_deps, u);

                        unit_add_to_gc_queue(other);
                        other->dependency_generation++;
                }

                hashmap_free(deps);
        }

        u->dependencies = hashmap_free(u->dependencies);
        u->dependency_generation++;
}
static void unit_remove_transient(Unit *u) {
        assert(u);

        if (!u->transient)
                return;

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                if (path_extract_directory(*i, &p) < 0) /* Get the drop-in directory from the drop-in file */
                        continue;

                if (path_extract_directory(p, &pp) < 0) /* Get the config directory from the drop-in directory */
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }

        if (u->fragment_path) {
                (void) unlink(u->fragment_path);
                (void) unit_file_remove_from_name_map(
                                &u->manager->lookup_paths,
                                &u->manager->unit_cache_timestamp_hash,
                                &u->manager->unit_id_map,
                                &u->manager->unit_name_map,
                                &u->manager->unit_path_cache,
                                u->fragment_path);
        }
}
static void unit_free_mounts_for(Unit *u) {
        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                for (;;) {
                        _cleanup_free_ char *path = NULL;

                        path = hashmap_steal_first_key(u->mounts_for[t]);
                        if (!path)
                                break;

                        char s[strlen(path) + 1];

                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_needing_mounts_for[t], s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                if (set_isempty(x)) {
                                        assert_se(hashmap_remove(u->manager->units_needing_mounts_for[t], y));
                                        free(y);
                                        set_free(x);
                                }
                        }
                }

                u->mounts_for[t] = hashmap_free(u->mounts_for[t]);
        }
}
static void unit_done(Unit *u) {
        ExecContext *ec;
        CGroupContext *cc;

        assert(u);

        if (u->type < 0)
                return;

        if (UNIT_VTABLE(u)->done)
                UNIT_VTABLE(u)->done(u);

        ec = unit_get_exec_context(u);
        if (ec)
                exec_context_done(ec);

        cc = unit_get_cgroup_context(u);
        if (cc)
                cgroup_context_done(cc);
}
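
/* Frees the unit: detaches it from the manager, all queues and name hashmaps, drops its jobs and
 * dependencies, and releases the attached memory. Always returns NULL, for cleanup convenience. */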
Unit* unit_free(Unit *u) {
        Unit *slice;
        char *t;

        if (!u)
                return NULL;

        sd_event_source_disable_unref(u->auto_start_stop_event_source);

        u->transient_file = safe_fclose(u->transient_file);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->bus_track = sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_invocation = sd_bus_message_unref(u->pending_freezer_invocation);

        unit_free_mounts_for(u);

        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        slice = UNIT_GET_SLICE(u);
        unit_clear_dependencies(u);
        if (slice)
                unit_add_family_to_cgroup_realize_queue(slice);

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u, /* drop_cgroup_runtime = */ true);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cgroup_oom_queue)
                LIST_REMOVE(cgroup_oom_queue, u->manager->cgroup_oom_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        if (u->in_start_when_upheld_queue)
                LIST_REMOVE(start_when_upheld_queue, u->manager->start_when_upheld_queue, u);

        if (u->in_stop_when_bound_queue)
                LIST_REMOVE(stop_when_bound_queue, u->manager->stop_when_bound_queue, u);

        if (u->in_release_resources_queue)
                LIST_REMOVE(release_resources_queue, u->manager->release_resources_queue, u);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        free(u->access_selinux_context);

        set_free(u->aliases);
        free(u->id);

        activation_details_unref(u->activation_details);

        return mfree(u);
}
UnitActiveState unit_active_state(Unit *u) {
        assert(u);

        if (u->load_state == UNIT_MERGED)
                return unit_active_state(unit_follow_merge(u));

        /* After a reload it might happen that a unit is not correctly
         * loaded but still has a process around. That's why we won't
         * shortcut failed loading to UNIT_INACTIVE_FAILED. */

        return UNIT_VTABLE(u)->active_state(u);
}
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
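
/* Moves the id and all aliases of 'other' over to 'u', and repoints the manager's global name
 * hashmap entries at 'u'. */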
static int unit_merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id);
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                set_remove(u->aliases, other->id);
                return r;
        }

        other->id = NULL;
        other->aliases = set_free(other->aliases);

        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
static int unit_reserve_dependencies(Unit *u, Unit *other) {
        size_t n_reserve;
        Hashmap *deps;
        void *d;
        int r;

        assert(u);
        assert(other);

        /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
         * fail.
         *
         * First make some room in the per dependency type hashmaps. Using the summed size of both units'
         * hashmaps is an estimate that is likely too high since they probably use some of the same
         * types. But it's never too low, and that's all we need. */

        n_reserve = MIN(hashmap_size(other->dependencies), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX, hashmap_size(u->dependencies)));
        if (n_reserve > 0) {
                r = hashmap_ensure_allocated(&u->dependencies, NULL);
                if (r < 0)
                        return r;

                r = hashmap_reserve(u->dependencies, n_reserve);
                if (r < 0)
                        return r;
        }

        /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
         * other unit's dependencies.
         *
         * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
         * reserve anything for. In that case other's set will be transferred as a whole to u by
         * complete_move(). */

        HASHMAP_FOREACH_KEY(deps, d, u->dependencies) {
                Hashmap *other_deps;

                other_deps = hashmap_get(other->dependencies, d);

                r = hashmap_reserve(deps, hashmap_size(other_deps));
                if (r < 0)
                        return r;
        }

        return 0;
}
static bool unit_should_warn_about_dependency(UnitDependency dependency) {
        /* Only warn about some unit types */
        return IN_SET(dependency,
                      UNIT_CONFLICTS,
                      UNIT_CONFLICTED_BY,
                      UNIT_BEFORE,
                      UNIT_AFTER,
                      UNIT_ON_SUCCESS,
                      UNIT_ON_FAILURE,
                      UNIT_TRIGGERS,
                      UNIT_TRIGGERED_BY);
}
static int unit_per_dependency_type_hashmap_update(
                Hashmap *per_type,
                Unit *other,
                UnitDependencyMask origin_mask,
                UnitDependencyMask destination_mask) {

        UnitDependencyInfo info;
        int r;

        assert(other);
        assert_cc(sizeof(void*) == sizeof(info));

        /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
         * exists, or insert it anew if not. */

        info.data = hashmap_get(per_type, other);
        if (info.data) {
                /* Entry already exists. Add in our mask. */

                if (FLAGS_SET(origin_mask, info.origin_mask) &&
                    FLAGS_SET(destination_mask, info.destination_mask))
                        return 0; /* NOP */

                info.origin_mask |= origin_mask;
                info.destination_mask |= destination_mask;

                r = hashmap_update(per_type, other, info.data);
        } else {
                info = (UnitDependencyInfo) {
                        .origin_mask = origin_mask,
                        .destination_mask = destination_mask,
                };

                r = hashmap_put(per_type, other, info.data);
        }
        if (r < 0)
                return r;

        return 1;
}
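
/* Merges all dependencies of 'other' into 'u', rewriting references held by third-party units so
 * that they point at 'u' instead of 'other'. */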
static void unit_merge_dependencies(Unit *u, Unit *other) {
        Hashmap *deps;
        void *dt; /* Actually of type UnitDependency, except that we don't bother casting it here,
                   * since the hashmaps all want it as void pointer. */

        assert(u);
        assert(other);

        if (u == other)
                return;

        /* First, remove dependency to other. */
        HASHMAP_FOREACH_KEY(deps, dt, u->dependencies) {
                if (hashmap_remove(deps, other) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                        log_unit_warning(u, "Dependency %s=%s is dropped, as %s is merged into %s.",
                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                         other->id, other->id, u->id);

                if (hashmap_isempty(deps))
                        hashmap_free(hashmap_remove(u->dependencies, dt));
        }

        for (;;) {
                _cleanup_hashmap_free_ Hashmap *other_deps = NULL;
                UnitDependencyInfo di_back;
                Unit *back;

                /* Let's focus on one dependency type at a time, that 'other' has defined. */
                other_deps = hashmap_steal_first_key_and_value(other->dependencies, &dt);
                if (!other_deps)
                        break; /* done! */

                deps = hashmap_get(u->dependencies, dt);

                /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
                 * referenced units as 'back'. */
                HASHMAP_FOREACH_KEY(di_back.data, back, other_deps) {
                        Hashmap *back_deps;
                        void *back_dt;

                        if (back == u) {
                                /* This is a dependency pointing back to the unit we want to merge with?
                                 * Suppress it (but warn) */
                                if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt)))
                                        log_unit_warning(u, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
                                                         unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt)),
                                                         u->id, other->id, other->id, u->id);

                                hashmap_remove(other_deps, back);
                                continue;
                        }

                        /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
                         * point to 'u' instead. */
                        HASHMAP_FOREACH_KEY(back_deps, back_dt, back->dependencies) {
                                UnitDependencyInfo di_move;

                                di_move.data = hashmap_remove(back_deps, other);
                                if (!di_move.data)
                                        continue;

                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          back_deps,
                                                          u,
                                                          di_move.origin_mask,
                                                          di_move.destination_mask) >= 0);
                        }

                        /* The target unit already has dependencies of this type, let's then merge this individually. */
                        if (deps)
                                assert_se(unit_per_dependency_type_hashmap_update(
                                                          deps,
                                                          back,
                                                          di_back.origin_mask,
                                                          di_back.destination_mask) >= 0);
                }

                /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
                 * Let's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
                 * dependencies of this type, let's move them per type wholesale. */
                if (!deps)
                        assert_se(hashmap_put(u->dependencies, dt, TAKE_PTR(other_deps)) >= 0);
        }

        other->dependencies = hashmap_free(other->dependencies);

        u->dependency_generation++;
        other->dependency_generation++;
}
int unit_merge(Unit *u, Unit *other) {
        int r;

        assert(u);
        assert(other);
        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        if (other == u)
                return 0;

        if (u->type != other->type)
                return -EINVAL;

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
                return -EEXIST;

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
                return -EEXIST;

        if (!streq_ptr(u->instance, other->instance))
                return -EINVAL;

        if (other->job)
                return -EEXIST;

        if (other->nop_job)
                return -EEXIST;

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                return -EEXIST;

        /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
         * fail. We don't have a way to undo reservations. A reservation is not a leak. */
        r = unit_reserve_dependencies(u, other);
        if (r < 0)
                return r;

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        unit_merge_dependencies(u, other);

        /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
        r = unit_merge_names(u, other);
        if (r < 0)
                return r;

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        if (!u->activation_details)
                u->activation_details = activation_details_ref(other->activation_details);

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);

        return 0;
}
int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        return unit_add_name(u, name);
}
Unit* unit_follow_merge(Unit *u) {
        assert(u);

        while (u->load_state == UNIT_MERGED)
                assert_se(u = u->merged_into);

        return u;
}
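
/* Adds the implicit unit dependencies that follow from an ExecContext: mount dependencies for the
 * configured directories, plus ordering against the logging, tmpfiles and udev services where needed. */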
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */

        if (c->working_directory) {
                r = unit_add_mounts_for(
                                u,
                                c->working_directory,
                                UNIT_DEPENDENCY_FILE,
                                c->working_directory_missing_ok ? UNIT_MOUNT_WANTS : UNIT_MOUNT_REQUIRES);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_add_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_add_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                FOREACH_ARRAY(i, c->directories[dt].items, c->directories[dt].n_items) {
                        _cleanup_free_ char *p = NULL;

                        p = path_join(u->manager->prefix[dt], i->path);
                        if (!p)
                                return -ENOMEM;

                        r = unit_add_mounts_for(u, p, UNIT_DEPENDENCY_FILE, UNIT_MOUNT_REQUIRES);
                        if (r < 0)
                                return r;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (c->directories[EXEC_DIRECTORY_STATE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_CACHE].n_items > 0 ||
            c->directories[EXEC_DIRECTORY_LOGS].n_items > 0) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* This must be already set in unit_patch_contexts(). */
        assert(c->private_var_tmp >= 0 && c->private_var_tmp < _PRIVATE_TMP_MAX);

        if (c->private_tmp == PRIVATE_TMP_CONNECTED) {
                assert(c->private_var_tmp == PRIVATE_TMP_CONNECTED);

                r = unit_add_mounts_for(u, "/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_mounts_for(u, "/var/tmp/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

        } else if (c->private_var_tmp == PRIVATE_TMP_DISCONNECTED && !exec_context_with_rootfs(c)) {
                /* Even if PrivateTmp=disconnected, we still require /var/tmp/ mountpoint to be present,
                 * i.e. /var/ needs to be mounted. See comments in unit_patch_contexts(). */
                r = unit_add_mounts_for(u, "/var/", UNIT_DEPENDENCY_FILE, UNIT_MOUNT_WANTS);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                static const struct {
                        const char *template;
                        UnitType type;
                } deps[] = {
                        { "systemd-journald",         UNIT_SOCKET,  },
                        { "systemd-journald-varlink", UNIT_SOCKET,  },
                        { "systemd-journald-sync",    UNIT_SERVICE, },
                };

                FOREACH_ELEMENT(i, deps) {
                        _cleanup_free_ char *unit = NULL;

                        r = unit_name_build_from_type(i->template, c->log_namespace, i->type, &unit);
                        if (r < 0)
                                return r;

                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, unit, true, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        } else if (IN_SET(c->std_output, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                          EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) ||
                   IN_SET(c->std_error, EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                          EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE)) {

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        return 0;
}
const char* unit_description(Unit *u) {
        assert(u);

        if (u->description)
                return u->description;

        return strna(u->id);
}
const char* unit_status_string(Unit *u, char **ret_combined_buffer) {
        assert(u);
        assert(u->id);

        /* Return u->id, u->description, or "{u->id} - {u->description}".
         * Versions with u->description are only used if it is set.
         * The last option is used if configured and the caller provided the 'ret_combined_buffer'
         * parameter.
         *
         * Note that *ret_combined_buffer may be set to NULL. */

        if (!u->description ||
            u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME ||
            (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && !ret_combined_buffer) ||
            streq(u->description, u->id)) {

                if (ret_combined_buffer)
                        *ret_combined_buffer = NULL;
                return u->id;
        }

        if (ret_combined_buffer) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED) {
                        *ret_combined_buffer = strjoin(u->id, " - ", u->description);
                        if (*ret_combined_buffer)
                                return *ret_combined_buffer;
                        log_oom(); /* Fall back to ->description */
                } else
                        *ret_combined_buffer = NULL;
        }

        return u->description;
}
/* Common implementation for multiple backends */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        u = unit_follow_merge(u);

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drop-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(u);
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = ASSERT_PTR(ASSERT_PTR(u)->manager);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}
int unit_add_default_target_dependency(Unit *u, Unit *target) {
        assert(u);
        assert(target);

        if (target->type != UNIT_TARGET)
                return 0;

        /* Only add the dependency if both units are loaded, so that
         * the loop check below is reliable */
        if (u->load_state != UNIT_LOADED ||
            target->load_state != UNIT_LOADED)
                return 0;

        /* If either side wants no automatic dependencies, then let's
         * skip this */
        if (!u->default_dependencies ||
            !target->default_dependencies)
                return 0;

        /* Don't create loops */
        if (unit_has_dependency(target, UNIT_ATOM_BEFORE, u))
                return 0;

        return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
}
static int unit_add_slice_dependencies(Unit *u) {
        Unit *slice;

        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        slice = UNIT_GET_SLICE(u);
        if (slice) {
                if (!IN_SET(slice->freezer_state, FREEZER_RUNNING, FREEZER_THAWING))
                        u->freezer_state = FREEZER_FROZEN_BY_PARENT;

                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, slice, true, mask);
        }

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}
static int unit_add_mount_dependencies(Unit *u) {
        bool changed = false;
        int r;

        assert(u);

        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; ++t) {
                UnitDependencyInfo di;
                const char *path;

                HASHMAP_FOREACH_KEY(di.data, path, u->mounts_for[t]) {

                        char prefix[strlen(ASSERT_PTR(path)) + 1];

                        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                                _cleanup_free_ char *p = NULL;
                                Unit *m;

                                r = unit_name_from_path(prefix, ".mount", &p);
                                if (r == -EINVAL)
                                        continue; /* If the path cannot be converted to a mount unit name,
                                                   * then it's not manageable as a unit by systemd, and
                                                   * hence we don't need a dependency on it. Let's thus
                                                   * silently ignore the issue. */
                                if (r < 0)
                                        return r;

                                m = manager_get_unit(u->manager, p);
                                if (!m) {
                                        /* Make sure to load the mount unit if it exists. If so the
                                         * dependencies on this unit will be added later during the loading
                                         * of the mount unit. */
                                        (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                        continue;
                                }

                                if (m == u)
                                        continue;

                                if (m->load_state != UNIT_LOADED)
                                        continue;

                                r = unit_add_dependency(
                                                u,
                                                UNIT_AFTER,
                                                m,
                                                /* add_reference= */ true,
                                                di.origin_mask);
                                if (r < 0)
                                        return r;
                                changed = changed || r > 0;

                                if (m->fragment_path) {
                                        r = unit_add_dependency(
                                                        u,
                                                        unit_mount_dependency_type_to_dependency_type(t),
                                                        m,
                                                        /* add_reference= */ true,
                                                        di.origin_mask);
                                        if (r < 0)
                                                return r;
                                        changed = changed || r > 0;
                                }
                        }
                }
        }

        return changed;
}
static int unit_add_oomd_dependencies(Unit *u) {
        CGroupContext *c;
        CGroupMask mask;
        int r;

        assert(u);

        if (!u->default_dependencies)
                return 0;

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        bool wants_oomd = c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL;
        if (!wants_oomd)
                return 0;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_debug_errno(r, "Failed to determine supported controllers: %m");

        if (!FLAGS_SET(mask, CGROUP_MASK_MEMORY))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
}
static int unit_add_startup_units(Unit *u) {
        if (!unit_has_startup_cgroup_constraints(u))
                return 0;

        return set_ensure_put(&u->manager->startup_units, NULL, u);
}
static const struct {
        UnitDependencyAtom atom;
        size_t job_mode_offset;
        const char *dependency_name;
        const char *job_mode_setting_name;
} on_termination_settings[] = {
        { UNIT_ATOM_ON_SUCCESS, offsetof(Unit, on_success_job_mode), "OnSuccess=", "OnSuccessJobMode=" },
        { UNIT_ATOM_ON_FAILURE, offsetof(Unit, on_failure_job_mode), "OnFailure=", "OnFailureJobMode=" },
};
static int unit_validate_on_termination_job_modes(Unit *u) {
        assert(u);

        /* Verify that if On{Success,Failure}JobMode=isolate, only one unit gets specified. */

        FOREACH_ELEMENT(setting, on_termination_settings) {
                JobMode job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);

                if (job_mode != JOB_ISOLATE)
                        continue;

                Unit *other, *found = NULL;
                UNIT_FOREACH_DEPENDENCY(other, u, setting->atom) {
                        if (!found)
                                found = other;
                        else if (found != other)
                                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC),
                                                            "More than one %s dependency specified but %sisolate set. Refusing.",
                                                            setting->dependency_name, setting->job_mode_setting_name);
                }
        }

        return 0;
}
int unit_load(Unit *u) {
        int r;

        assert(u);

        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                r = unit_validate_on_termination_job_modes(u);
                if (r < 0)
                        goto fail;

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                        r == -ENOEXEC              ? UNIT_BAD_SETTING :
                                                     UNIT_ERROR;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        if (u && !unit_log_level_test(u, level))
                return -ERRNO_VALUE(error);

        va_start(ap, format);
        if (u)
                r = log_object_internalv(level, error, file, line, func,
                                         unit_log_field(u),
                                         u->id,
                                         unit_invocation_log_field(u),
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = true;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}
static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_now(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}
void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *format, const char *ident) {
        if (log_get_show_color()) {
                if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_COMBINED && strchr(ident, ' '))
                        ident = strjoina(ANSI_HIGHLIGHT, u->id, ANSI_NORMAL, " - ", u->description);
                else
                        ident = strjoina(ANSI_HIGHLIGHT, ident, ANSI_NORMAL);
        }

        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, format, ident);
        REENABLE_WARNING;
}
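
/* Checks the unit's start rate limit: returns 0 while starts are still allowed; otherwise marks
 * the limit as hit, triggers the configured start-limit action and returns -ECANCELED. */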
int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        emergency_action(
                        u->manager,
                        u->start_limit_action,
                        EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S,
                        u->reboot_arg,
                        /* exit_status= */ -1,
                        reason);

        return -ECANCELED;
}
*u
) {
1841 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1842 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1843 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1844 * that are not used in conjunction with After= as for them any such check would make things entirely
1847 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
1849 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
))
1852 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1853 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
/* Errors that aren't really errors:
 *         -EALREADY:     Unit is already started.
 *         -ECOMM:        Condition failed
 *         -EAGAIN:       An operation is already in progress. Retry later.
 *
 * Errors that are real errors:
 *         -EBADR:        This unit type does not support starting.
 *         -ECANCELED:    Start limit hit, too many requests for now
 *         -EPROTO:       Assert failed
 *         -EINVAL:       Unit not loaded
 *         -EOPNOTSUPP:   Unit type not supported
 *         -ENOLINK:      The necessary dependencies are not fulfilled.
 *         -ESTALE:       This unit has been started before and can't be started a second time
 *         -EDEADLK:      This unit is frozen
 *         -ENOENT:       This is a triggering unit and unit to trigger is not loaded
 *         -ETOOMANYREFS: The hard concurrency limit of at least one of the slices the unit is contained in has been reached
 */
int unit_start(Unit *u, ActivationDetails *details) {
        UnitActiveState state;
        Unit *following;
        int r;

        assert(u);

        /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
        if (UNIT_VTABLE(u)->subsystem_ratelimited) {
                r = UNIT_VTABLE(u)->subsystem_ratelimited(u->manager);
                if (r < 0)
                        return r;
                if (r > 0)
                        return -EAGAIN;
        }

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition not met. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following, details);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
        if (UNIT_VTABLE(u)->can_start) {
                r = UNIT_VTABLE(u)->can_start(u);
                if (r < 0)
                        return r;
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        if (UNIT_IS_INACTIVE_OR_FAILED(state)) {
                Slice *slice = SLICE(UNIT_GET_SLICE(u));

                if (slice) {
                        /* Check hard concurrency limit. Note this is partially redundant, we already checked
                         * this when enqueuing jobs. However, between the time when we enqueued this and the
                         * time we are dispatching the queue the configuration might have changed, hence
                         * check here again */
                        if (slice_concurrency_hard_max_reached(slice, u))
                                return -ETOOMANYREFS;

                        /* Also check soft concurrency limit, and return EAGAIN so that the job is kept in
                         * the queue */
                        if (slice_concurrency_soft_max_reached(slice, u))
                                return -EAGAIN; /* Try again, keep in queue */
                }
        }

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);

        if (!u->activation_details) /* Older details object wins */
                u->activation_details = activation_details_ref(details);

        return UNIT_VTABLE(u)->start(u);
}
bool unit_can_start(Unit *u) {
        assert(u);

        if (u->load_state != UNIT_LOADED)
                return false;

        if (!unit_type_supported(u->type))
                return false;

        /* Scope units may be started only once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
                return false;

        return !!UNIT_VTABLE(u)->start;
}
bool unit_can_isolate(Unit *u) {
        assert(u);

        return unit_can_start(u) &&
               u->allow_isolate;
}
/* Errors:
 *         -EBADR:    This unit type does not support stopping.
 *         -EALREADY: Unit is already stopped.
 *         -EAGAIN:   An operation is already in progress. Retry later.
 *         -EDEADLK:  Unit is frozen
 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);

        return UNIT_VTABLE(u)->stop(u);
}
bool unit_can_stop(Unit *u) {
        assert(u);

        /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
         * Extrinsic units follow external state and they may stop following external state changes
         * (hence we return true here), but an attempt to do this through the manager will fail. */

        if (!unit_type_supported(u->type))
                return false;

        if (u->perpetual)
                return false;

        return !!UNIT_VTABLE(u)->stop;
}
/* Errors:
 *         -EBADR:   This unit type does not support reloading.
 *         -ENOEXEC: Unit is not started.
 *         -EAGAIN:  An operation is already in progress. Retry later.
 *         -EDEADLK: Unit is frozen.
 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        state = unit_active_state(u);
        if (IN_SET(state, UNIT_RELOADING, UNIT_REFRESHING))
                /* "refreshing" means some resources in the unit namespace are being updated. Unlike reload,
                 * the unit processes aren't made aware of refresh. Let's put the job back to queue
                 * in both cases, as refresh typically takes place before reload and it's better to wait
                 * for it rather than failing. */
                return -EAGAIN;

        if (state != UNIT_ACTIVE)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");

        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        /* Check to make sure the unit isn't frozen */
        if (u->freezer_state != FREEZER_RUNNING)
                return -EDEADLK;

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), /* reload_success = */ true);
                return 0;
        }

        return UNIT_VTABLE(u)->reload(u);
}
bool unit_can_reload(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_reload)
                return UNIT_VTABLE(u)->can_reload(u);

        if (unit_has_dependency(u, UNIT_ATOM_PROPAGATES_RELOAD_TO, NULL))
                return true;

        return UNIT_VTABLE(u)->reload;
}
bool unit_is_unneeded(Unit *u) {
        Unit *other;

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (unit_active_state(u) != UNIT_ACTIVE)
                return false;
        if (u->job)
                return false;

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED) {
                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                if (other->job)
                        return false;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        return false;

                if (unit_will_restart(other))
                        return false;
        }

        return true;
}
bool unit_is_upheld_by_active(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks if the unit needs to be started because it currently is not running, but some other unit
         * that is active has declared an Uphold= dependency on it */

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) || u->job) {
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_START_STEADILY) {
                if (other->job)
                        continue;

                if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;
                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}
bool unit_is_bound_by_inactive(Unit *u, Unit **ret_culprit) {
        Unit *other;

        assert(u);

        /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
         * because the other unit is down. */

        if (unit_active_state(u) != UNIT_ACTIVE || u->job) {
                /* Don't clean up while the unit is transitioning or is even inactive. */
                if (ret_culprit)
                        *ret_culprit = NULL;
                return false;
        }

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT) {
                if (other->job)
                        continue;

                if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other))) {
                        if (ret_culprit)
                                *ret_culprit = other;
                        return true;
                }
        }

        if (ret_culprit)
                *ret_culprit = NULL;
        return false;
}
static void check_unneeded_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE)
                unit_submit_to_stop_when_unneeded_queue(other);
}

static void check_uphold_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE)
                unit_submit_to_start_when_upheld_queue(other);
}

static void check_bound_by_dependencies(Unit *u) {
        Unit *other;

        assert(u);

        /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE)
                unit_submit_to_stop_when_bound_queue(other);
}
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_REPLACE) /* Requires= + BindsTo= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_START_FAIL) /* Wants= */
                if (!unit_has_dependency(u, UNIT_ATOM_AFTER, other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_START, other, JOB_FAIL, /* error = */ NULL, /* ret = */ NULL);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_START) /* Conflicts= (and inverse) */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
}
static void retroactively_stop_dependencies(Unit *u) {
        Unit *other;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP) /* BoundBy= */
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        (void) manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, /* error = */ NULL, /* ret = */ NULL);
}
void unit_start_on_termination_deps(Unit *u, UnitDependencyAtom atom) {
        const char *dependency_name = NULL;
        JobMode job_mode;
        unsigned n_jobs = 0;
        int r;

        /* Act on OnFailure= and OnSuccess= dependencies */

        assert(u);
        assert(u->manager);
        assert(IN_SET(atom, UNIT_ATOM_ON_SUCCESS, UNIT_ATOM_ON_FAILURE));

        FOREACH_ELEMENT(setting, on_termination_settings)
                if (atom == setting->atom) {
                        job_mode = *(JobMode*) ((uint8_t*) u + setting->job_mode_offset);
                        dependency_name = setting->dependency_name;
                        break;
                }

        assert(dependency_name);

        Unit *other;
        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                if (n_jobs == 0)
                        log_unit_info(u, "Triggering %s dependencies.", dependency_name);

                r = manager_add_job(u->manager, JOB_START, other, job_mode, &error, /* ret = */ NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue %s%s job, ignoring: %s",
                                               dependency_name, other->id, bus_error_message(&error, r));

                n_jobs++;
        }

        if (n_jobs > 0)
                log_unit_debug(u, "Triggering %s dependencies done (%u %s).",
                               dependency_name, n_jobs, n_jobs == 1 ? "job" : "jobs");
}
void unit_trigger_notify(Unit *u) {
        Unit *other;

        assert(u);

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY)
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}
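
/* Helper for unit_log_resources() below: raises the log level to LOG_NOTICE or LOG_INFO when the
 * corresponding consumption threshold (NOTICEWORTHY_*, MENTIONWORTHY_*) was exceeded, but never lowers
 * a level that is already more severe. */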
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice && log_level > LOG_NOTICE)
                return LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                return LOG_INFO;
        return log_level;
}
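
/* Note: the iovec array in unit_log_resources() is sized for the worst case: one entry for the CPU
 * counter, one per memory/IP/IO metric, plus four trailing entries (MESSAGE=, MESSAGE_ID=, the unit
 * log field and the invocation ID field). */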
static int unit_log_resources(Unit *u) {

        static const struct {
                const char *journal_field;
                const char *message_suffix;
        } memory_fields[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1] = {
                [CGROUP_MEMORY_PEAK]      = { "MEMORY_PEAK",      "memory peak"      },
                [CGROUP_MEMORY_SWAP_PEAK] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
        }, ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = { "IP_METRIC_INGRESS_BYTES",   "incoming IP traffic" },
                [CGROUP_IP_EGRESS_BYTES]    = { "IP_METRIC_EGRESS_BYTES",    "outgoing IP traffic" },
                [CGROUP_IP_INGRESS_PACKETS] = { "IP_METRIC_INGRESS_PACKETS", NULL                  },
                [CGROUP_IP_EGRESS_PACKETS]  = { "IP_METRIC_EGRESS_PACKETS",  NULL                  },
        }, io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IO_READ_BYTES]       = { "IO_METRIC_READ_BYTES",       "read from disk"  },
                [CGROUP_IO_WRITE_BYTES]      = { "IO_METRIC_WRITE_BYTES",      "written to disk" },
                [CGROUP_IO_READ_OPERATIONS]  = { "IO_METRIC_READ_OPERATIONS",  NULL              },
                [CGROUP_IO_WRITE_OPERATIONS] = { "IO_METRIC_WRITE_OPERATIONS", NULL              },
        };

        struct iovec *iovec = NULL;
        size_t n_iovec = 0;
        _cleanup_free_ char *message = NULL, *t = NULL;
        nsec_t cpu_nsec = NSEC_INFINITY;
        int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */

        assert(u);

        CLEANUP_ARRAY(iovec, n_iovec, iovec_array_free);

        iovec = new(struct iovec, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST + 1) +
                                  _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4);
        if (!iovec)
                return log_oom();

        /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
         * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
         * information and the complete data in structured fields. */

        (void) unit_get_cpu_usage(u, &cpu_nsec);
        if (cpu_nsec != NSEC_INFINITY) {
                /* Format the CPU time for inclusion in the structured log message */
                if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, cpu_nsec) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* Format the CPU time for inclusion in the human language message string */
                if (strextendf_with_separator(&message, ", ",
                                              "Consumed %s CPU time",
                                              FORMAT_TIMESPAN(cpu_nsec / NSEC_PER_USEC, USEC_PER_MSEC)) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        cpu_nsec > MENTIONWORTHY_CPU_NSEC,
                                        cpu_nsec > NOTICEWORTHY_CPU_NSEC);
        }

        for (CGroupMemoryAccountingMetric metric = 0; metric <= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST; metric++) {
                uint64_t value = UINT64_MAX;

                assert(memory_fields[metric].journal_field);
                assert(memory_fields[metric].message_suffix);

                (void) unit_get_memory_accounting(u, metric, &value);
                if (value == UINT64_MAX)
                        continue;

                if (asprintf(&t, "%s=%" PRIu64, memory_fields[metric].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                if (strextendf_with_separator(&message, ", ", "%s %s",
                                              FORMAT_BYTES(value), memory_fields[metric].message_suffix) < 0)
                        return log_oom();

                log_level = raise_level(log_level,
                                        value > MENTIONWORTHY_MEMORY_BYTES,
                                        value > NOTICEWORTHY_MEMORY_BYTES);
        }

        for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
                uint64_t value = UINT64_MAX;

                assert(io_fields[k].journal_field);

                (void) unit_get_io_accounting(u, k, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IO accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, io_fields[k].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IO accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the operations counters) */
                if (io_fields[k].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), io_fields[k].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IO_BYTES,
                                                value > NOTICEWORTHY_IO_BYTES);
                }
        }

        for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t value = UINT64_MAX;

                assert(ip_fields[m].journal_field);

                (void) unit_get_ip_accounting(u, m, &value);
                if (value == UINT64_MAX)
                        continue;

                /* Format IP accounting data for inclusion in the structured log message */
                if (asprintf(&t, "%s=%" PRIu64, ip_fields[m].journal_field, value) < 0)
                        return log_oom();
                iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

                /* If value is 0, we don't log it in the MESSAGE= field. */
                if (value == 0)
                        continue;

                /* Format the IP accounting data for inclusion in the human language message string, but only
                 * for the bytes counters (and not for the packets counters) */
                if (ip_fields[m].message_suffix) {
                        if (strextendf_with_separator(&message, ", ", "%s %s",
                                                      FORMAT_BYTES(value), ip_fields[m].message_suffix) < 0)
                                return log_oom();

                        log_level = raise_level(log_level,
                                                value > MENTIONWORTHY_IP_BYTES,
                                                value > NOTICEWORTHY_IP_BYTES);
                }
        }

        /* This check is here because it is the earliest point following all possible log_level assignments.
         * (If log_level is assigned anywhere after this point, move this check.) */
        if (!unit_log_level_test(u, log_level))
                return 0;

        /* Is there any accounting data available at all? */
        if (n_iovec == 0) {
                assert(!message);
                return 0;
        }

        t = strjoin("MESSAGE=", u->id, ": ", message ?: "Completed", ".");
        if (!t)
                return log_oom();
        iovec[n_iovec++] = IOVEC_MAKE_STRING(TAKE_PTR(t));

        if (!set_iovec_string_field(iovec, &n_iovec, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, unit_log_field(u), u->id))
                return log_oom();

        if (!set_iovec_string_field(iovec, &n_iovec, unit_invocation_log_field(u), u->invocation_id_string))
                return log_oom();

        log_unit_struct_iovec(u, log_level, iovec, n_iovec);

        return 0;
}
static void unit_update_on_console(Unit *u) {
        bool b;

        assert(u);

        b = unit_needs_console(u);
        if (u->on_console == b)
                return;

        u->on_console = b;
        if (b)
                manager_ref_console(u->manager);
        else
                manager_unref_console(u->manager);
}
static void unit_emit_audit_start(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        /* Write audit record if we have just finished starting up */
        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ true);
        u->in_audit = true;
}
static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
        assert(u);

        if (UNIT_VTABLE(u)->audit_start_message_type <= 0)
                return;

        if (u->in_audit) {
                /* Write audit record if we have just finished shutting down */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ state == UNIT_INACTIVE);
                u->in_audit = false;
        } else {
                /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
                manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_start_message_type, /* success= */ state == UNIT_INACTIVE);

                if (state == UNIT_INACTIVE)
                        manager_send_unit_audit(u->manager, u, UNIT_VTABLE(u)->audit_stop_message_type, /* success= */ true);
        }
}
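
/* Returns true if the new unit state was "unexpected", i.e. not brought about by the job itself;
 * unit_notify() uses this to decide whether dependencies shall be started or stopped retroactively. */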
static bool unit_process_job(Job *j, UnitActiveState ns, bool reload_success) {
        bool unexpected = false;

        assert(j);

        if (j->state == JOB_WAITING)
                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */
        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                JobResult result;

                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, reload_success ? JOB_DONE : JOB_FAILED, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING, UNIT_REFRESHING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached();
        }

        return unexpected;
}
static void unit_recursive_add_to_run_queue(Unit *u) {
        assert(u);

        if (u->job)
                job_add_to_run_queue(u->job);

        Unit *child;
        UNIT_FOREACH_DEPENDENCY(child, u, UNIT_ATOM_SLICE_OF) {

                if (!child->job)
                        continue;

                unit_recursive_add_to_run_queue(child);
        }
}
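
/* When a unit stops, a slice that enforces a concurrency soft limit might have headroom again, hence
 * re-enqueue the jobs of all sibling units (and their children) and then recurse up the slice tree. */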
static void unit_check_concurrency_limit(Unit *u) {
        assert(u);

        Unit *slice = UNIT_GET_SLICE(u);
        if (!slice)
                return;

        /* If a unit was stopped, maybe it has pending siblings (or children thereof) that can be started now */

        if (SLICE(slice)->concurrency_soft_max != UINT_MAX) {
                Unit *sibling;
                UNIT_FOREACH_DEPENDENCY(sibling, slice, UNIT_ATOM_SLICE_OF) {
                        if (sibling == u)
                                continue;

                        unit_recursive_add_to_run_queue(sibling);
                }
        }

        /* Also go up the tree. */
        unit_check_concurrency_limit(slice);
}
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_success) {
        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        Manager *m = ASSERT_PTR(u->manager);

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change.
         *
         * Always send an update if the unit is going into an inactive state so systemd-oomd knows to
         * stop monitoring this unit.
         *
         * Also send an update whenever the unit goes active; this is to handle a case where an override file
         * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
         * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
         * have the information on the property. Thus, indiscriminately send an update. */
        if (os != ns && (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns)))
                (void) manager_varlink_send_managed_oom_update(u);

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_now(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, reload_success);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */
                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                        manager_send_unit_supervisor(m, u, /* active= */ true);

                } else if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        manager_send_unit_supervisor(m, u, /* active= */ false);
                        unit_log_resources(u);
                }

                if (ns == UNIT_INACTIVE && !IN_SET(os, UNIT_FAILED, UNIT_INACTIVE, UNIT_MAINTENANCE))
                        unit_start_on_termination_deps(u, UNIT_ATOM_ON_SUCCESS);
                else if (ns != os && ns == UNIT_FAILED)
                        unit_start_on_termination_deps(u, UNIT_ATOM_ON_FAILURE);
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                const char *reason;

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, EMERGENCY_ACTION_WARN|EMERGENCY_ACTION_SLEEP_5S, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, /* flags= */ 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        /* And now, add the unit or depending units to various queues that will act on the new situation if
         * needed. These queues generally check for continuous state changes rather than events (like most of
         * the state propagation above), and do work deferred instead of instantly, since they typically
         * don't want to run during reloading, and usually involve checking combined state of multiple units
         * at once. */

        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Stop unneeded units and bound-by units regardless if going down was expected or not */
                check_unneeded_dependencies(u);
                check_bound_by_dependencies(u);

                /* Maybe someone wants us to remain up? */
                unit_submit_to_start_when_upheld_queue(u);

                /* Maybe the unit should be GC'ed now? */
                unit_add_to_gc_queue(u);

                /* Maybe we can release some resources now? */
                unit_submit_to_release_resources_queue(u);

                /* Maybe the concurrency limits now allow dispatching of another start job in this slice? */
                unit_check_concurrency_limit(u);

        } else if (UNIT_IS_ACTIVE_OR_RELOADING(ns)) {
                /* Start uphold units regardless if going up was expected or not */
                check_uphold_dependencies(u);

                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
                 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
                 * inactive, without ever entering started.) */
                unit_submit_to_stop_when_bound_queue(u);
        }
}
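
/* PID watching uses two manager-level tables: watch_pids maps each PID to the one unit watching it,
 * and watch_pids_more maps a PID to a NULL-terminated array of any further units watching the same
 * PID. The common single-watcher case hence needs no array allocation. */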
int unit_watch_pidref(Unit *u, const PidRef *pid, bool exclusive) {
        _cleanup_(pidref_freep) PidRef *pid_dup = NULL;
        int r;

        assert(u);
        assert(pidref_is_set(pid));

        /* Adds a specific PID to the set of PIDs this unit watches. */

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pidref(u->manager, pid);

        if (set_contains(u->pids, pid)) { /* early exit if already being watched */
                assert(!exclusive);
                return 0;
        }

        r = pidref_dup(pid, &pid_dup);
        if (r < 0)
                return r;

        /* First, insert into the set of PIDs maintained by the unit */
        r = set_ensure_put(&u->pids, &pidref_hash_ops_free, pid_dup);
        if (r < 0)
                return r;

        pid = TAKE_PTR(pid_dup); /* continue with our copy now that we have installed it properly in our set */

        /* Second, insert it into the simple global table, see if that works */
        r = hashmap_ensure_put(&u->manager->watch_pids, &pidref_hash_ops, pid, u);
        if (r != -EEXIST)
                return r;

        /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
         * hashmap that points to an array. */

        PidRef *old_pid = NULL;
        Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &old_pid);

        /* Count entries in array */
        size_t n = 0;
        for (; array && array[n]; n++)
                assert(array[n] != u);

        /* Allocate a new array */
        _cleanup_free_ Unit **new_array = new(Unit*, n + 2);
        if (!new_array)
                return -ENOMEM;

        /* Append us to the end */
        memcpy_safe(new_array, array, sizeof(Unit*) * n);
        new_array[n] = u;
        new_array[n+1] = NULL;

        /* Add or replace the old array */
        r = hashmap_ensure_replace(&u->manager->watch_pids_more, &pidref_hash_ops, old_pid ?: pid, new_array);
        if (r < 0)
                return r;

        TAKE_PTR(new_array); /* Now part of the hash table */
        free(array); /* Which means we can now delete the old version */

        return 0;
}
void unit_unwatch_pidref(Unit *u, const PidRef *pid) {
        assert(u);
        assert(pidref_is_set(pid));

        /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
        _cleanup_(pidref_freep) PidRef *pid1 = set_remove(u->pids, pid);
        if (!pid1)
                return; /* Early exit if this PID was never watched by us */

        /* First let's drop the unit from the simple hash table, if it is included there */
        PidRef *pid2 = NULL;
        Unit *uu = hashmap_get2(u->manager->watch_pids, pid, (void**) &pid2);

        /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
        assert((uu == u) == (pid1 == pid2));

        if (uu == u)
                /* OK, we are in the first table. Let's remove it there then, and we are done already. */
                assert_se(hashmap_remove_value(u->manager->watch_pids, pid2, uu));
        else {
                /* We weren't in the first table, then let's consult the 2nd table that points to an array */
                PidRef *pid3 = NULL;
                Unit **array = hashmap_get2(u->manager->watch_pids_more, pid, (void**) &pid3);

                /* Let's iterate through the array, dropping our own entry */
                size_t m = 0, n = 0;
                for (; array && array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                if (n == m)
                        return; /* Not there */

                array[m] = NULL; /* set trailing NULL marker on the new end */

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove_value(u->manager->watch_pids_more, pid3, array));
                        free(array);
                } else {
                        /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
                         * we will delete, but by the PidRef object of the Unit that is now first in the
                         * array. */

                        PidRef *new_pid3 = ASSERT_PTR(set_get(array[0]->pids, pid));
                        assert_se(hashmap_replace(u->manager->watch_pids_more, new_pid3, array) >= 0);
                }
        }
}
void unit_unwatch_all_pids(Unit *u) {
        assert(u);

        while (!set_isempty(u->pids))
                unit_unwatch_pidref(u, set_first(u->pids));

        u->pids = set_free(u->pids);
}

void unit_unwatch_pidref_done(Unit *u, PidRef *pidref) {
        assert(u);

        if (!pidref_is_set(pidref))
                return;

        unit_unwatch_pidref(u, pidref);
        pidref_done(pidref);
}
bool unit_job_is_applicable(Unit *u, JobType j) {
        assert(u);
        assert(j >= 0 && j < _JOB_TYPE_MAX);

        switch (j) {

        case JOB_VERIFY_ACTIVE:
        case JOB_START:
                /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
                 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
                 * jobs for it. */
                return true;

        case JOB_STOP:
                /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
                 * external events), hence it makes no sense to permit enqueuing such a request either. */
                return !u->perpetual;

        case JOB_RESTART:
        case JOB_TRY_RESTART:
                return unit_can_stop(u) && unit_can_start(u);

        case JOB_RELOAD:
        case JOB_TRY_RELOAD:
                return unit_can_reload(u);

        case JOB_RELOAD_OR_START:
                return unit_can_reload(u) && unit_can_start(u);

        default:
                assert_not_reached();
        }
}
static Hashmap *unit_get_dependency_hashmap_per_type(Unit *u, UnitDependency d) {
        Hashmap *deps;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        deps = hashmap_get(u->dependencies, UNIT_DEPENDENCY_TO_PTR(d));
        if (!deps) {
                _cleanup_hashmap_free_ Hashmap *h = NULL;

                h = hashmap_new(NULL);
                if (!h)
                        return NULL;

                if (hashmap_ensure_put(&u->dependencies, NULL, UNIT_DEPENDENCY_TO_PTR(d), h) < 0)
                        return NULL;

                deps = TAKE_PTR(h);
        }

        return deps;
}
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0,
        NOTIFY_DEPENDENCY_UPDATE_TO   = 1 << 1,
} NotifyDependencyFlags;
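
/* The flags returned by unit_add_dependency_impl() encode which side's dependency table actually
 * changed, so that callers enqueue D-Bus change notifications only for units that were really
 * modified. */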
static int unit_add_dependency_impl(
                Unit *u,
                UnitDependency d,
                Unit *other,
                UnitDependencyMask mask) {

        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES]               = UNIT_REQUIRED_BY,
                [UNIT_REQUISITE]              = UNIT_REQUISITE_OF,
                [UNIT_WANTS]                  = UNIT_WANTED_BY,
                [UNIT_BINDS_TO]               = UNIT_BOUND_BY,
                [UNIT_PART_OF]                = UNIT_CONSISTS_OF,
                [UNIT_UPHOLDS]                = UNIT_UPHELD_BY,
                [UNIT_REQUIRED_BY]            = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF]           = UNIT_REQUISITE,
                [UNIT_WANTED_BY]              = UNIT_WANTS,
                [UNIT_BOUND_BY]               = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF]            = UNIT_PART_OF,
                [UNIT_UPHELD_BY]              = UNIT_UPHOLDS,
                [UNIT_CONFLICTS]              = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY]          = UNIT_CONFLICTS,
                [UNIT_BEFORE]                 = UNIT_AFTER,
                [UNIT_AFTER]                  = UNIT_BEFORE,
                [UNIT_ON_SUCCESS]             = UNIT_ON_SUCCESS_OF,
                [UNIT_ON_SUCCESS_OF]          = UNIT_ON_SUCCESS,
                [UNIT_ON_FAILURE]             = UNIT_ON_FAILURE_OF,
                [UNIT_ON_FAILURE_OF]          = UNIT_ON_FAILURE,
                [UNIT_TRIGGERS]               = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY]           = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO]   = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_PROPAGATES_STOP_TO]     = UNIT_STOP_PROPAGATED_FROM,
                [UNIT_STOP_PROPAGATED_FROM]   = UNIT_PROPAGATES_STOP_TO,
                [UNIT_JOINS_NAMESPACE_OF]     = UNIT_JOINS_NAMESPACE_OF, /* symmetric! 👓 */
                [UNIT_REFERENCES]             = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY]          = UNIT_REFERENCES,
                [UNIT_IN_SLICE]               = UNIT_SLICE_OF,
                [UNIT_SLICE_OF]               = UNIT_IN_SLICE,
        };

        Hashmap *u_deps, *other_deps;
        UnitDependencyInfo u_info, u_info_old, other_info, other_info_old;
        NotifyDependencyFlags flags = 0;
        int r;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(inverse_table[d] >= 0 && inverse_table[d] < _UNIT_DEPENDENCY_MAX);
        assert(mask > 0 && mask < _UNIT_DEPENDENCY_MASK_FULL);

        /* Ensure the following two hashmaps for each unit exist:
         * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
         * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
        u_deps = unit_get_dependency_hashmap_per_type(u, d);
        if (!u_deps)
                return -ENOMEM;

        other_deps = unit_get_dependency_hashmap_per_type(other, inverse_table[d]);
        if (!other_deps)
                return -ENOMEM;

        /* Save the original dependency info. */
        u_info.data = u_info_old.data = hashmap_get(u_deps, other);
        other_info.data = other_info_old.data = hashmap_get(other_deps, u);

        /* Update dependency info. */
        u_info.origin_mask |= mask;
        other_info.destination_mask |= mask;

        /* Save updated dependency info. */
        if (u_info.data != u_info_old.data) {
                r = hashmap_replace(u_deps, other, u_info.data);
                if (r < 0)
                        return r;

                flags = NOTIFY_DEPENDENCY_UPDATE_FROM;
                u->dependency_generation++;
        }

        if (other_info.data != other_info_old.data) {
                r = hashmap_replace(other_deps, u, other_info.data);
                if (r < 0) {
                        if (u_info.data != u_info_old.data) {
                                /* Restore the old dependency. */
                                if (u_info_old.data)
                                        (void) hashmap_update(u_deps, other, u_info_old.data);
                                else
                                        hashmap_remove(u_deps, other);
                        }
                        return r;
                }

                flags |= NOTIFY_DEPENDENCY_UPDATE_TO;
                other->dependency_generation++;
        }

        return flags;
}
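
/* Public entry point: follows merged units, converts the dependency type into its atom
 * representation, and validates combinations that make no sense for the unit types involved before
 * delegating to unit_add_dependency_impl(). */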
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        UnitDependencyAtom a;
        int r;

        /* Helper to know whether sending a notification is necessary or not: if the dependency is already
         * there, no need to notify! */
        NotifyDependencyFlags notify_flags;

        assert(u);
        assert(other);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);

        u = unit_follow_merge(u);
        other = unit_follow_merge(other);
        a = unit_dependency_to_atom(d);
        assert(a >= 0);

        /* We won't allow dependencies on ourselves. We will not consider them an error however. */
        if (u == other) {
                if (unit_should_warn_about_dependency(d))
                        log_unit_warning(u, "Dependency %s=%s is dropped.",
                                         unit_dependency_to_string(d), u->id);
                return 0;
        }

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        /* Note that ordering a device unit after a unit is permitted since it allows its job running
         * timeout to be started at a specific time. */
        if (FLAGS_SET(a, UNIT_ATOM_BEFORE) && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_ON_FAILURE) && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERS) && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (FLAGS_SET(a, UNIT_ATOM_TRIGGERED_BY) && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && other->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a slice unit).", other->id, other->id);
        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && u->type != UNIT_SLICE)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_IN_SLICE) && !UNIT_HAS_CGROUP_CONTEXT(u))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other->id, u->id);

        if (FLAGS_SET(a, UNIT_ATOM_SLICE_OF) && !UNIT_HAS_CGROUP_CONTEXT(other))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other->id, other->id);

        r = unit_add_dependency_impl(u, d, other, mask);
        if (r < 0)
                return r;
        notify_flags = r;

        if (add_reference) {
                r = unit_add_dependency_impl(u, UNIT_REFERENCES, other, mask);
                if (r < 0)
                        return r;
                notify_flags |= r;
        }

        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_FROM))
                unit_add_to_dbus_queue(u);
        if (FLAGS_SET(notify_flags, NOTIFY_DEPENDENCY_UPDATE_TO))
                unit_add_to_dbus_queue(other);

        return notify_flags != 0;
}
int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
        int r = 0, s = 0;

        assert(u);
        assert(d >= 0 || e >= 0);

        if (d >= 0) {
                r = unit_add_dependency(u, d, other, add_reference, mask);
                if (r < 0)
                        return r;
        }

        if (e >= 0) {
                s = unit_add_dependency(u, e, other, add_reference, mask);
                if (s < 0)
                        return s;
        }

        return r > 0 || s > 0;
}
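
/* A typical combined use (unit_add_node_dependency() below does this) is to add an ordering edge
 * together with a requirement edge in one call:
 *
 *     (void) unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, other, true, UNIT_DEPENDENCY_FILE);
 *
 * Passing a negative dependency type for d or e skips that half. */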
static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
        int r;

        assert(u);
        assert(name);
        assert(buf);
        assert(ret);

        if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                *buf = NULL;
                *ret = name;
                return 0;
        }

        if (u->instance)
                r = unit_name_replace_instance(name, u->instance, buf);
        else {
                _cleanup_free_ char *i = NULL;

                r = unit_name_to_prefix(u->id, &i);
                if (r < 0)
                        return r;

                r = unit_name_replace_instance(name, i, buf);
        }
        if (r < 0)
                return r;

        *ret = *buf;
        return 0;
}
int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_dependency(u, d, other, add_reference, mask);
}
int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
        _cleanup_free_ char *buf = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(name);

        r = resolve_template(u, name, &buf, &name);
        if (r < 0)
                return r;

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        r = manager_load_unit(u->manager, name, NULL, NULL, &other);
        if (r < 0)
                return r;

        return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
}
int setenv_unit_path(const char *p) {
        assert(p);

        /* This is mostly for debug purposes */
        return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, /* overwrite = */ true));
}

char* unit_dbus_path(Unit *u) {
        assert(u);

        if (!u->id)
                return NULL;

        return unit_dbus_path_from_name(u->id);
}

char* unit_dbus_path_invocation_id(Unit *u) {
        assert(u);

        if (sd_id128_is_null(u->invocation_id))
                return NULL;

        return unit_dbus_path_from_name(u->invocation_id_string);
}
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
int unit_set_slice(Unit *u, Unit *slice) {
        int r;

        assert(u);
        assert(slice);

        /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
         * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
         * slice is derived from the name). Make sure the unit we set is actually a slice. */

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return -EOPNOTSUPP;

        if (u->type == UNIT_SLICE)
                return -EINVAL;

        if (unit_active_state(u) != UNIT_INACTIVE)
                return -EBUSY;

        if (slice->type != UNIT_SLICE)
                return -EINVAL;

        if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
            !unit_has_name(slice, SPECIAL_ROOT_SLICE))
                return -EPERM;

        if (UNIT_GET_SLICE(u) == slice)
                return 0;

        /* Disallow slice changes if @u is already bound to cgroups */
        if (UNIT_GET_SLICE(u)) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (crt && crt->cgroup_path)
                        return -EBUSY;
        }

        /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
        if (UNIT_GET_SLICE(u))
                unit_remove_dependencies(u, UNIT_DEPENDENCY_SLICE_PROPERTY);

        r = unit_add_dependency(u, UNIT_IN_SLICE, slice, true, UNIT_DEPENDENCY_SLICE_PROPERTY);
        if (r < 0)
                return r;

        return 1;
}
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (u->manager && FLAGS_SET(u->manager->test_run_flags, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES))
                return 0;

        if (UNIT_GET_SLICE(u))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
const char* unit_slice_name(Unit *u) {
        Unit *slice;

        assert(u);

        slice = UNIT_GET_SLICE(u);
        if (!slice)
                return NULL;

        return slice->id;
}

int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(u);
        assert(type);
        assert(_found);

        r = unit_name_change_suffix(u->id, type, &t);
        if (r < 0)
                return r;
        if (unit_has_name(u, t))
                return -EINVAL;

        r = manager_load_unit(u->manager, t, NULL, NULL, _found);
        assert(r < 0 || *_found != u);
        return r;
}
static int signal_name_owner_changed_install_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        Unit *u = ASSERT_PTR(userdata);
        const sd_bus_error *e;
        int r;

        e = sd_bus_message_get_error(message);
        if (!e) {
                log_unit_trace(u, "Successfully installed NameOwnerChanged signal match.");
                return 0;
        }

        r = sd_bus_error_get_errno(e);
        log_unit_error_errno(u, r,
                             "Unexpected error response on installing NameOwnerChanged signal match: %s",
                             bus_error_message(e, r));

        /* If we failed to install NameOwnerChanged signal, also unref the bus slot of GetNameOwner(). */
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, NULL);

        return 0;
}
static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
        if (r < 0) {
                bus_log_parse_error(r);
                return 0;
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));

        return 0;
}
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = ASSERT_PTR(userdata);
        int r;

        assert(message);

        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                if (!sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) {
                        r = sd_bus_error_get_errno(e);
                        log_unit_error_errno(u, r,
                                             "Unexpected error response from GetNameOwner(): %s",
                                             bus_error_message(e, r));
                }

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        _cleanup_(sd_bus_message_unrefp) sd_bus_message *m = NULL;
        const char *match;
        usec_t timeout_usec = 0;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
         * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
         * value defined above. */
        if (UNIT_VTABLE(u)->get_timeout_start_usec)
                timeout_usec = UNIT_VTABLE(u)->get_timeout_start_usec(u);

        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = bus_add_match_full(
                        bus,
                        &u->match_bus_slot,
                        /* asynchronous = */ true,
                        match,
                        signal_name_owner_changed,
                        signal_name_owner_changed_install_handler,
                        u,
                        timeout_usec);
        if (r < 0)
                return r;

        r = sd_bus_message_new_method_call(
                        bus,
                        &m,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner");
        if (r < 0)
                return r;

        r = sd_bus_message_append(m, "s", name);
        if (r < 0)
                return r;

        r = sd_bus_call_async(
                        bus,
                        &u->get_name_owner_slot,
                        m,
                        get_name_owner_handler,
                        u,
                        timeout_usec);
        if (r < 0) {
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
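
/* Note the ordering above: the NameOwnerChanged match is installed before GetNameOwner() is called,
 * so an ownership change cannot slip through between the two operations. */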
int unit_watch_bus_name(Unit *u, const char *name) {
        int r;

        assert(u);
        assert(name);

        /* Watch a specific name on the bus. We only support one unit
         * watching each name for now. */

        if (u->manager->api_bus) {
                /* If the bus is already available, install the match directly.
                 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
                r = unit_install_bus_match(u, u->manager->api_bus, name);
                if (r < 0)
                        return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
        }

        r = hashmap_put(u->manager->watch_bus, name, u);
        if (r < 0) {
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
                return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
        }

        return 0;
}
void unit_unwatch_bus_name(Unit *u, const char *name) {
        assert(u);
        assert(name);

        (void) hashmap_remove_value(u->manager->watch_bus, name, u);
        u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
}
int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
        _cleanup_free_ char *e = NULL;
        Unit *device;
        int r;

        assert(u);

        /* Adds in links to the device node that this unit is based on */
        if (isempty(what))
                return 0;

        if (!is_device_path(what))
                return 0;

        /* When device units aren't supported (such as in a container), don't create dependencies on them. */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_from_path(what, ".device", &e);
        if (r < 0)
                return r;

        r = manager_load_unit(u->manager, e, NULL, NULL, &device);
        if (r < 0)
                return r;

        if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
                dep = UNIT_BINDS_TO;

        return unit_add_two_dependencies(u, UNIT_AFTER,
                                         MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
                                         device, true, mask);
}
int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
        _cleanup_free_ char *escaped = NULL, *target = NULL;
        int r;

        assert(u);

        if (isempty(what))
                return 0;

        if (!path_startswith(what, "/dev/"))
                return 0;

        /* If we don't support devices, then also don't bother with blockdev@.target */
        if (!unit_type_supported(UNIT_DEVICE))
                return 0;

        r = unit_name_path_escape(what, &escaped);
        if (r < 0)
                return r;

        r = unit_name_build("blockdev", escaped, ".target", &target);
        if (r < 0)
                return r;

        return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
}
int unit_coldplug(Unit *u) {
        int r = 0;

        assert(u);

        /* Make sure we don't enter a loop, when coldplugging recursively. */
        if (u->coldplugged)
                return 0;

        u->coldplugged = true;

        STRV_FOREACH(i, u->deserialized_refs)
                RET_GATHER(r, bus_unit_track_add_name(u, *i));

        u->deserialized_refs = strv_free(u->deserialized_refs);

        if (UNIT_VTABLE(u)->coldplug)
                RET_GATHER(r, UNIT_VTABLE(u)->coldplug(u));

        if (u->job)
                RET_GATHER(r, job_coldplug(u->job));
        if (u->nop_job)
                RET_GATHER(r, job_coldplug(u->nop_job));

        unit_modify_nft_set(u, /* add = */ true);
        return r;
}
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);

        unit_cgroup_catchup(u);
}
static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
        struct stat st;

        if (!path)
                return false;

        /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
         * are never out-of-date. */
        if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
                return false;

        if (stat(path, &st) < 0)
                /* What, cannot access this anymore? */
                return true;

        if (path_masked)
                /* For masked files check if they are still so */
                return !null_or_empty(&st);

        /* For non-empty files check the mtime */
        return timespec_load(&st.st_mtim) > mtime;
}
bool unit_need_daemon_reload(Unit *u) {
        assert(u);
        assert(u->manager);

        if (u->manager->unit_file_state_outdated)
                return true;

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        if (u->load_state == UNIT_LOADED) {
                _cleanup_strv_free_ char **dropins = NULL;

                (void) unit_find_dropin_paths(u, /* use_unit_path_cache = */ false, &dropins);

                if (!strv_equal(u->dropin_paths, dropins))
                        return true;

                /* … any drop-ins that are masked are simply omitted from the list. */
                STRV_FOREACH(path, u->dropin_paths)
                        if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                                return true;
        }

        return false;
}
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;

        (void) unit_set_debug_invocation(u, /* enable= */ false);
}

Unit *unit_following(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->following)
                return UNIT_VTABLE(u)->following(u);

        return NULL;
}
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}

bool unit_inactive_or_pending(Unit *u) {
        assert(u);

        /* Returns true if the unit is inactive or going down */

        if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
                return true;

        if (unit_stop_pending(u))
                return true;

        return false;
}

bool unit_active_or_pending(Unit *u) {
        assert(u);

        /* Returns true if the unit is active or going up */

        if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
                return true;

        if (u->job &&
            IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
                return true;

        return false;
}

bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}

bool unit_will_restart(Unit *u) {
        assert(u);

        if (!UNIT_VTABLE(u)->will_restart)
                return false;

        return UNIT_VTABLE(u)->will_restart(u);
}
void unit_notify_cgroup_oom(Unit *u, bool managed_oom) {
        assert(u);

        if (UNIT_VTABLE(u)->notify_cgroup_oom)
                UNIT_VTABLE(u)->notify_cgroup_oom(u, managed_oom);
}

static int unit_pid_set(Unit *u, Set **pid_set) {
        int r;

        assert(u);
        assert(pid_set);

        set_clear(*pid_set); /* This updates input. */

        /* Exclude the main/control pids from being killed via the cgroup */

        FOREACH_ARGUMENT(pid, unit_main_pid(u), unit_control_pid(u))
                if (pidref_is_set(pid)) {
                        r = set_ensure_put(pid_set, NULL, PID_TO_PTR(pid->pid));
                        if (r < 0)
                                return r;
                }

        return 0;
}
static int kill_common_log(const PidRef *pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = ASSERT_PTR(userdata);

        (void) pidref_get_comm(pid, &comm);

        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid->pid, strna(comm));

        return 1;
}
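
/* Dispatch on the siginfo code: SI_USER requests a plain kill(), SI_QUEUE requests sigqueue() with
 * the specified value attached. */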
static int kill_or_sigqueue(PidRef *pidref, int signo, int code, int value) {
        assert(pidref_is_set(pidref));
        assert(SIGNAL_VALID(signo));

        switch (code) {

        case SI_USER:
                log_debug("Killing " PID_FMT " with signal SIG%s.", pidref->pid, signal_to_string(signo));
                return pidref_kill(pidref, signo);

        case SI_QUEUE:
                log_debug("Enqueuing value %i to " PID_FMT " on signal SIG%s.", value, pidref->pid, signal_to_string(signo));
                return pidref_sigqueue(pidref, signo, value);

        default:
                assert_not_reached();
        }
}
static int unit_kill_one(
                Unit *u,
                PidRef *pidref,
                const char *type,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        int r;

        assert(u);
        assert(type);

        if (!pidref_is_set(pidref))
                return 0;

        _cleanup_free_ char *comm = NULL;
        (void) pidref_get_comm(pidref, &comm);

        r = kill_or_sigqueue(pidref, signo, code, value);
        if (r < 0) {
                /* Report this failure both to the logs and to the client */
                if (ret_error)
                        sd_bus_error_set_errnof(
                                        ret_error, r,
                                        "Failed to send signal SIG%s to %s process " PID_FMT " (%s): %m",
                                        signal_to_string(signo), type, pidref->pid, strna(comm));

                return log_unit_warning_errno(
                                u, r,
                                "Failed to send signal SIG%s to %s process " PID_FMT " (%s) on client request: %m",
                                signal_to_string(signo), type, pidref->pid, strna(comm));
        }

        log_unit_info(u, "Sent signal SIG%s to %s process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), type, pidref->pid, strna(comm));
        return 1; /* killed */
}
int unit_kill(
                Unit *u,
                KillWhom whom,
                int signo,
                int code,
                int value,
                sd_bus_error *ret_error) {

        PidRef *main_pid, *control_pid;
        bool killed = false;
        int ret = 0, r;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        assert(u);
        assert(whom >= 0);
        assert(whom < _KILL_WHOM_MAX);
        assert(SIGNAL_VALID(signo));
        assert(IN_SET(code, SI_USER, SI_QUEUE));

        main_pid = unit_main_pid(u);
        control_pid = unit_control_pid(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u) && !main_pid && !control_pid)
                return sd_bus_error_setf(ret_error, SD_BUS_ERROR_NOT_SUPPORTED, "Unit type does not support process killing.");

        if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (!main_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (!pidref_is_set(main_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (!control_pid)
                        return sd_bus_error_setf(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (!pidref_is_set(control_pid))
                        return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(whom, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, control_pid, "control", signo, code, value, ret_error);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        if (IN_SET(whom, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL)) {
                r = unit_kill_one(u, main_pid, "main", signo, code, value, ret >= 0 ? ret_error : NULL);
                RET_GATHER(ret, r);
                killed = killed || r > 0;
        }

        /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
         * doesn't really make much sense (and given that enqueued values are a relatively expensive
         * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
        if (IN_SET(whom, KILL_ALL, KILL_ALL_FAIL) && code == SI_USER) {
                CGroupRuntime *crt = unit_get_cgroup_runtime(u);
                if (crt && crt->cgroup_path) {
                        _cleanup_set_free_ Set *pid_set = NULL;

                        if (signo == SIGKILL) {
                                r = cg_kill_kernel_sigkill(crt->cgroup_path);
                                if (r >= 0) {
                                        killed = true;
                                        log_unit_info(u, "Killed unit cgroup with SIGKILL on client request.");
                                        goto finish;
                                }
                                if (r != -EOPNOTSUPP) {
                                        if (ret >= 0)
                                                sd_bus_error_set_errnof(ret_error, r,
                                                                        "Failed to kill unit cgroup: %m");
                                        RET_GATHER(ret, log_unit_warning_errno(u, r, "Failed to kill unit cgroup: %m"));
                                        goto finish;
                                }

                                /* Fall back to manual enumeration */
                        } else {
                                /* Exclude the main/control pids from being killed via the cgroup if
                                 * not SIGKILL */
                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return log_oom();
                        }

                        r = cg_kill_recursive(crt->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                        if (r < 0 && !IN_SET(r, -ESRCH, -ENOENT)) {
                                if (ret >= 0)
                                        sd_bus_error_set_errnof(
                                                        ret_error, r,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));

                                RET_GATHER(ret, log_unit_warning_errno(
                                                                u, r,
                                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                                signal_to_string(signo)));
                        }

                        killed = killed || r >= 0;
                }
        }

finish:
        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (ret >= 0 && !killed && IN_SET(whom, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(ret_error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return ret;
}
int unit_following_set(Unit *u, Set **s) {
        assert(u);
        assert(s);

        if (UNIT_VTABLE(u)->following_set)
                return UNIT_VTABLE(u)->following_set(u, s);

        *s = NULL;
        return 0;
}
UnitFileState unit_get_unit_file_state(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_state >= 0 || !u->fragment_path)
                return u->unit_file_state;

        /* If we know this is a transient unit no need to ask the unit file state for details. Let's bypass
         * the more expensive on-disk check. */
        if (u->transient)
                return (u->unit_file_state = UNIT_FILE_TRANSIENT);

        r = unit_file_get_state(
                        u->manager->runtime_scope,
                        /* root_dir= */ NULL,
                        u->id,
                        &u->unit_file_state);
        if (r < 0)
                u->unit_file_state = UNIT_FILE_BAD;

        return u->unit_file_state;
}
PresetAction unit_get_unit_file_preset(Unit *u) {
        int r;

        assert(u);

        if (u->unit_file_preset >= 0)
                return u->unit_file_preset;

        /* If this is a transient or perpetual unit file it doesn't make much sense to ask the preset
         * database about this, because enabling/disabling makes no sense for either. Hence don't. */
        if (!u->fragment_path || u->transient || u->perpetual)
                return (u->unit_file_preset = -ENOEXEC);

        _cleanup_free_ char *bn = NULL;
        r = path_extract_filename(u->fragment_path, &bn);
        if (r < 0)
                return (u->unit_file_preset = r);
        if (r == O_DIRECTORY)
                return (u->unit_file_preset = -EISDIR);

        return (u->unit_file_preset = unit_file_query_preset(
                                u->manager->runtime_scope,
                                /* root_dir= */ NULL,
                                bn,
                                /* cached= */ NULL));
}
Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
        assert(ref);
        assert(source);
        assert(target);

        if (ref->target)
                unit_ref_unset(ref);

        ref->source = source;
        ref->target = target;
        LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
        return target;
}

void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
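
/* For DynamicUser= a user name is derived from the unit name. If the unit's prefix is not usable as
 * a user name, a stable fallback of the form "_du<16 hex digits>" is generated from a keyed
 * siphash24() of the prefix instead. */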
static int user_from_unit_name(Unit *u, char **ret) {

        static const uint8_t hash_key[] = {
                0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
                0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
        };

        _cleanup_free_ char *n = NULL;
        int r;

        r = unit_name_to_prefix(u->id, &n);
        if (r < 0)
                return r;

        if (valid_user_group_name(n, 0)) {
                *ret = TAKE_PTR(n);
                return 0;
        }

        /* If we can't use the unit name as a user name, then let's hash it and use that */
        if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
                return -ENOMEM;

        return 0;
}
static int unit_verify_contexts(const Unit *u) {
        assert(u);

        const ExecContext *ec = unit_get_exec_context(u);
        if (!ec)
                return 0;

        if (MANAGER_IS_USER(u->manager) && ec->dynamic_user)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "DynamicUser= enabled for user unit, which is not supported. Refusing.");

        if (ec->dynamic_user && ec->working_directory_home)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory=~ is not allowed under DynamicUser=yes. Refusing.");

        if (ec->working_directory && path_below_api_vfs(ec->working_directory) &&
            exec_needs_mount_namespace(ec, /* params = */ NULL, /* runtime = */ NULL))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "WorkingDirectory= may not be below /proc/, /sys/ or /dev/ when using mount namespacing. Refusing.");

        if (exec_needs_pid_namespace(ec, /* params= */ NULL) && !UNIT_VTABLE(u)->notify_pidref)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "PrivatePIDs= setting is only supported for service units. Refusing.");

        const KillContext *kc = unit_get_kill_context(u);

        if (ec->pam_name && kc && !IN_SET(kc->kill_mode, KILL_CONTROL_GROUP, KILL_MIXED))
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit has PAM enabled. Kill mode must be set to 'control-group' or 'mixed'. Refusing.");

        return 0;
}
static PrivateTmp unit_get_private_var_tmp(const Unit *u, const ExecContext *c) {
        assert(u);
        assert(c);
        assert(c->private_tmp >= 0 && c->private_tmp < _PRIVATE_TMP_MAX);

        /* Disable disconnected private tmpfs on /var/tmp/ when DefaultDependencies=no and
         * RootImage=/RootDirectory= are not set, as /var/ may be a separated partition.
         * See issue #37258. */

        /* PrivateTmp=yes/no also enables/disables private tmpfs on /var/tmp/. */
        if (c->private_tmp != PRIVATE_TMP_DISCONNECTED)
                return c->private_tmp;

        /* When DefaultDependencies=yes, disconnected tmpfs is also enabled on /var/tmp/, and an explicit
         * dependency to the mount on /var/ will be added in unit_add_exec_dependencies(). */
        if (u->default_dependencies)
                return PRIVATE_TMP_DISCONNECTED;

        /* When RootImage=/RootDirectory= is enabled, /var/ should be prepared by the image or directory,
         * hence we can mount a disconnected tmpfs on /var/tmp/. */
        if (exec_context_with_rootfs(c))
                return PRIVATE_TMP_DISCONNECTED;

        /* Even if DefaultDependencies=no, enable disconnected tmpfs when
         * RequiresMountsFor=/WantsMountsFor=/var/ is explicitly set. */
        for (UnitMountDependencyType t = 0; t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX; t++)
                if (hashmap_contains(u->mounts_for[t], "/var/"))
                        return PRIVATE_TMP_DISCONNECTED;

        /* Check the same but for After= with Requires=/Requisite=/Wants= or friends. */
        Unit *m = manager_get_unit(u->manager, "var.mount");
        if (!m)
                return PRIVATE_TMP_NO;

        if (!unit_has_dependency(u, UNIT_ATOM_AFTER, m))
                return PRIVATE_TMP_NO;

        if (unit_has_dependency(u, UNIT_ATOM_PULL_IN_START, m) ||
            unit_has_dependency(u, UNIT_ATOM_PULL_IN_VERIFY, m) ||
            unit_has_dependency(u, UNIT_ATOM_PULL_IN_START_IGNORED, m))
                return PRIVATE_TMP_DISCONNECTED;

        return PRIVATE_TMP_NO;
}
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->defaults.rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->defaults.rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) && !ec->working_directory) {
                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        if (!ec->working_directory_home)
                                /* If home directory is implied by us, allow it to be missing. */
                                ec->working_directory_missing_ok = true;
                }

                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        /* With DynamicUser= we want private directories, so if the user hasn't manually
                         * selected PrivateTmp=, enable it, but to a fully private (disconnected) tmpfs
                         * instance. */
                        if (ec->private_tmp == PRIVATE_TMP_NO)
                                ec->private_tmp = PRIVATE_TMP_DISCONNECTED;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }

                ec->private_var_tmp = unit_get_private_var_tmp(u, ec);

                FOREACH_ARRAY(d, ec->directories, _EXEC_DIRECTORY_TYPE_MAX)
                        exec_directory_sort(d);
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {
                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                /* Only add these if needed, as they imply that everything else is blocked. */
                if (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow) {
                        if (ec->root_image || ec->mount_images) {

                                /* When RootImage= or MountImages= is specified, the following devices are touched. */
                                FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE);
                                        if (r < 0)
                                                return r;
                                }

                                FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                        r = cgroup_context_add_device_allow(cc, p, CGROUP_DEVICE_READ|CGROUP_DEVICE_WRITE|CGROUP_DEVICE_MKNOD);
                                        if (r < 0)
                                                return r;
                                }

                                /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                                 * Same for mapper and verity. */
                                FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                        r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                        if (r < 0)
                                                return r;
                                }
                        }

                        if (ec->protect_clock) {
                                r = cgroup_context_add_device_allow(cc, "char-rtc", CGROUP_DEVICE_READ);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return unit_verify_contexts(u);
}
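/* Net effect of the DynamicUser= branch above, summarized for reference: a unit configured with just
 *
 *     [Service]
 *     DynamicUser=yes
 *
 * implicitly also gets PrivateTmp=disconnected (unless explicitly configured otherwise), RemoveIPC=yes,
 * ProtectSystem=strict, ProtectHome=read-only, NoNewPrivileges=yes and RestrictSUIDSGID=yes. */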
ExecContext *unit_get_exec_context(const Unit *u) {
        size_t offset;

        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->exec_context_offset;
        if (offset <= 0)
                return NULL;

        return (ExecContext*) ((uint8_t*) u + offset);
}

KillContext *unit_get_kill_context(const Unit *u) {
        size_t offset;

        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->kill_context_offset;
        if (offset <= 0)
                return NULL;

        return (KillContext*) ((uint8_t*) u + offset);
}

CGroupContext *unit_get_cgroup_context(const Unit *u) {
        size_t offset;

        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->cgroup_context_offset;
        if (offset <= 0)
                return NULL;

        return (CGroupContext*) ((uint8_t*) u + offset);
}

ExecRuntime *unit_get_exec_runtime(const Unit *u) {
        size_t offset;

        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        if (offset <= 0)
                return NULL;

        return *(ExecRuntime**) ((uint8_t*) u + offset);
}

CGroupRuntime *unit_get_cgroup_runtime(const Unit *u) {
        size_t offset;

        assert(u);

        if (u->type < 0)
                return NULL;

        offset = UNIT_VTABLE(u)->cgroup_runtime_offset;
        if (offset <= 0)
                return NULL;

        return *(CGroupRuntime**) ((uint8_t*) u + offset);
}
static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
        assert(u);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return NULL;

        if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
                return u->manager->lookup_paths.transient;

        if (flags & UNIT_PERSISTENT)
                return u->manager->lookup_paths.persistent_control;

        if (flags & UNIT_RUNTIME)
                return u->manager->lookup_paths.runtime_control;

        return NULL;
}
const char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        assert(s);
        assert(popcount(flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX | UNIT_ESCAPE_C)) <= 1);
        assert(buf);

        _cleanup_free_ char *t = NULL;

        /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
         * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
         * written to *buf. This means the return value always contains a properly escaped version, but *buf
         * only contains a pointer if an allocation was made. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                t = specifier_escape(s);
                if (!t)
                        return NULL;

                s = t;
        }

        /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
         * ExecStart= and friends, i.e. '$' and quotes. */

        if (flags & (UNIT_ESCAPE_EXEC_SYNTAX_ENV | UNIT_ESCAPE_EXEC_SYNTAX)) {
                char *t2;

                if (flags & UNIT_ESCAPE_EXEC_SYNTAX_ENV) {
                        t2 = strreplace(s, "$", "$$");
                        if (!t2)
                                return NULL;
                        free_and_replace(t, t2);
                }

                t2 = shell_escape(t ?: s, "\"");
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);

        } else if (flags & UNIT_ESCAPE_C) {
                char *t2;

                t2 = cescape(t ?: s);
                if (!t2)
                        return NULL;
                free_and_replace(t, t2);
        }

        if (!t)
                return s;

        *buf = TAKE_PTR(t);
        return *buf;
}
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
         * lines in a way suitable for ExecStart= stanzas. */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, n + a + 1))
                        return NULL;

                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        if (!GREEDY_REALLOC(result, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
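/* For instance, the list { "/bin/echo", "a b" } is rendered as the single string
 *
 *     "/bin/echo" "a b"
 *
 * i.e. every entry is double-quoted (after whatever escaping the flags request) and entries are
 * separated by single spaces, which is the shape the ExecStart= parser expects back. */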
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in,
                 * but instead write to the transient unit file. */
                fputs_with_newline(u->transient_file, data);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup_full(&u->manager->unit_path_cache, &path_hash_ops_free, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file(q, wrapped, WRITE_STRING_FILE_CREATE|WRITE_STRING_FILE_ATOMIC|WRITE_STRING_FILE_LABEL);
        if (r < 0)
                return r;

        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
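/* The drop-in ends up under the directory selected by unit_drop_in_dir() with the "50" priority prefix
 * passed above, i.e. roughly <dir>/<unit>.d/50-<name>.conf (the exact naming is up to drop_in_file()),
 * the same layout that "systemctl set-property" produces. */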
int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
        _cleanup_free_ char *p = NULL;
        va_list ap;
        int r;

        assert(u);
        assert(name);
        assert(format);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        va_start(ap, format);
        r = vasprintf(&p, format, ap);
        va_end(ap);

        if (r < 0)
                return -ENOMEM;

        return unit_write_setting(u, flags, name, p);
}
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
static bool ignore_leftover_process(const char *comm) {
        return comm && comm[0] == '('; /* Most likely our own helper process (PAM?), ignore */
}

static int log_kill(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
                 * here to let the manager know that a process was killed. */
                return 1;

        log_unit_notice(u,
                        "Killing process " PID_FMT " (%s) with signal SIG%s.",
                        pid->pid,
                        strna(comm),
                        signal_to_string(sig));

        return 1;
}
static int operation_to_signal(
                const KillContext *c,
                KillOperation k,
                bool *ret_noteworthy) {

        assert(c);
        assert(ret_noteworthy);

        switch (k) {

        case KILL_TERMINATE:
        case KILL_TERMINATE_AND_LOG:
                *ret_noteworthy = false;
                return c->kill_signal;

        case KILL_RESTART:
                *ret_noteworthy = false;
                return restart_kill_signal(c);

        case KILL_KILL:
                *ret_noteworthy = true;
                return c->final_kill_signal;

        case KILL_WATCHDOG:
                *ret_noteworthy = true;
                return c->watchdog_signal;

        default:
                assert_not_reached();
        }
}
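/* In tabular form, the mapping implemented above:
 *
 *     KILL_TERMINATE, KILL_TERMINATE_AND_LOG → KillSignal=        (not noteworthy)
 *     KILL_RESTART                           → RestartKillSignal= (not noteworthy)
 *     KILL_KILL                              → FinalKillSignal=   (noteworthy)
 *     KILL_WATCHDOG                          → WatchdogSignal=    (noteworthy)
 *
 * "Noteworthy" operations are the ones unit_kill_context() logs about via log_kill(). */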
static int unit_kill_context_one(
                Unit *u,
                const PidRef *pidref,
                const char *type,
                bool is_alien,
                int sig,
                bool send_sighup,
                cg_kill_log_func_t log_func) {

        int r;

        assert(u);
        assert(type);

        /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */

        if (!pidref_is_set(pidref))
                return 0;

        if (log_func)
                log_func(pidref, sig, u);

        r = pidref_kill_and_sigcont(pidref, sig);
        if (r == -ESRCH)
                return !is_alien;
        if (r < 0) {
                _cleanup_free_ char *comm = NULL;

                (void) pidref_get_comm(pidref, &comm);
                return log_unit_warning_errno(u, r, "Failed to kill %s process " PID_FMT " (%s), ignoring: %m", type, pidref->pid, strna(comm));
        }

        if (send_sighup)
                (void) pidref_kill(pidref, SIGHUP);

        return !is_alien;
}
int unit_kill_context(Unit *u, KillOperation k) {
        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        bool noteworthy, is_alien;
        int sig, r;

        assert(u);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        KillContext *c = unit_get_kill_context(u);
        if (!c || c->kill_mode == KILL_NONE)
                return 0;

        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        PidRef *main_pid = unit_main_pid_full(u, &is_alien);
        r = unit_kill_context_one(u, main_pid, "main", is_alien, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        r = unit_kill_context_one(u, unit_control_pid(u), "control", /* is_alien = */ false, sig, send_sighup, log_func);
        wait_for_exit = wait_for_exit || r > 0;

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                r = unit_pid_set(u, &pid_set);
                if (r < 0)
                        return r;

                r = cg_kill_recursive(
                                crt->cgroup_path,
                                sig,
                                CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                pid_set,
                                log_func,
                                u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt->cgroup_path));

                } else if (r > 0) {
                        wait_for_exit = true;

                        if (send_sighup) {
                                pid_set = set_free(pid_set);

                                r = unit_pid_set(u, &pid_set);
                                if (r < 0)
                                        return r;

                                (void) cg_kill_recursive(
                                                crt->cgroup_path,
                                                SIGHUP,
                                                CGROUP_IGNORE_SELF,
                                                pid_set,
                                                /* log_kill= */ NULL,
                                                /* userdata= */ NULL);
                        }
                }
        }

        return wait_for_exit;
}
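/* Order of operations above, for reference: first the main and control processes get the selected
 * signal (plus SIGCONT, and optionally SIGHUP), then — for KillMode=control-group, or KillMode=mixed
 * on the final kill — the remaining cgroup members are swept recursively, with the PIDs already
 * handled excluded from the sweep. */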
int unit_add_mounts_for(Unit *u, const char *path, UnitDependencyMask mask, UnitMountDependencyType type) {
        Hashmap **unit_map, **manager_map;
        int r;

        assert(u);
        assert(path);
        assert(type >= 0 && type < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);

        unit_map = &u->mounts_for[type];
        manager_map = &u->manager->units_needing_mounts_for[type];

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
         * paths in the unit (from the path to the UnitDependencyInfo structure indicating how the
         * dependency came to be). However, we build a prefix table for all possible prefixes so that new
         * appearing mount units can easily determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        if (hashmap_contains(*unit_map, path)) /* Exit quickly if the path is already covered. */
                return 0;

        /* Use the canonical form of the path as the stored key. We call path_is_normalized()
         * only after simplification, since path_is_normalized() rejects paths with '.'.
         * path_is_normalized() also verifies that the path fits in PATH_MAX. */
        _cleanup_free_ char *p = NULL;
        r = path_simplify_alloc(path, &p);
        if (r < 0)
                return r;
        path = p;

        if (!path_is_normalized(path))
                return -EPERM;

        UnitDependencyInfo di = {
                .origin_mask = mask,
        };

        r = hashmap_ensure_put(unit_map, &path_hash_ops, p, di.data);
        if (r < 0)
                return r;
        TAKE_PTR(p); /* path remains a valid pointer to the string stored in the hashmap */

        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(*manager_map, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(manager_map, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(*manager_map, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
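/* Illustration of the prefix table: registering "/var/lib/foo" records the path itself in the unit's
 * map and adds the unit to the manager buckets for "/", "/var", "/var/lib" and "/var/lib/foo", so a
 * newly appearing var.mount (or var-lib.mount, ...) can look up exactly which units depend on it. */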
int unit_setup_exec_runtime(Unit *u) {
        _cleanup_(exec_shared_runtime_unrefp) ExecSharedRuntime *esr = NULL;
        _cleanup_(dynamic_creds_unrefp) DynamicCreds *dcreds = NULL;
        _cleanup_set_free_ Set *units = NULL;
        ExecRuntime **rt;
        ExecContext *ec;
        size_t offset;
        Unit *other;
        int r;

        offset = UNIT_VTABLE(u)->exec_runtime_offset;
        assert(offset > 0);

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);
        if (*rt)
                return 0;

        ec = ASSERT_PTR(unit_get_exec_context(u));

        r = unit_get_transitive_dependency_set(u, UNIT_ATOM_JOINS_NAMESPACE_OF, &units);
        if (r < 0)
                return r;

        /* Try to get it from somebody else */
        SET_FOREACH(other, units) {
                r = exec_shared_runtime_acquire(u->manager, NULL, other->id, false, &esr);
                if (r < 0)
                        return r;
                if (r > 0)
                        break;
        }

        if (!esr) {
                r = exec_shared_runtime_acquire(u->manager, ec, u->id, true, &esr);
                if (r < 0)
                        return r;
        }

        if (ec->dynamic_user) {
                r = dynamic_creds_make(u->manager, ec->user, ec->group, &dcreds);
                if (r < 0)
                        return r;
        }

        r = exec_runtime_make(u, ec, esr, dcreds, rt);
        if (r < 0)
                return r;

        TAKE_PTR(esr);
        TAKE_PTR(dcreds);

        return r;
}
*unit_setup_cgroup_runtime(Unit
*u
) {
5080 offset
= UNIT_VTABLE(u
)->cgroup_runtime_offset
;
5083 CGroupRuntime
**rt
= (CGroupRuntime
**) ((uint8_t*) u
+ offset
);
5087 return (*rt
= cgroup_runtime_new());
bool unit_type_supported(UnitType t) {
        static int8_t cache[_UNIT_TYPE_MAX] = {}; /* -1: disabled, 1: enabled, 0: don't know */
        int r;

        assert(t >= 0 && t < _UNIT_TYPE_MAX);

        if (cache[t] == 0) {
                char *e;

                e = strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t));

                r = getenv_bool(ascii_strupper(e));
                if (r < 0 && r != -ENXIO)
                        log_debug_errno(r, "Failed to parse $%s, ignoring: %m", e);

                cache[t] = r == 0 ? -1 : 1;
        }
        if (cache[t] < 0)
                return false;

        if (!unit_vtable[t]->supported)
                return true;

        return unit_vtable[t]->supported();
}
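/* The environment knob derived above is e.g. $SYSTEMD_SUPPORT_DEVICE, $SYSTEMD_SUPPORT_MOUNT, ...;
 * setting one of them to "0" makes the corresponding unit type report as unsupported. The parsed
 * result is cached for the lifetime of the process. */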
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        if (!unit_log_level_test(u, LOG_NOTICE))
                return;

        r = dir_is_empty(where, /* ignore_hidden_or_backup= */ false);
        if (r > 0 || r == -ENOTDIR)
                return;
        if (r < 0) {
                log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
                return;
        }

        log_unit_struct(u, LOG_NOTICE,
                        LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
                        LOG_ITEM("WHERE=%s", where));
}
int unit_log_noncanonical_mount_path(Unit *u, const char *where) {
        assert(u);
        assert(where);

        /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
        log_unit_struct(u, LOG_ERR,
                        LOG_MESSAGE_ID(SD_MESSAGE_NON_CANONICAL_MOUNT_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
                        LOG_ITEM("WHERE=%s", where));

        return -ELOOP;
}
int unit_fail_if_noncanonical_mount_path(Unit *u, const char* where) {
        int r;

        assert(u);
        assert(where);

        _cleanup_free_ char *canonical_where = NULL;
        r = chase(where, /* root= */ NULL, CHASE_NONEXISTENT, &canonical_where, /* ret_fd= */ NULL);
        if (r < 0) {
                log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
                return 0;
        }

        /* We will happily ignore a trailing slash (or any redundant slashes) */
        if (path_equal(where, canonical_where))
                return 0;

        return unit_log_noncanonical_mount_path(u, where);
}
bool unit_is_pristine(Unit *u) {
        assert(u);

        /* Check if the unit already exists or is already around, in a number of different ways. Note that to
         * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
         * even though nothing was actually loaded, as those unit types don't require a file on disk.
         *
         * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
         * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
         * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf. */

        return IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) &&
               !u->fragment_path &&
               !u->source_path &&
               !u->job &&
               !u->merged_into;
}
PidRef* unit_control_pid(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->control_pid)
                return UNIT_VTABLE(u)->control_pid(u);

        return NULL;
}

PidRef* unit_main_pid_full(Unit *u, bool *ret_is_alien) {
        assert(u);

        if (UNIT_VTABLE(u)->main_pid)
                return UNIT_VTABLE(u)->main_pid(u, ret_is_alien);

        if (ret_is_alien)
                *ret_is_alien = false;
        return NULL;
}
static void unit_modify_user_nft_set(Unit *u, bool add, NFTSetSource source, uint32_t element) {
        CGroupContext *c;
        int r;

        assert(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_cgroup_context(u);
        if (!c)
                return;

        if (!u->manager->fw_ctx) {
                r = fw_ctx_new_full(&u->manager->fw_ctx, /* init_tables= */ false);
                if (r < 0)
                        return;

                assert(u->manager->fw_ctx);
        }

        FOREACH_ARRAY(nft_set, c->nft_set_context.sets, c->nft_set_context.n_sets) {
                if (nft_set->source != source)
                        continue;

                r = nft_set_element_modify_any(u->manager->fw_ctx, add, nft_set->nfproto, nft_set->table, nft_set->set, &element, sizeof(element));
                if (r < 0)
                        log_warning_errno(r, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
                                          add ? "add" : "delete", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
                else
                        log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
                                  add ? "Added" : "Deleted", nfproto_to_string(nft_set->nfproto), nft_set->table, nft_set->set, element);
        }
}
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same type, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return;

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
}

static void unit_unref_uid(Unit *u, bool destroy_now) {
        assert(u);

        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_USER, u->ref_uid);

        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}

static void unit_unref_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_modify_user_nft_set(u, /* add = */ false, NFT_SET_SOURCE_GROUP, u->ref_gid);

        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}

void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0;

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1;
}

static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}

static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}

static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        return r > 0 || q > 0;
}
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
        ExecContext *c;
        int r;

        assert(u);

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
        if (r < 0)
                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");

        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_USER, uid);
        unit_modify_user_nft_set(u, /* add = */ true, NFT_SET_SOURCE_GROUP, gid);

        return r;
}
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
        int r;

        assert(u);

        /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);
        if (r > 0)
                unit_add_to_dbus_queue(u);
}
int unit_acquire_invocation_id(Unit *u) {
        sd_id128_t id;
        int r;

        assert(u);

        r = sd_id128_randomize(&id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");

        unit_add_to_dbus_queue(u);
        return 0;
}
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->runtime_scope = u->manager->runtime_scope;

        r = strdup_to(&p->confirm_spawn, manager_get_confirm_spawn(u->manager));
        if (r < 0)
                return r;

        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        p->cgroup_path = crt ? crt->cgroup_path : NULL;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials_directory = u->manager->received_credentials_directory;
        p->received_encrypted_credentials_directory = u->manager->received_encrypted_credentials_directory;

        p->shall_confirm_spawn = u->manager->confirm_spawn;

        p->fallback_smack_process_label = u->manager->defaults.smack_process_label;

        if (u->manager->restrict_fs && p->bpf_restrict_fs_map_fd < 0) {
                int fd = bpf_restrict_fs_map_fd(u);
                if (fd < 0)
                        return fd;

                p->bpf_restrict_fs_map_fd = fd;
        }

        p->user_lookup_fd = u->manager->user_lookup_fds[1];
        p->handoff_timestamp_fd = u->manager->handoff_timestamp_fds[1];
        if (UNIT_VTABLE(u)->notify_pidref)
                p->pidref_transport_fd = u->manager->pidref_transport_fds[1];

        p->cgroup_id = crt ? crt->cgroup_id : 0;
        p->invocation_id = u->invocation_id;
        sd_id128_to_string(p->invocation_id, p->invocation_id_string);
        p->unit_id = strdup(u->id);
        if (!p->unit_id)
                return -ENOMEM;

        p->debug_invocation = u->debug_invocation;

        return 0;
}
int unit_fork_helper_process(Unit *u, const char *name, bool into_cgroup, PidRef *ret) {
        CGroupRuntime *crt = NULL;
        pid_t pid;
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup, if configured to
         * do so. Returns == 0 in the child, and > 0 in the parent. The pid parameter is always filled in
         * with the child's PID. */

        if (into_cgroup) {
                (void) unit_realize_cgroup(u);

                crt = unit_setup_cgroup_runtime(u);
                if (!crt)
                        return -ENOMEM;
        }

        r = safe_fork(name, FORK_REOPEN_LOG|FORK_DEATHSIG_SIGTERM, &pid);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_(pidref_done) PidRef pidref = PIDREF_NULL;
                int q;

                /* Parent */

                q = pidref_set_pid(&pidref, pid);
                if (q < 0)
                        return q;

                *ret = TAKE_PIDREF(pidref);
                return r;
        }

        /* Child */

        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        if (crt && crt->cgroup_path) {
                r = cg_attach(crt->cgroup_path, 0);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", empty_to_root(crt->cgroup_path));
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
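/* unit_fork_and_watch_rm_rf() below is a typical caller: it runs rm_rf() in such a helper so that the
 * deletion work is accounted to the unit's cgroup and can be supervised like any other unit process. */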
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, PidRef *ret_pid) {
        _cleanup_(pidref_done) PidRef pid = PIDREF_NULL;
        int r;

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", /* into_cgroup= */ true, &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                int ret = EXIT_SUCCESS;

                /* Child */

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        r = unit_watch_pidref(u, &pid, /* exclusive= */ true);
        if (r < 0)
                return r;

        *ret_pid = TAKE_PIDREF(pid);
        return 0;
}
static void unit_update_dependency_mask(Hashmap *deps, Unit *other, UnitDependencyInfo di) {
        assert(deps);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0)
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(deps, other));
        else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(deps, other, di.data) == 0);
}

void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        Hashmap *deps;

        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        HASHMAP_FOREACH(deps, u->dependencies) {
                bool done;

                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, deps) {
                                Hashmap *other_deps;

                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;

                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(deps, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most
                                 * dependencies imply a reverse dependency. Hence, let's delete that one
                                 * too. For that we go through all dependency types on the other unit and
                                 * delete all those which point to us and have the right mask set. */

                                HASHMAP_FOREACH(other_deps, other->dependencies) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other_deps, u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;

                                        dj.destination_mask &= ~mask;
                                        unit_update_dependency_mask(other_deps, u, dj);
                                }

                                unit_add_to_gc_queue(other);

                                /* The unit 'other' may not be wanted by the unit 'u'. */
                                unit_submit_to_stop_when_unneeded_queue(other);

                                u->dependency_generation++;
                                other->dependency_generation++;

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
static int unit_get_invocation_path(Unit *u, char **ret) {
        char *p;
        int r;

        assert(u);
        assert(ret);

        if (MANAGER_IS_SYSTEM(u->manager))
                p = strjoin("/run/systemd/units/invocation:", u->id);
        else {
                _cleanup_free_ char *user_path = NULL;

                r = xdg_user_runtime_dir("/systemd/units/invocation:", &user_path);
                if (r < 0)
                        return r;

                p = strjoin(user_path, u->id);
        }
        if (!p)
                return -ENOMEM;

        *ret = p;
        return 0;
}
static int unit_export_invocation_id(Unit *u) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (u->exported_invocation_id)
                return 0;

        if (sd_id128_is_null(u->invocation_id))
                return 0;

        r = unit_get_invocation_path(u, &p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");

        r = symlinkat_atomic_full(u->invocation_id_string, AT_FDCWD, p, SYMLINK_LABEL);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
        return 0;
}
static int unit_export_log_level_max(Unit *u, int log_level_max, bool overwrite) {
        const char *p;
        char buf[2];
        int r;

        assert(u);

        /* When the debug_invocation logic runs, overwrite will be true as we always want to switch the max
         * log level that the journal applies, and we want to always restore the previous level once done */

        if (!overwrite && u->exported_log_level_max)
                return 0;

        if (log_level_max < 0)
                return 0;

        assert(log_level_max <= 7);

        buf[0] = '0' + log_level_max;
        buf[1] = 0;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
        return 0;
}
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -EBADF;
        struct iovec *iovec;
        const char *p, *pattern;
        le64_t *sizes;
        ssize_t n;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_extra_fields)
                return 0;

        if (c->n_log_extra_fields <= 0)
                return 0;

        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (size_t i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];
        }

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        fd = mkostemp_safe(pattern);
        if (fd < 0)
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
        if (n < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
                goto fail;
        }

        (void) fchmod(fd, 0644);

        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
                goto fail;
        }

        u->exported_log_extra_fields = true;
        return 0;

fail:
        (void) unlink(pattern);
        return r;
}
static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_interval)
                return 0;

        if (c->log_ratelimit.interval == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_ratelimit.interval) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_ratelimit_interval = true;
        return 0;
}

static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;
        const char *p;
        int r;

        assert(u);
        assert(c);

        if (u->exported_log_ratelimit_burst)
                return 0;

        if (c->log_ratelimit.burst == 0)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_ratelimit.burst) < 0)
                return log_oom();

        r = symlink_atomic(buf, p);
        if (r < 0)
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_ratelimit_burst = true;
        return 0;
}
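/* Like the log-level-max export above, these are symlinks whose *target* carries the value, e.g.
 * /run/systemd/units/log-rate-limit-interval:foo.service -> "5000000" (the interval in microseconds):
 * cheap to write atomically and cheap for journald to read back with a single readlink(). */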
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
         * an IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't
         * really apply to communication between the journal and systemd, as we assume that these two daemons live in
         * the same namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c->log_level_max, /* overwrite= */ false);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
void unit_unlink_state_files(Unit *u) {
        const char *p;

        assert(u);

        if (!u->id)
                return;

        /* Undoes the effect of unit_export_state_files() */

        if (u->exported_invocation_id) {
                _cleanup_free_ char *invocation_path = NULL;
                int r = unit_get_invocation_path(u, &invocation_path);
                if (r >= 0) {
                        (void) unlink(invocation_path);
                        u->exported_invocation_id = false;
                }
        }

        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        if (u->exported_log_level_max) {
                p = strjoina("/run/systemd/units/log-level-max:", u->id);
                (void) unlink(p);

                u->exported_log_level_max = false;
        }

        if (u->exported_log_extra_fields) {
                p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
                (void) unlink(p);

                u->exported_log_extra_fields = false;
        }

        if (u->exported_log_ratelimit_interval) {
                p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
                (void) unlink(p);

                u->exported_log_ratelimit_interval = false;
        }

        if (u->exported_log_ratelimit_burst) {
                p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
                (void) unlink(p);

                u->exported_log_ratelimit_burst = false;
        }
}
int unit_set_debug_invocation(Unit *u, bool enable) {
        int r;

        assert(u);

        if (u->debug_invocation == enable)
                return 0; /* Nothing to do */

        u->debug_invocation = enable;

        /* Ensure that the new log level is exported for the journal, in place of the previous one */
        if (u->exported_log_level_max) {
                const ExecContext *ec = unit_get_exec_context(u);
                if (ec) {
                        r = unit_export_log_level_max(u, enable ? LOG_PRI(LOG_DEBUG) : ec->log_level_max, /* overwrite= */ true);
                        if (r < 0)
                                return r;
                }
        }

        return 1;
}
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork off a process for this unit */

        (void) unit_realize_cgroup(u);

        CGroupRuntime *crt = unit_get_cgroup_runtime(u);
        if (crt && crt->reset_accounting) {
                (void) unit_reset_accounting(u);
                crt->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        return 0;
}
static int unit_log_leftover_process_start(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(u,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid->pid, strna(comm));

        return 1;
}

static int unit_log_leftover_process_stop(const PidRef *pid, int sig, void *userdata) {
        const Unit *u = ASSERT_PTR(userdata);
        _cleanup_free_ char *comm = NULL;

        assert(pidref_is_set(pid));

        (void) pidref_get_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(u,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid->pid, strna(comm));

        return 1;
}
int unit_warn_leftover_processes(Unit *u, bool start) {
        _cleanup_free_ char *cgroup = NULL;
        int r;

        assert(u);

        r = unit_get_cgroup_path_with_fallback(u, &cgroup);
        if (r < 0)
                return r;

        return cg_kill_recursive(
                        cgroup,
                        /* sig= */ 0,
                        /* flags= */ 0,
                        /* killed_pids= */ NULL,
                        start ? unit_log_leftover_process_start : unit_log_leftover_process_stop,
                        (void*) u);
}
bool unit_needs_console(Unit *u) {
        ExecContext *ec;
        UnitActiveState state;

        assert(u);

        state = unit_active_state(u);

        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return false;

        if (UNIT_VTABLE(u)->needs_console)
                return UNIT_VTABLE(u)->needs_console(u);

        /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
        ec = unit_get_exec_context(u);
        if (!ec)
                return false;

        return exec_context_may_touch_console(ec);
}
int unit_pid_attachable(Unit *u, PidRef *pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pidref_is_set(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier is not valid.");

        /* Some extra safety check */
        if (pid->pid == 1 || pidref_is_self(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid->pid);

        /* Don't even begin to bother with kernel threads */
        r = pidref_is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid->pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid->pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid->pid);

        return 0;
}
int unit_get_log_level_max(const Unit *u) {
        if (u) {
                if (u->debug_invocation)
                        return LOG_DEBUG;

                ExecContext *ec = unit_get_exec_context(u);
                if (ec && ec->log_level_max >= 0)
                        return ec->log_level_max;
        }

        return log_get_max_level();
}

bool unit_log_level_test(const Unit *u, int level) {
        assert(u);
        return LOG_PRI(level) <= unit_get_log_level_max(u);
}
void unit_log_success(Unit *u) {
        assert(u);

        /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
         * This message has low information value for regular users and it might be a bit overwhelming on a system with
         * a lot of devices. */

        log_unit_struct(u,
                        MANAGER_IS_USER(u->manager) ? LOG_DEBUG : LOG_INFO,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SUCCESS_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}

void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_WARNING,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_FAILURE_RESULT_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                        LOG_ITEM("UNIT_RESULT=%s", result));
}

void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_unit_struct(u, LOG_INFO,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_SKIPPED_STR),
                        LOG_UNIT_INVOCATION_ID(u),
                        LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                        LOG_ITEM("UNIT_RESULT=%s", result));
}
void unit_log_process_exit(
                Unit *u,
                const char *kind,
                const char *command,
                bool success,
                int code,
                int status) {

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * warning. */

        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_unit_struct(u, level,
                        LOG_MESSAGE_ID(SD_MESSAGE_UNIT_PROCESS_EXIT_STR),
                        LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s%s",
                                         kind,
                                         sigchld_code_to_string(code), status,
                                         strna(code == CLD_EXITED
                                               ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                               : signal_to_string(status)),
                                         success ? " (success)" : ""),
                        LOG_ITEM("EXIT_CODE=%s", sigchld_code_to_string(code)),
                        LOG_ITEM("EXIT_STATUS=%i", status),
                        LOG_ITEM("COMMAND=%s", strna(command)),
                        LOG_UNIT_INVOCATION_ID(u));
}
int unit_exit_status(Unit *u) {
        assert(u);

        /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
         * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
         * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
         * service process has exited abnormally (signal/coredump). */

        if (!UNIT_VTABLE(u)->exit_status)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->exit_status(u);
}

int unit_failure_action_exit_status(Unit *u) {
        int r;

        assert(u);

        /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */

        if (u->failure_action_exit_status >= 0)
                return u->failure_action_exit_status;

        r = unit_exit_status(u);
        if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
                return 255;

        return r;
}

int unit_success_action_exit_status(Unit *u) {
        int r;

        assert(u);

        /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */

        if (u->success_action_exit_status >= 0)
                return u->success_action_exit_status;

        r = unit_exit_status(u);
        if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
                return 255;

        return r;
}
int unit_test_trigger_loaded(Unit *u) {
        Unit *trigger;

        /* Tests whether the unit to trigger is loaded */

        trigger = UNIT_TRIGGER(u);
        if (!trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
                                            "Refusing to start, no unit to trigger.");
        if (trigger->load_state != UNIT_LOADED)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
                                            "Refusing to start, unit %s to trigger not loaded.", trigger->id);

        return 0;
}
void unit_destroy_runtime_data(Unit *u, const ExecContext *context, bool destroy_runtime_dir) {
        assert(u);
        assert(context);

        /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
        if (destroy_runtime_dir && context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO)
                exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);

        exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
        exec_context_destroy_mount_ns_dir(u);
}
int unit_clean(Unit *u, ExecCleanMask mask) {
        UnitActiveState state;

        assert(u);

        /* Special return values:
         *
         *   -EOPNOTSUPP → cleaning not supported for this unit type
         *   -EUNATCH    → cleaning not defined for this resource type
         *   -EBUSY      → unit currently can't be cleaned since it's running or not properly loaded, or has
         *                 a job queued or similar
         */

        if (!UNIT_VTABLE(u)->clean)
                return -EOPNOTSUPP;

        if (mask == 0)
                return -EUNATCH;

        if (u->load_state != UNIT_LOADED)
                return -EBUSY;

        if (u->job)
                return -EBUSY;

        state = unit_active_state(u);
        if (state != UNIT_INACTIVE)
                return -EBUSY;

        return UNIT_VTABLE(u)->clean(u, mask);
}
int unit_can_clean(Unit *u, ExecCleanMask *ret) {
        assert(u);
        assert(ret);

        if (!UNIT_VTABLE(u)->clean ||
            u->load_state != UNIT_LOADED) {
                *ret = 0;
                return 0;
        }

        /* When the clean() method is set, can_clean() really should be set too */
        assert(UNIT_VTABLE(u)->can_clean);

        return UNIT_VTABLE(u)->can_clean(u, ret);
}
bool unit_can_start_refuse_manual(Unit *u) {
        return unit_can_start(u) && !u->refuse_manual_start;
}

bool unit_can_stop_refuse_manual(Unit *u) {
        return unit_can_stop(u) && !u->refuse_manual_stop;
}

bool unit_can_isolate_refuse_manual(Unit *u) {
        return unit_can_isolate(u) && !u->refuse_manual_start;
}
void unit_next_freezer_state(Unit *u, FreezerAction action, FreezerState *ret_next, FreezerState *ret_objective) {
        FreezerState current, parent, next, objective;

        assert(u);
        assert(action >= 0);
        assert(action < _FREEZER_ACTION_MAX);
        assert(ret_next);
        assert(ret_objective);

        /* This function determines the correct freezer state transitions for a unit
         * given the action being requested. It returns the next state, and also the "objective",
         * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
         * ultimately want to achieve. */

        current = u->freezer_state;

        Unit *slice = UNIT_GET_SLICE(u);
        if (slice)
                parent = slice->freezer_state;
        else
                parent = FREEZER_RUNNING;

        switch (action) {

        case FREEZER_FREEZE:
                /* We always "promote" a freeze initiated by parent into a normal freeze */
                if (IN_SET(current, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = FREEZER_FROZEN;
                else
                        next = FREEZER_FREEZING;
                break;

        case FREEZER_THAW:
                /* Thawing is the most complicated operation here, because we can't thaw a unit
                 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
                 * initiated by parent if the parent is frozen */
                if (IN_SET(current, FREEZER_RUNNING, FREEZER_THAWING,
                           FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT)) /* Should usually be refused by unit_freezer_action */
                        next = current;
                else if (current == FREEZER_FREEZING) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FREEZING_BY_PARENT;
                } else if (current == FREEZER_FROZEN) {
                        if (IN_SET(parent, FREEZER_RUNNING, FREEZER_THAWING))
                                next = FREEZER_THAWING;
                        else
                                next = FREEZER_FROZEN_BY_PARENT;
                } else
                        assert_not_reached();
                break;

        case FREEZER_PARENT_FREEZE:
                /* We need to avoid accidentally demoting units frozen manually */
                if (IN_SET(current, FREEZER_FREEZING, FREEZER_FROZEN, FREEZER_FROZEN_BY_PARENT))
                        next = current;
                else
                        next = FREEZER_FREEZING_BY_PARENT;
                break;

        case FREEZER_PARENT_THAW:
                /* We don't want to thaw units from a parent if they were frozen
                 * manually, so for such units this action is a no-op */
                if (IN_SET(current, FREEZER_RUNNING, FREEZER_FREEZING, FREEZER_FROZEN))
                        next = current;
                else
                        next = FREEZER_THAWING;
                break;

        default:
                assert_not_reached();
        }

        objective = freezer_state_finish(next);
        if (objective == FREEZER_FROZEN_BY_PARENT)
                objective = FREEZER_FROZEN;
        assert(IN_SET(objective, FREEZER_RUNNING, FREEZER_FROZEN));

        *ret_next = next;
        *ret_objective = objective;
}
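/* Worked example of the demotion logic: a unit in FREEZER_FROZEN that is asked to FREEZER_THAW while
 * its slice is still frozen moves to FREEZER_FROZEN_BY_PARENT rather than thawing — the kernel can't
 * thaw a cgroup under a frozen ancestor, so we only record that the manual freeze is gone. */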
bool unit_can_freeze(const Unit *u) {
        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE) || unit_has_name(u, SPECIAL_INIT_SCOPE))
                return false;

        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freezer_action;
}
void unit_set_freezer_state(Unit *u, FreezerState state) {
        assert(u);
        assert(state >= 0);
        assert(state < _FREEZER_STATE_MAX);

        if (u->freezer_state == state)
                return;

        log_unit_debug(u, "Freezer state changed %s -> %s",
                       freezer_state_to_string(u->freezer_state), freezer_state_to_string(state));

        u->freezer_state = state;

        unit_add_to_dbus_queue(u);
}
void unit_freezer_complete(Unit *u, FreezerState kernel_state) {
        bool expected;

        assert(u);
        assert(IN_SET(kernel_state, FREEZER_RUNNING, FREEZER_FROZEN));

        expected = IN_SET(u->freezer_state, FREEZER_RUNNING, FREEZER_THAWING) == (kernel_state == FREEZER_RUNNING);

        unit_set_freezer_state(u, expected ? freezer_state_finish(u->freezer_state) : kernel_state);
        log_unit_info(u, "Unit now %s.", u->freezer_state == FREEZER_RUNNING ? "thawed" :
                      freezer_state_to_string(u->freezer_state));

        /* If the cgroup's final state is against what's requested by us, report as canceled. */
        bus_unit_send_pending_freezer_message(u, /* canceled = */ !expected);
}
int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        if (!unit_can_freeze(u))
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        if (action == FREEZER_FREEZE && IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT))
                return -EALREADY;
        if (action == FREEZER_THAW && u->freezer_state == FREEZER_THAWING)
                return -EALREADY;
        if (action == FREEZER_THAW && IN_SET(u->freezer_state, FREEZER_FREEZING_BY_PARENT, FREEZER_FROZEN_BY_PARENT))
                return -ECHILD;

        r = UNIT_VTABLE(u)->freezer_action(u, action);
        if (r <= 0)
                return r;

        assert(IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_FREEZING_BY_PARENT, FREEZER_THAWING));
        return 1;
}
Condition *unit_find_failed_condition(Unit *u) {
        Condition *failed_trigger = NULL;
        bool has_succeeded_trigger = false;

        if (u->condition_result)
                return NULL;

        LIST_FOREACH(conditions, c, u->conditions)
                if (c->trigger) {
                        if (c->result == CONDITION_SUCCEEDED)
                                has_succeeded_trigger = true;
                        else if (!failed_trigger)
                                failed_trigger = c;
                } else if (c->result != CONDITION_SUCCEEDED)
                        return c;

        return failed_trigger && !has_succeeded_trigger ? failed_trigger : NULL;
}
int unit_can_live_mount(Unit *u, sd_bus_error *error) {
        assert(u);

        if (!UNIT_VTABLE(u)->live_mount)
                return sd_bus_error_setf(
                                error,
                                SD_BUS_ERROR_NOT_SUPPORTED,
                                "Live mounting not supported by unit type '%s'",
                                unit_type_to_string(u->type));

        if (u->load_state != UNIT_LOADED)
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_NO_SUCH_UNIT,
                                "Unit '%s' not loaded, cannot live mount",
                                u->id);

        if (!UNIT_VTABLE(u)->can_live_mount)
                return 0;

        return UNIT_VTABLE(u)->can_live_mount(u, error);
}

int unit_live_mount(
                Unit *u,
                const char *src,
                const char *dst,
                sd_bus_message *message,
                MountInNamespaceFlags flags,
                const MountOptions *options,
                sd_bus_error *error) {

        assert(u);
        assert(UNIT_VTABLE(u)->live_mount);

        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
                log_unit_debug(u, "Unit not active, cannot perform live mount.");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_INACTIVE,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: unit not active",
                                src, dst, u->id);
        }

        if (unit_active_state(u) == UNIT_REFRESHING) {
                log_unit_debug(u, "Unit already live mounting, refusing further requests.");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_BUSY,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another live mount in progress",
                                src, dst, u->id);
        }

        if (u->job) {
                log_unit_debug(u, "Unit already has a job in progress, cannot live mount");
                return sd_bus_error_setf(
                                error,
                                BUS_ERROR_UNIT_BUSY,
                                "Live mounting '%s' on '%s' for unit '%s' cannot be scheduled: another operation in progress",
                                src, dst, u->id);
        }

        return UNIT_VTABLE(u)->live_mount(u, src, dst, message, flags, options, error);
}
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
Unit* unit_has_dependency(const Unit *u, UnitDependencyAtom atom, Unit *other) {
        assert(u);

        /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
         * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
         * is NULL the first entry found), or NULL if not found. */

        UNIT_FOREACH_DEPENDENCY(i, u, atom)
                if (!other || other == i)
                        return i;

        return NULL;
}
int unit_get_dependency_array(const Unit *u, UnitDependencyAtom atom, Unit ***ret_array) {
        _cleanup_free_ Unit **array = NULL;
        size_t n = 0;
        Unit *other;

        assert(u);
        assert(ret_array);

        /* Gets a list of units matching a specific atom as array. This is useful when iterating through
         * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
         * while the dependency table is continuously updated. */

        UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                if (!GREEDY_REALLOC(array, n + 1))
                        return -ENOMEM;

                array[n++] = other;
        }

        *ret_array = TAKE_PTR(array);

        assert(n <= INT_MAX);
        return (int) n;
}
int unit_get_transitive_dependency_set(Unit *u, UnitDependencyAtom atom, Set **ret) {
        _cleanup_set_free_ Set *units = NULL, *queue = NULL;
        Unit *other;
        int r;

        assert(u);
        assert(ret);

        /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */

        do {
                UNIT_FOREACH_DEPENDENCY(other, u, atom) {
                        r = set_ensure_put(&units, NULL, other);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                continue;

                        r = set_ensure_put(&queue, NULL, other);
                        if (r < 0)
                                return r;
                }
        } while ((u = set_steal_first(queue)));

        *ret = TAKE_PTR(units);
        return 0;
}
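/* This is a plain breadth-first traversal: 'units' doubles as the visited set, 'queue' as the work
 * list, so shared dependencies are visited only once even in dense graphs with cycles. */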
int unit_arm_timer(
                Unit *u,
                sd_event_source **source,
                bool relative,
                usec_t usec,
                sd_event_time_handler_t handler) {

        int r;

        assert(u);
        assert(source);
        assert(handler);

        if (*source) {
                if (usec == USEC_INFINITY)
                        return sd_event_source_set_enabled(*source, SD_EVENT_OFF);

                r = (relative ? sd_event_source_set_time_relative : sd_event_source_set_time)(*source, usec);
                if (r < 0)
                        return r;

                return sd_event_source_set_enabled(*source, SD_EVENT_ONESHOT);
        }

        if (usec == USEC_INFINITY)
                return 0;

        r = (relative ? sd_event_add_time_relative : sd_event_add_time)(
                        u->manager->event,
                        source,
                        CLOCK_MONOTONIC,
                        usec, 0,
                        handler,
                        u);
        if (r < 0)
                return r;

        const char *d = strjoina(unit_type_to_string(u->type), "-timer");
        (void) sd_event_source_set_description(*source, d);

        return 0;
}
bool unit_passes_filter(Unit *u, char * const *states, char * const *patterns) {
        assert(u);

        if (!strv_isempty(states)) {
                char * const *unit_states = STRV_MAKE(
                                unit_load_state_to_string(u->load_state),
                                unit_active_state_to_string(unit_active_state(u)),
                                unit_sub_state_to_string(u));

                if (!strv_overlap(states, unit_states))
                        return false;
        }

        return strv_fnmatch_or_empty(patterns, u->id, FNM_NOESCAPE);
}
static int unit_get_nice(Unit *u) {
        ExecContext *ec;

        ec = unit_get_exec_context(u);
        return ec ? ec->nice : 0;
}

static uint64_t unit_get_cpu_weight(Unit *u) {
        CGroupContext *cc;

        cc = unit_get_cgroup_context(u);
        return cc ? cgroup_context_cpu_weight(cc, manager_state(u->manager)) : CGROUP_WEIGHT_DEFAULT;
}

int unit_compare_priority(Unit *a, Unit *b) {
        int ret;

        ret = CMP(a->type, b->type);
        if (ret != 0)
                return -ret;

        ret = CMP(unit_get_cpu_weight(a), unit_get_cpu_weight(b));
        if (ret != 0)
                return -ret;

        ret = CMP(unit_get_nice(a), unit_get_nice(b));
        if (ret != 0)
                return ret;

        return strcmp(a->id, b->id);
}
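/* Note the negated comparisons above: for type and CPU weight the sign is flipped so that "bigger"
 * values order first, while the nice comparison keeps its natural order (lower nice, i.e. higher
 * priority, first); the final strcmp() over the unit id makes the ordering total and stable. */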
const char* unit_log_field(const Unit *u) {
        return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "UNIT=" : "USER_UNIT=";
}

const char* unit_invocation_log_field(const Unit *u) {
        return MANAGER_IS_SYSTEM(ASSERT_PTR(u)->manager) ? "INVOCATION_ID=" : "USER_INVOCATION_ID=";
}
const ActivationDetailsVTable * const activation_details_vtable[_UNIT_TYPE_MAX] = {
        [UNIT_PATH] = &activation_details_path_vtable,
        [UNIT_TIMER] = &activation_details_timer_vtable,
};

ActivationDetails *activation_details_new(Unit *trigger_unit) {
        _cleanup_free_ ActivationDetails *details = NULL;

        assert(trigger_unit);
        assert(trigger_unit->type != _UNIT_TYPE_INVALID);
        assert(trigger_unit->id);

        details = malloc0(activation_details_vtable[trigger_unit->type]->object_size);
        if (!details)
                return NULL;

        *details = (ActivationDetails) {
                .n_ref = 1,
                .trigger_unit_type = trigger_unit->type,
        };

        details->trigger_unit_name = strdup(trigger_unit->id);
        if (!details->trigger_unit_name)
                return NULL;

        if (ACTIVATION_DETAILS_VTABLE(details)->init)
                ACTIVATION_DETAILS_VTABLE(details)->init(details, trigger_unit);

        return TAKE_PTR(details);
}
static ActivationDetails *activation_details_free(ActivationDetails *details) {
        if (!details)
                return NULL;

        if (ACTIVATION_DETAILS_VTABLE(details)->done)
                ACTIVATION_DETAILS_VTABLE(details)->done(details);

        free(details->trigger_unit_name);

        return mfree(details);
}
void activation_details_serialize(const ActivationDetails *details, FILE *f) {
        if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
                return;

        (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));
        if (details->trigger_unit_name)
                (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);
        if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
                ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
}
int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
        int r;

        assert(key);
        assert(value);
        assert(details);

        if (!*details) {
                UnitType t;

                if (!streq(key, "activation-details-unit-type"))
                        return -EINVAL;

                t = unit_type_from_string(value);
                if (t < 0)
                        return t;

                /* The activation details vtable has defined ops only for path and timer units */
                if (!activation_details_vtable[t])
                        return -EINVAL;

                *details = malloc0(activation_details_vtable[t]->object_size);
                if (!*details)
                        return -ENOMEM;

                **details = (ActivationDetails) {
                        .n_ref = 1,
                        .trigger_unit_type = t,
                };

                return 0;
        }

        if (streq(key, "activation-details-unit-name")) {
                r = free_and_strdup(&(*details)->trigger_unit_name, value);
                if (r < 0)
                        return r;

                return 0;
        }

        if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
                return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);

        return -EINVAL;
}
int activation_details_append_env(const ActivationDetails *details, char ***strv) {
        int r = 0;

        assert(strv);

        if (!details)
                return 0;

        if (!isempty(details->trigger_unit_name)) {
                char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
                if (!s)
                        return -ENOMEM;

                r = strv_consume(strv, TAKE_PTR(s));
                if (r < 0)
                        return r;
        }

        if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
                r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
                if (r < 0)
                        return r;
        }

        return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
}

int activation_details_append_pair(const ActivationDetails *details, char ***strv) {
        int r = 0;

        assert(strv);

        if (!details)
                return 0;

        if (!isempty(details->trigger_unit_name)) {
                r = strv_extend_many(strv, "trigger_unit", details->trigger_unit_name);
                if (r < 0)
                        return r;
        }

        if (ACTIVATION_DETAILS_VTABLE(details)->append_pair) {
                r = ACTIVATION_DETAILS_VTABLE(details)->append_pair(details, strv);
                if (r < 0)
                        return r;
        }

        return r + !isempty(details->trigger_unit_name); /* Return the number of pairs added to the strv */
}

DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
static const char* const unit_mount_dependency_type_table[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX] = {
        [UNIT_MOUNT_WANTS] = "WantsMountsFor",
        [UNIT_MOUNT_REQUIRES] = "RequiresMountsFor",
};

DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type, UnitMountDependencyType);

static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
        [OOM_CONTINUE] = "continue",
        [OOM_STOP] = "stop",
        [OOM_KILL] = "kill",
};

DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);

UnitDependency unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t) {
        switch (t) {

        case UNIT_MOUNT_WANTS:
                return UNIT_WANTS;

        case UNIT_MOUNT_REQUIRES:
                return UNIT_REQUIRES;

        default:
                assert_not_reached();
        }
}