1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
9 #include "sd-messages.h"
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
28 #include "exec-credential.h"
31 #include "fileio-label.h"
33 #include "format-util.h"
34 #include "id128-util.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
41 #include "logarithm.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
47 #include "serialize.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
63 #include "user-util.h"
/* Thresholds for logging at INFO level about resource consumption */
#define MENTIONWORTHY_CPU_NSEC     (1 * NSEC_PER_SEC)
#define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
#define MENTIONWORTHY_IO_BYTES     (1 * U64_MB)
#define MENTIONWORTHY_IP_BYTES     UINT64_C(0)

/* Thresholds for logging at NOTICE level about resource consumption */
#define NOTICEWORTHY_CPU_NSEC      (10 * NSEC_PER_MINUTE)
#define NOTICEWORTHY_MEMORY_BYTES  (512 * U64_MB)
#define NOTICEWORTHY_IO_BYTES      (10 * U64_MB)
#define NOTICEWORTHY_IP_BYTES      (128 * U64_MB)
81 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
82 [UNIT_SERVICE
] = &service_vtable
,
83 [UNIT_SOCKET
] = &socket_vtable
,
84 [UNIT_TARGET
] = &target_vtable
,
85 [UNIT_DEVICE
] = &device_vtable
,
86 [UNIT_MOUNT
] = &mount_vtable
,
87 [UNIT_AUTOMOUNT
] = &automount_vtable
,
88 [UNIT_SWAP
] = &swap_vtable
,
89 [UNIT_TIMER
] = &timer_vtable
,
90 [UNIT_PATH
] = &path_vtable
,
91 [UNIT_SLICE
] = &slice_vtable
,
92 [UNIT_SCOPE
] = &scope_vtable
,
95 Unit
* unit_new(Manager
*m
, size_t size
) {
99 assert(size
>= sizeof(Unit
));
106 u
->type
= _UNIT_TYPE_INVALID
;
107 u
->default_dependencies
= true;
108 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
109 u
->unit_file_preset
= -1;
110 u
->on_failure_job_mode
= JOB_REPLACE
;
111 u
->on_success_job_mode
= JOB_FAIL
;
112 u
->job_timeout
= USEC_INFINITY
;
113 u
->job_running_timeout
= USEC_INFINITY
;
114 u
->ref_uid
= UID_INVALID
;
115 u
->ref_gid
= GID_INVALID
;
117 u
->failure_action_exit_status
= u
->success_action_exit_status
= -1;
119 u
->last_section_private
= -1;
121 u
->start_ratelimit
= (const RateLimit
) {
122 m
->defaults
.start_limit_interval
,
123 m
->defaults
.start_limit_burst
,
126 u
->auto_start_stop_ratelimit
= (const RateLimit
) {
127 .interval
= 10 * USEC_PER_SEC
,
131 unit_reset_memory_accounting_last(u
);
132 unit_reset_io_accounting_last(u
);
137 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
138 _cleanup_(unit_freep
) Unit
*u
= NULL
;
141 u
= unit_new(m
, size
);
145 r
= unit_add_name(u
, name
);
154 bool unit_has_name(const Unit
*u
, const char *name
) {
158 return streq_ptr(name
, u
->id
) ||
159 set_contains(u
->aliases
, name
);
162 static void unit_init(Unit
*u
) {
169 assert(u
->type
>= 0);
171 cc
= unit_get_cgroup_context(u
);
173 cgroup_context_init(cc
);
175 /* Copy in the manager defaults into the cgroup
176 * context, _before_ the rest of the settings have
177 * been initialized */
179 cc
->cpu_accounting
= u
->manager
->defaults
.cpu_accounting
;
180 cc
->io_accounting
= u
->manager
->defaults
.io_accounting
;
181 cc
->blockio_accounting
= u
->manager
->defaults
.blockio_accounting
;
182 cc
->memory_accounting
= u
->manager
->defaults
.memory_accounting
;
183 cc
->tasks_accounting
= u
->manager
->defaults
.tasks_accounting
;
184 cc
->ip_accounting
= u
->manager
->defaults
.ip_accounting
;
186 if (u
->type
!= UNIT_SLICE
)
187 cc
->tasks_max
= u
->manager
->defaults
.tasks_max
;
189 cc
->memory_pressure_watch
= u
->manager
->defaults
.memory_pressure_watch
;
190 cc
->memory_pressure_threshold_usec
= u
->manager
->defaults
.memory_pressure_threshold_usec
;
193 ec
= unit_get_exec_context(u
);
195 exec_context_init(ec
);
197 if (u
->manager
->defaults
.oom_score_adjust_set
) {
198 ec
->oom_score_adjust
= u
->manager
->defaults
.oom_score_adjust
;
199 ec
->oom_score_adjust_set
= true;
202 if (MANAGER_IS_SYSTEM(u
->manager
))
203 ec
->keyring_mode
= EXEC_KEYRING_SHARED
;
205 ec
->keyring_mode
= EXEC_KEYRING_INHERIT
;
207 /* User manager might have its umask redefined by PAM or UMask=. In this
208 * case let the units it manages inherit this value by default. They can
209 * still tune this value through their own unit file */
210 (void) get_process_umask(0, &ec
->umask
);
214 kc
= unit_get_kill_context(u
);
216 kill_context_init(kc
);
218 if (UNIT_VTABLE(u
)->init
)
219 UNIT_VTABLE(u
)->init(u
);
222 static int unit_add_alias(Unit
*u
, char *donated_name
) {
225 /* Make sure that u->names is allocated. We may leave u->names
226 * empty if we fail later, but this is not a problem. */
227 r
= set_ensure_put(&u
->aliases
, &string_hash_ops
, donated_name
);
235 int unit_add_name(Unit
*u
, const char *text
) {
236 _cleanup_free_
char *name
= NULL
, *instance
= NULL
;
243 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
245 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
246 "instance is not set when adding name '%s': %m", text
);
248 r
= unit_name_replace_instance(text
, u
->instance
, &name
);
250 return log_unit_debug_errno(u
, r
,
251 "failed to build instance name from '%s': %m", text
);
258 if (unit_has_name(u
, name
))
261 if (hashmap_contains(u
->manager
->units
, name
))
262 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
263 "unit already exist when adding name '%s': %m", name
);
265 if (!unit_name_is_valid(name
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
266 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
267 "name '%s' is invalid: %m", name
);
269 t
= unit_name_to_type(name
);
271 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
272 "failed to derive unit type from name '%s': %m", name
);
274 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
275 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
276 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
279 r
= unit_name_to_instance(name
, &instance
);
281 return log_unit_debug_errno(u
, r
, "failed to extract instance from name '%s': %m", name
);
283 if (instance
&& !unit_type_may_template(t
))
284 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
), "templates are not allowed for name '%s': %m", name
);
286 /* Ensure that this unit either has no instance, or that the instance matches. */
287 if (u
->type
!= _UNIT_TYPE_INVALID
&& !streq_ptr(u
->instance
, instance
))
288 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
289 "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
290 name
, instance
, u
->instance
);
292 if (u
->id
&& !unit_type_may_alias(t
))
293 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
294 "cannot add name %s, aliases are not allowed for %s units.",
295 name
, unit_type_to_string(t
));
297 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
298 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(E2BIG
), "cannot add name, manager has too many units: %m");
300 /* Add name to the global hashmap first, because that's easier to undo */
301 r
= hashmap_put(u
->manager
->units
, name
, u
);
303 return log_unit_debug_errno(u
, r
, "add unit to hashmap failed for name '%s': %m", text
);
306 r
= unit_add_alias(u
, name
); /* unit_add_alias() takes ownership of the name on success */
308 hashmap_remove(u
->manager
->units
, name
);
314 /* A new name, we don't need the set yet. */
315 assert(u
->type
== _UNIT_TYPE_INVALID
);
316 assert(!u
->instance
);
319 u
->id
= TAKE_PTR(name
);
320 u
->instance
= TAKE_PTR(instance
);
322 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
326 unit_add_to_dbus_queue(u
);
330 int unit_choose_id(Unit
*u
, const char *name
) {
331 _cleanup_free_
char *t
= NULL
;
338 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
342 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
349 if (streq_ptr(u
->id
, name
))
350 return 0; /* Nothing to do. */
352 /* Selects one of the aliases of this unit as the id */
353 s
= set_get(u
->aliases
, (char*) name
);
358 r
= set_remove_and_put(u
->aliases
, name
, u
->id
);
362 assert_se(set_remove(u
->aliases
, name
)); /* see set_get() above… */
364 u
->id
= s
; /* Old u->id is now stored in the set, and s is not stored anywhere */
365 unit_add_to_dbus_queue(u
);
370 int unit_set_description(Unit
*u
, const char *description
) {
375 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
379 unit_add_to_dbus_queue(u
);
384 static bool unit_success_failure_handler_has_jobs(Unit
*unit
) {
387 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_SUCCESS
)
388 if (other
->job
|| other
->nop_job
)
391 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_FAILURE
)
392 if (other
->job
|| other
->nop_job
)
398 void unit_release_resources(Unit
*u
) {
399 UnitActiveState state
;
404 if (u
->job
|| u
->nop_job
)
410 state
= unit_active_state(u
);
411 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
414 if (unit_will_restart(u
))
417 ec
= unit_get_exec_context(u
);
418 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
419 exec_context_destroy_runtime_directory(ec
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
421 if (UNIT_VTABLE(u
)->release_resources
)
422 UNIT_VTABLE(u
)->release_resources(u
);
425 bool unit_may_gc(Unit
*u
) {
426 UnitActiveState state
;
431 /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
432 * unit may be collected, and false if there's some reason to keep it loaded.
434 * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
435 * using markers to properly collect dependency loops.
438 if (u
->job
|| u
->nop_job
)
444 /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
445 * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
446 * before we release the unit. */
447 if (u
->in_cgroup_empty_queue
|| u
->in_cgroup_oom_queue
)
450 /* Make sure to send out D-Bus events before we unload the unit */
451 if (u
->in_dbus_queue
)
454 if (sd_bus_track_count(u
->bus_track
) > 0)
457 state
= unit_active_state(u
);
459 /* But we keep the unit object around for longer when it is referenced or configured to not be
461 switch (u
->collect_mode
) {
463 case COLLECT_INACTIVE
:
464 if (state
!= UNIT_INACTIVE
)
469 case COLLECT_INACTIVE_OR_FAILED
:
470 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
476 assert_not_reached();
479 /* Check if any OnFailure= or on Success= jobs may be pending */
480 if (unit_success_failure_handler_has_jobs(u
))
483 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
484 * around. Units with active processes should never be collected. */
485 r
= unit_cgroup_is_empty(u
);
486 if (r
<= 0 && r
!= -ENXIO
)
487 return false; /* ENXIO means: currently not realized */
489 if (!UNIT_VTABLE(u
)->may_gc
)
492 return UNIT_VTABLE(u
)->may_gc(u
);
495 void unit_add_to_load_queue(Unit
*u
) {
497 assert(u
->type
!= _UNIT_TYPE_INVALID
);
499 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
502 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
503 u
->in_load_queue
= true;
506 void unit_add_to_cleanup_queue(Unit
*u
) {
509 if (u
->in_cleanup_queue
)
512 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
513 u
->in_cleanup_queue
= true;
516 void unit_add_to_gc_queue(Unit
*u
) {
519 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
525 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
526 u
->in_gc_queue
= true;
529 void unit_add_to_dbus_queue(Unit
*u
) {
531 assert(u
->type
!= _UNIT_TYPE_INVALID
);
533 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
536 /* Shortcut things if nobody cares */
537 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
538 sd_bus_track_count(u
->bus_track
) <= 0 &&
539 set_isempty(u
->manager
->private_buses
)) {
540 u
->sent_dbus_new_signal
= true;
544 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
545 u
->in_dbus_queue
= true;
548 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
551 if (u
->in_stop_when_unneeded_queue
)
554 if (!u
->stop_when_unneeded
)
557 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
560 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
561 u
->in_stop_when_unneeded_queue
= true;
564 void unit_submit_to_start_when_upheld_queue(Unit
*u
) {
567 if (u
->in_start_when_upheld_queue
)
570 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)))
573 if (!unit_has_dependency(u
, UNIT_ATOM_START_STEADILY
, NULL
))
576 LIST_PREPEND(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
577 u
->in_start_when_upheld_queue
= true;
580 void unit_submit_to_stop_when_bound_queue(Unit
*u
) {
583 if (u
->in_stop_when_bound_queue
)
586 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
589 if (!unit_has_dependency(u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
, NULL
))
592 LIST_PREPEND(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
593 u
->in_stop_when_bound_queue
= true;
596 static bool unit_can_release_resources(Unit
*u
) {
601 if (UNIT_VTABLE(u
)->release_resources
)
604 ec
= unit_get_exec_context(u
);
605 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
611 void unit_submit_to_release_resources_queue(Unit
*u
) {
614 if (u
->in_release_resources_queue
)
617 if (u
->job
|| u
->nop_job
)
623 if (!unit_can_release_resources(u
))
626 LIST_PREPEND(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
627 u
->in_release_resources_queue
= true;
630 static void unit_clear_dependencies(Unit
*u
) {
633 /* Removes all dependencies configured on u and their reverse dependencies. */
635 for (Hashmap
*deps
; (deps
= hashmap_steal_first(u
->dependencies
));) {
637 for (Unit
*other
; (other
= hashmap_steal_first_key(deps
));) {
640 HASHMAP_FOREACH(other_deps
, other
->dependencies
)
641 hashmap_remove(other_deps
, u
);
643 unit_add_to_gc_queue(other
);
649 u
->dependencies
= hashmap_free(u
->dependencies
);
652 static void unit_remove_transient(Unit
*u
) {
658 if (u
->fragment_path
)
659 (void) unlink(u
->fragment_path
);
661 STRV_FOREACH(i
, u
->dropin_paths
) {
662 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
664 if (path_extract_directory(*i
, &p
) < 0) /* Get the drop-in directory from the drop-in file */
667 if (path_extract_directory(p
, &pp
) < 0) /* Get the config directory from the drop-in directory */
670 /* Only drop transient drop-ins */
671 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
679 static void unit_free_mounts_for(Unit
*u
) {
682 for (UnitMountDependencyType t
= 0; t
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
; ++t
) {
684 _cleanup_free_
char *path
= NULL
;
686 path
= hashmap_steal_first_key(u
->mounts_for
[t
]);
690 char s
[strlen(path
) + 1];
692 PATH_FOREACH_PREFIX_MORE(s
, path
) {
696 x
= hashmap_get2(u
->manager
->units_needing_mounts_for
[t
], s
, (void**) &y
);
700 (void) set_remove(x
, u
);
702 if (set_isempty(x
)) {
703 assert_se(hashmap_remove(u
->manager
->units_needing_mounts_for
[t
], y
));
710 u
->mounts_for
[t
] = hashmap_free(u
->mounts_for
[t
]);
714 static void unit_done(Unit
*u
) {
723 if (UNIT_VTABLE(u
)->done
)
724 UNIT_VTABLE(u
)->done(u
);
726 ec
= unit_get_exec_context(u
);
728 exec_context_done(ec
);
730 cc
= unit_get_cgroup_context(u
);
732 cgroup_context_done(cc
);
735 Unit
* unit_free(Unit
*u
) {
742 sd_event_source_disable_unref(u
->auto_start_stop_event_source
);
744 u
->transient_file
= safe_fclose(u
->transient_file
);
746 if (!MANAGER_IS_RELOADING(u
->manager
))
747 unit_remove_transient(u
);
749 bus_unit_send_removed_signal(u
);
753 unit_dequeue_rewatch_pids(u
);
755 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
756 u
->bus_track
= sd_bus_track_unref(u
->bus_track
);
757 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
758 u
->pending_freezer_invocation
= sd_bus_message_unref(u
->pending_freezer_invocation
);
760 unit_free_mounts_for(u
);
762 SET_FOREACH(t
, u
->aliases
)
763 hashmap_remove_value(u
->manager
->units
, t
, u
);
765 hashmap_remove_value(u
->manager
->units
, u
->id
, u
);
767 if (!sd_id128_is_null(u
->invocation_id
))
768 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
782 /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
783 * detach the unit from slice tree in order to eliminate its effect on controller masks. */
784 slice
= UNIT_GET_SLICE(u
);
785 unit_clear_dependencies(u
);
787 unit_add_family_to_cgroup_realize_queue(slice
);
790 manager_unref_console(u
->manager
);
792 unit_release_cgroup(u
);
794 if (!MANAGER_IS_RELOADING(u
->manager
))
795 unit_unlink_state_files(u
);
797 unit_unref_uid_gid(u
, false);
799 (void) manager_update_failed_units(u
->manager
, u
, false);
800 set_remove(u
->manager
->startup_units
, u
);
802 unit_unwatch_all_pids(u
);
804 while (u
->refs_by_target
)
805 unit_ref_unset(u
->refs_by_target
);
807 if (u
->type
!= _UNIT_TYPE_INVALID
)
808 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
810 if (u
->in_load_queue
)
811 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
813 if (u
->in_dbus_queue
)
814 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
816 if (u
->in_cleanup_queue
)
817 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
820 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
822 if (u
->in_cgroup_realize_queue
)
823 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
825 if (u
->in_cgroup_empty_queue
)
826 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
828 if (u
->in_cgroup_oom_queue
)
829 LIST_REMOVE(cgroup_oom_queue
, u
->manager
->cgroup_oom_queue
, u
);
831 if (u
->in_target_deps_queue
)
832 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
834 if (u
->in_stop_when_unneeded_queue
)
835 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
837 if (u
->in_start_when_upheld_queue
)
838 LIST_REMOVE(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
840 if (u
->in_stop_when_bound_queue
)
841 LIST_REMOVE(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
843 if (u
->in_release_resources_queue
)
844 LIST_REMOVE(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
846 bpf_firewall_close(u
);
848 condition_free_list(u
->conditions
);
849 condition_free_list(u
->asserts
);
851 free(u
->description
);
852 strv_free(u
->documentation
);
853 free(u
->fragment_path
);
854 free(u
->source_path
);
855 strv_free(u
->dropin_paths
);
858 free(u
->job_timeout_reboot_arg
);
861 free(u
->access_selinux_context
);
863 set_free_free(u
->aliases
);
866 activation_details_unref(u
->activation_details
);
871 FreezerState
unit_freezer_state(Unit
*u
) {
874 return u
->freezer_state
;
877 UnitActiveState
unit_active_state(Unit
*u
) {
880 if (u
->load_state
== UNIT_MERGED
)
881 return unit_active_state(unit_follow_merge(u
));
883 /* After a reload it might happen that a unit is not correctly
884 * loaded but still has a process around. That's why we won't
885 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
887 return UNIT_VTABLE(u
)->active_state(u
);
890 const char* unit_sub_state_to_string(Unit
*u
) {
893 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
896 static int unit_merge_names(Unit
*u
, Unit
*other
) {
903 r
= unit_add_alias(u
, other
->id
);
907 r
= set_move(u
->aliases
, other
->aliases
);
909 set_remove(u
->aliases
, other
->id
);
914 other
->aliases
= set_free_free(other
->aliases
);
916 SET_FOREACH(name
, u
->aliases
)
917 assert_se(hashmap_replace(u
->manager
->units
, name
, u
) == 0);
922 static int unit_reserve_dependencies(Unit
*u
, Unit
*other
) {
931 /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
934 * First make some room in the per dependency type hashmaps. Using the summed size of both units'
935 * hashmaps is an estimate that is likely too high since they probably use some of the same
936 * types. But it's never too low, and that's all we need. */
938 n_reserve
= MIN(hashmap_size(other
->dependencies
), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX
, hashmap_size(u
->dependencies
)));
940 r
= hashmap_ensure_allocated(&u
->dependencies
, NULL
);
944 r
= hashmap_reserve(u
->dependencies
, n_reserve
);
949 /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
950 * other unit's dependencies.
952 * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
953 * reserve anything for. In that case other's set will be transferred as a whole to u by
954 * complete_move(). */
956 HASHMAP_FOREACH_KEY(deps
, d
, u
->dependencies
) {
959 other_deps
= hashmap_get(other
->dependencies
, d
);
961 r
= hashmap_reserve(deps
, hashmap_size(other_deps
));
969 static bool unit_should_warn_about_dependency(UnitDependency dependency
) {
970 /* Only warn about some unit types */
971 return IN_SET(dependency
,
982 static int unit_per_dependency_type_hashmap_update(
985 UnitDependencyMask origin_mask
,
986 UnitDependencyMask destination_mask
) {
988 UnitDependencyInfo info
;
992 assert_cc(sizeof(void*) == sizeof(info
));
994 /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
995 * exists, or insert it anew if not. */
997 info
.data
= hashmap_get(per_type
, other
);
999 /* Entry already exists. Add in our mask. */
1001 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
1002 FLAGS_SET(destination_mask
, info
.destination_mask
))
1005 info
.origin_mask
|= origin_mask
;
1006 info
.destination_mask
|= destination_mask
;
1008 r
= hashmap_update(per_type
, other
, info
.data
);
1010 info
= (UnitDependencyInfo
) {
1011 .origin_mask
= origin_mask
,
1012 .destination_mask
= destination_mask
,
1015 r
= hashmap_put(per_type
, other
, info
.data
);
1023 static void unit_merge_dependencies(Unit
*u
, Unit
*other
) {
1025 void *dt
; /* Actually of type UnitDependency, except that we don't bother casting it here,
1026 * since the hashmaps all want it as void pointer. */
1034 /* First, remove dependency to other. */
1035 HASHMAP_FOREACH_KEY(deps
, dt
, u
->dependencies
) {
1036 if (hashmap_remove(deps
, other
) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1037 log_unit_warning(u
, "Dependency %s=%s is dropped, as %s is merged into %s.",
1038 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1039 other
->id
, other
->id
, u
->id
);
1041 if (hashmap_isempty(deps
))
1042 hashmap_free(hashmap_remove(u
->dependencies
, dt
));
1046 _cleanup_hashmap_free_ Hashmap
*other_deps
= NULL
;
1047 UnitDependencyInfo di_back
;
1050 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1051 other_deps
= hashmap_steal_first_key_and_value(other
->dependencies
, &dt
);
1055 deps
= hashmap_get(u
->dependencies
, dt
);
1057 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1058 * referenced units as 'back'. */
1059 HASHMAP_FOREACH_KEY(di_back
.data
, back
, other_deps
) {
1064 /* This is a dependency pointing back to the unit we want to merge with?
1065 * Suppress it (but warn) */
1066 if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1067 log_unit_warning(u
, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
1068 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1069 u
->id
, other
->id
, other
->id
, u
->id
);
1071 hashmap_remove(other_deps
, back
);
1075 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1076 * point to 'u' instead. */
1077 HASHMAP_FOREACH_KEY(back_deps
, back_dt
, back
->dependencies
) {
1078 UnitDependencyInfo di_move
;
1080 di_move
.data
= hashmap_remove(back_deps
, other
);
1084 assert_se(unit_per_dependency_type_hashmap_update(
1087 di_move
.origin_mask
,
1088 di_move
.destination_mask
) >= 0);
1091 /* The target unit already has dependencies of this type, let's then merge this individually. */
1093 assert_se(unit_per_dependency_type_hashmap_update(
1096 di_back
.origin_mask
,
1097 di_back
.destination_mask
) >= 0);
1100 /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
1101 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
1102 * dependencies of this type, let's move them per type wholesale. */
1104 assert_se(hashmap_put(u
->dependencies
, dt
, TAKE_PTR(other_deps
)) >= 0);
1107 other
->dependencies
= hashmap_free(other
->dependencies
);
1110 int unit_merge(Unit
*u
, Unit
*other
) {
1115 assert(u
->manager
== other
->manager
);
1116 assert(u
->type
!= _UNIT_TYPE_INVALID
);
1118 other
= unit_follow_merge(other
);
1123 if (u
->type
!= other
->type
)
1126 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
1129 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
1132 if (!streq_ptr(u
->instance
, other
->instance
))
1141 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1144 /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
1145 * fail. We don't have a way to undo reservations. A reservation is not a leak. */
1146 r
= unit_reserve_dependencies(u
, other
);
1150 /* Redirect all references */
1151 while (other
->refs_by_target
)
1152 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
1154 /* Merge dependencies */
1155 unit_merge_dependencies(u
, other
);
1157 /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
1158 r
= unit_merge_names(u
, other
);
1162 other
->load_state
= UNIT_MERGED
;
1163 other
->merged_into
= u
;
1165 if (!u
->activation_details
)
1166 u
->activation_details
= activation_details_ref(other
->activation_details
);
1168 /* If there is still some data attached to the other node, we
1169 * don't need it anymore, and can free it. */
1170 if (other
->load_state
!= UNIT_STUB
)
1171 if (UNIT_VTABLE(other
)->done
)
1172 UNIT_VTABLE(other
)->done(other
);
1174 unit_add_to_dbus_queue(u
);
1175 unit_add_to_cleanup_queue(other
);
1180 int unit_merge_by_name(Unit
*u
, const char *name
) {
1181 _cleanup_free_
char *s
= NULL
;
1185 /* Either add name to u, or if a unit with name already exists, merge it with u.
1186 * If name is a template, do the same for name@instance, where instance is u's instance. */
1191 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
1195 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
1202 other
= manager_get_unit(u
->manager
, name
);
1204 return unit_merge(u
, other
);
1206 return unit_add_name(u
, name
);
1209 Unit
* unit_follow_merge(Unit
*u
) {
1212 while (u
->load_state
== UNIT_MERGED
)
1213 assert_se(u
= u
->merged_into
);
1218 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
1224 /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */
1226 if (c
->working_directory
) {
1227 r
= unit_add_mounts_for(
1229 c
->working_directory
,
1230 UNIT_DEPENDENCY_FILE
,
1231 c
->working_directory_missing_ok
? UNIT_MOUNT_WANTS
: UNIT_MOUNT_REQUIRES
);
1236 if (c
->root_directory
) {
1237 r
= unit_add_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1242 if (c
->root_image
) {
1243 r
= unit_add_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1248 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
1249 if (!u
->manager
->prefix
[dt
])
1252 FOREACH_ARRAY(i
, c
->directories
[dt
].items
, c
->directories
[dt
].n_items
) {
1253 _cleanup_free_
char *p
= NULL
;
1255 p
= path_join(u
->manager
->prefix
[dt
], i
->path
);
1259 r
= unit_add_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_REQUIRES
);
1265 if (!MANAGER_IS_SYSTEM(u
->manager
))
1268 /* For the following three directory types we need write access, and /var/ is possibly on the root
1269 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1270 if (c
->directories
[EXEC_DIRECTORY_STATE
].n_items
> 0 ||
1271 c
->directories
[EXEC_DIRECTORY_CACHE
].n_items
> 0 ||
1272 c
->directories
[EXEC_DIRECTORY_LOGS
].n_items
> 0) {
1273 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_REMOUNT_FS_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1278 if (c
->private_tmp
) {
1279 r
= unit_add_mounts_for(u
, "/tmp", UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1283 r
= unit_add_mounts_for(u
, "/var/tmp", UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1287 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1292 if (c
->root_image
) {
1293 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1294 * implicit dependency on udev */
1296 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_UDEVD_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1301 if (!IN_SET(c
->std_output
,
1302 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1303 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1304 !IN_SET(c
->std_error
,
1305 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1306 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1310 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1313 if (c
->log_namespace
) {
1314 _cleanup_free_
char *socket_unit
= NULL
, *varlink_socket_unit
= NULL
;
1316 r
= unit_name_build_from_type("systemd-journald", c
->log_namespace
, UNIT_SOCKET
, &socket_unit
);
1320 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1324 r
= unit_name_build_from_type("systemd-journald-varlink", c
->log_namespace
, UNIT_SOCKET
, &varlink_socket_unit
);
1328 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, varlink_socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1332 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1337 r
= unit_add_default_credential_dependencies(u
, c
);
1344 const char* unit_description(Unit
*u
) {
1348 return u
->description
;
1350 return strna(u
->id
);
1353 const char* unit_status_string(Unit
*u
, char **ret_combined_buffer
) {
1357 /* Return u->id, u->description, or "{u->id} - {u->description}".
1358 * Versions with u->description are only used if it is set.
1359 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1362 * Note that *ret_combined_buffer may be set to NULL. */
1364 if (!u
->description
||
1365 u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_NAME
||
1366 (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& !ret_combined_buffer
) ||
1367 streq(u
->description
, u
->id
)) {
1369 if (ret_combined_buffer
)
1370 *ret_combined_buffer
= NULL
;
1374 if (ret_combined_buffer
) {
1375 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
) {
1376 *ret_combined_buffer
= strjoin(u
->id
, " - ", u
->description
);
1377 if (*ret_combined_buffer
)
1378 return *ret_combined_buffer
;
1379 log_oom(); /* Fall back to ->description */
1381 *ret_combined_buffer
= NULL
;
1384 return u
->description
;
1387 /* Common implementation for multiple backends */
1388 int unit_load_fragment_and_dropin(Unit
*u
, bool fragment_required
) {
1393 /* Load a .{service,socket,...} file */
1394 r
= unit_load_fragment(u
);
1398 if (u
->load_state
== UNIT_STUB
) {
1399 if (fragment_required
)
1402 u
->load_state
= UNIT_LOADED
;
1405 /* Load drop-in directory data. If u is an alias, we might be reloading the
1406 * target unit needlessly. But we cannot be sure which drops-ins have already
1407 * been loaded and which not, at least without doing complicated book-keeping,
1408 * so let's always reread all drop-ins. */
1409 r
= unit_load_dropin(unit_follow_merge(u
));
1413 if (u
->source_path
) {
1416 if (stat(u
->source_path
, &st
) >= 0)
1417 u
->source_mtime
= timespec_load(&st
.st_mtim
);
1419 u
->source_mtime
= 0;
1425 void unit_add_to_target_deps_queue(Unit
*u
) {
1426 Manager
*m
= ASSERT_PTR(ASSERT_PTR(u
)->manager
);
1428 if (u
->in_target_deps_queue
)
1431 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1432 u
->in_target_deps_queue
= true;
1435 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1439 if (target
->type
!= UNIT_TARGET
)
1442 /* Only add the dependency if both units are loaded, so that
1443 * that loop check below is reliable */
1444 if (u
->load_state
!= UNIT_LOADED
||
1445 target
->load_state
!= UNIT_LOADED
)
1448 /* If either side wants no automatic dependencies, then let's
1450 if (!u
->default_dependencies
||
1451 !target
->default_dependencies
)
1454 /* Don't create loops */
1455 if (unit_has_dependency(target
, UNIT_ATOM_BEFORE
, u
))
1458 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1461 static int unit_add_slice_dependencies(Unit
*u
) {
1465 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1468 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1469 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1471 UnitDependencyMask mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1473 slice
= UNIT_GET_SLICE(u
);
1475 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, slice
, true, mask
);
1477 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1480 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1483 static int unit_add_mount_dependencies(Unit
*u
) {
1484 bool changed
= false;
1489 for (UnitMountDependencyType t
= 0; t
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
; ++t
) {
1490 UnitDependencyInfo di
;
1493 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->mounts_for
[t
]) {
1495 char prefix
[strlen(ASSERT_PTR(path
)) + 1];
1497 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1498 _cleanup_free_
char *p
= NULL
;
1501 r
= unit_name_from_path(prefix
, ".mount", &p
);
1503 continue; /* If the path cannot be converted to a mount unit name,
1504 * then it's not manageable as a unit by systemd, and
1505 * hence we don't need a dependency on it. Let's thus
1506 * silently ignore the issue. */
1510 m
= manager_get_unit(u
->manager
, p
);
1512 /* Make sure to load the mount unit if it exists. If so the
1513 * dependencies on this unit will be added later during the loading
1514 * of the mount unit. */
1515 (void) manager_load_unit_prepare(
1526 if (m
->load_state
!= UNIT_LOADED
)
1529 r
= unit_add_dependency(
1533 /* add_reference= */ true,
1537 changed
= changed
|| r
> 0;
1539 if (m
->fragment_path
) {
1540 r
= unit_add_dependency(
1542 unit_mount_dependency_type_to_dependency_type(t
),
1544 /* add_reference= */ true,
1548 changed
= changed
|| r
> 0;
1557 static int unit_add_oomd_dependencies(Unit
*u
) {
1564 if (!u
->default_dependencies
)
1567 c
= unit_get_cgroup_context(u
);
1571 bool wants_oomd
= c
->moom_swap
== MANAGED_OOM_KILL
|| c
->moom_mem_pressure
== MANAGED_OOM_KILL
;
1575 if (!cg_all_unified())
1578 r
= cg_mask_supported(&mask
);
1580 return log_debug_errno(r
, "Failed to determine supported controllers: %m");
1582 if (!FLAGS_SET(mask
, CGROUP_MASK_MEMORY
))
1585 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE
);
1588 static int unit_add_startup_units(Unit
*u
) {
1589 if (!unit_has_startup_cgroup_constraints(u
))
1592 return set_ensure_put(&u
->manager
->startup_units
, NULL
, u
);
1595 static int unit_validate_on_failure_job_mode(
1597 const char *job_mode_setting
,
1599 const char *dependency_name
,
1600 UnitDependencyAtom atom
) {
1602 Unit
*other
, *found
= NULL
;
1604 if (job_mode
!= JOB_ISOLATE
)
1607 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
1610 else if (found
!= other
)
1611 return log_unit_error_errno(
1612 u
, SYNTHETIC_ERRNO(ENOEXEC
),
1613 "More than one %s dependencies specified but %sisolate set. Refusing.",
1614 dependency_name
, job_mode_setting
);
1620 int unit_load(Unit
*u
) {
1625 if (u
->in_load_queue
) {
1626 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1627 u
->in_load_queue
= false;
1630 if (u
->type
== _UNIT_TYPE_INVALID
)
1633 if (u
->load_state
!= UNIT_STUB
)
1636 if (u
->transient_file
) {
1637 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1638 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1640 r
= fflush_and_check(u
->transient_file
);
1644 u
->transient_file
= safe_fclose(u
->transient_file
);
1645 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1648 r
= UNIT_VTABLE(u
)->load(u
);
1652 assert(u
->load_state
!= UNIT_STUB
);
1654 if (u
->load_state
== UNIT_LOADED
) {
1655 unit_add_to_target_deps_queue(u
);
1657 r
= unit_add_slice_dependencies(u
);
1661 r
= unit_add_mount_dependencies(u
);
1665 r
= unit_add_oomd_dependencies(u
);
1669 r
= unit_add_startup_units(u
);
1673 r
= unit_validate_on_failure_job_mode(u
, "OnSuccessJobMode=", u
->on_success_job_mode
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
);
1677 r
= unit_validate_on_failure_job_mode(u
, "OnFailureJobMode=", u
->on_failure_job_mode
, "OnFailure=", UNIT_ATOM_ON_FAILURE
);
1681 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1682 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1684 /* We finished loading, let's ensure our parents recalculate the members mask */
1685 unit_invalidate_cgroup_members_masks(u
);
1688 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1690 unit_add_to_dbus_queue(unit_follow_merge(u
));
1691 unit_add_to_gc_queue(u
);
1692 (void) manager_varlink_send_managed_oom_update(u
);
1697 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
1698 * should hence return ENOEXEC to ensure units are placed in this state after loading. */
1700 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1701 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1705 /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
1706 * an attempt is made to load this unit, we know we need to check again. */
1707 if (u
->load_state
== UNIT_NOT_FOUND
)
1708 u
->fragment_not_found_timestamp_hash
= u
->manager
->unit_cache_timestamp_hash
;
1710 unit_add_to_dbus_queue(u
);
1711 unit_add_to_gc_queue(u
);
1713 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1717 static int log_unit_internal(void *userdata
, int level
, int error
, const char *file
, int line
, const char *func
, const char *format
, ...) {
1722 if (u
&& !unit_log_level_test(u
, level
))
1723 return -ERRNO_VALUE(error
);
1725 va_start(ap
, format
);
1727 r
= log_object_internalv(level
, error
, file
, line
, func
,
1728 u
->manager
->unit_log_field
,
1730 u
->manager
->invocation_log_field
,
1731 u
->invocation_id_string
,
1734 r
= log_internalv(level
, error
, file
, line
, func
, format
, ap
);
1740 static bool unit_test_condition(Unit
*u
) {
1741 _cleanup_strv_free_
char **env
= NULL
;
1746 dual_timestamp_now(&u
->condition_timestamp
);
1748 r
= manager_get_effective_environment(u
->manager
, &env
);
1750 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1751 u
->condition_result
= true;
1753 u
->condition_result
= condition_test_list(
1756 condition_type_to_string
,
1760 unit_add_to_dbus_queue(u
);
1761 return u
->condition_result
;
1764 static bool unit_test_assert(Unit
*u
) {
1765 _cleanup_strv_free_
char **env
= NULL
;
1770 dual_timestamp_now(&u
->assert_timestamp
);
1772 r
= manager_get_effective_environment(u
->manager
, &env
);
1774 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1775 u
->assert_result
= CONDITION_ERROR
;
1777 u
->assert_result
= condition_test_list(
1780 assert_type_to_string
,
1784 unit_add_to_dbus_queue(u
);
1785 return u
->assert_result
;
1788 void unit_status_printf(Unit
*u
, StatusType status_type
, const char *status
, const char *format
, const char *ident
) {
1789 if (log_get_show_color()) {
1790 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& strchr(ident
, ' '))
1791 ident
= strjoina(ANSI_HIGHLIGHT
, u
->id
, ANSI_NORMAL
, " - ", u
->description
);
1793 ident
= strjoina(ANSI_HIGHLIGHT
, ident
, ANSI_NORMAL
);
1796 DISABLE_WARNING_FORMAT_NONLITERAL
;
1797 manager_status_printf(u
->manager
, status_type
, status
, format
, ident
);
1801 int unit_test_start_limit(Unit
*u
) {
1806 if (ratelimit_below(&u
->start_ratelimit
)) {
1807 u
->start_limit_hit
= false;
1811 log_unit_warning(u
, "Start request repeated too quickly.");
1812 u
->start_limit_hit
= true;
1814 reason
= strjoina("unit ", u
->id
, " failed");
1816 emergency_action(u
->manager
, u
->start_limit_action
,
1817 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1818 u
->reboot_arg
, -1, reason
);
1823 static bool unit_verify_deps(Unit
*u
) {
1828 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1829 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1830 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1831 * that are not used in conjunction with After= as for them any such check would make things entirely
1834 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
1836 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
))
1839 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1840 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1848 /* Errors that aren't really errors:
1849 * -EALREADY: Unit is already started.
1850 * -ECOMM: Condition failed
1851 * -EAGAIN: An operation is already in progress. Retry later.
1853 * Errors that are real errors:
1854 * -EBADR: This unit type does not support starting.
1855 * -ECANCELED: Start limit hit, too many requests for now
1856 * -EPROTO: Assert failed
1857 * -EINVAL: Unit not loaded
1858 * -EOPNOTSUPP: Unit type not supported
1859 * -ENOLINK: The necessary dependencies are not fulfilled.
1860 * -ESTALE: This unit has been started before and can't be started a second time
1861 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1863 int unit_start(Unit
*u
, ActivationDetails
*details
) {
1864 UnitActiveState state
;
1870 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1871 if (UNIT_VTABLE(u
)->subsystem_ratelimited
) {
1872 r
= UNIT_VTABLE(u
)->subsystem_ratelimited(u
->manager
);
1879 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1880 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1881 * waiting is finished. */
1882 state
= unit_active_state(u
);
1883 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1885 if (state
== UNIT_MAINTENANCE
)
1888 /* Units that aren't loaded cannot be started */
1889 if (u
->load_state
!= UNIT_LOADED
)
1892 /* Refuse starting scope units more than once */
1893 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1896 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1897 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1898 * recheck the condition in that case. */
1899 if (state
!= UNIT_ACTIVATING
&&
1900 !unit_test_condition(u
))
1901 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(ECOMM
), "Starting requested but condition not met. Not starting unit.");
1903 /* If the asserts failed, fail the entire job */
1904 if (state
!= UNIT_ACTIVATING
&&
1905 !unit_test_assert(u
))
1906 return log_unit_notice_errno(u
, SYNTHETIC_ERRNO(EPROTO
), "Starting requested but asserts failed.");
1908 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1909 * condition checks, so that we rather return condition check errors (which are usually not
1910 * considered a true failure) than "not supported" errors (which are considered a failure).
1912 if (!unit_type_supported(u
->type
))
1915 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1916 * should have taken care of this already, but let's check this here again. After all, our
1917 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1918 if (!unit_verify_deps(u
))
1921 /* Forward to the main object, if we aren't it. */
1922 following
= unit_following(u
);
1924 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1925 return unit_start(following
, details
);
1928 /* Check to make sure the unit isn't frozen */
1929 if (u
->freezer_state
!= FREEZER_RUNNING
)
1932 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1933 if (UNIT_VTABLE(u
)->can_start
) {
1934 r
= UNIT_VTABLE(u
)->can_start(u
);
1939 /* If it is stopped, but we cannot start it, then fail */
1940 if (!UNIT_VTABLE(u
)->start
)
1943 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1944 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1945 * waits for a holdoff timer to elapse before it will start again. */
1947 unit_add_to_dbus_queue(u
);
1949 if (!u
->activation_details
) /* Older details object wins */
1950 u
->activation_details
= activation_details_ref(details
);
1952 return UNIT_VTABLE(u
)->start(u
);
1955 bool unit_can_start(Unit
*u
) {
1958 if (u
->load_state
!= UNIT_LOADED
)
1961 if (!unit_type_supported(u
->type
))
1964 /* Scope units may be started only once */
1965 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1968 return !!UNIT_VTABLE(u
)->start
;
1971 bool unit_can_isolate(Unit
*u
) {
1974 return unit_can_start(u
) &&
1979 * -EBADR: This unit type does not support stopping.
1980 * -EALREADY: Unit is already stopped.
1981 * -EAGAIN: An operation is already in progress. Retry later.
1982 * -EDEADLK: Unit is frozen
1984 int unit_stop(Unit
*u
) {
1985 UnitActiveState state
;
1990 state
= unit_active_state(u
);
1991 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1994 following
= unit_following(u
);
1996 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1997 return unit_stop(following
);
2000 /* Check to make sure the unit isn't frozen */
2001 if (u
->freezer_state
!= FREEZER_RUNNING
)
2004 if (!UNIT_VTABLE(u
)->stop
)
2007 unit_add_to_dbus_queue(u
);
2009 return UNIT_VTABLE(u
)->stop(u
);
2012 bool unit_can_stop(Unit
*u
) {
2015 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2016 * Extrinsic units follow external state and they may stop following external state changes
2017 * (hence we return true here), but an attempt to do this through the manager will fail. */
2019 if (!unit_type_supported(u
->type
))
2025 return !!UNIT_VTABLE(u
)->stop
;
2029 * -EBADR: This unit type does not support reloading.
2030 * -ENOEXEC: Unit is not started.
2031 * -EAGAIN: An operation is already in progress. Retry later.
2032 * -EDEADLK: Unit is frozen.
2034 int unit_reload(Unit
*u
) {
2035 UnitActiveState state
;
2040 if (u
->load_state
!= UNIT_LOADED
)
2043 if (!unit_can_reload(u
))
2046 state
= unit_active_state(u
);
2047 if (state
== UNIT_RELOADING
)
2050 if (state
!= UNIT_ACTIVE
)
2051 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(ENOEXEC
), "Unit cannot be reloaded because it is inactive.");
2053 following
= unit_following(u
);
2055 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
2056 return unit_reload(following
);
2059 /* Check to make sure the unit isn't frozen */
2060 if (u
->freezer_state
!= FREEZER_RUNNING
)
2063 unit_add_to_dbus_queue(u
);
2065 if (!UNIT_VTABLE(u
)->reload
) {
2066 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2067 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), /* reload_success = */ true);
2071 return UNIT_VTABLE(u
)->reload(u
);
2074 bool unit_can_reload(Unit
*u
) {
2077 if (UNIT_VTABLE(u
)->can_reload
)
2078 return UNIT_VTABLE(u
)->can_reload(u
);
2080 if (unit_has_dependency(u
, UNIT_ATOM_PROPAGATES_RELOAD_TO
, NULL
))
2083 return UNIT_VTABLE(u
)->reload
;
2086 bool unit_is_unneeded(Unit
*u
) {
2090 if (!u
->stop_when_unneeded
)
2093 /* Don't clean up while the unit is transitioning or is even inactive. */
2094 if (unit_active_state(u
) != UNIT_ACTIVE
)
2099 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED
) {
2100 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2101 * restart, then don't clean this one up. */
2106 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2109 if (unit_will_restart(other
))
2116 bool unit_is_upheld_by_active(Unit
*u
, Unit
**ret_culprit
) {
2121 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2122 * that is active declared an Uphold= dependencies on it */
2124 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)) || u
->job
) {
2126 *ret_culprit
= NULL
;
2130 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_START_STEADILY
) {
2134 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
2136 *ret_culprit
= other
;
2142 *ret_culprit
= NULL
;
2146 bool unit_is_bound_by_inactive(Unit
*u
, Unit
**ret_culprit
) {
2151 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2152 * because the other unit is down. */
2154 if (unit_active_state(u
) != UNIT_ACTIVE
|| u
->job
) {
2155 /* Don't clean up while the unit is transitioning or is even inactive. */
2157 *ret_culprit
= NULL
;
2161 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
2165 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
))) {
2167 *ret_culprit
= other
;
2174 *ret_culprit
= NULL
;
2178 static void check_unneeded_dependencies(Unit
*u
) {
2182 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2184 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE
)
2185 unit_submit_to_stop_when_unneeded_queue(other
);
2188 static void check_uphold_dependencies(Unit
*u
) {
2192 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2194 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE
)
2195 unit_submit_to_start_when_upheld_queue(other
);
2198 static void check_bound_by_dependencies(Unit
*u
) {
2202 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2204 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE
)
2205 unit_submit_to_stop_when_bound_queue(other
);
2208 static void retroactively_start_dependencies(Unit
*u
) {
2212 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2214 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_REPLACE
) /* Requires= + BindsTo= */
2215 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2216 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2217 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2219 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_FAIL
) /* Wants= */
2220 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2221 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2222 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
, NULL
);
2224 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_START
) /* Conflicts= (and inverse) */
2225 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2226 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2229 static void retroactively_stop_dependencies(Unit
*u
) {
2233 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2235 /* Pull down units which are bound to us recursively if enabled */
2236 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP
) /* BoundBy= */
2237 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2238 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2241 void unit_start_on_failure(
2243 const char *dependency_name
,
2244 UnitDependencyAtom atom
,
2252 assert(dependency_name
);
2253 assert(IN_SET(atom
, UNIT_ATOM_ON_SUCCESS
, UNIT_ATOM_ON_FAILURE
));
2255 /* Act on OnFailure= and OnSuccess= dependencies */
2257 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
2258 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2261 log_unit_info(u
, "Triggering %s dependencies.", dependency_name
);
2265 r
= manager_add_job(u
->manager
, JOB_START
, other
, job_mode
, NULL
, &error
, NULL
);
2267 log_unit_warning_errno(
2268 u
, r
, "Failed to enqueue %s job, ignoring: %s",
2269 dependency_name
, bus_error_message(&error
, r
));
2274 log_unit_debug(u
, "Triggering %s dependencies done (%i %s).",
2275 dependency_name
, n_jobs
, n_jobs
== 1 ? "job" : "jobs");
2278 void unit_trigger_notify(Unit
*u
) {
2283 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_TRIGGERED_BY
)
2284 if (UNIT_VTABLE(other
)->trigger_notify
)
2285 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
/* Raises (numerically lowers) a syslog level to at most NOTICE or INFO when the respective
 * threshold condition holds; returns the level unchanged when it is already as severe. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice && log_level > LOG_NOTICE)
                return LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                return LOG_INFO;
        return log_level;
}
/* Log a structured journal record summarizing the resources a unit consumed (CPU time, memory
 * peaks, IO bytes/ops, IP bytes/packets), plus a human-readable MESSAGE= string. The log level
 * starts at LOG_DEBUG and is raised via raise_level() when any metric crosses the
 * MENTIONWORTHY_*/NOTICEWORTHY_* thresholds. NOTE(review): many interior lines (error returns,
 * 'continue' statements, n_iovec declaration, closing braces) were dropped by extraction;
 * comments below only describe what the visible fragments establish. */
2296 static int unit_log_resources(Unit
*u
) {
2298 static const struct {
2299 const char *journal_field
;
2300 const char *message_suffix
;
2301 } memory_fields
[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
+ 1] = {
2302 [CGROUP_MEMORY_PEAK
] = { "MEMORY_PEAK", "memory peak" },
2303 [CGROUP_MEMORY_SWAP_PEAK
] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
2304 }, ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2305 [CGROUP_IP_INGRESS_BYTES
] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
2306 [CGROUP_IP_EGRESS_BYTES
] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
2307 [CGROUP_IP_INGRESS_PACKETS
] = { "IP_METRIC_INGRESS_PACKETS", NULL
},
2308 [CGROUP_IP_EGRESS_PACKETS
] = { "IP_METRIC_EGRESS_PACKETS", NULL
},
2309 }, io_fields
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
2310 [CGROUP_IO_READ_BYTES
] = { "IO_METRIC_READ_BYTES", "read from disk" },
2311 [CGROUP_IO_WRITE_BYTES
] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
2312 [CGROUP_IO_READ_OPERATIONS
] = { "IO_METRIC_READ_OPERATIONS", NULL
},
2313 [CGROUP_IO_WRITE_OPERATIONS
] = { "IO_METRIC_WRITE_OPERATIONS", NULL
},
/* Working state: iovec array of journal fields (freed by CLEANUP_ARRAY), the human-readable
 * message, a scratch string 't', and the initial (lowest) log level. */
2316 struct iovec
*iovec
= NULL
;
2318 _cleanup_free_
char *message
= NULL
, *t
= NULL
;
2319 nsec_t cpu_nsec
= NSEC_INFINITY
;
2320 int log_level
= LOG_DEBUG
; /* May be raised if resources consumed over a threshold */
2324 CLEANUP_ARRAY(iovec
, n_iovec
, iovec_array_free
);
/* Sized for: CPU field + memory metrics + IP metrics + IO metrics + 4 fixed fields
 * (MESSAGE=, MESSAGE_ID=, unit field, invocation-id field) appended at the end. */
2326 iovec
= new(struct iovec
, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
+ 1) +
2327 _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ _CGROUP_IO_ACCOUNTING_METRIC_MAX
+ 4);
2331 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2332 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2333 * information and the complete data in structured fields. */
2335 (void) unit_get_cpu_usage(u
, &cpu_nsec
);
2336 if (cpu_nsec
!= NSEC_INFINITY
) {
2337 /* Format the CPU time for inclusion in the structured log message */
2338 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, cpu_nsec
) < 0)
2340 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2342 /* Format the CPU time for inclusion in the human language message string */
2343 if (strextendf_with_separator(&message
, ", ",
2344 "Consumed %s CPU time",
2345 FORMAT_TIMESPAN(cpu_nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
)) < 0)
2348 log_level
= raise_level(log_level
,
2349 cpu_nsec
> MENTIONWORTHY_CPU_NSEC
,
2350 cpu_nsec
> NOTICEWORTHY_CPU_NSEC
);
/* Memory accounting: one journal field and message fragment per metric with a known value. */
2353 for (CGroupMemoryAccountingMetric metric
= 0; metric
<= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
; metric
++) {
2354 uint64_t value
= UINT64_MAX
;
2356 assert(memory_fields
[metric
].journal_field
);
2357 assert(memory_fields
[metric
].message_suffix
);
2359 (void) unit_get_memory_accounting(u
, metric
, &value
);
2360 if (value
== UINT64_MAX
)
2363 if (asprintf(&t
, "%s=%" PRIu64
, memory_fields
[metric
].journal_field
, value
) < 0)
2365 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2367 /* If value is 0, we don't log it in the MESSAGE= field. */
2371 if (strextendf_with_separator(&message
, ", ", "%s %s",
2372 FORMAT_BYTES(value
), memory_fields
[metric
].message_suffix
) < 0)
2375 log_level
= raise_level(log_level
,
2376 value
> MENTIONWORTHY_MEMORY_BYTES
,
2377 value
> NOTICEWORTHY_MEMORY_BYTES
);
/* IO accounting: journal fields for all metrics, message text only for byte counters. */
2380 for (CGroupIOAccountingMetric k
= 0; k
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; k
++) {
2381 uint64_t value
= UINT64_MAX
;
2383 assert(io_fields
[k
].journal_field
);
2385 (void) unit_get_io_accounting(u
, k
, k
> 0, &value
);
2386 if (value
== UINT64_MAX
)
2389 /* Format IO accounting data for inclusion in the structured log message */
2390 if (asprintf(&t
, "%s=%" PRIu64
, io_fields
[k
].journal_field
, value
) < 0)
2392 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2394 /* If value is 0, we don't log it in the MESSAGE= field. */
2398 /* Format the IO accounting data for inclusion in the human language message string, but only
2399 * for the bytes counters (and not for the operations counters) */
2400 if (io_fields
[k
].message_suffix
) {
2401 if (strextendf_with_separator(&message
, ", ", "%s %s",
2402 FORMAT_BYTES(value
), io_fields
[k
].message_suffix
) < 0)
2405 log_level
= raise_level(log_level
,
2406 value
> MENTIONWORTHY_IO_BYTES
,
2407 value
> NOTICEWORTHY_IO_BYTES
);
/* IP accounting: same pattern — fields for all metrics, message text only for byte counters. */
2411 for (CGroupIPAccountingMetric m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2412 uint64_t value
= UINT64_MAX
;
2414 assert(ip_fields
[m
].journal_field
);
2416 (void) unit_get_ip_accounting(u
, m
, &value
);
2417 if (value
== UINT64_MAX
)
2420 /* Format IP accounting data for inclusion in the structured log message */
2421 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
].journal_field
, value
) < 0)
2423 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2425 /* If value is 0, we don't log it in the MESSAGE= field. */
2429 /* Format the IP accounting data for inclusion in the human language message string, but only
2430 * for the bytes counters (and not for the packets counters) */
2431 if (ip_fields
[m
].message_suffix
) {
2432 if (strextendf_with_separator(&message
, ", ", "%s %s",
2433 FORMAT_BYTES(value
), ip_fields
[m
].message_suffix
) < 0)
2436 log_level
= raise_level(log_level
,
2437 value
> MENTIONWORTHY_IP_BYTES
,
2438 value
> NOTICEWORTHY_IP_BYTES
);
2442 /* This check is here because it is the earliest point following all possible log_level assignments.
2443 * (If log_level is assigned anywhere after this point, move this check.) */
2444 if (!unit_log_level_test(u
, log_level
))
2447 /* Is there any accounting data available at all? */
/* Assemble the fixed trailing fields: MESSAGE=, MESSAGE_ID=, unit name and invocation id. */
2453 t
= strjoin("MESSAGE=", u
->id
, ": ", message
?: "Completed", ".");
2456 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2458 if (!set_iovec_string_field(iovec
, &n_iovec
, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR
))
2461 if (!set_iovec_string_field(iovec
, &n_iovec
, u
->manager
->unit_log_field
, u
->id
))
2464 if (!set_iovec_string_field(iovec
, &n_iovec
, u
->manager
->invocation_log_field
, u
->invocation_id_string
))
2467 log_unit_struct_iovec(u
, log_level
, iovec
, n_iovec
);
/* Keep the manager's console refcount in sync with whether this unit needs the console:
 * no-op if u->on_console already matches, otherwise ref/unref the manager console.
 * NOTE(review): the branch structure and the u->on_console assignment were partially
 * dropped by extraction; confirm against the unmangled file before editing. */
2472 static void unit_update_on_console(Unit
*u
) {
2477 b
= unit_needs_console(u
);
2478 if (u
->on_console
== b
)
2483 manager_ref_console(u
->manager
);
2485 manager_unref_console(u
->manager
);
/* Emit an audit "start" record for the unit, but only for unit types that define an audit
 * start message type (audit_start_message_type > 0). */
2488 static void unit_emit_audit_start(Unit
*u
) {
2491 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2494 /* Write audit record if we have just finished starting up */
2495 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ true);
/* Emit audit "stop" records for a unit entering an inactive/failed state; success is
 * reported iff the new state is UNIT_INACTIVE. If no start record was written earlier
 * (tracked via u->in_audit, which is cleared here) a synthetic start record is emitted
 * first so start/stop records always appear in pairs. NOTE(review): the guard checks
 * audit_start_message_type while the send uses audit_stop_message_type — this mirrors
 * upstream; confirm against the unmangled file before changing. */
2499 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2502 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2506 /* Write audit record if we have just finished shutting down */
2507 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2508 u
->in_audit
= false;
2510 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2511 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2513 if (state
== UNIT_INACTIVE
)
2514 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ true);
/* Reconcile a unit's job 'j' with the unit's new active state 'ns': wake waiting jobs,
 * finish jobs whose goal is met (JOB_DONE) or contradicted (JOB_FAILED), and return
 * whether the state change was "unexpected" (not requested by the job). NOTE(review):
 * extraction dropped the switch header, several case labels, 'unexpected = true'
 * assignments, breaks and the final return — the visible cases below are only part of
 * the original switch; do not edit logic from this view. */
2518 static bool unit_process_job(Job
*j
, UnitActiveState ns
, bool reload_success
) {
2519 bool unexpected
= false;
2524 if (j
->state
== JOB_WAITING
)
2525 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2527 job_add_to_run_queue(j
);
2529 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2530 * hence needs to invalidate jobs. */
2535 case JOB_VERIFY_ACTIVE
:
2537 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2538 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2539 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2542 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2543 if (ns
== UNIT_FAILED
)
2544 result
= JOB_FAILED
;
2548 job_finish_and_invalidate(j
, result
, true, false);
2555 case JOB_RELOAD_OR_START
:
2556 case JOB_TRY_RELOAD
:
2558 if (j
->state
== JOB_RUNNING
) {
2559 if (ns
== UNIT_ACTIVE
)
2560 job_finish_and_invalidate(j
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2561 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2564 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2565 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2573 case JOB_TRY_RESTART
:
2575 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2576 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2577 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2579 job_finish_and_invalidate(j
, JOB_FAILED
, true, false);
2585 assert_not_reached();
/* Central state-change notification: called for every low-level unit state transition
 * (os -> ns, possibly os == ns). Updates D-Bus/oomd/timestamps/failed-unit tracking,
 * prunes cgroup+state files on deactivation, reconciles the pending job, retroactively
 * starts/stops dependencies, fires OnFailure=/OnSuccess=, emits audit/plymouth/
 * supervisor notifications, logs resource consumption, triggers emergency actions, and
 * enqueues the unit into the various deferred-work queues. NOTE(review): extraction
 * dropped interior lines (Manager *m declaration, asserts, flag values, some blank/brace
 * lines); comments below only describe what the visible fragments establish. */
2591 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
2596 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2597 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2599 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2600 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2601 * remounted this function will be called too! */
2605 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2606 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2607 unit_add_to_dbus_queue(u
);
2609 /* Update systemd-oomd on the property/state change */
2611 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2613 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2614 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2615 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2616 * have the information on the property. Thus, indiscriminately send an update. */
2617 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) || UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2618 (void) manager_varlink_send_managed_oom_update(u
);
2621 /* Update timestamps for state changes */
2622 if (!MANAGER_IS_RELOADING(m
)) {
2623 dual_timestamp_now(&u
->state_change_timestamp
);
2625 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2626 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2627 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2628 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2630 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2631 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2632 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2633 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2636 /* Keep track of failed units */
2637 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2639 /* Make sure the cgroup and state files are always removed when we become inactive */
2640 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2641 SET_FLAG(u
->markers
,
2642 (1u << UNIT_MARKER_NEEDS_RELOAD
)|(1u << UNIT_MARKER_NEEDS_RESTART
),
2644 unit_prune_cgroup(u
);
2645 unit_unlink_state_files(u
);
2646 } else if (ns
!= os
&& ns
== UNIT_RELOADING
)
2647 SET_FLAG(u
->markers
, 1u << UNIT_MARKER_NEEDS_RELOAD
, false);
2649 unit_update_on_console(u
);
2651 if (!MANAGER_IS_RELOADING(m
)) {
2654 /* Let's propagate state changes to the job */
2656 unexpected
= unit_process_job(u
->job
, ns
, reload_success
);
2660 /* If this state change happened without being requested by a job, then let's retroactively start or
2661 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2662 * additional jobs just because something is already activated. */
2665 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2666 retroactively_start_dependencies(u
);
2667 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2668 retroactively_stop_dependencies(u
);
2671 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2672 log_unit_debug(u
, "Unit entered failed state.");
2673 unit_start_on_failure(u
, "OnFailure=", UNIT_ATOM_ON_FAILURE
, u
->on_failure_job_mode
);
2676 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2677 /* This unit just finished starting up */
2679 unit_emit_audit_start(u
);
2680 manager_send_unit_plymouth(m
, u
);
2681 manager_send_unit_supervisor(m
, u
, /* active= */ true);
2684 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2685 /* This unit just stopped/failed. */
2687 unit_emit_audit_stop(u
, ns
);
2688 manager_send_unit_supervisor(m
, u
, /* active= */ false);
2689 unit_log_resources(u
);
2692 if (ns
== UNIT_INACTIVE
&& !IN_SET(os
, UNIT_FAILED
, UNIT_INACTIVE
, UNIT_MAINTENANCE
))
2693 unit_start_on_failure(u
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
, u
->on_success_job_mode
);
2696 manager_recheck_journal(m
);
2697 manager_recheck_dbus(m
);
2699 unit_trigger_notify(u
);
2701 if (!MANAGER_IS_RELOADING(m
)) {
2702 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2703 reason
= strjoina("unit ", u
->id
, " failed");
2704 emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2705 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2706 reason
= strjoina("unit ", u
->id
, " succeeded");
2707 emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2711 /* And now, add the unit or depending units to various queues that will act on the new situation if
2712 * needed. These queues generally check for continuous state changes rather than events (like most of
2713 * the state propagation above), and do work deferred instead of instantly, since they typically
2714 * don't want to run during reloading, and usually involve checking combined state of multiple units
2717 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2718 /* Stop unneeded units and bound-by units regardless if going down was expected or not */
2719 check_unneeded_dependencies(u
);
2720 check_bound_by_dependencies(u
);
2722 /* Maybe someone wants us to remain up? */
2723 unit_submit_to_start_when_upheld_queue(u
);
2725 /* Maybe the unit should be GC'ed now? */
2726 unit_add_to_gc_queue(u
);
2728 /* Maybe we can release some resources now? */
2729 unit_submit_to_release_resources_queue(u
);
2732 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2733 /* Start uphold units regardless if going up was expected or not */
2734 check_uphold_dependencies(u
);
2736 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2737 unit_submit_to_stop_when_unneeded_queue(u
);
2739 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2740 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
2741 * inactive, without ever entering started.) */
2742 unit_submit_to_stop_when_bound_queue(u
);
/* Add a PID to the set of PIDs this unit watches. A duplicate PidRef is stored in u->pids;
 * the manager-wide lookup is two-tiered: watch_pids (PidRef -> Unit) for the common case,
 * and watch_pids_more (PidRef -> NULL-terminated Unit* array) when the PID is already
 * claimed by another unit. 'exclusive' lets the caller first drop any stale references to
 * this PID. NOTE(review): error-return lines (r < 0 checks) and some declarations were
 * dropped by extraction; do not edit logic from this view. */
2746 int unit_watch_pidref(Unit
*u
, const PidRef
*pid
, bool exclusive
) {
2747 _cleanup_(pidref_freep
) PidRef
*pid_dup
= NULL
;
2750 /* Adds a specific PID to the set of PIDs this unit watches. */
2753 assert(pidref_is_set(pid
));
2755 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2756 * opportunity to remove any stalled references to this PID as they can be created
2757 * easily (when watching a process which is not our direct child). */
2759 manager_unwatch_pidref(u
->manager
, pid
);
2761 if (set_contains(u
->pids
, pid
)) /* early exit if already being watched */
2764 r
= pidref_dup(pid
, &pid_dup
);
2768 /* First, insert into the set of PIDs maintained by the unit */
2769 r
= set_ensure_put(&u
->pids
, &pidref_hash_ops_free
, pid_dup
);
2773 pid
= TAKE_PTR(pid_dup
); /* continue with our copy now that we have installed it properly in our set */
2775 /* Second, insert it into the simple global table, see if that works */
2776 r
= hashmap_ensure_put(&u
->manager
->watch_pids
, &pidref_hash_ops_free
, pid
, u
);
2780 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2781 * hashmap that points to an array. */
2783 PidRef
*old_pid
= NULL
;
2784 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &old_pid
);
2786 /* Count entries in array */
2788 for (; array
&& array
[n
]; n
++)
2791 /* Allocate a new array */
2792 _cleanup_free_ Unit
**new_array
= new(Unit
*, n
+ 2);
2796 /* Append us to the end */
2797 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2799 new_array
[n
+1] = NULL
;
2801 /* Make sure the hashmap is allocated */
2802 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids_more
, &pidref_hash_ops_free
);
2806 /* Add or replace the old array */
2807 r
= hashmap_replace(u
->manager
->watch_pids_more
, old_pid
?: pid
, new_array
);
2811 TAKE_PTR(new_array
); /* Now part of the hash table */
2812 free(array
); /* Which means we can now delete the old version */
/* Convenience wrapper around unit_watch_pidref() for a plain pid_t: builds a temporary
 * PidRef (cleaned up via pidref_done) and delegates. NOTE(review): the error check after
 * pidref_set_pid() was dropped by extraction. */
2816 int unit_watch_pid(Unit
*u
, pid_t pid
, bool exclusive
) {
2817 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
2821 assert(pid_is_valid(pid
));
2823 r
= pidref_set_pid(&pidref
, pid
);
2827 return unit_watch_pidref(u
, &pidref
, exclusive
);
/* Inverse of unit_watch_pidref(): drop a PID from u->pids, then remove the unit from
 * whichever manager-wide table holds it — the simple watch_pids map, or the
 * watch_pids_more array (compacting the array in place and re-keying it when the key
 * PidRef being deleted was the array's hashmap key). NOTE(review): several branch
 * headers/early returns were dropped by extraction; do not edit logic from this view. */
2830 void unit_unwatch_pidref(Unit
*u
, const PidRef
*pid
) {
2832 assert(pidref_is_set(pid
));
2834 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2835 _cleanup_(pidref_freep
) PidRef
*pid1
= set_remove(u
->pids
, pid
);
2837 return; /* Early exit if this PID was never watched by us */
2839 /* First let's drop the unit from the simple hash table, if it is included there */
2840 PidRef
*pid2
= NULL
;
2841 Unit
*uu
= hashmap_get2(u
->manager
->watch_pids
, pid
, (void**) &pid2
);
2843 /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
2844 assert((uu
== u
) == (pid1
== pid2
));
2847 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2848 assert_se(hashmap_remove_value(u
->manager
->watch_pids
, pid2
, uu
));
2850 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2851 PidRef
*pid3
= NULL
;
2852 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &pid3
);
2854 /* Let's iterate through the array, dropping our own entry */
2855 size_t m
= 0, n
= 0;
2856 for (; array
&& array
[n
]; n
++)
2858 array
[m
++] = array
[n
];
2860 return; /* Not there */
2862 array
[m
] = NULL
; /* set trailing NULL marker on the new end */
2865 /* The array is now empty, remove the entire entry */
2866 assert_se(hashmap_remove_value(u
->manager
->watch_pids_more
, pid3
, array
));
2869 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2870 * we will delete, but by the PidRef object of the Unit that is now first in the
2873 PidRef
*new_pid3
= ASSERT_PTR(set_get(array
[0]->pids
, pid
));
2874 assert_se(hashmap_replace(u
->manager
->watch_pids_more
, new_pid3
, array
) >= 0);
/* Convenience wrapper: unwatch a plain pid_t by wrapping it in a stack PidRef. */
2879 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2880 return unit_unwatch_pidref(u
, &PIDREF_MAKE_FROM_PID(pid
));
/* Drop every watched PID of the unit (one by one, so the manager-wide tables are also
 * cleaned up) and then free the now-empty pid set. */
2883 void unit_unwatch_all_pids(Unit
*u
) {
2886 while (!set_isempty(u
->pids
))
2887 unit_unwatch_pidref(u
, set_first(u
->pids
));
2889 u
->pids
= set_free(u
->pids
);
/* Unwatch the PID referenced by 'pidref' (if set) and invalidate the caller's PidRef. */
2892 void unit_unwatch_pidref_done(Unit
*u
, PidRef
*pidref
) {
2895 if (!pidref_is_set(pidref
))
2898 unit_unwatch_pidref(u
, pidref
);
2899 pidref_done(pidref
);
/* Remove dead PIDs from the unit's watch set: every watched PID that is neither the main
 * nor the control PID and is no longer waitable (pidref_is_unwaited() <= 0) is unwatched. */
2902 static void unit_tidy_watch_pids(Unit
*u
) {
2903 PidRef
*except1
, *except2
, *e
;
2907 /* Cleans dead PIDs from our list */
2909 except1
= unit_main_pid(u
);
2910 except2
= unit_control_pid(u
);
2912 SET_FOREACH(e
, u
->pids
) {
2913 if (pidref_equal(except1
, e
) || pidref_equal(except2
, e
))
2916 if (pidref_is_unwaited(e
) <= 0)
2917 unit_unwatch_pidref(u
, e
);
/* Deferred event callback (see unit_enqueue_rewatch_pids): tidy the watched-PID set, re-scan
 * for new PIDs, and synthesize a cgroup-empty event if nothing is left to watch. */
2921 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2922 Unit
*u
= ASSERT_PTR(userdata
);
2926 unit_tidy_watch_pids(u
);
2927 unit_watch_all_pids(u
);
2929 /* If the PID set is empty now, then let's finish this off. */
2930 unit_synthesize_cgroup_empty_event(u
);
/* Schedule a low-priority deferred job (on_rewatch_pids_event) that prunes dead watched
 * PIDs and subscribes to new ones. Skipped when the unit has no cgroup path or when the
 * unified cgroup hierarchy provides proper empty notifications (cg_unified_controller > 0).
 * The event source is created lazily and re-armed as SD_EVENT_ONESHOT each call.
 * NOTE(review): the 'if (r < 0)' guards before the log_error_errno() calls were dropped
 * by extraction. */
2935 int unit_enqueue_rewatch_pids(Unit
*u
) {
2940 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
2941 if (!crt
|| !crt
->cgroup_path
)
2944 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2947 if (r
> 0) /* On unified we can use proper notifications */
2950 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2951 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2952 * involves issuing kill(pid, 0) on all processes we watch. */
2954 if (!u
->rewatch_pids_event_source
) {
2955 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2957 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2959 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2961 r
= sd_event_source_set_priority(s
, EVENT_PRIORITY_REWATCH_PIDS
);
2963 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2965 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2967 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2970 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2972 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
/* Cancel a previously enqueued rewatch-PIDs job: disable the event source (warning on
 * failure, best-effort) and release it. */
2977 void unit_dequeue_rewatch_pids(Unit
*u
) {
2981 if (!u
->rewatch_pids_event_source
)
2984 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2986 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2988 u
->rewatch_pids_event_source
= sd_event_source_disable_unref(u
->rewatch_pids_event_source
);
/* Decide whether a job of type 'j' may be enqueued for this unit, based on the unit's
 * capabilities (can_start/can_stop/can_reload) and perpetuality. NOTE(review): the switch
 * header and several case labels (e.g. JOB_START, JOB_STOP, JOB_RESTART, JOB_RELOAD) and
 * their return statements were dropped by extraction — only part of the switch is visible. */
2991 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2993 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2997 case JOB_VERIFY_ACTIVE
:
3000 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3001 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3006 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3007 * external events), hence it makes no sense to permit enqueuing such a request either. */
3008 return !u
->perpetual
;
3011 case JOB_TRY_RESTART
:
3012 return unit_can_stop(u
) && unit_can_start(u
);
3015 case JOB_TRY_RELOAD
:
3016 return unit_can_reload(u
);
3018 case JOB_RELOAD_OR_START
:
3019 return unit_can_reload(u
) && unit_can_start(u
);
3022 assert_not_reached();
/* Return (creating on demand) the inner hashmap Unit* -> UnitDependencyInfo for dependency
 * type 'd', stored in the unit's top-level dependencies map keyed by UNIT_DEPENDENCY_TO_PTR(d).
 * NOTE(review): the NULL-check / TAKE_PTR / return lines were dropped by extraction. */
3026 static Hashmap
*unit_get_dependency_hashmap_per_type(Unit
*u
, UnitDependency d
) {
3030 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3032 deps
= hashmap_get(u
->dependencies
, UNIT_DEPENDENCY_TO_PTR(d
));
3034 _cleanup_hashmap_free_ Hashmap
*h
= NULL
;
3036 h
= hashmap_new(NULL
);
3040 if (hashmap_ensure_put(&u
->dependencies
, NULL
, UNIT_DEPENDENCY_TO_PTR(d
), h
) < 0)
/* Which side(s) of a newly added dependency need a D-Bus properties-changed notification:
 * the unit the dependency originates from, the unit it points to, or both (flags). */
3049 typedef enum NotifyDependencyFlags
{
3050 NOTIFY_DEPENDENCY_UPDATE_FROM
= 1 << 0,
3051 NOTIFY_DEPENDENCY_UPDATE_TO
= 1 << 1,
3052 } NotifyDependencyFlags
;
/* Core dependency insertion: records dependency 'd' from 'u' to 'other' together with its
 * inverse (from inverse_table) on 'other', ORing 'mask' into the origin/destination masks
 * of the per-pair UnitDependencyInfo. On partial failure the first insertion is rolled
 * back. Returns NotifyDependencyFlags describing which side(s) actually changed.
 * NOTE(review): the parameter list, some error returns and the final return were dropped
 * by extraction; do not edit logic from this view. */
3054 static int unit_add_dependency_impl(
3058 UnitDependencyMask mask
) {
3060 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
3061 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
3062 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
3063 [UNIT_WANTS
] = UNIT_WANTED_BY
,
3064 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
3065 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
3066 [UNIT_UPHOLDS
] = UNIT_UPHELD_BY
,
3067 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
3068 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
3069 [UNIT_WANTED_BY
] = UNIT_WANTS
,
3070 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
3071 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
3072 [UNIT_UPHELD_BY
] = UNIT_UPHOLDS
,
3073 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
3074 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
3075 [UNIT_BEFORE
] = UNIT_AFTER
,
3076 [UNIT_AFTER
] = UNIT_BEFORE
,
3077 [UNIT_ON_SUCCESS
] = UNIT_ON_SUCCESS_OF
,
3078 [UNIT_ON_SUCCESS_OF
] = UNIT_ON_SUCCESS
,
3079 [UNIT_ON_FAILURE
] = UNIT_ON_FAILURE_OF
,
3080 [UNIT_ON_FAILURE_OF
] = UNIT_ON_FAILURE
,
3081 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
3082 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
3083 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
3084 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
3085 [UNIT_PROPAGATES_STOP_TO
] = UNIT_STOP_PROPAGATED_FROM
,
3086 [UNIT_STOP_PROPAGATED_FROM
] = UNIT_PROPAGATES_STOP_TO
,
3087 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
, /* symmetric! 👓 */
3088 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
3089 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
3090 [UNIT_IN_SLICE
] = UNIT_SLICE_OF
,
3091 [UNIT_SLICE_OF
] = UNIT_IN_SLICE
,
3094 Hashmap
*u_deps
, *other_deps
;
3095 UnitDependencyInfo u_info
, u_info_old
, other_info
, other_info_old
;
3096 NotifyDependencyFlags flags
= 0;
3101 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3102 assert(inverse_table
[d
] >= 0 && inverse_table
[d
] < _UNIT_DEPENDENCY_MAX
);
3103 assert(mask
> 0 && mask
< _UNIT_DEPENDENCY_MASK_FULL
);
3105 /* Ensure the following two hashmaps for each unit exist:
3106 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3107 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3108 u_deps
= unit_get_dependency_hashmap_per_type(u
, d
);
3112 other_deps
= unit_get_dependency_hashmap_per_type(other
, inverse_table
[d
]);
3116 /* Save the original dependency info. */
3117 u_info
.data
= u_info_old
.data
= hashmap_get(u_deps
, other
);
3118 other_info
.data
= other_info_old
.data
= hashmap_get(other_deps
, u
);
3120 /* Update dependency info. */
3121 u_info
.origin_mask
|= mask
;
3122 other_info
.destination_mask
|= mask
;
3124 /* Save updated dependency info. */
3125 if (u_info
.data
!= u_info_old
.data
) {
3126 r
= hashmap_replace(u_deps
, other
, u_info
.data
);
3130 flags
= NOTIFY_DEPENDENCY_UPDATE_FROM
;
3133 if (other_info
.data
!= other_info_old
.data
) {
3134 r
= hashmap_replace(other_deps
, u
, other_info
.data
);
3136 if (u_info
.data
!= u_info_old
.data
) {
3137 /* Restore the old dependency. */
3138 if (u_info_old
.data
)
3139 (void) hashmap_update(u_deps
, other
, u_info_old
.data
);
3141 hashmap_remove(u_deps
, other
);
3146 flags
|= NOTIFY_DEPENDENCY_UPDATE_TO
;
/* Public dependency-adding entry point: follows merged units, resolves the dependency atom,
 * validates type-specific constraints (no Before= on .device, OnFailure=/Triggers=/
 * TriggeredBy= capability checks, Slice=/SliceOf= requiring slice/cgroup units), then calls
 * unit_add_dependency_impl() — optionally also recording a UNIT_REFERENCES dependency —
 * and enqueues D-Bus updates for the side(s) that changed. Returns > 0 if something new
 * was recorded, 0 if already present or ignored. NOTE(review): the parameter list, the
 * self-dependency early return and several error returns were dropped by extraction. */
3152 int unit_add_dependency(
3157 UnitDependencyMask mask
) {
3159 UnitDependencyAtom a
;
3162 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3163 * there, no need to notify! */
3164 NotifyDependencyFlags notify_flags
;
3167 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3170 u
= unit_follow_merge(u
);
3171 other
= unit_follow_merge(other
);
3172 a
= unit_dependency_to_atom(d
);
3175 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3177 if (unit_should_warn_about_dependency(d
))
3178 log_unit_warning(u
, "Dependency %s=%s is dropped.",
3179 unit_dependency_to_string(d
), u
->id
);
3183 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3186 /* Note that ordering a device unit after a unit is permitted since it allows to start its job
3187 * running timeout at a specific time. */
3188 if (FLAGS_SET(a
, UNIT_ATOM_BEFORE
) && other
->type
== UNIT_DEVICE
) {
3189 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
3193 if (FLAGS_SET(a
, UNIT_ATOM_ON_FAILURE
) && !UNIT_VTABLE(u
)->can_fail
) {
3194 log_unit_warning(u
, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other
->id
, unit_type_to_string(u
->type
));
3198 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERS
) && !UNIT_VTABLE(u
)->can_trigger
)
3199 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3200 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(u
->type
));
3201 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERED_BY
) && !UNIT_VTABLE(other
)->can_trigger
)
3202 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3203 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(other
->type
));
3205 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && other
->type
!= UNIT_SLICE
)
3206 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3207 "Requested dependency Slice=%s refused (%s is not a slice unit).", other
->id
, other
->id
);
3208 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && u
->type
!= UNIT_SLICE
)
3209 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3210 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other
->id
, u
->id
);
3212 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && !UNIT_HAS_CGROUP_CONTEXT(u
))
3213 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3214 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other
->id
, u
->id
);
3216 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && !UNIT_HAS_CGROUP_CONTEXT(other
))
3217 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3218 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other
->id
, other
->id
);
3220 r
= unit_add_dependency_impl(u
, d
, other
, mask
);
3225 if (add_reference
) {
3226 r
= unit_add_dependency_impl(u
, UNIT_REFERENCES
, other
, mask
);
3232 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_FROM
))
3233 unit_add_to_dbus_queue(u
);
3234 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_TO
))
3235 unit_add_to_dbus_queue(other
);
3237 return notify_flags
!= 0;
/* Add two dependency types d and e from u to other in one call (either may be negative to
 * skip, but not both); returns > 0 if either insertion recorded something new.
 * NOTE(review): the intermediate error checks and the handling of negative d/e were
 * dropped by extraction. */
3240 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
3244 assert(d
>= 0 || e
>= 0);
3247 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3253 s
= unit_add_dependency(u
, e
, other
, add_reference
, mask
);
3258 return r
> 0 || s
> 0;
/* Resolve a possibly-templated unit name relative to unit 'u': instantiate the template
 * with u's own instance string, or — when u has no instance — with u's name prefix.
 * The resolved name is returned via *buf/*ret. NOTE(review): the non-template early path
 * and error returns were dropped by extraction. */
3261 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
3269 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
3276 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
3278 _cleanup_free_
char *i
= NULL
;
3280 r
= unit_name_to_prefix(u
->id
, &i
);
3284 r
= unit_name_replace_instance(name
, i
, buf
);
/* Like unit_add_dependency(), but takes the target by (possibly templated) name:
 * resolves the template, loads the target unit, then delegates. Skipped entirely in
 * MANAGER_TEST_RUN_IGNORE_DEPENDENCIES test mode. */
3293 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3294 _cleanup_free_
char *buf
= NULL
;
3301 r
= resolve_template(u
, name
, &buf
, &name
);
3305 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3308 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3312 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
/* Like unit_add_two_dependencies(), but takes the target by (possibly templated) name;
 * same resolve/load/delegate pattern as unit_add_dependency_by_name(). */
3315 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3316 _cleanup_free_
char *buf
= NULL
;
3323 r
= resolve_template(u
, name
, &buf
, &name
);
3327 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3330 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3334 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* Override the unit search path via $SYSTEMD_UNIT_PATH (debug aid); returns negative errno. */
3337 int set_unit_path(const char *p
) {
3338 /* This is mostly for debug purposes */
3339 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p
, 1));
/* Return the newly-allocated D-Bus object path for this unit, derived from its id. */
3342 char *unit_dbus_path(Unit
*u
) {
3348 return unit_dbus_path_from_name(u
->id
);
/* Return the D-Bus object path keyed by the unit's invocation ID, or NULL-path behavior
 * when no invocation ID is set (the early return's value was dropped by extraction). */
3351 char *unit_dbus_path_invocation_id(Unit
*u
) {
3354 if (sd_id128_is_null(u
->invocation_id
))
3357 return unit_dbus_path_from_name(u
->invocation_id_string
);
/* Set (or clear, when 'id' is null) the unit's invocation ID, keeping the manager's
 * units_by_invocation_id hashmap in sync. On failure the ID is reset to null rather than
 * rolled back. NOTE(review): some early returns / error labels were dropped by extraction. */
3360 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
3365 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3367 if (sd_id128_equal(u
->invocation_id
, id
))
3370 if (!sd_id128_is_null(u
->invocation_id
))
3371 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3373 if (sd_id128_is_null(id
)) {
3378 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
3382 u
->invocation_id
= id
;
3383 sd_id128_to_string(id
, u
->invocation_id_string
);
3385 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3392 u
->invocation_id
= SD_ID128_NULL
;
3393 u
->invocation_id_string
[0] = 0;
/* Assign unit 'u' to slice 'slice': only valid for inactive cgroup-context units that are
 * not themselves slices; the target must be a slice; init.scope may only live in the root
 * slice; slice changes are refused once the unit's cgroup is realized. Recorded as a
 * UNIT_IN_SLICE dependency with UNIT_DEPENDENCY_SLICE_PROPERTY mask, replacing any prior
 * slice dependency. NOTE(review): the error-return values for each guard were dropped by
 * extraction. */
3397 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3403 /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
3404 * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
3405 * slice is derived from the name). Make sure the unit we set is actually a slice. */
3407 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3410 if (u
->type
== UNIT_SLICE
)
3413 if (unit_active_state(u
) != UNIT_INACTIVE
)
3416 if (slice
->type
!= UNIT_SLICE
)
3419 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3420 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3423 if (UNIT_GET_SLICE(u
) == slice
)
3426 /* Disallow slice changes if @u is already bound to cgroups */
3427 if (UNIT_GET_SLICE(u
)) {
3428 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
3429 if (crt
&& crt
->cgroup_realized
)
3433 /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
3434 if (UNIT_GET_SLICE(u
))
3435 unit_remove_dependencies(u
, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3437 r
= unit_add_dependency(u
, UNIT_IN_SLICE
, slice
, true, UNIT_DEPENDENCY_SLICE_PROPERTY
);
/* Pick and assign a default slice for a unit that has none yet: instantiated units get a
 * per-template "system-<prefix>.slice" / "app-<prefix>.slice" (prefix extra-escaped since
 * '-' is special in slice names), extrinsic units stay in the root slice, and everything
 * else lands in system.slice or app.slice depending on manager scope. The slice unit is
 * loaded and then applied via unit_set_slice(). NOTE(review): the instance check opening
 * the first branch and several error returns were dropped by extraction. */
3444 int unit_set_default_slice(Unit
*u
) {
3445 const char *slice_name
;
3451 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3454 if (UNIT_GET_SLICE(u
))
3458 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3460 /* Implicitly place all instantiated units in their
3461 * own per-template slice */
3463 r
= unit_name_to_prefix(u
->id
, &prefix
);
3467 /* The prefix is already escaped, but it might include
3468 * "-" which has a special meaning for slice units,
3469 * hence escape it here extra. */
3470 escaped
= unit_name_escape(prefix
);
3474 if (MANAGER_IS_SYSTEM(u
->manager
))
3475 slice_name
= strjoina("system-", escaped
, ".slice");
3477 slice_name
= strjoina("app-", escaped
, ".slice");
3479 } else if (unit_is_extrinsic(u
))
3480 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3481 * the root slice. They don't really belong in one of the subslices. */
3482 slice_name
= SPECIAL_ROOT_SLICE
;
3484 else if (MANAGER_IS_SYSTEM(u
->manager
))
3485 slice_name
= SPECIAL_SYSTEM_SLICE
;
3487 slice_name
= SPECIAL_APP_SLICE
;
3489 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3493 return unit_set_slice(u
, slice
);
3496 const char *unit_slice_name(Unit
*u
) {
3500 slice
= UNIT_GET_SLICE(u
);
3507 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3508 _cleanup_free_
char *t
= NULL
;
3515 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3518 if (unit_has_name(u
, t
))
3521 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3522 assert(r
< 0 || *_found
!= u
);
3526 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3527 const char *new_owner
;
3528 Unit
*u
= ASSERT_PTR(userdata
);
3533 r
= sd_bus_message_read(message
, "sss", NULL
, NULL
, &new_owner
);
3535 bus_log_parse_error(r
);
3539 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3540 UNIT_VTABLE(u
)->bus_name_owner_change(u
, empty_to_null(new_owner
));
3545 static int get_name_owner_handler(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3546 const sd_bus_error
*e
;
3547 const char *new_owner
;
3548 Unit
*u
= ASSERT_PTR(userdata
);
3553 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3555 e
= sd_bus_message_get_error(message
);
3557 if (!sd_bus_error_has_name(e
, SD_BUS_ERROR_NAME_HAS_NO_OWNER
)) {
3558 r
= sd_bus_error_get_errno(e
);
3559 log_unit_error_errno(u
, r
,
3560 "Unexpected error response from GetNameOwner(): %s",
3561 bus_error_message(e
, r
));
3566 r
= sd_bus_message_read(message
, "s", &new_owner
);
3568 return bus_log_parse_error(r
);
3570 assert(!isempty(new_owner
));
3573 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3574 UNIT_VTABLE(u
)->bus_name_owner_change(u
, new_owner
);
3579 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3580 _cleanup_(sd_bus_message_unrefp
) sd_bus_message
*m
= NULL
;
3582 usec_t timeout_usec
= 0;
3589 if (u
->match_bus_slot
|| u
->get_name_owner_slot
)
3592 /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
3593 * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
3594 * value defined above. */
3595 if (UNIT_VTABLE(u
)->get_timeout_start_usec
)
3596 timeout_usec
= UNIT_VTABLE(u
)->get_timeout_start_usec(u
);
3598 match
= strjoina("type='signal',"
3599 "sender='org.freedesktop.DBus',"
3600 "path='/org/freedesktop/DBus',"
3601 "interface='org.freedesktop.DBus',"
3602 "member='NameOwnerChanged',"
3603 "arg0='", name
, "'");
3605 r
= bus_add_match_full(
3610 signal_name_owner_changed
,
3617 r
= sd_bus_message_new_method_call(
3620 "org.freedesktop.DBus",
3621 "/org/freedesktop/DBus",
3622 "org.freedesktop.DBus",
3627 r
= sd_bus_message_append(m
, "s", name
);
3631 r
= sd_bus_call_async(
3633 &u
->get_name_owner_slot
,
3635 get_name_owner_handler
,
3640 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3644 log_unit_debug(u
, "Watching D-Bus name '%s'.", name
);
3648 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3654 /* Watch a specific name on the bus. We only support one unit
3655 * watching each name for now. */
3657 if (u
->manager
->api_bus
) {
3658 /* If the bus is already available, install the match directly.
3659 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3660 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3662 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3665 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3667 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3668 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3669 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3675 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3679 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3680 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3681 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3684 int unit_add_node_dependency(Unit
*u
, const char *what
, UnitDependency dep
, UnitDependencyMask mask
) {
3685 _cleanup_free_
char *e
= NULL
;
3691 /* Adds in links to the device node that this unit is based on */
3695 if (!is_device_path(what
))
3698 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3699 if (!unit_type_supported(UNIT_DEVICE
))
3702 r
= unit_name_from_path(what
, ".device", &e
);
3706 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3710 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3711 dep
= UNIT_BINDS_TO
;
3713 return unit_add_two_dependencies(u
, UNIT_AFTER
,
3714 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3715 device
, true, mask
);
3718 int unit_add_blockdev_dependency(Unit
*u
, const char *what
, UnitDependencyMask mask
) {
3719 _cleanup_free_
char *escaped
= NULL
, *target
= NULL
;
3727 if (!path_startswith(what
, "/dev/"))
3730 /* If we don't support devices, then also don't bother with blockdev@.target */
3731 if (!unit_type_supported(UNIT_DEVICE
))
3734 r
= unit_name_path_escape(what
, &escaped
);
3738 r
= unit_name_build("blockdev", escaped
, ".target", &target
);
3742 return unit_add_dependency_by_name(u
, UNIT_AFTER
, target
, true, mask
);
3745 int unit_coldplug(Unit
*u
) {
3750 /* Make sure we don't enter a loop, when coldplugging recursively. */
3754 u
->coldplugged
= true;
3756 STRV_FOREACH(i
, u
->deserialized_refs
)
3757 RET_GATHER(r
, bus_unit_track_add_name(u
, *i
));
3759 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3761 if (UNIT_VTABLE(u
)->coldplug
)
3762 RET_GATHER(r
, UNIT_VTABLE(u
)->coldplug(u
));
3765 RET_GATHER(r
, job_coldplug(u
->job
));
3767 RET_GATHER(r
, job_coldplug(u
->nop_job
));
3769 unit_modify_nft_set(u
, /* add = */ true);
3773 void unit_catchup(Unit
*u
) {
3776 if (UNIT_VTABLE(u
)->catchup
)
3777 UNIT_VTABLE(u
)->catchup(u
);
3779 unit_cgroup_catchup(u
);
3782 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3788 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3789 * are never out-of-date. */
3790 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3793 if (stat(path
, &st
) < 0)
3794 /* What, cannot access this anymore? */
3798 /* For masked files check if they are still so */
3799 return !null_or_empty(&st
);
3801 /* For non-empty files check the mtime */
3802 return timespec_load(&st
.st_mtim
) > mtime
;
3807 bool unit_need_daemon_reload(Unit
*u
) {
3808 _cleanup_strv_free_
char **dropins
= NULL
;
3813 if (u
->manager
->unit_file_state_outdated
)
3816 /* For unit files, we allow masking… */
3817 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3818 u
->load_state
== UNIT_MASKED
))
3821 /* Source paths should not be masked… */
3822 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3825 if (u
->load_state
== UNIT_LOADED
)
3826 (void) unit_find_dropin_paths(u
, &dropins
);
3827 if (!strv_equal(u
->dropin_paths
, dropins
))
3830 /* … any drop-ins that are masked are simply omitted from the list. */
3831 STRV_FOREACH(path
, u
->dropin_paths
)
3832 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3838 void unit_reset_failed(Unit
*u
) {
3841 if (UNIT_VTABLE(u
)->reset_failed
)
3842 UNIT_VTABLE(u
)->reset_failed(u
);
3844 ratelimit_reset(&u
->start_ratelimit
);
3845 u
->start_limit_hit
= false;
3848 Unit
*unit_following(Unit
*u
) {
3851 if (UNIT_VTABLE(u
)->following
)
3852 return UNIT_VTABLE(u
)->following(u
);
3857 bool unit_stop_pending(Unit
*u
) {
3860 /* This call does check the current state of the unit. It's
3861 * hence useful to be called from state change calls of the
3862 * unit itself, where the state isn't updated yet. This is
3863 * different from unit_inactive_or_pending() which checks both
3864 * the current state and for a queued job. */
3866 return unit_has_job_type(u
, JOB_STOP
);
3869 bool unit_inactive_or_pending(Unit
*u
) {
3872 /* Returns true if the unit is inactive or going down */
3874 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3877 if (unit_stop_pending(u
))
3883 bool unit_active_or_pending(Unit
*u
) {
3886 /* Returns true if the unit is active or going up */
3888 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3892 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3898 bool unit_will_restart_default(Unit
*u
) {
3901 return unit_has_job_type(u
, JOB_START
);
3904 bool unit_will_restart(Unit
*u
) {
3907 if (!UNIT_VTABLE(u
)->will_restart
)
3910 return UNIT_VTABLE(u
)->will_restart(u
);
3913 void unit_notify_cgroup_oom(Unit
*u
, bool managed_oom
) {
3916 if (UNIT_VTABLE(u
)->notify_cgroup_oom
)
3917 UNIT_VTABLE(u
)->notify_cgroup_oom(u
, managed_oom
);
3920 static int unit_pid_set(Unit
*u
, Set
**pid_set
) {
3926 set_clear(*pid_set
); /* This updates input. */
3928 /* Exclude the main/control pids from being killed via the cgroup */
3931 FOREACH_ARGUMENT(pid
, unit_main_pid(u
), unit_control_pid(u
))
3932 if (pidref_is_set(pid
)) {
3933 r
= set_ensure_put(pid_set
, NULL
, PID_TO_PTR(pid
->pid
));
3941 static int kill_common_log(const PidRef
*pid
, int signo
, void *userdata
) {
3942 _cleanup_free_
char *comm
= NULL
;
3943 Unit
*u
= ASSERT_PTR(userdata
);
3945 (void) pidref_get_comm(pid
, &comm
);
3947 log_unit_info(u
, "Sending signal SIG%s to process " PID_FMT
" (%s) on client request.",
3948 signal_to_string(signo
), pid
->pid
, strna(comm
));
3953 static int kill_or_sigqueue(PidRef
* pidref
, int signo
, int code
, int value
) {
3954 assert(pidref_is_set(pidref
));
3955 assert(SIGNAL_VALID(signo
));
3960 log_debug("Killing " PID_FMT
" with signal SIG%s.", pidref
->pid
, signal_to_string(signo
));
3961 return pidref_kill(pidref
, signo
);
3964 log_debug("Enqueuing value %i to " PID_FMT
" on signal SIG%s.", value
, pidref
->pid
, signal_to_string(signo
));
3965 return pidref_sigqueue(pidref
, signo
, value
);
3968 assert_not_reached();
3972 static int unit_kill_one(
3979 sd_bus_error
*ret_error
) {
3986 if (!pidref_is_set(pidref
))
3989 _cleanup_free_
char *comm
= NULL
;
3990 (void) pidref_get_comm(pidref
, &comm
);
3992 r
= kill_or_sigqueue(pidref
, signo
, code
, value
);
3996 /* Report this failure both to the logs and to the client */
3998 sd_bus_error_set_errnof(
4000 "Failed to send signal SIG%s to %s process " PID_FMT
" (%s): %m",
4001 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4003 return log_unit_warning_errno(
4005 "Failed to send signal SIG%s to %s process " PID_FMT
" (%s) on client request: %m",
4006 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4009 log_unit_info(u
, "Sent signal SIG%s to %s process " PID_FMT
" (%s) on client request.",
4010 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4011 return 1; /* killed */
4020 sd_bus_error
*ret_error
) {
4022 PidRef
*main_pid
, *control_pid
;
4023 bool killed
= false;
4026 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4027 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4028 * stop a service ourselves. */
4032 assert(who
< _KILL_WHO_MAX
);
4033 assert(SIGNAL_VALID(signo
));
4034 assert(IN_SET(code
, SI_USER
, SI_QUEUE
));
4036 main_pid
= unit_main_pid(u
);
4037 control_pid
= unit_control_pid(u
);
4039 if (!UNIT_HAS_CGROUP_CONTEXT(u
) && !main_pid
&& !control_pid
)
4040 return sd_bus_error_setf(ret_error
, SD_BUS_ERROR_NOT_SUPPORTED
, "Unit type does not support process killing.");
4042 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
4044 return sd_bus_error_setf(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
4045 if (!pidref_is_set(main_pid
))
4046 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
4049 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
4051 return sd_bus_error_setf(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
4052 if (!pidref_is_set(control_pid
))
4053 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
4056 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4057 r
= unit_kill_one(u
, control_pid
, "control", signo
, code
, value
, ret_error
);
4059 killed
= killed
|| r
> 0;
4062 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4063 r
= unit_kill_one(u
, main_pid
, "main", signo
, code
, value
, ret
>= 0 ? ret_error
: NULL
);
4065 killed
= killed
|| r
> 0;
4068 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4069 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4070 * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
4071 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && code
== SI_USER
) {
4072 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
4074 if (crt
&& crt
->cgroup_path
) {
4075 _cleanup_set_free_ Set
*pid_set
= NULL
;
4077 /* Exclude the main/control pids from being killed via the cgroup */
4078 r
= unit_pid_set(u
, &pid_set
);
4082 r
= cg_kill_recursive(crt
->cgroup_path
, signo
, 0, pid_set
, kill_common_log
, u
);
4083 if (r
< 0 && !IN_SET(r
, -ESRCH
, -ENOENT
)) {
4085 sd_bus_error_set_errnof(
4087 "Failed to send signal SIG%s to auxiliary processes: %m",
4088 signal_to_string(signo
));
4090 log_unit_warning_errno(
4092 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4093 signal_to_string(signo
));
4098 killed
= killed
|| r
>= 0;
4102 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4103 if (ret
>= 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
, KILL_MAIN_FAIL
))
4104 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No matching processes to kill");
4109 int unit_following_set(Unit
*u
, Set
**s
) {
4113 if (UNIT_VTABLE(u
)->following_set
)
4114 return UNIT_VTABLE(u
)->following_set(u
, s
);
4120 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4125 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4126 r
= unit_file_get_state(
4127 u
->manager
->runtime_scope
,
4130 &u
->unit_file_state
);
4132 u
->unit_file_state
= UNIT_FILE_BAD
;
4135 return u
->unit_file_state
;
4138 PresetAction
unit_get_unit_file_preset(Unit
*u
) {
4143 if (u
->unit_file_preset
< 0 && u
->fragment_path
) {
4144 _cleanup_free_
char *bn
= NULL
;
4146 r
= path_extract_filename(u
->fragment_path
, &bn
);
4148 return (u
->unit_file_preset
= r
);
4150 if (r
== O_DIRECTORY
)
4151 return (u
->unit_file_preset
= -EISDIR
);
4153 u
->unit_file_preset
= unit_file_query_preset(
4154 u
->manager
->runtime_scope
,
4160 return u
->unit_file_preset
;
4163 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4169 unit_ref_unset(ref
);
4171 ref
->source
= source
;
4172 ref
->target
= target
;
4173 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
4177 void unit_ref_unset(UnitRef
*ref
) {
4183 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4184 * be unreferenced now. */
4185 unit_add_to_gc_queue(ref
->target
);
4187 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4188 ref
->source
= ref
->target
= NULL
;
4191 static int user_from_unit_name(Unit
*u
, char **ret
) {
4193 static const uint8_t hash_key
[] = {
4194 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4195 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4198 _cleanup_free_
char *n
= NULL
;
4201 r
= unit_name_to_prefix(u
->id
, &n
);
4205 if (valid_user_group_name(n
, 0)) {
4210 /* If we can't use the unit name as a user name, then let's hash it and use that */
4211 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4217 int unit_patch_contexts(Unit
*u
) {
4224 /* Patch in the manager defaults into the exec and cgroup
4225 * contexts, _after_ the rest of the settings have been
4228 ec
= unit_get_exec_context(u
);
4230 /* This only copies in the ones that need memory */
4231 for (unsigned i
= 0; i
< _RLIMIT_MAX
; i
++)
4232 if (u
->manager
->defaults
.rlimit
[i
] && !ec
->rlimit
[i
]) {
4233 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->defaults
.rlimit
[i
], 1);
4238 if (MANAGER_IS_USER(u
->manager
) &&
4239 !ec
->working_directory
) {
4241 r
= get_home_dir(&ec
->working_directory
);
4245 /* Allow user services to run, even if the
4246 * home directory is missing */
4247 ec
->working_directory_missing_ok
= true;
4250 if (ec
->private_devices
)
4251 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4253 if (ec
->protect_kernel_modules
)
4254 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4256 if (ec
->protect_kernel_logs
)
4257 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYSLOG
);
4259 if (ec
->protect_clock
)
4260 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_SYS_TIME
) | (UINT64_C(1) << CAP_WAKE_ALARM
));
4262 if (ec
->dynamic_user
) {
4264 r
= user_from_unit_name(u
, &ec
->user
);
4270 ec
->group
= strdup(ec
->user
);
4275 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4276 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4279 ec
->private_tmp
= true;
4280 ec
->remove_ipc
= true;
4281 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4282 if (ec
->protect_home
== PROTECT_HOME_NO
)
4283 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4285 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4287 ec
->no_new_privileges
= true;
4288 ec
->restrict_suid_sgid
= true;
4291 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++)
4292 exec_directory_sort(ec
->directories
+ dt
);
4295 cc
= unit_get_cgroup_context(u
);
4298 if (ec
->private_devices
&&
4299 cc
->device_policy
== CGROUP_DEVICE_POLICY_AUTO
)
4300 cc
->device_policy
= CGROUP_DEVICE_POLICY_CLOSED
;
4302 /* Only add these if needed, as they imply that everything else is blocked. */
4303 if (cc
->device_policy
!= CGROUP_DEVICE_POLICY_AUTO
|| cc
->device_allow
) {
4304 if (ec
->root_image
|| ec
->mount_images
) {
4306 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4307 FOREACH_STRING(p
, "/dev/loop-control", "/dev/mapper/control") {
4308 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
4312 FOREACH_STRING(p
, "block-loop", "block-blkext", "block-device-mapper") {
4313 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
|CGROUP_DEVICE_MKNOD
);
4318 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4319 * Same for mapper and verity. */
4320 FOREACH_STRING(p
, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4321 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, p
, true, UNIT_DEPENDENCY_FILE
);
4327 if (ec
->protect_clock
) {
4328 r
= cgroup_context_add_device_allow(cc
, "char-rtc", CGROUP_DEVICE_READ
);
4333 /* If there are encrypted credentials we might need to access the TPM. */
4334 if (exec_context_has_encrypted_credentials(ec
)) {
4335 r
= cgroup_context_add_device_allow(cc
, "char-tpm", CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
4345 ExecContext
*unit_get_exec_context(const Unit
*u
) {
4352 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4356 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4359 KillContext
*unit_get_kill_context(const Unit
*u
) {
4366 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4370 return (KillContext
*) ((uint8_t*) u
+ offset
);
4373 CGroupContext
*unit_get_cgroup_context(const Unit
*u
) {
4379 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4383 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4386 ExecRuntime
*unit_get_exec_runtime(const Unit
*u
) {
4392 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4396 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4399 CGroupRuntime
*unit_get_cgroup_runtime(const Unit
*u
) {
4405 offset
= UNIT_VTABLE(u
)->cgroup_runtime_offset
;
4409 return *(CGroupRuntime
**) ((uint8_t*) u
+ offset
);
4412 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4415 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4418 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4419 return u
->manager
->lookup_paths
.transient
;
4421 if (flags
& UNIT_PERSISTENT
)
4422 return u
->manager
->lookup_paths
.persistent_control
;
4424 if (flags
& UNIT_RUNTIME
)
4425 return u
->manager
->lookup_paths
.runtime_control
;
4430 const char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4432 assert(popcount(flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
| UNIT_ESCAPE_C
)) <= 1);
4435 _cleanup_free_
char *t
= NULL
;
4437 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4438 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4439 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4440 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4443 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4444 t
= specifier_escape(s
);
4451 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4452 * ExecStart= and friends, i.e. '$' and quotes. */
4454 if (flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
)) {
4457 if (flags
& UNIT_ESCAPE_EXEC_SYNTAX_ENV
) {
4458 t2
= strreplace(s
, "$", "$$");
4461 free_and_replace(t
, t2
);
4464 t2
= shell_escape(t
?: s
, "\"");
4467 free_and_replace(t
, t2
);
4471 } else if (flags
& UNIT_ESCAPE_C
) {
4477 free_and_replace(t
, t2
);
4486 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4487 _cleanup_free_
char *result
= NULL
;
4490 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4491 * lines in a way suitable for ExecStart= stanzas. */
4493 STRV_FOREACH(i
, l
) {
4494 _cleanup_free_
char *buf
= NULL
;
4499 p
= unit_escape_setting(*i
, flags
, &buf
);
4503 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4504 if (!GREEDY_REALLOC(result
, n
+ a
+ 1))
4518 if (!GREEDY_REALLOC(result
, n
+ 1))
4523 return TAKE_PTR(result
);
4526 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4527 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4528 const char *dir
, *wrapped
;
4535 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4538 data
= unit_escape_setting(data
, flags
, &escaped
);
4542 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4543 * previous section header is the same */
4545 if (flags
& UNIT_PRIVATE
) {
4546 if (!UNIT_VTABLE(u
)->private_section
)
4549 if (!u
->transient_file
|| u
->last_section_private
< 0)
4550 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4551 else if (u
->last_section_private
== 0)
4552 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4554 if (!u
->transient_file
|| u
->last_section_private
< 0)
4555 data
= strjoina("[Unit]\n", data
);
4556 else if (u
->last_section_private
> 0)
4557 data
= strjoina("\n[Unit]\n", data
);
4560 if (u
->transient_file
) {
4561 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4562 * write to the transient unit file. */
4563 fputs(data
, u
->transient_file
);
4565 if (!endswith(data
, "\n"))
4566 fputc('\n', u
->transient_file
);
4568 /* Remember which section we wrote this entry to */
4569 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4573 dir
= unit_drop_in_dir(u
, flags
);
4577 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4578 "# or an equivalent operation. Do not edit.\n",
4582 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4586 (void) mkdir_p_label(p
, 0755);
4588 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4589 * recreate the cache after every drop-in we write. */
4590 if (u
->manager
->unit_path_cache
) {
4591 r
= set_put_strdup(&u
->manager
->unit_path_cache
, p
);
4596 r
= write_string_file_atomic_label(q
, wrapped
);
4600 r
= strv_push(&u
->dropin_paths
, q
);
4605 strv_uniq(u
->dropin_paths
);
4607 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4612 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4613 _cleanup_free_
char *p
= NULL
;
4621 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4624 va_start(ap
, format
);
4625 r
= vasprintf(&p
, format
, ap
);
4631 return unit_write_setting(u
, flags
, name
, p
);
4634 int unit_make_transient(Unit
*u
) {
4635 _cleanup_free_
char *path
= NULL
;
4640 if (!UNIT_VTABLE(u
)->can_transient
)
4643 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4645 path
= path_join(u
->manager
->lookup_paths
.transient
, u
->id
);
4649 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4650 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4653 f
= fopen(path
, "we");
4658 safe_fclose(u
->transient_file
);
4659 u
->transient_file
= f
;
4661 free_and_replace(u
->fragment_path
, path
);
4663 u
->source_path
= mfree(u
->source_path
);
4664 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4665 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4667 u
->load_state
= UNIT_STUB
;
4669 u
->transient
= true;
4671 unit_add_to_dbus_queue(u
);
4672 unit_add_to_gc_queue(u
);
4674 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4680 static int log_kill(const PidRef
*pid
, int sig
, void *userdata
) {
4681 _cleanup_free_
char *comm
= NULL
;
4683 assert(pidref_is_set(pid
));
4685 (void) pidref_get_comm(pid
, &comm
);
4687 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4688 only, like for example systemd's own PAM stub process. */
4689 if (comm
&& comm
[0] == '(')
4690 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4691 * here to let the manager know that a process was killed. */
4694 log_unit_notice(userdata
,
4695 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4698 signal_to_string(sig
));
4703 static int operation_to_signal(
4704 const KillContext
*c
,
4706 bool *ret_noteworthy
) {
4712 case KILL_TERMINATE
:
4713 case KILL_TERMINATE_AND_LOG
:
4714 *ret_noteworthy
= false;
4715 return c
->kill_signal
;
4718 *ret_noteworthy
= false;
4719 return restart_kill_signal(c
);
4722 *ret_noteworthy
= true;
4723 return c
->final_kill_signal
;
4726 *ret_noteworthy
= true;
4727 return c
->watchdog_signal
;
4730 assert_not_reached();
4734 static int unit_kill_context_one(
4736 const PidRef
*pidref
,
4741 cg_kill_log_func_t log_func
) {
4748 /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */
4750 if (!pidref_is_set(pidref
))
4754 log_func(pidref
, sig
, u
);
4756 r
= pidref_kill_and_sigcont(pidref
, sig
);
4760 _cleanup_free_
char *comm
= NULL
;
4762 (void) pidref_get_comm(pidref
, &comm
);
4763 return log_unit_warning_errno(u
, r
, "Failed to kill %s process " PID_FMT
" (%s), ignoring: %m", type
, pidref
->pid
, strna(comm
));
4767 (void) pidref_kill(pidref
, SIGHUP
);
4772 int unit_kill_context(Unit
*u
, KillOperation k
) {
4773 bool wait_for_exit
= false, send_sighup
;
4774 cg_kill_log_func_t log_func
= NULL
;
4779 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4780 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
4781 * which is used for user-requested killing of unit processes. */
4783 KillContext
*c
= unit_get_kill_context(u
);
4784 if (!c
|| c
->kill_mode
== KILL_NONE
)
4788 sig
= operation_to_signal(c
, k
, ¬eworthy
);
4790 log_func
= log_kill
;
4794 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4798 PidRef
*main_pid
= unit_main_pid_full(u
, &is_alien
);
4799 r
= unit_kill_context_one(u
, main_pid
, "main", is_alien
, sig
, send_sighup
, log_func
);
4800 wait_for_exit
= wait_for_exit
|| r
> 0;
4802 r
= unit_kill_context_one(u
, unit_control_pid(u
), "control", /* is_alien = */ false, sig
, send_sighup
, log_func
);
4803 wait_for_exit
= wait_for_exit
|| r
> 0;
4805 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
4806 if (crt
&& crt
->cgroup_path
&&
4807 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4808 _cleanup_set_free_ Set
*pid_set
= NULL
;
4810 /* Exclude the main/control pids from being killed via the cgroup */
4811 r
= unit_pid_set(u
, &pid_set
);
4815 r
= cg_kill_recursive(
4818 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4822 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4823 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", empty_to_root(crt
->cgroup_path
));
4827 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4828 * we are running in a container or if this is a delegation unit, simply because cgroup
4829 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4830 * of containers it can be confused easily by left-over directories in the cgroup — which
4831 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4832 * there we get proper events. Hence rely on them. */
4834 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4835 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4836 wait_for_exit
= true;
4839 r
= unit_pid_set(u
, &pid_set
);
4843 (void) cg_kill_recursive(
4848 /* kill_log= */ NULL
,
4849 /* userdata= */ NULL
);
4854 return wait_for_exit
;
4857 int unit_add_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
, UnitMountDependencyType type
) {
4858 Hashmap
**unit_map
, **manager_map
;
4863 assert(type
>= 0 && type
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
);
4865 unit_map
= &u
->mounts_for
[type
];
4866 manager_map
= &u
->manager
->units_needing_mounts_for
[type
];
4868 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4869 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
4870 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4871 * appearing mount units can easily determine which units to make themselves a dependency of. */
4873 if (!path_is_absolute(path
))
4876 if (hashmap_contains(*unit_map
, path
)) /* Exit quickly if the path is already covered. */
4879 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4880 * only after simplification, since path_is_normalized() rejects paths with '.'.
4881 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4882 _cleanup_free_
char *p
= NULL
;
4883 r
= path_simplify_alloc(path
, &p
);
4888 if (!path_is_normalized(path
))
4891 UnitDependencyInfo di
= {
4895 r
= hashmap_ensure_put(unit_map
, &path_hash_ops
, p
, di
.data
);
4899 TAKE_PTR(p
); /* path remains a valid pointer to the string stored in the hashmap */
4901 char prefix
[strlen(path
) + 1];
4902 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4905 x
= hashmap_get(*manager_map
, prefix
);
4907 _cleanup_free_
char *q
= NULL
;
4909 r
= hashmap_ensure_allocated(manager_map
, &path_hash_ops
);
4921 r
= hashmap_put(*manager_map
, q
, x
);
4937 int unit_setup_exec_runtime(Unit
*u
) {
4938 _cleanup_(exec_shared_runtime_unrefp
) ExecSharedRuntime
*esr
= NULL
;
4939 _cleanup_(dynamic_creds_unrefp
) DynamicCreds
*dcreds
= NULL
;
4940 _cleanup_set_free_ Set
*units
= NULL
;
4947 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4950 /* Check if there already is an ExecRuntime for this unit? */
4951 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4955 ec
= ASSERT_PTR(unit_get_exec_context(u
));
4957 r
= unit_get_transitive_dependency_set(u
, UNIT_ATOM_JOINS_NAMESPACE_OF
, &units
);
4961 /* Try to get it from somebody else */
4962 SET_FOREACH(other
, units
) {
4963 r
= exec_shared_runtime_acquire(u
->manager
, NULL
, other
->id
, false, &esr
);
4971 r
= exec_shared_runtime_acquire(u
->manager
, ec
, u
->id
, true, &esr
);
4976 if (ec
->dynamic_user
) {
4977 r
= dynamic_creds_make(u
->manager
, ec
->user
, ec
->group
, &dcreds
);
4982 r
= exec_runtime_make(u
, ec
, esr
, dcreds
, rt
);
4992 CGroupRuntime
*unit_setup_cgroup_runtime(Unit
*u
) {
4997 offset
= UNIT_VTABLE(u
)->cgroup_runtime_offset
;
5000 CGroupRuntime
**rt
= (CGroupRuntime
**) ((uint8_t*) u
+ offset
);
5004 return (*rt
= cgroup_runtime_new());
5007 bool unit_type_supported(UnitType t
) {
5008 static int8_t cache
[_UNIT_TYPE_MAX
] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5011 assert(t
>= 0 && t
< _UNIT_TYPE_MAX
);
5013 if (cache
[t
] == 0) {
5016 e
= strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t
));
5018 r
= getenv_bool(ascii_strupper(e
));
5019 if (r
< 0 && r
!= -ENXIO
)
5020 log_debug_errno(r
, "Failed to parse $%s, ignoring: %m", e
);
5022 cache
[t
] = r
== 0 ? -1 : 1;
5027 if (!unit_vtable
[t
]->supported
)
5030 return unit_vtable
[t
]->supported();
5033 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
5039 if (!unit_log_level_test(u
, LOG_NOTICE
))
5042 r
= dir_is_empty(where
, /* ignore_hidden_or_backup= */ false);
5043 if (r
> 0 || r
== -ENOTDIR
)
5046 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
5050 log_unit_struct(u
, LOG_NOTICE
,
5051 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5052 LOG_UNIT_INVOCATION_ID(u
),
5053 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
5057 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
5058 _cleanup_free_
char *canonical_where
= NULL
;
5064 r
= chase(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
, NULL
);
5066 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
5070 /* We will happily ignore a trailing slash (or any redundant slashes) */
5071 if (path_equal(where
, canonical_where
))
5074 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5075 log_unit_struct(u
, LOG_ERR
,
5076 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5077 LOG_UNIT_INVOCATION_ID(u
),
5078 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
5084 bool unit_is_pristine(Unit
*u
) {
5087 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5088 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5089 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5091 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5092 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5093 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5096 return IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) &&
5097 !u
->fragment_path
&&
5103 PidRef
* unit_control_pid(Unit
*u
) {
5106 if (UNIT_VTABLE(u
)->control_pid
)
5107 return UNIT_VTABLE(u
)->control_pid(u
);
5112 PidRef
* unit_main_pid_full(Unit
*u
, bool *ret_is_alien
) {
5115 if (UNIT_VTABLE(u
)->main_pid
)
5116 return UNIT_VTABLE(u
)->main_pid(u
, ret_is_alien
);
5119 *ret_is_alien
= false;
5123 static void unit_modify_user_nft_set(Unit
*u
, bool add
, NFTSetSource source
, uint32_t element
) {
5128 if (!MANAGER_IS_SYSTEM(u
->manager
))
5132 c
= unit_get_cgroup_context(u
);
5136 if (!u
->manager
->fw_ctx
) {
5137 r
= fw_ctx_new_full(&u
->manager
->fw_ctx
, /* init_tables= */ false);
5141 assert(u
->manager
->fw_ctx
);
5144 FOREACH_ARRAY(nft_set
, c
->nft_set_context
.sets
, c
->nft_set_context
.n_sets
) {
5145 if (nft_set
->source
!= source
)
5148 r
= nft_set_element_modify_any(u
->manager
->fw_ctx
, add
, nft_set
->nfproto
, nft_set
->table
, nft_set
->set
, &element
, sizeof(element
));
5150 log_warning_errno(r
, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5151 add
? "add" : "delete", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5153 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5154 add
? "Added" : "Deleted", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5158 static void unit_unref_uid_internal(
5162 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
5166 assert(_manager_unref_uid
);
5168 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5169 * gid_t are actually the same time, with the same validity rules.
5171 * Drops a reference to UID/GID from a unit. */
5173 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5174 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5176 if (!uid_is_valid(*ref_uid
))
5179 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
5180 *ref_uid
= UID_INVALID
;
5183 static void unit_unref_uid(Unit
*u
, bool destroy_now
) {
5186 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_USER
, u
->ref_uid
);
5188 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
5191 static void unit_unref_gid(Unit
*u
, bool destroy_now
) {
5194 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_GROUP
, u
->ref_gid
);
5196 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
5199 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
5202 unit_unref_uid(u
, destroy_now
);
5203 unit_unref_gid(u
, destroy_now
);
5206 static int unit_ref_uid_internal(
5211 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
5217 assert(uid_is_valid(uid
));
5218 assert(_manager_ref_uid
);
5220 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5221 * are actually the same type, and have the same validity rules.
5223 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5224 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5227 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5228 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5230 if (*ref_uid
== uid
)
5233 if (uid_is_valid(*ref_uid
)) /* Already set? */
5236 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
5244 static int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
5245 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
5248 static int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
5249 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
5252 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
5257 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5259 if (uid_is_valid(uid
)) {
5260 r
= unit_ref_uid(u
, uid
, clean_ipc
);
5265 if (gid_is_valid(gid
)) {
5266 q
= unit_ref_gid(u
, gid
, clean_ipc
);
5269 unit_unref_uid(u
, false);
5275 return r
> 0 || q
> 0;
5278 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
5284 c
= unit_get_exec_context(u
);
5286 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
5288 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5290 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_USER
, uid
);
5291 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_GROUP
, gid
);
5296 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
5301 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5302 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5303 * objects when no service references the UID/GID anymore. */
5305 r
= unit_ref_uid_gid(u
, uid
, gid
);
5307 unit_add_to_dbus_queue(u
);
5310 int unit_acquire_invocation_id(Unit
*u
) {
5316 r
= sd_id128_randomize(&id
);
5318 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5320 r
= unit_set_invocation_id(u
, id
);
5322 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5324 unit_add_to_dbus_queue(u
);
5328 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5334 /* Copy parameters from manager */
5335 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5339 p
->runtime_scope
= u
->manager
->runtime_scope
;
5341 r
= strdup_to(&p
->confirm_spawn
, manager_get_confirm_spawn(u
->manager
));
5345 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5346 p
->prefix
= u
->manager
->prefix
;
5347 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5349 /* Copy parameters from unit */
5350 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
5351 p
->cgroup_path
= crt
? crt
->cgroup_path
: NULL
;
5352 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5354 p
->received_credentials_directory
= u
->manager
->received_credentials_directory
;
5355 p
->received_encrypted_credentials_directory
= u
->manager
->received_encrypted_credentials_directory
;
5357 p
->shall_confirm_spawn
= u
->manager
->confirm_spawn
;
5359 p
->fallback_smack_process_label
= u
->manager
->defaults
.smack_process_label
;
5361 if (u
->manager
->restrict_fs
&& p
->bpf_restrict_fs_map_fd
< 0) {
5362 int fd
= bpf_restrict_fs_map_fd(u
);
5366 p
->bpf_restrict_fs_map_fd
= fd
;
5369 p
->user_lookup_fd
= u
->manager
->user_lookup_fds
[1];
5371 p
->cgroup_id
= crt
? crt
->cgroup_id
: 0;
5372 p
->invocation_id
= u
->invocation_id
;
5373 sd_id128_to_string(p
->invocation_id
, p
->invocation_id_string
);
5374 p
->unit_id
= strdup(u
->id
);
5381 int unit_fork_helper_process(Unit
*u
, const char *name
, PidRef
*ret
) {
5388 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5389 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5391 (void) unit_realize_cgroup(u
);
5393 CGroupRuntime
*crt
= unit_setup_cgroup_runtime(u
);
5397 r
= safe_fork(name
, FORK_REOPEN_LOG
|FORK_DEATHSIG_SIGTERM
, &pid
);
5401 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
5406 q
= pidref_set_pid(&pidref
, pid
);
5410 *ret
= TAKE_PIDREF(pidref
);
5416 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
);
5417 (void) ignore_signals(SIGPIPE
);
5419 if (crt
->cgroup_path
) {
5420 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, crt
->cgroup_path
, 0, NULL
, NULL
);
5422 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", empty_to_root(crt
->cgroup_path
));
5430 int unit_fork_and_watch_rm_rf(Unit
*u
, char **paths
, PidRef
*ret_pid
) {
5431 _cleanup_(pidref_done
) PidRef pid
= PIDREF_NULL
;
5437 r
= unit_fork_helper_process(u
, "(sd-rmrf)", &pid
);
5441 int ret
= EXIT_SUCCESS
;
5443 STRV_FOREACH(i
, paths
) {
5444 r
= rm_rf(*i
, REMOVE_ROOT
|REMOVE_PHYSICAL
|REMOVE_MISSING_OK
);
5446 log_error_errno(r
, "Failed to remove '%s': %m", *i
);
5454 r
= unit_watch_pidref(u
, &pid
, /* exclusive= */ true);
5458 *ret_pid
= TAKE_PIDREF(pid
);
5462 static void unit_update_dependency_mask(Hashmap
*deps
, Unit
*other
, UnitDependencyInfo di
) {
5466 if (di
.origin_mask
== 0 && di
.destination_mask
== 0)
5467 /* No bit set anymore, let's drop the whole entry */
5468 assert_se(hashmap_remove(deps
, other
));
5470 /* Mask was reduced, let's update the entry */
5471 assert_se(hashmap_update(deps
, other
, di
.data
) == 0);
5474 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5478 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5483 HASHMAP_FOREACH(deps
, u
->dependencies
) {
5487 UnitDependencyInfo di
;
5492 HASHMAP_FOREACH_KEY(di
.data
, other
, deps
) {
5493 Hashmap
*other_deps
;
5495 if (FLAGS_SET(~mask
, di
.origin_mask
))
5498 di
.origin_mask
&= ~mask
;
5499 unit_update_dependency_mask(deps
, other
, di
);
5501 /* We updated the dependency from our unit to the other unit now. But most
5502 * dependencies imply a reverse dependency. Hence, let's delete that one
5503 * too. For that we go through all dependency types on the other unit and
5504 * delete all those which point to us and have the right mask set. */
5506 HASHMAP_FOREACH(other_deps
, other
->dependencies
) {
5507 UnitDependencyInfo dj
;
5509 dj
.data
= hashmap_get(other_deps
, u
);
5510 if (FLAGS_SET(~mask
, dj
.destination_mask
))
5513 dj
.destination_mask
&= ~mask
;
5514 unit_update_dependency_mask(other_deps
, u
, dj
);
5517 unit_add_to_gc_queue(other
);
5519 /* The unit 'other' may not be wanted by the unit 'u'. */
5520 unit_submit_to_stop_when_unneeded_queue(other
);
5530 static int unit_get_invocation_path(Unit
*u
, char **ret
) {
5537 if (MANAGER_IS_SYSTEM(u
->manager
))
5538 p
= strjoin("/run/systemd/units/invocation:", u
->id
);
5540 _cleanup_free_
char *user_path
= NULL
;
5541 r
= xdg_user_runtime_dir(&user_path
, "/systemd/units/invocation:");
5544 p
= strjoin(user_path
, u
->id
);
5554 static int unit_export_invocation_id(Unit
*u
) {
5555 _cleanup_free_
char *p
= NULL
;
5560 if (u
->exported_invocation_id
)
5563 if (sd_id128_is_null(u
->invocation_id
))
5566 r
= unit_get_invocation_path(u
, &p
);
5568 return log_unit_debug_errno(u
, r
, "Failed to get invocation path: %m");
5570 r
= symlink_atomic_label(u
->invocation_id_string
, p
);
5572 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5574 u
->exported_invocation_id
= true;
5578 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5586 if (u
->exported_log_level_max
)
5589 if (c
->log_level_max
< 0)
5592 assert(c
->log_level_max
<= 7);
5594 buf
[0] = '0' + c
->log_level_max
;
5597 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5598 r
= symlink_atomic(buf
, p
);
5600 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5602 u
->exported_log_level_max
= true;
5606 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5607 _cleanup_close_
int fd
= -EBADF
;
5608 struct iovec
*iovec
;
5615 if (u
->exported_log_extra_fields
)
5618 if (c
->n_log_extra_fields
<= 0)
5621 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5622 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5624 for (size_t i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5625 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5627 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5628 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5631 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5632 pattern
= strjoina(p
, ".XXXXXX");
5634 fd
= mkostemp_safe(pattern
);
5636 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5638 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5640 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5644 (void) fchmod(fd
, 0644);
5646 if (rename(pattern
, p
) < 0) {
5647 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5651 u
->exported_log_extra_fields
= true;
5655 (void) unlink(pattern
);
5659 static int unit_export_log_ratelimit_interval(Unit
*u
, const ExecContext
*c
) {
5660 _cleanup_free_
char *buf
= NULL
;
5667 if (u
->exported_log_ratelimit_interval
)
5670 if (c
->log_ratelimit_interval_usec
== 0)
5673 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5675 if (asprintf(&buf
, "%" PRIu64
, c
->log_ratelimit_interval_usec
) < 0)
5678 r
= symlink_atomic(buf
, p
);
5680 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5682 u
->exported_log_ratelimit_interval
= true;
5686 static int unit_export_log_ratelimit_burst(Unit
*u
, const ExecContext
*c
) {
5687 _cleanup_free_
char *buf
= NULL
;
5694 if (u
->exported_log_ratelimit_burst
)
5697 if (c
->log_ratelimit_burst
== 0)
5700 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5702 if (asprintf(&buf
, "%u", c
->log_ratelimit_burst
) < 0)
5705 r
= symlink_atomic(buf
, p
);
5707 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5709 u
->exported_log_ratelimit_burst
= true;
5713 void unit_export_state_files(Unit
*u
) {
5714 const ExecContext
*c
;
5721 if (MANAGER_IS_TEST_RUN(u
->manager
))
5724 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5725 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5726 * the IPC system itself and PID 1 also log to the journal.
5728 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5729 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5730 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5731 * namespace at least.
5733 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5734 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5737 (void) unit_export_invocation_id(u
);
5739 if (!MANAGER_IS_SYSTEM(u
->manager
))
5742 c
= unit_get_exec_context(u
);
5744 (void) unit_export_log_level_max(u
, c
);
5745 (void) unit_export_log_extra_fields(u
, c
);
5746 (void) unit_export_log_ratelimit_interval(u
, c
);
5747 (void) unit_export_log_ratelimit_burst(u
, c
);
5751 void unit_unlink_state_files(Unit
*u
) {
5759 /* Undoes the effect of unit_export_state() */
5761 if (u
->exported_invocation_id
) {
5762 _cleanup_free_
char *invocation_path
= NULL
;
5763 int r
= unit_get_invocation_path(u
, &invocation_path
);
5765 (void) unlink(invocation_path
);
5766 u
->exported_invocation_id
= false;
5770 if (!MANAGER_IS_SYSTEM(u
->manager
))
5773 if (u
->exported_log_level_max
) {
5774 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5777 u
->exported_log_level_max
= false;
5780 if (u
->exported_log_extra_fields
) {
5781 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5784 u
->exported_log_extra_fields
= false;
5787 if (u
->exported_log_ratelimit_interval
) {
5788 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5791 u
->exported_log_ratelimit_interval
= false;
5794 if (u
->exported_log_ratelimit_burst
) {
5795 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5798 u
->exported_log_ratelimit_burst
= false;
5802 int unit_prepare_exec(Unit
*u
) {
5807 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5808 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5809 r
= bpf_firewall_load_custom(u
);
5813 /* Prepares everything so that we can fork of a process for this unit */
5815 (void) unit_realize_cgroup(u
);
5817 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
5818 if (crt
&& crt
->reset_accounting
) {
5819 (void) unit_reset_accounting(u
);
5820 crt
->reset_accounting
= false;
5823 unit_export_state_files(u
);
5825 r
= unit_setup_exec_runtime(u
);
/* Returns true for processes whose comm name starts with '(' — most likely one of our
 * own short-lived helper processes (PAM?), hence not worth reporting as left-over. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return *comm == '(';
}
5836 int unit_log_leftover_process_start(const PidRef
*pid
, int sig
, void *userdata
) {
5837 _cleanup_free_
char *comm
= NULL
;
5839 assert(pidref_is_set(pid
));
5841 (void) pidref_get_comm(pid
, &comm
);
5843 if (ignore_leftover_process(comm
))
5846 /* During start we print a warning */
5848 log_unit_warning(userdata
,
5849 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5850 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5851 pid
->pid
, strna(comm
));
5856 int unit_log_leftover_process_stop(const PidRef
*pid
, int sig
, void *userdata
) {
5857 _cleanup_free_
char *comm
= NULL
;
5859 assert(pidref_is_set(pid
));
5861 (void) pidref_get_comm(pid
, &comm
);
5863 if (ignore_leftover_process(comm
))
5866 /* During stop we only print an informational message */
5868 log_unit_info(userdata
,
5869 "Unit process " PID_FMT
" (%s) remains running after unit stopped.",
5870 pid
->pid
, strna(comm
));
5875 int unit_warn_leftover_processes(Unit
*u
, cg_kill_log_func_t log_func
) {
5878 (void) unit_pick_cgroup_path(u
);
5880 CGroupRuntime
*crt
= unit_get_cgroup_runtime(u
);
5882 if (!crt
|| !crt
->cgroup_path
)
5885 return cg_kill_recursive(
5894 bool unit_needs_console(Unit
*u
) {
5896 UnitActiveState state
;
5900 state
= unit_active_state(u
);
5902 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5905 if (UNIT_VTABLE(u
)->needs_console
)
5906 return UNIT_VTABLE(u
)->needs_console(u
);
5908 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5909 ec
= unit_get_exec_context(u
);
5913 return exec_context_may_touch_console(ec
);
5916 int unit_pid_attachable(Unit
*u
, const PidRef
*pid
, sd_bus_error
*error
) {
5921 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5922 * and not a kernel thread either */
5924 /* First, a simple range check */
5925 if (!pidref_is_set(pid
))
5926 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier is not valid.");
5928 /* Some extra safety check */
5929 if (pid
->pid
== 1 || pidref_is_self(pid
))
5930 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
->pid
);
5932 /* Don't even begin to bother with kernel threads */
5933 r
= pidref_is_kernel_thread(pid
);
5935 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
->pid
);
5937 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
->pid
);
5939 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
->pid
);
5944 void unit_log_success(Unit
*u
) {
5947 /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
5948 * This message has low information value for regular users and it might be a bit overwhelming on a system with
5949 * a lot of devices. */
5951 MANAGER_IS_USER(u
->manager
) ? LOG_DEBUG
: LOG_INFO
,
5952 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5953 LOG_UNIT_INVOCATION_ID(u
),
5954 LOG_UNIT_MESSAGE(u
, "Deactivated successfully."));
5957 void unit_log_failure(Unit
*u
, const char *result
) {
5961 log_unit_struct(u
, LOG_WARNING
,
5962 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5963 LOG_UNIT_INVOCATION_ID(u
),
5964 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5965 "UNIT_RESULT=%s", result
);
5968 void unit_log_skip(Unit
*u
, const char *result
) {
5972 log_unit_struct(u
, LOG_INFO
,
5973 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR
,
5974 LOG_UNIT_INVOCATION_ID(u
),
5975 LOG_UNIT_MESSAGE(u
, "Skipped due to '%s'.", result
),
5976 "UNIT_RESULT=%s", result
);
5979 void unit_log_process_exit(
5982 const char *command
,
5992 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5993 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5994 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5998 else if (code
== CLD_EXITED
)
6001 level
= LOG_WARNING
;
6003 log_unit_struct(u
, level
,
6004 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
6005 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s%s",
6007 sigchld_code_to_string(code
), status
,
6008 strna(code
== CLD_EXITED
6009 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
6010 : signal_to_string(status
)),
6011 success
? " (success)" : ""),
6012 "EXIT_CODE=%s", sigchld_code_to_string(code
),
6013 "EXIT_STATUS=%i", status
,
6014 "COMMAND=%s", strna(command
),
6015 LOG_UNIT_INVOCATION_ID(u
));
6018 int unit_exit_status(Unit
*u
) {
6021 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
6022 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
6023 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
6024 * service process has exited abnormally (signal/coredump). */
6026 if (!UNIT_VTABLE(u
)->exit_status
)
6029 return UNIT_VTABLE(u
)->exit_status(u
);
6032 int unit_failure_action_exit_status(Unit
*u
) {
6037 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6039 if (u
->failure_action_exit_status
>= 0)
6040 return u
->failure_action_exit_status
;
6042 r
= unit_exit_status(u
);
6043 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6049 int unit_success_action_exit_status(Unit
*u
) {
6054 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6056 if (u
->success_action_exit_status
>= 0)
6057 return u
->success_action_exit_status
;
6059 r
= unit_exit_status(u
);
6060 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6066 int unit_test_trigger_loaded(Unit
*u
) {
6069 /* Tests whether the unit to trigger is loaded */
6071 trigger
= UNIT_TRIGGER(u
);
6073 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6074 "Refusing to start, no unit to trigger.");
6075 if (trigger
->load_state
!= UNIT_LOADED
)
6076 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6077 "Refusing to start, unit %s to trigger not loaded.", trigger
->id
);
6082 void unit_destroy_runtime_data(Unit
*u
, const ExecContext
*context
) {
6086 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6087 if (context
->runtime_directory_preserve_mode
== EXEC_PRESERVE_NO
)
6088 exec_context_destroy_runtime_directory(context
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
6090 exec_context_destroy_credentials(u
);
6091 exec_context_destroy_mount_ns_dir(u
);
6094 int unit_clean(Unit
*u
, ExecCleanMask mask
) {
6095 UnitActiveState state
;
6099 /* Special return values:
6101 * -EOPNOTSUPP → cleaning not supported for this unit type
6102 * -EUNATCH → cleaning not defined for this resource type
6103 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6104 * a job queued or similar
6107 if (!UNIT_VTABLE(u
)->clean
)
6113 if (u
->load_state
!= UNIT_LOADED
)
6119 state
= unit_active_state(u
);
6120 if (state
!= UNIT_INACTIVE
)
6123 return UNIT_VTABLE(u
)->clean(u
, mask
);
6126 int unit_can_clean(Unit
*u
, ExecCleanMask
*ret
) {
6129 if (!UNIT_VTABLE(u
)->clean
||
6130 u
->load_state
!= UNIT_LOADED
) {
6135 /* When the clean() method is set, can_clean() really should be set too */
6136 assert(UNIT_VTABLE(u
)->can_clean
);
6138 return UNIT_VTABLE(u
)->can_clean(u
, ret
);
6141 bool unit_can_start_refuse_manual(Unit
*u
) {
6142 return unit_can_start(u
) && !u
->refuse_manual_start
;
6145 bool unit_can_stop_refuse_manual(Unit
*u
) {
6146 return unit_can_stop(u
) && !u
->refuse_manual_stop
;
6149 bool unit_can_isolate_refuse_manual(Unit
*u
) {
6150 return unit_can_isolate(u
) && !u
->refuse_manual_start
;
6153 void unit_next_freezer_state(Unit
*u
, FreezerAction action
, FreezerState
*ret
, FreezerState
*ret_target
) {
6155 FreezerState curr
, parent
, next
, tgt
;
6158 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_PARENT_FREEZE
,
6159 FREEZER_THAW
, FREEZER_PARENT_THAW
));
6163 /* This function determines the correct freezer state transitions for a unit
6164 * given the action being requested. It returns the next state, and also the "target",
6165 * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
6166 * ultimately want to achieve. */
6168 curr
= u
->freezer_state
;
6169 slice
= UNIT_GET_SLICE(u
);
6171 parent
= slice
->freezer_state
;
6173 parent
= FREEZER_RUNNING
;
6175 if (action
== FREEZER_FREEZE
) {
6176 /* We always "promote" a freeze initiated by parent into a normal freeze */
6177 if (IN_SET(curr
, FREEZER_FROZEN
, FREEZER_FROZEN_BY_PARENT
))
6178 next
= FREEZER_FROZEN
;
6180 next
= FREEZER_FREEZING
;
6181 } else if (action
== FREEZER_THAW
) {
6182 /* Thawing is the most complicated operation here, because we can't thaw a unit
6183 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
6184 * initiated by parent if the parent is frozen */
6185 if (IN_SET(curr
, FREEZER_RUNNING
, FREEZER_THAWING
, FREEZER_FREEZING_BY_PARENT
, FREEZER_FROZEN_BY_PARENT
))
6187 else if (curr
== FREEZER_FREEZING
) {
6188 if (IN_SET(parent
, FREEZER_RUNNING
, FREEZER_THAWING
))
6189 next
= FREEZER_THAWING
;
6191 next
= FREEZER_FREEZING_BY_PARENT
;
6193 assert(curr
== FREEZER_FROZEN
);
6194 if (IN_SET(parent
, FREEZER_RUNNING
, FREEZER_THAWING
))
6195 next
= FREEZER_THAWING
;
6197 next
= FREEZER_FROZEN_BY_PARENT
;
6199 } else if (action
== FREEZER_PARENT_FREEZE
) {
6200 /* We need to avoid accidentally demoting units frozen manually */
6201 if (IN_SET(curr
, FREEZER_FREEZING
, FREEZER_FROZEN
, FREEZER_FROZEN_BY_PARENT
))
6204 next
= FREEZER_FREEZING_BY_PARENT
;
6206 assert(action
== FREEZER_PARENT_THAW
);
6208 /* We don't want to thaw units from a parent if they were frozen
6209 * manually, so for such units this action is a no-op */
6210 if (IN_SET(curr
, FREEZER_RUNNING
, FREEZER_FREEZING
, FREEZER_FROZEN
))
6213 next
= FREEZER_THAWING
;
6216 tgt
= freezer_state_finish(next
);
6217 if (tgt
== FREEZER_FROZEN_BY_PARENT
)
6218 tgt
= FREEZER_FROZEN
;
6219 assert(IN_SET(tgt
, FREEZER_RUNNING
, FREEZER_FROZEN
));
6225 bool unit_can_freeze(Unit
*u
) {
6228 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
) || unit_has_name(u
, SPECIAL_INIT_SCOPE
))
6231 if (UNIT_VTABLE(u
)->can_freeze
)
6232 return UNIT_VTABLE(u
)->can_freeze(u
);
6234 return UNIT_VTABLE(u
)->freezer_action
;
6237 void unit_frozen(Unit
*u
) {
6240 u
->freezer_state
= u
->freezer_state
== FREEZER_FREEZING_BY_PARENT
6241 ? FREEZER_FROZEN_BY_PARENT
6244 log_unit_debug(u
, "Unit now %s.", freezer_state_to_string(u
->freezer_state
));
6246 bus_unit_send_pending_freezer_message(u
, false);
6249 void unit_thawed(Unit
*u
) {
6252 u
->freezer_state
= FREEZER_RUNNING
;
6254 log_unit_debug(u
, "Unit thawed.");
6256 bus_unit_send_pending_freezer_message(u
, false);
6259 int unit_freezer_action(Unit
*u
, FreezerAction action
) {
6264 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_THAW
));
6266 if (!cg_freezer_supported() || !unit_can_freeze(u
))
6272 if (u
->load_state
!= UNIT_LOADED
)
6275 s
= unit_active_state(u
);
6276 if (s
!= UNIT_ACTIVE
)
6279 if (action
== FREEZER_FREEZE
&& IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_FREEZING_BY_PARENT
))
6281 if (action
== FREEZER_THAW
&& u
->freezer_state
== FREEZER_THAWING
)
6283 if (action
== FREEZER_THAW
&& IN_SET(u
->freezer_state
, FREEZER_FREEZING_BY_PARENT
, FREEZER_FROZEN_BY_PARENT
))
6286 r
= UNIT_VTABLE(u
)->freezer_action(u
, action
);
6290 assert(IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_FREEZING_BY_PARENT
, FREEZER_THAWING
));
6294 Condition
*unit_find_failed_condition(Unit
*u
) {
6295 Condition
*failed_trigger
= NULL
;
6296 bool has_succeeded_trigger
= false;
6298 if (u
->condition_result
)
6301 LIST_FOREACH(conditions
, c
, u
->conditions
)
6303 if (c
->result
== CONDITION_SUCCEEDED
)
6304 has_succeeded_trigger
= true;
6305 else if (!failed_trigger
)
6307 } else if (c
->result
!= CONDITION_SUCCEEDED
)
6310 return failed_trigger
&& !has_succeeded_trigger
? failed_trigger
: NULL
;
6313 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
6314 [COLLECT_INACTIVE
] = "inactive",
6315 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
6318 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);
6320 Unit
* unit_has_dependency(const Unit
*u
, UnitDependencyAtom atom
, Unit
*other
) {
6325 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6326 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6327 * is NULL the first entry found), or NULL if not found. */
6329 UNIT_FOREACH_DEPENDENCY(i
, u
, atom
)
6330 if (!other
|| other
== i
)
6336 int unit_get_dependency_array(const Unit
*u
, UnitDependencyAtom atom
, Unit
***ret_array
) {
6337 _cleanup_free_ Unit
**array
= NULL
;
6344 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
6345 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
6346 * while the dependency table is continuously updated. */
6348 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6349 if (!GREEDY_REALLOC(array
, n
+ 1))
6355 *ret_array
= TAKE_PTR(array
);
6357 assert(n
<= INT_MAX
);
6361 int unit_get_transitive_dependency_set(Unit
*u
, UnitDependencyAtom atom
, Set
**ret
) {
6362 _cleanup_set_free_ Set
*units
= NULL
, *queue
= NULL
;
6369 /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */
6372 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6373 r
= set_ensure_put(&units
, NULL
, other
);
6378 r
= set_ensure_put(&queue
, NULL
, other
);
6382 } while ((u
= set_steal_first(queue
)));
6384 *ret
= TAKE_PTR(units
);
6390 sd_event_source
**source
,
6393 sd_event_time_handler_t handler
) {
6402 if (usec
== USEC_INFINITY
)
6403 return sd_event_source_set_enabled(*source
, SD_EVENT_OFF
);
6405 r
= (relative
? sd_event_source_set_time_relative
: sd_event_source_set_time
)(*source
, usec
);
6409 return sd_event_source_set_enabled(*source
, SD_EVENT_ONESHOT
);
6412 if (usec
== USEC_INFINITY
)
6415 r
= (relative
? sd_event_add_time_relative
: sd_event_add_time
)(
6425 const char *d
= strjoina(unit_type_to_string(u
->type
), "-timer");
6426 (void) sd_event_source_set_description(*source
, d
);
6431 static int unit_get_nice(Unit
*u
) {
6434 ec
= unit_get_exec_context(u
);
6435 return ec
? ec
->nice
: 0;
6438 static uint64_t unit_get_cpu_weight(Unit
*u
) {
6441 cc
= unit_get_cgroup_context(u
);
6442 return cc
? cgroup_context_cpu_weight(cc
, manager_state(u
->manager
)) : CGROUP_WEIGHT_DEFAULT
;
6445 int unit_compare_priority(Unit
*a
, Unit
*b
) {
6448 ret
= CMP(a
->type
, b
->type
);
6452 ret
= CMP(unit_get_cpu_weight(a
), unit_get_cpu_weight(b
));
6456 ret
= CMP(unit_get_nice(a
), unit_get_nice(b
));
6460 return strcmp(a
->id
, b
->id
);
6463 const ActivationDetailsVTable
* const activation_details_vtable
[_UNIT_TYPE_MAX
] = {
6464 [UNIT_PATH
] = &activation_details_path_vtable
,
6465 [UNIT_TIMER
] = &activation_details_timer_vtable
,
6468 ActivationDetails
*activation_details_new(Unit
*trigger_unit
) {
6469 _cleanup_free_ ActivationDetails
*details
= NULL
;
6471 assert(trigger_unit
);
6472 assert(trigger_unit
->type
!= _UNIT_TYPE_INVALID
);
6473 assert(trigger_unit
->id
);
6475 details
= malloc0(activation_details_vtable
[trigger_unit
->type
]->object_size
);
6479 *details
= (ActivationDetails
) {
6481 .trigger_unit_type
= trigger_unit
->type
,
6484 details
->trigger_unit_name
= strdup(trigger_unit
->id
);
6485 if (!details
->trigger_unit_name
)
6488 if (ACTIVATION_DETAILS_VTABLE(details
)->init
)
6489 ACTIVATION_DETAILS_VTABLE(details
)->init(details
, trigger_unit
);
6491 return TAKE_PTR(details
);
6494 static ActivationDetails
*activation_details_free(ActivationDetails
*details
) {
6498 if (ACTIVATION_DETAILS_VTABLE(details
)->done
)
6499 ACTIVATION_DETAILS_VTABLE(details
)->done(details
);
6501 free(details
->trigger_unit_name
);
6503 return mfree(details
);
6506 void activation_details_serialize(ActivationDetails
*details
, FILE *f
) {
6507 if (!details
|| details
->trigger_unit_type
== _UNIT_TYPE_INVALID
)
6510 (void) serialize_item(f
, "activation-details-unit-type", unit_type_to_string(details
->trigger_unit_type
));
6511 if (details
->trigger_unit_name
)
6512 (void) serialize_item(f
, "activation-details-unit-name", details
->trigger_unit_name
);
6513 if (ACTIVATION_DETAILS_VTABLE(details
)->serialize
)
6514 ACTIVATION_DETAILS_VTABLE(details
)->serialize(details
, f
);
6517 int activation_details_deserialize(const char *key
, const char *value
, ActivationDetails
**details
) {
6527 if (!streq(key
, "activation-details-unit-type"))
6530 t
= unit_type_from_string(value
);
6534 /* The activation details vtable has defined ops only for path and timer units */
6535 if (!activation_details_vtable
[t
])
6538 *details
= malloc0(activation_details_vtable
[t
]->object_size
);
6542 **details
= (ActivationDetails
) {
6544 .trigger_unit_type
= t
,
6550 if (streq(key
, "activation-details-unit-name")) {
6551 r
= free_and_strdup(&(*details
)->trigger_unit_name
, value
);
6558 if (ACTIVATION_DETAILS_VTABLE(*details
)->deserialize
)
6559 return ACTIVATION_DETAILS_VTABLE(*details
)->deserialize(key
, value
, details
);
6564 int activation_details_append_env(ActivationDetails
*details
, char ***strv
) {
6572 if (!isempty(details
->trigger_unit_name
)) {
6573 char *s
= strjoin("TRIGGER_UNIT=", details
->trigger_unit_name
);
6577 r
= strv_consume(strv
, TAKE_PTR(s
));
6582 if (ACTIVATION_DETAILS_VTABLE(details
)->append_env
) {
6583 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_env(details
, strv
);
6588 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of variables added to the env block */
6591 int activation_details_append_pair(ActivationDetails
*details
, char ***strv
) {
6599 if (!isempty(details
->trigger_unit_name
)) {
6600 r
= strv_extend_many(strv
, "trigger_unit", details
->trigger_unit_name
);
6605 if (ACTIVATION_DETAILS_VTABLE(details
)->append_pair
) {
6606 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_pair(details
, strv
);
6611 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of pairs added to the strv */
/* Generates activation_details_ref()/activation_details_unref() around the free function. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);
6616 static const char* const unit_mount_dependency_type_table
[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX
] = {
6617 [UNIT_MOUNT_WANTS
] = "WantsMountsFor",
6618 [UNIT_MOUNT_REQUIRES
] = "RequiresMountsFor",
6621 DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type
, UnitMountDependencyType
);
6623 UnitDependency
unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t
) {
6626 case UNIT_MOUNT_WANTS
:
6629 case UNIT_MOUNT_REQUIRES
:
6630 return UNIT_REQUIRES
;
6633 assert_not_reached();