/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>

#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "bus-common-errors.h"
#include "cgroup-util.h"
#include "dbus-unit.h"
#include "fileio-label.h"
#include "format-util.h"
#include "id128-util.h"
#include "load-dropin.h"
#include "load-fragment.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "serialize.h"
#include "signal-util.h"
#include "sparse-endian.h"
#include "specifier.h"
#include "stat-util.h"
#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
#include "terminal-util.h"
#include "tmpfile-util.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
58 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
59 [UNIT_SERVICE
] = &service_vtable
,
60 [UNIT_SOCKET
] = &socket_vtable
,
61 [UNIT_TARGET
] = &target_vtable
,
62 [UNIT_DEVICE
] = &device_vtable
,
63 [UNIT_MOUNT
] = &mount_vtable
,
64 [UNIT_AUTOMOUNT
] = &automount_vtable
,
65 [UNIT_SWAP
] = &swap_vtable
,
66 [UNIT_TIMER
] = &timer_vtable
,
67 [UNIT_PATH
] = &path_vtable
,
68 [UNIT_SLICE
] = &slice_vtable
,
69 [UNIT_SCOPE
] = &scope_vtable
,
/* Forward declaration — presumably logs a warning when a dependency between
 * 'u' and 'other' is dropped or suspicious (defined later in the file;
 * definition not visible in this excerpt — confirm against full source). */
static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
/* Allocates a fresh Unit object of 'size' bytes for manager 'm' and seeds
 * every field that must not start at zero.
 * NOTE(review): this excerpt is elided — the allocation of 'u' and the final
 * return are not visible here; only the field initialization is shown. */
Unit *unit_new(Manager *m, size_t size) {

        /* Callers must request at least the base Unit; subtypes embed Unit first. */
        assert(size >= sizeof(Unit));

        u->names = set_new(&string_hash_ops);

        /* Untyped until the first name assigns a concrete unit type. */
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        /* -1 == "no inotify watch installed yet" */
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        /* -1 == "no exit status configured" for either action */
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All BPF/IP accounting fds start out closed. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        /* Start limit follows manager-wide defaults; auto-stop is fixed at 16 events per 10s. */
        RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
        RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);

        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;
/* Convenience wrapper: allocates a unit of 'size' bytes and registers 'name'
 * on it; hands the result out via 'ret'.
 * NOTE(review): elided excerpt — OOM/error checks and the final TAKE_PTR/
 * return are not visible here. */
int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
        _cleanup_(unit_freep) Unit *u = NULL;

        u = unit_new(m, size);

        r = unit_add_name(u, name);
139 bool unit_has_name(const Unit
*u
, const char *name
) {
143 return set_contains(u
->names
, (char*) name
);
/* Seeds the unit's cgroup/exec/kill contexts (where the unit type has them)
 * with the manager-wide defaults, then chains into the type's own init hook.
 * NOTE(review): elided excerpt — local declarations of cc/ec/kc and the NULL
 * checks around each context are not visible here. */
static void unit_init(Unit *u) {
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        cgroup_context_init(cc);

        /* Copy in the manager defaults into the cgroup
         * context, _before_ the rest of the settings have
         * been initialized */

        cc->cpu_accounting = u->manager->default_cpu_accounting;
        cc->io_accounting = u->manager->default_io_accounting;
        cc->blockio_accounting = u->manager->default_blockio_accounting;
        cc->memory_accounting = u->manager->default_memory_accounting;
        cc->tasks_accounting = u->manager->default_tasks_accounting;
        cc->ip_accounting = u->manager->default_ip_accounting;

        /* Slices do not get the default tasks limit applied. */
        if (u->type != UNIT_SLICE)
                cc->tasks_max = u->manager->default_tasks_max;

        ec = unit_get_exec_context(u);
        exec_context_init(ec);

        /* The system manager shares one session keyring; user managers inherit. */
        ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
                EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;

        kc = unit_get_kill_context(u);
        kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
/* Registers an additional name (alias) for this unit, validating it and
 * entering it into the manager's global unit table.
 * NOTE(review): heavily elided excerpt — the error-return bodies after each
 * check, the declarations of r/t and the trailing return are not visible;
 * most 'if's below are missing their statements. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *s = NULL, *i = NULL;

        /* A template name must first be instantiated with our instance string. */
        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {

                r = unit_name_replace_instance(text, u->instance, &s);

        /* Already one of our names? Already owned by another unit? */
        if (set_contains(u->names, s))

        if (hashmap_contains(u->manager->units, s))

        if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))

        t = unit_name_to_type(s);

        /* All names of one unit must agree on the unit type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)

        r = unit_name_to_instance(s, &i);

        if (i && !unit_type_may_template(t))

        /* Ensure that this unit is either instanced or not instanced,
         * but not both. Note that we do allow names with different
         * instance names however! */
        if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)

        if (!unit_type_may_alias(t) && !set_isempty(u->names))

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)

        r = set_put(u->names, s);

        r = hashmap_put(u->manager->units, s, u);
                /* Roll back the name-set entry when global registration failed. */
                (void) set_remove(u->names, s);

        /* First name ever: adopt type + instance and link into the per-type list. */
        if (u->type == _UNIT_TYPE_INVALID) {

        u->instance = TAKE_PTR(i);

        LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);

        unit_add_to_dbus_queue(u);
/* Switches the unit's primary id to one of its already-registered names.
 * NOTE(review): elided excerpt — locals (s, i, r), error checks and the
 * final assignment of u->id/u->instance are not visible here. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;

        /* Template names are instantiated before lookup. */
        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {

                r = unit_name_replace_instance(name, u->instance, &t);

        /* Selects one of the names of this unit as the id */
        s = set_get(u->names, (char*) name);

        /* Determine the new instance from the new id */
        r = unit_name_to_instance(s, &i);

        unit_add_to_dbus_queue(u);
313 int unit_set_description(Unit
*u
, const char *description
) {
318 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
322 unit_add_to_dbus_queue(u
);
/* NOTE(review): elided excerpt — the 'return true/false' statements after
 * each check, the 'r' declaration and several braces are not visible here. */
bool unit_may_gc(Unit *u) {
        UnitActiveState state;

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        /* Keep units that are still referenced on the bus. */
        if (sd_bus_track_count(u->bus_track) > 0)

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))

                assert_not_reached("Unknown garbage collection mode");

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);

        /* Finally let the unit type veto collection. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
396 void unit_add_to_load_queue(Unit
*u
) {
398 assert(u
->type
!= _UNIT_TYPE_INVALID
);
400 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
403 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
404 u
->in_load_queue
= true;
407 void unit_add_to_cleanup_queue(Unit
*u
) {
410 if (u
->in_cleanup_queue
)
413 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
414 u
->in_cleanup_queue
= true;
417 void unit_add_to_gc_queue(Unit
*u
) {
420 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
426 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
427 u
->in_gc_queue
= true;
430 void unit_add_to_dbus_queue(Unit
*u
) {
432 assert(u
->type
!= _UNIT_TYPE_INVALID
);
434 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
437 /* Shortcut things if nobody cares */
438 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
439 sd_bus_track_count(u
->bus_track
) <= 0 &&
440 set_isempty(u
->manager
->private_buses
)) {
441 u
->sent_dbus_new_signal
= true;
445 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
446 u
->in_dbus_queue
= true;
449 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
452 if (u
->in_stop_when_unneeded_queue
)
455 if (!u
->stop_when_unneeded
)
458 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
461 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
462 u
->in_stop_when_unneeded_queue
= true;
465 static void bidi_set_free(Unit
*u
, Hashmap
*h
) {
472 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
474 HASHMAP_FOREACH_KEY(v
, other
, h
, i
) {
477 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
478 hashmap_remove(other
->dependencies
[d
], u
);
480 unit_add_to_gc_queue(other
);
/* Deletes the on-disk artifacts of a transient unit: its generated fragment
 * and any drop-ins that live under the transient lookup path.
 * NOTE(review): elided excerpt — transience guard, NULL checks, the unlink
 * of the drop-in files and closing braces are not visible here. */
static void unit_remove_transient(Unit *u) {
        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
/* Drains u->requires_mounts_for, removing 'u' from the manager's reverse map
 * (units_requiring_mounts_for) for every path prefix, then frees the map.
 * NOTE(review): elided excerpt — the enclosing loop header, NULL checks on
 * 'x' and closing braces are not visible here. */
static void unit_free_requires_mounts_for(Unit *u) {
        _cleanup_free_ char *path;

        path = hashmap_steal_first_key(u->requires_mounts_for);

        /* VLA sized to hold each successive prefix of 'path'. */
        char s[strlen(path) + 1];

        PATH_FOREACH_PREFIX_MORE(s, path) {

                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);

                (void) set_remove(x, u);

                /* Last unit interested in this prefix: drop the whole entry. */
                if (set_isempty(x)) {
                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
551 static void unit_done(Unit
*u
) {
560 if (UNIT_VTABLE(u
)->done
)
561 UNIT_VTABLE(u
)->done(u
);
563 ec
= unit_get_exec_context(u
);
565 exec_context_done(ec
);
567 cc
= unit_get_cgroup_context(u
);
569 cgroup_context_done(cc
);
/* Tears down a Unit completely: notifies the parent slice, removes on-disk
 * state, detaches from every manager-side index and queue, closes BPF fds
 * and frees all owned memory.
 * NOTE(review): elided excerpt — asserts, several locals (t, i, d), a few
 * guards/braces and trailing free()+closing brace are not visible here. */
void unit_free(Unit *u) {
        if (UNIT_ISSET(u->slice)) {
                /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
                unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));

                /* And make sure the parent is realized again, updating cgroup memberships */
                unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));

        u->transient_file = safe_fclose(u->transient_file);

        /* Keep transient files across daemon reloads; only remove on real teardown. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);

        unit_free_requires_mounts_for(u);

        /* Drop every one of our names from the manager's global name table. */
        SET_FOREACH(t, u->names, i)
                hashmap_remove_value(u->manager->units, t, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

                /* NOTE(review): the 'if (u->in_gc_queue)' guard for this removal
                 * was elided in this excerpt — confirm against full source. */
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close all BPF map fds (safe_close tolerates -1). */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);

        free(u->job_timeout_reboot_arg);

        set_free_free(u->names);
706 UnitActiveState
unit_active_state(Unit
*u
) {
709 if (u
->load_state
== UNIT_MERGED
)
710 return unit_active_state(unit_follow_merge(u
));
712 /* After a reload it might happen that a unit is not correctly
713 * loaded but still has a process around. That's why we won't
714 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
716 return UNIT_VTABLE(u
)->active_state(u
);
719 const char* unit_sub_state_to_string(Unit
*u
) {
722 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
725 static int set_complete_move(Set
**s
, Set
**other
) {
733 return set_move(*s
, *other
);
735 *s
= TAKE_PTR(*other
);
740 static int hashmap_complete_move(Hashmap
**s
, Hashmap
**other
) {
748 return hashmap_move(*s
, *other
);
750 *s
= TAKE_PTR(*other
);
755 static int merge_names(Unit
*u
, Unit
*other
) {
763 r
= set_complete_move(&u
->names
, &other
->names
);
767 set_free_free(other
->names
);
771 SET_FOREACH(t
, u
->names
, i
)
772 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
777 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
782 assert(d
< _UNIT_DEPENDENCY_MAX
);
785 * If u does not have this dependency set allocated, there is no need
786 * to reserve anything. In that case other's set will be transferred
787 * as a whole to u by complete_move().
789 if (!u
->dependencies
[d
])
792 /* merge_dependencies() will skip a u-on-u dependency */
793 n_reserve
= hashmap_size(other
->dependencies
[d
]) - !!hashmap_get(other
->dependencies
[d
], u
);
795 return hashmap_reserve(u
->dependencies
[d
], n_reserve
);
/* NOTE(review): elided excerpt — locals (v, back, i, k, r), the 'back == u'
 * branch structure and several braces are not visible here; the indentation
 * below mirrors the fragments only. */
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {

                /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
                 * pointers back, and let's fix them up, to instead point to 'u'. */

                for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {

                        /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);

                                UnitDependencyInfo di_u, di_other, di_merged;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                di_merged = (UnitDependencyInfo) {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,

                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
/* Merges unit 'other' into 'u': names, references and dependencies are all
 * transferred, after which 'other' becomes a UNIT_MERGED stub pointing at 'u'.
 * NOTE(review): elided excerpt — the error-return bodies after each validity
 * check, locals (d, r) and the trailing 'return 0;' are not visible here. */
int unit_merge(Unit *u, Unit *other) {
        const char *other_id = NULL;

        assert(u->manager == other->manager);
        assert(u->type != _UNIT_TYPE_INVALID);

        other = unit_follow_merge(other);

        /* Both units must be of the same type and instancing style. */
        if (u->type != other->type)

        if (!u->instance != !other->instance)

        if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */

        if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))

        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))

        /* Copy the id before merge_names() frees other's name storage. */
        other_id = strdupa(other->id);

        /* Make reservations to ensure merge_dependencies() won't fail */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                r = reserve_dependencies(u, other, d);
                /*
                 * We don't rollback reservations if we fail. We don't have
                 * a way to undo reservations. A reservation is not a leak.
                 */

        r = merge_names(u, other);

        /* Redirect all references */
        while (other->refs_by_target)
                unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);

        /* Merge dependencies */
        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                merge_dependencies(u, other, other_id, d);

        other->load_state = UNIT_MERGED;
        other->merged_into = u;

        /* If there is still some data attached to the other node, we
         * don't need it anymore, and can free it. */
        if (other->load_state != UNIT_STUB)
                if (UNIT_VTABLE(other)->done)
                        UNIT_VTABLE(other)->done(other);

        unit_add_to_dbus_queue(u);
        unit_add_to_cleanup_queue(other);
940 int unit_merge_by_name(Unit
*u
, const char *name
) {
941 _cleanup_free_
char *s
= NULL
;
948 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
952 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
959 other
= manager_get_unit(u
->manager
, name
);
961 return unit_merge(u
, other
);
963 return unit_add_name(u
, name
);
966 Unit
* unit_follow_merge(Unit
*u
) {
969 while (u
->load_state
== UNIT_MERGED
)
970 assert_se(u
= u
->merged_into
);
/* Adds the implicit dependencies every unit with an ExecContext needs: mount
 * dependencies for its directories, tmpfiles-setup ordering for PrivateTmp=,
 * and journald ordering when its stdio goes to the journal/kmsg/syslog.
 * NOTE(review): elided excerpt — error-return bodies, 'continue's, the
 * c->root_image guard and several braces are not visible here. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        ExecDirectoryType dt;

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);

                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])

                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = strjoin(u->manager->prefix[dt], "/", *dp);

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);

        /* The remaining dependencies only make sense for the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))

        if (c->private_tmp) {
                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
                    EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))

        /* If syslog or kernel logging is requested, make sure our own
         * logging daemon is run first. */

        r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1055 const char *unit_description(Unit
*u
) {
1059 return u
->description
;
1061 return strna(u
->id
);
/* Prints a "kind=flag,flag,…" description of a UnitDependencyMask to 'f',
 * using *space to coordinate separators with the caller.
 * NOTE(review): elided excerpt — the declaration header of the static
 * 'table' array (the stray 'UnitDependencyMask mask;' below is its struct
 * member), the separator handling and the early-exit when mask drains are
 * not visible here. */
static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
        UnitDependencyMask mask;
                { UNIT_DEPENDENCY_FILE, "file" },
                { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
                { UNIT_DEPENDENCY_DEFAULT, "default" },
                { UNIT_DEPENDENCY_UDEV, "udev" },
                { UNIT_DEPENDENCY_PATH, "path" },
                { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
                { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
                { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },

        for (i = 0; i < ELEMENTSOF(table); i++) {

                if (FLAGS_SET(mask, table[i].mask)) {

                        fputs(table[i].name, f);

                        /* Clear handled bits so the loop can stop once mask is empty. */
                        mask &= ~table[i].mask;
/* Dumps the unit's full state in human-readable form to 'f', each line
 * prefixed with 'prefix' (type-specific state indented one tab further).
 * NOTE(review): elided excerpt — the 'fprintf(f,' openers before the big
 * format-string runs, several locals (m, t, i, j, n, d, r, space, path,
 * following, other) and various guards/braces are not visible here. */
void unit_dump(Unit *u, FILE *f, const char *prefix) {
        const char *prefix2;
                timestamp0[FORMAT_TIMESTAMP_MAX],
                timestamp1[FORMAT_TIMESTAMP_MAX],
                timestamp2[FORMAT_TIMESTAMP_MAX],
                timestamp3[FORMAT_TIMESTAMP_MAX],
                timestamp4[FORMAT_TIMESTAMP_MAX],
                timespan[FORMAT_TIMESPAN_MAX];

        _cleanup_set_free_ Set *following_set = NULL;

        assert(u->type >= 0);

        prefix = strempty(prefix);
        prefix2 = strjoina(prefix, "\t");

                "%s\tDescription: %s\n"
                "%s\tInstance: %s\n"
                "%s\tUnit Load State: %s\n"
                "%s\tUnit Active State: %s\n"
                "%s\tState Change Timestamp: %s\n"
                "%s\tInactive Exit Timestamp: %s\n"
                "%s\tActive Enter Timestamp: %s\n"
                "%s\tActive Exit Timestamp: %s\n"
                "%s\tInactive Enter Timestamp: %s\n"
                "%s\tNeed Daemon Reload: %s\n"
                "%s\tTransient: %s\n"
                "%s\tPerpetual: %s\n"
                "%s\tGarbage Collection Mode: %s\n"
                "%s\tCGroup realized: %s\n",
                prefix, unit_description(u),
                prefix, strna(u->instance),
                prefix, unit_load_state_to_string(u->load_state),
                prefix, unit_active_state_to_string(unit_active_state(u)),
                prefix, strna(format_timestamp(timestamp0, sizeof(timestamp0), u->state_change_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->inactive_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp2, sizeof(timestamp2), u->active_enter_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp3, sizeof(timestamp3), u->active_exit_timestamp.realtime)),
                prefix, strna(format_timestamp(timestamp4, sizeof(timestamp4), u->inactive_enter_timestamp.realtime)),
                prefix, yes_no(unit_may_gc(u)),
                prefix, yes_no(unit_need_daemon_reload(u)),
                prefix, yes_no(u->transient),
                prefix, yes_no(u->perpetual),
                prefix, collect_mode_to_string(u->collect_mode),
                prefix, strna(unit_slice_name(u)),
                prefix, strna(u->cgroup_path),
                prefix, yes_no(u->cgroup_realized));

        if (u->cgroup_realized_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
                fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));

        if (u->cgroup_enabled_mask != 0) {
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
                fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));

        m = unit_get_own_mask(u);
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));

        m = unit_get_members_mask(u);
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));

        m = unit_get_delegate_mask(u);
                _cleanup_free_ char *s = NULL;
                (void) cg_mask_to_string(m, &s);
                fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));

        SET_FOREACH(t, u->names, i)
                fprintf(f, "%s\tName: %s\n", prefix, t);

        if (!sd_id128_is_null(u->invocation_id))
                fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
                        prefix, SD_ID128_FORMAT_VAL(u->invocation_id));

        STRV_FOREACH(j, u->documentation)
                fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);

        following = unit_following(u);
                fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);

        r = unit_following_set(u, &following_set);
                SET_FOREACH(other, following_set, i)
                        fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);

        if (u->fragment_path)
                fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);

                fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);

        STRV_FOREACH(j, u->dropin_paths)
                fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);

        if (u->failure_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
        if (u->failure_action_exit_status >= 0)
                fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
        if (u->success_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
        if (u->success_action_exit_status >= 0)
                fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);

        if (u->job_timeout != USEC_INFINITY)
                fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));

        if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
                fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));

        if (u->job_timeout_reboot_arg)
                fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);

        condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
        condition_dump_list(u->asserts, f, prefix, assert_type_to_string);

        if (dual_timestamp_is_set(&u->condition_timestamp))
                        "%s\tCondition Timestamp: %s\n"
                        "%s\tCondition Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->condition_timestamp.realtime)),
                        prefix, yes_no(u->condition_result));

        if (dual_timestamp_is_set(&u->assert_timestamp))
                        "%s\tAssert Timestamp: %s\n"
                        "%s\tAssert Result: %s\n",
                        prefix, strna(format_timestamp(timestamp1, sizeof(timestamp1), u->assert_timestamp.realtime)),
                        prefix, yes_no(u->assert_result));

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                UnitDependencyInfo di;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {

                        fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

        if (!hashmap_isempty(u->requires_mounts_for)) {
                UnitDependencyInfo di;

                HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {

                        fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);

                        print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
                        print_unit_dependency_mask(f, "destination", di.destination_mask, &space);

        if (u->load_state == UNIT_LOADED) {

                        "%s\tStopWhenUnneeded: %s\n"
                        "%s\tRefuseManualStart: %s\n"
                        "%s\tRefuseManualStop: %s\n"
                        "%s\tDefaultDependencies: %s\n"
                        "%s\tOnFailureJobMode: %s\n"
                        "%s\tIgnoreOnIsolate: %s\n",
                        prefix, yes_no(u->stop_when_unneeded),
                        prefix, yes_no(u->refuse_manual_start),
                        prefix, yes_no(u->refuse_manual_stop),
                        prefix, yes_no(u->default_dependencies),
                        prefix, job_mode_to_string(u->on_failure_job_mode),
                        prefix, yes_no(u->ignore_on_isolate));

                if (UNIT_VTABLE(u)->dump)
                        UNIT_VTABLE(u)->dump(u, f, prefix2);

        } else if (u->load_state == UNIT_MERGED)
                        "%s\tMerged into: %s\n",
                        prefix, u->merged_into->id);
        else if (u->load_state == UNIT_ERROR)
                fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror(-u->load_error));

        for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
                fprintf(f, "%s\tBus Ref: %s\n", prefix, n);

                job_dump(u->job, f, prefix2);

                job_dump(u->nop_job, f, prefix2);
1335 /* Common implementation for multiple backends */
1336 int unit_load_fragment_and_dropin(Unit
*u
) {
1341 /* Load a .{service,socket,...} file */
1342 r
= unit_load_fragment(u
);
1346 if (u
->load_state
== UNIT_STUB
)
1349 /* Load drop-in directory data. If u is an alias, we might be reloading the
1350 * target unit needlessly. But we cannot be sure which drops-ins have already
1351 * been loaded and which not, at least without doing complicated book-keeping,
1352 * so let's always reread all drop-ins. */
1353 return unit_load_dropin(unit_follow_merge(u
));
1356 /* Common implementation for multiple backends */
1357 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1362 /* Same as unit_load_fragment_and_dropin(), but whether
1363 * something can be loaded or not doesn't matter. */
1365 /* Load a .service/.socket/.slice/… file */
1366 r
= unit_load_fragment(u
);
1370 if (u
->load_state
== UNIT_STUB
)
1371 u
->load_state
= UNIT_LOADED
;
1373 /* Load drop-in directory data */
1374 return unit_load_dropin(unit_follow_merge(u
));
1377 void unit_add_to_target_deps_queue(Unit
*u
) {
1378 Manager
*m
= u
->manager
;
1382 if (u
->in_target_deps_queue
)
1385 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1386 u
->in_target_deps_queue
= true;
1389 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1393 if (target
->type
!= UNIT_TARGET
)
1396 /* Only add the dependency if both units are loaded, so that
1397 * that loop check below is reliable */
1398 if (u
->load_state
!= UNIT_LOADED
||
1399 target
->load_state
!= UNIT_LOADED
)
1402 /* If either side wants no automatic dependencies, then let's
1404 if (!u
->default_dependencies
||
1405 !target
->default_dependencies
)
1408 /* Don't create loops */
1409 if (hashmap_get(target
->dependencies
[UNIT_BEFORE
], u
))
1412 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1415 static int unit_add_slice_dependencies(Unit
*u
) {
1416 UnitDependencyMask mask
;
1419 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1422 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1423 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1425 mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1427 if (UNIT_ISSET(u
->slice
))
1428 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true, mask
);
1430 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1433 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
/* For every path in u->requires_mounts_for, adds After= (and, for mounts
 * with a fragment, Requires=) dependencies on the .mount units covering each
 * path prefix.
 * NOTE(review): elided excerpt — locals (i, path, r, m), error returns,
 * 'continue's and closing braces are not visible here. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
                /* VLA sized to hold each successive prefix of 'path'. */
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;

                        r = unit_name_from_path(prefix, ".mount", &p);

                        m = manager_get_unit(u->manager, p);

                        /* Make sure to load the mount unit if
                         * it exists. If so the dependencies
                         * on this unit will be added later
                         * during the loading of the mount unit. */
                        (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);

                        if (m->load_state != UNIT_LOADED)

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);

                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1486 static int unit_add_startup_units(Unit
*u
) {
1490 c
= unit_get_cgroup_context(u
);
1494 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1495 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1496 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1499 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1503 return set_put(u
->manager
->startup_units
, u
);
1506 int unit_load(Unit
*u
) {
1511 if (u
->in_load_queue
) {
1512 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1513 u
->in_load_queue
= false;
1516 if (u
->type
== _UNIT_TYPE_INVALID
)
1519 if (u
->load_state
!= UNIT_STUB
)
1522 if (u
->transient_file
) {
1523 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1524 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1526 r
= fflush_and_check(u
->transient_file
);
1530 u
->transient_file
= safe_fclose(u
->transient_file
);
1531 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1534 if (UNIT_VTABLE(u
)->load
) {
1535 r
= UNIT_VTABLE(u
)->load(u
);
1540 if (u
->load_state
== UNIT_STUB
) {
1545 if (u
->load_state
== UNIT_LOADED
) {
1546 unit_add_to_target_deps_queue(u
);
1548 r
= unit_add_slice_dependencies(u
);
1552 r
= unit_add_mount_dependencies(u
);
1556 r
= unit_add_startup_units(u
);
1560 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1561 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1566 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1567 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1569 /* We finished loading, let's ensure our parents recalculate the members mask */
1570 unit_invalidate_cgroup_members_masks(u
);
1573 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1575 unit_add_to_dbus_queue(unit_follow_merge(u
));
1576 unit_add_to_gc_queue(u
);
1581 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1582 * return ENOEXEC to ensure units are placed in this state after loading */
1584 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1585 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1589 unit_add_to_dbus_queue(u
);
1590 unit_add_to_gc_queue(u
);
1592 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1596 static int log_unit_internal(void *userdata
, int level
, int error
, const char *file
, int line
, const char *func
, const char *format
, ...) {
1601 va_start(ap
, format
);
1603 r
= log_object_internalv(level
, error
, file
, line
, func
,
1604 u
->manager
->unit_log_field
,
1606 u
->manager
->invocation_log_field
,
1607 u
->invocation_id_string
,
1610 r
= log_internalv(level
, error
, file
, line
, func
, format
, ap
);
1616 static bool unit_test_condition(Unit
*u
) {
1619 dual_timestamp_get(&u
->condition_timestamp
);
1620 u
->condition_result
= condition_test_list(u
->conditions
, condition_type_to_string
, log_unit_internal
, u
);
1622 unit_add_to_dbus_queue(u
);
1624 return u
->condition_result
;
1627 static bool unit_test_assert(Unit
*u
) {
1630 dual_timestamp_get(&u
->assert_timestamp
);
1631 u
->assert_result
= condition_test_list(u
->asserts
, assert_type_to_string
, log_unit_internal
, u
);
1633 unit_add_to_dbus_queue(u
);
1635 return u
->assert_result
;
1638 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1641 d
= unit_description(u
);
1642 if (log_get_show_color())
1643 d
= strjoina(ANSI_HIGHLIGHT
, d
, ANSI_NORMAL
);
1645 DISABLE_WARNING_FORMAT_NONLITERAL
;
1646 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, d
);
1650 int unit_test_start_limit(Unit
*u
) {
1655 if (ratelimit_below(&u
->start_limit
)) {
1656 u
->start_limit_hit
= false;
1660 log_unit_warning(u
, "Start request repeated too quickly.");
1661 u
->start_limit_hit
= true;
1663 reason
= strjoina("unit ", u
->id
, " failed");
1665 emergency_action(u
->manager
, u
->start_limit_action
,
1666 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1667 u
->reboot_arg
, -1, reason
);
1672 bool unit_shall_confirm_spawn(Unit
*u
) {
1675 if (manager_is_confirm_spawn_disabled(u
->manager
))
1678 /* For some reasons units remaining in the same process group
1679 * as PID 1 fail to acquire the console even if it's not used
1680 * by any process. So skip the confirmation question for them. */
1681 return !unit_get_exec_context(u
)->same_pgrp
;
1684 static bool unit_verify_deps(Unit
*u
) {
1691 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1692 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1693 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1694 * conjunction with After= as for them any such check would make things entirely racy. */
1696 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1698 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1701 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1702 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1710 /* Errors that aren't really errors:
1711 * -EALREADY: Unit is already started.
1712 * -ECOMM: Condition failed
1713 * -EAGAIN: An operation is already in progress. Retry later.
1715 * Errors that are real errors:
1716 * -EBADR: This unit type does not support starting.
1717 * -ECANCELED: Start limit hit, too many requests for now
1718 * -EPROTO: Assert failed
1719 * -EINVAL: Unit not loaded
1720 * -EOPNOTSUPP: Unit type not supported
1721 * -ENOLINK: The necessary dependencies are not fulfilled.
1722 * -ESTALE: This unit has been started before and can't be started a second time
1723 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1725 int unit_start(Unit
*u
) {
1726 UnitActiveState state
;
1732 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1733 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1734 * waiting is finished. */
1735 state
= unit_active_state(u
);
1736 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1739 /* Units that aren't loaded cannot be started */
1740 if (u
->load_state
!= UNIT_LOADED
)
1743 /* Refuse starting scope units more than once */
1744 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1747 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1748 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1749 * recheck the condition in that case. */
1750 if (state
!= UNIT_ACTIVATING
&&
1751 !unit_test_condition(u
)) {
1753 /* Let's also check the start limit here. Normally, the start limit is only checked by the
1754 * .start() method of the unit type after it did some additional checks verifying everything
1755 * is in order (so that those other checks can propagate errors properly). However, if a
1756 * condition check doesn't hold we don't get that far but we should still ensure we are not
1757 * called in a tight loop without a rate limit check enforced, hence do the check here. Note
1758 * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
1759 * hence the chance is big that any triggering unit for us will trigger us again. Note this
1760 * condition check is a bit different from the condition check inside the per-unit .start()
1761 * function, as this one will not change the unit's state in any way (and we shouldn't here,
1762 * after all the condition failed). */
1764 r
= unit_test_start_limit(u
);
1768 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(ECOMM
), "Starting requested but condition failed. Not starting unit.");
1771 /* If the asserts failed, fail the entire job */
1772 if (state
!= UNIT_ACTIVATING
&&
1773 !unit_test_assert(u
))
1774 return log_unit_notice_errno(u
, SYNTHETIC_ERRNO(EPROTO
), "Starting requested but asserts failed.");
1776 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1777 * condition checks, so that we rather return condition check errors (which are usually not
1778 * considered a true failure) than "not supported" errors (which are considered a failure).
1780 if (!unit_supported(u
))
1783 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1784 * should have taken care of this already, but let's check this here again. After all, our
1785 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1786 if (!unit_verify_deps(u
))
1789 /* Forward to the main object, if we aren't it. */
1790 following
= unit_following(u
);
1792 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1793 return unit_start(following
);
1796 /* If it is stopped, but we cannot start it, then fail */
1797 if (!UNIT_VTABLE(u
)->start
)
1800 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1801 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1802 * waits for a holdoff timer to elapse before it will start again. */
1804 unit_add_to_dbus_queue(u
);
1806 return UNIT_VTABLE(u
)->start(u
);
1809 bool unit_can_start(Unit
*u
) {
1812 if (u
->load_state
!= UNIT_LOADED
)
1815 if (!unit_supported(u
))
1818 /* Scope units may be started only once */
1819 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1822 return !!UNIT_VTABLE(u
)->start
;
1825 bool unit_can_isolate(Unit
*u
) {
1828 return unit_can_start(u
) &&
1833 * -EBADR: This unit type does not support stopping.
1834 * -EALREADY: Unit is already stopped.
1835 * -EAGAIN: An operation is already in progress. Retry later.
1837 int unit_stop(Unit
*u
) {
1838 UnitActiveState state
;
1843 state
= unit_active_state(u
);
1844 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1847 following
= unit_following(u
);
1849 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1850 return unit_stop(following
);
1853 if (!UNIT_VTABLE(u
)->stop
)
1856 unit_add_to_dbus_queue(u
);
1858 return UNIT_VTABLE(u
)->stop(u
);
1861 bool unit_can_stop(Unit
*u
) {
1864 if (!unit_supported(u
))
1870 return !!UNIT_VTABLE(u
)->stop
;
1874 * -EBADR: This unit type does not support reloading.
1875 * -ENOEXEC: Unit is not started.
1876 * -EAGAIN: An operation is already in progress. Retry later.
1878 int unit_reload(Unit
*u
) {
1879 UnitActiveState state
;
1884 if (u
->load_state
!= UNIT_LOADED
)
1887 if (!unit_can_reload(u
))
1890 state
= unit_active_state(u
);
1891 if (state
== UNIT_RELOADING
)
1894 if (state
!= UNIT_ACTIVE
) {
1895 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1899 following
= unit_following(u
);
1901 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1902 return unit_reload(following
);
1905 unit_add_to_dbus_queue(u
);
1907 if (!UNIT_VTABLE(u
)->reload
) {
1908 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1909 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
1913 return UNIT_VTABLE(u
)->reload(u
);
1916 bool unit_can_reload(Unit
*u
) {
1919 if (UNIT_VTABLE(u
)->can_reload
)
1920 return UNIT_VTABLE(u
)->can_reload(u
);
1922 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1925 return UNIT_VTABLE(u
)->reload
;
1928 bool unit_is_unneeded(Unit
*u
) {
1929 static const UnitDependency deps
[] = {
1939 if (!u
->stop_when_unneeded
)
1942 /* Don't clean up while the unit is transitioning or is even inactive. */
1943 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
1948 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1953 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1954 * restart, then don't clean this one up. */
1956 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
) {
1960 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1963 if (unit_will_restart(other
))
1971 static void check_unneeded_dependencies(Unit
*u
) {
1973 static const UnitDependency deps
[] = {
1983 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1985 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1990 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
)
1991 unit_submit_to_stop_when_unneeded_queue(other
);
1995 static void unit_check_binds_to(Unit
*u
) {
1996 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2008 if (unit_active_state(u
) != UNIT_ACTIVE
)
2011 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
2015 if (!other
->coldplugged
)
2016 /* We might yet create a job for the other unit… */
2019 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2029 /* If stopping a unit fails continuously we might enter a stop
2030 * loop here, hence stop acting on the service being
2031 * unnecessary after a while. */
2032 if (!ratelimit_below(&u
->auto_stop_ratelimit
)) {
2033 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2038 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2040 /* A unit we need to run is gone. Sniff. Let's stop this. */
2041 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, NULL
, &error
, NULL
);
2043 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2046 static void retroactively_start_dependencies(Unit
*u
) {
2052 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2054 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2055 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2056 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2057 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2059 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2060 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2061 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2062 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2064 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2065 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2066 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2067 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
, NULL
);
2069 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2070 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2071 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2073 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2074 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2075 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2078 static void retroactively_stop_dependencies(Unit
*u
) {
2084 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2086 /* Pull down units which are bound to us recursively if enabled */
2087 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2088 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2089 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2092 void unit_start_on_failure(Unit
*u
) {
2100 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2103 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2105 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2106 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2108 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, NULL
, &error
, NULL
);
2110 log_unit_warning_errno(u
, r
, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error
, r
));
2114 void unit_trigger_notify(Unit
*u
) {
2121 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2122 if (UNIT_VTABLE(other
)->trigger_notify
)
2123 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2126 static int unit_log_resources(Unit
*u
) {
2127 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ _CGROUP_IO_ACCOUNTING_METRIC_MAX
+ 4];
2128 bool any_traffic
= false, have_ip_accounting
= false, any_io
= false, have_io_accounting
= false;
2129 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
, *rr
= NULL
, *wr
= NULL
;
2130 size_t n_message_parts
= 0, n_iovec
= 0;
2131 char* message_parts
[1 + 2 + 2 + 1], *t
;
2132 nsec_t nsec
= NSEC_INFINITY
;
2133 CGroupIPAccountingMetric m
;
2136 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2137 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2138 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2139 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2140 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2142 const char* const io_fields
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
2143 [CGROUP_IO_READ_BYTES
] = "IO_METRIC_READ_BYTES",
2144 [CGROUP_IO_WRITE_BYTES
] = "IO_METRIC_WRITE_BYTES",
2145 [CGROUP_IO_READ_OPERATIONS
] = "IO_METRIC_READ_OPERATIONS",
2146 [CGROUP_IO_WRITE_OPERATIONS
] = "IO_METRIC_WRITE_OPERATIONS",
2151 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2152 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2153 * information and the complete data in structured fields. */
2155 (void) unit_get_cpu_usage(u
, &nsec
);
2156 if (nsec
!= NSEC_INFINITY
) {
2157 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2159 /* Format the CPU time for inclusion in the structured log message */
2160 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2164 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2166 /* Format the CPU time for inclusion in the human language message string */
2167 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2168 t
= strjoin("consumed ", buf
, " CPU time");
2174 message_parts
[n_message_parts
++] = t
;
2177 for (CGroupIOAccountingMetric k
= 0; k
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; k
++) {
2178 char buf
[FORMAT_BYTES_MAX
] = "";
2179 uint64_t value
= UINT64_MAX
;
2181 assert(io_fields
[k
]);
2183 (void) unit_get_io_accounting(u
, k
, k
> 0, &value
);
2184 if (value
== UINT64_MAX
)
2187 have_io_accounting
= true;
2191 /* Format IO accounting data for inclusion in the structured log message */
2192 if (asprintf(&t
, "%s=%" PRIu64
, io_fields
[k
], value
) < 0) {
2196 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2198 /* Format the IO accounting data for inclusion in the human language message string, but only
2199 * for the bytes counters (and not for the operations counters) */
2200 if (k
== CGROUP_IO_READ_BYTES
) {
2202 rr
= strjoin("read ", format_bytes(buf
, sizeof(buf
), value
), " from disk");
2207 } else if (k
== CGROUP_IO_WRITE_BYTES
) {
2209 wr
= strjoin("written ", format_bytes(buf
, sizeof(buf
), value
), " to disk");
2217 if (have_io_accounting
) {
2220 message_parts
[n_message_parts
++] = TAKE_PTR(rr
);
2222 message_parts
[n_message_parts
++] = TAKE_PTR(wr
);
2227 k
= strdup("no IO");
2233 message_parts
[n_message_parts
++] = k
;
2237 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2238 char buf
[FORMAT_BYTES_MAX
] = "";
2239 uint64_t value
= UINT64_MAX
;
2241 assert(ip_fields
[m
]);
2243 (void) unit_get_ip_accounting(u
, m
, &value
);
2244 if (value
== UINT64_MAX
)
2247 have_ip_accounting
= true;
2251 /* Format IP accounting data for inclusion in the structured log message */
2252 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2256 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2258 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2259 * bytes counters (and not for the packets counters) */
2260 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2262 igress
= strjoin("received ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2267 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2269 egress
= strjoin("sent ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2277 if (have_ip_accounting
) {
2280 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2282 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2287 k
= strdup("no IP traffic");
2293 message_parts
[n_message_parts
++] = k
;
2297 /* Is there any accounting data available at all? */
2303 if (n_message_parts
== 0)
2304 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2306 _cleanup_free_
char *joined
;
2308 message_parts
[n_message_parts
] = NULL
;
2310 joined
= strv_join(message_parts
, ", ");
2316 joined
[0] = ascii_toupper(joined
[0]);
2317 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2320 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2321 * and hence don't increase n_iovec for them */
2322 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2323 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2325 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2326 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2328 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2329 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2331 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2335 for (i
= 0; i
< n_message_parts
; i
++)
2336 free(message_parts
[i
]);
2338 for (i
= 0; i
< n_iovec
; i
++)
2339 free(iovec
[i
].iov_base
);
2345 static void unit_update_on_console(Unit
*u
) {
2350 b
= unit_needs_console(u
);
2351 if (u
->on_console
== b
)
2356 manager_ref_console(u
->manager
);
2358 manager_unref_console(u
->manager
);
2361 static void unit_emit_audit_start(Unit
*u
) {
2364 if (u
->type
!= UNIT_SERVICE
)
2367 /* Write audit record if we have just finished starting up */
2368 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, true);
2372 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2375 if (u
->type
!= UNIT_SERVICE
)
2379 /* Write audit record if we have just finished shutting down */
2380 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, state
== UNIT_INACTIVE
);
2381 u
->in_audit
= false;
2383 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2384 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, state
== UNIT_INACTIVE
);
2386 if (state
== UNIT_INACTIVE
)
2387 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, true);
2391 static bool unit_process_job(Job
*j
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2392 bool unexpected
= false;
2396 if (j
->state
== JOB_WAITING
)
2398 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2400 job_add_to_run_queue(j
);
2402 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2403 * hence needs to invalidate jobs. */
2408 case JOB_VERIFY_ACTIVE
:
2410 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2411 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2412 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2415 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2416 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2422 case JOB_RELOAD_OR_START
:
2423 case JOB_TRY_RELOAD
:
2425 if (j
->state
== JOB_RUNNING
) {
2426 if (ns
== UNIT_ACTIVE
)
2427 job_finish_and_invalidate(j
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2428 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2431 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2432 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2440 case JOB_TRY_RESTART
:
2442 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2443 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2444 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2446 job_finish_and_invalidate(j
, JOB_FAILED
, true, false);
2452 assert_not_reached("Job type unknown");
2458 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2463 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2464 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2466 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2467 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2468 * remounted this function will be called too! */
2472 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2473 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2474 unit_add_to_dbus_queue(u
);
2476 /* Update timestamps for state changes */
2477 if (!MANAGER_IS_RELOADING(m
)) {
2478 dual_timestamp_get(&u
->state_change_timestamp
);
2480 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2481 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2482 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2483 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2485 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2486 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2487 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2488 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2491 /* Keep track of failed units */
2492 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2494 /* Make sure the cgroup and state files are always removed when we become inactive */
2495 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2496 unit_prune_cgroup(u
);
2497 unit_unlink_state_files(u
);
2500 unit_update_on_console(u
);
2502 if (!MANAGER_IS_RELOADING(m
)) {
2505 /* Let's propagate state changes to the job */
2507 unexpected
= unit_process_job(u
->job
, ns
, flags
);
2511 /* If this state change happened without being requested by a job, then let's retroactively start or
2512 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2513 * additional jobs just because something is already activated. */
2516 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2517 retroactively_start_dependencies(u
);
2518 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2519 retroactively_stop_dependencies(u
);
2522 /* stop unneeded units regardless if going down was expected or not */
2523 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2524 check_unneeded_dependencies(u
);
2526 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2527 log_unit_debug(u
, "Unit entered failed state.");
2529 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2530 unit_start_on_failure(u
);
2533 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2534 /* This unit just finished starting up */
2536 unit_emit_audit_start(u
);
2537 manager_send_unit_plymouth(m
, u
);
2540 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2541 /* This unit just stopped/failed. */
2543 unit_emit_audit_stop(u
, ns
);
2544 unit_log_resources(u
);
2548 manager_recheck_journal(m
);
2549 manager_recheck_dbus(m
);
2551 unit_trigger_notify(u
);
2553 if (!MANAGER_IS_RELOADING(m
)) {
2554 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2555 unit_submit_to_stop_when_unneeded_queue(u
);
2557 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2558 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2559 * without ever entering started.) */
2560 unit_check_binds_to(u
);
2562 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2563 reason
= strjoina("unit ", u
->id
, " failed");
2564 emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2565 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2566 reason
= strjoina("unit ", u
->id
, " succeeded");
2567 emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2571 unit_add_to_gc_queue(u
);
2574 int unit_watch_pid(Unit
*u
, pid_t pid
, bool exclusive
) {
2578 assert(pid_is_valid(pid
));
2580 /* Watch a specific PID */
2582 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2583 * opportunity to remove any stalled references to this PID as they can be created
2584 * easily (when watching a process which is not our direct child). */
2586 manager_unwatch_pid(u
->manager
, pid
);
2588 r
= set_ensure_allocated(&u
->pids
, NULL
);
2592 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2596 /* First try, let's add the unit keyed by "pid". */
2597 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2603 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2604 * to an array of Units rather than just a Unit), lists us already. */
2606 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2608 for (; array
[n
]; n
++)
2612 if (found
) /* Found it already? if so, do nothing */
2617 /* Allocate a new array */
2618 new_array
= new(Unit
*, n
+ 2);
2622 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2624 new_array
[n
+1] = NULL
;
2626 /* Add or replace the old array */
2627 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2638 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
2645 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2649 assert(pid_is_valid(pid
));
2651 /* First let's drop the unit in case it's keyed as "pid". */
2652 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2654 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2655 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2659 /* Let's iterate through the array, dropping our own entry */
2660 for (n
= 0; array
[n
]; n
++)
2662 array
[m
++] = array
[n
];
2666 /* The array is now empty, remove the entire entry */
2667 assert(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2672 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2675 void unit_unwatch_all_pids(Unit
*u
) {
2678 while (!set_isempty(u
->pids
))
2679 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2681 u
->pids
= set_free(u
->pids
);
2684 static void unit_tidy_watch_pids(Unit
*u
) {
2685 pid_t except1
, except2
;
2691 /* Cleans dead PIDs from our list */
2693 except1
= unit_main_pid(u
);
2694 except2
= unit_control_pid(u
);
2696 SET_FOREACH(e
, u
->pids
, i
) {
2697 pid_t pid
= PTR_TO_PID(e
);
2699 if (pid
== except1
|| pid
== except2
)
2702 if (!pid_is_unwaited(pid
))
2703 unit_unwatch_pid(u
, pid
);
2707 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2713 unit_tidy_watch_pids(u
);
2714 unit_watch_all_pids(u
);
2716 /* If the PID set is empty now, then let's finish this off. */
2717 unit_synthesize_cgroup_empty_event(u
);
2722 int unit_enqueue_rewatch_pids(Unit
*u
) {
2727 if (!u
->cgroup_path
)
2730 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2733 if (r
> 0) /* On unified we can use proper notifications */
2736 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2737 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2738 * involves issuing kill(pid, 0) on all processes we watch. */
2740 if (!u
->rewatch_pids_event_source
) {
2741 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2743 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2745 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2747 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
2749 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: m");
2751 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2753 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2756 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2758 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
2763 void unit_dequeue_rewatch_pids(Unit
*u
) {
2767 if (!u
->rewatch_pids_event_source
)
2770 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2772 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2774 u
->rewatch_pids_event_source
= sd_event_source_unref(u
->rewatch_pids_event_source
);
2777 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2779 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2783 case JOB_VERIFY_ACTIVE
:
2786 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2787 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2792 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2793 * external events), hence it makes no sense to permit enqueing such a request either. */
2794 return !u
->perpetual
;
2797 case JOB_TRY_RESTART
:
2798 return unit_can_stop(u
) && unit_can_start(u
);
2801 case JOB_TRY_RELOAD
:
2802 return unit_can_reload(u
);
2804 case JOB_RELOAD_OR_START
:
2805 return unit_can_reload(u
) && unit_can_start(u
);
2808 assert_not_reached("Invalid job type");
2812 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2815 /* Only warn about some unit types */
2816 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2819 if (streq_ptr(u
->id
, other
))
2820 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2822 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2825 static int unit_add_dependency_hashmap(
2828 UnitDependencyMask origin_mask
,
2829 UnitDependencyMask destination_mask
) {
2831 UnitDependencyInfo info
;
2836 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2837 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2838 assert(origin_mask
> 0 || destination_mask
> 0);
2840 r
= hashmap_ensure_allocated(h
, NULL
);
2844 assert_cc(sizeof(void*) == sizeof(info
));
2846 info
.data
= hashmap_get(*h
, other
);
2848 /* Entry already exists. Add in our mask. */
2850 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
2851 FLAGS_SET(destination_mask
, info
.destination_mask
))
2854 info
.origin_mask
|= origin_mask
;
2855 info
.destination_mask
|= destination_mask
;
2857 r
= hashmap_update(*h
, other
, info
.data
);
2859 info
= (UnitDependencyInfo
) {
2860 .origin_mask
= origin_mask
,
2861 .destination_mask
= destination_mask
,
2864 r
= hashmap_put(*h
, other
, info
.data
);
2872 int unit_add_dependency(
2877 UnitDependencyMask mask
) {
2879 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2880 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2881 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2882 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2883 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2884 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2885 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2886 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2887 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2888 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2889 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2890 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2891 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2892 [UNIT_BEFORE
] = UNIT_AFTER
,
2893 [UNIT_AFTER
] = UNIT_BEFORE
,
2894 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2895 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2896 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2897 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2898 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2899 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2900 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2901 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2903 Unit
*original_u
= u
, *original_other
= other
;
2907 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2910 u
= unit_follow_merge(u
);
2911 other
= unit_follow_merge(other
);
2913 /* We won't allow dependencies on ourselves. We will not
2914 * consider them an error however. */
2916 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2920 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2921 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2922 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2926 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2930 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2931 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2936 if (add_reference
) {
2937 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2941 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2946 unit_add_to_dbus_queue(u
);
2950 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2955 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2959 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
2962 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
2970 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2977 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2979 _cleanup_free_
char *i
= NULL
;
2981 r
= unit_name_to_prefix(u
->id
, &i
);
2985 r
= unit_name_replace_instance(name
, i
, buf
);
2994 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2995 _cleanup_free_
char *buf
= NULL
;
3002 r
= resolve_template(u
, name
, &buf
, &name
);
3006 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3010 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3013 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3014 _cleanup_free_
char *buf
= NULL
;
3021 r
= resolve_template(u
, name
, &buf
, &name
);
3025 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3029 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes: redirect the unit search path
         * through the environment. Returns a negative errno on failure. */
        int k = setenv("SYSTEMD_UNIT_PATH", p, 1);
        if (k < 0)
                return -errno;

        return 0;
}
3040 char *unit_dbus_path(Unit
*u
) {
3046 return unit_dbus_path_from_name(u
->id
);
3049 char *unit_dbus_path_invocation_id(Unit
*u
) {
3052 if (sd_id128_is_null(u
->invocation_id
))
3055 return unit_dbus_path_from_name(u
->invocation_id_string
);
3058 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3062 /* Sets the unit slice if it has not been set before. Is extra
3063 * careful, to only allow this for units that actually have a
3064 * cgroup context. Also, we don't allow to set this for slices
3065 * (since the parent slice is derived from the name). Make
3066 * sure the unit we set is actually a slice. */
3068 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3071 if (u
->type
== UNIT_SLICE
)
3074 if (unit_active_state(u
) != UNIT_INACTIVE
)
3077 if (slice
->type
!= UNIT_SLICE
)
3080 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3081 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3084 if (UNIT_DEREF(u
->slice
) == slice
)
3087 /* Disallow slice changes if @u is already bound to cgroups */
3088 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
3091 unit_ref_set(&u
->slice
, u
, slice
);
3095 int unit_set_default_slice(Unit
*u
) {
3096 const char *slice_name
;
3102 if (UNIT_ISSET(u
->slice
))
3106 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3108 /* Implicitly place all instantiated units in their
3109 * own per-template slice */
3111 r
= unit_name_to_prefix(u
->id
, &prefix
);
3115 /* The prefix is already escaped, but it might include
3116 * "-" which has a special meaning for slice units,
3117 * hence escape it here extra. */
3118 escaped
= unit_name_escape(prefix
);
3122 if (MANAGER_IS_SYSTEM(u
->manager
))
3123 slice_name
= strjoina("system-", escaped
, ".slice");
3125 slice_name
= strjoina(escaped
, ".slice");
3128 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
3129 ? SPECIAL_SYSTEM_SLICE
3130 : SPECIAL_ROOT_SLICE
;
3132 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3136 return unit_set_slice(u
, slice
);
3139 const char *unit_slice_name(Unit
*u
) {
3142 if (!UNIT_ISSET(u
->slice
))
3145 return UNIT_DEREF(u
->slice
)->id
;
3148 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3149 _cleanup_free_
char *t
= NULL
;
3156 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3159 if (unit_has_name(u
, t
))
3162 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3163 assert(r
< 0 || *_found
!= u
);
3167 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3168 const char *name
, *old_owner
, *new_owner
;
3175 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3177 bus_log_parse_error(r
);
3181 old_owner
= empty_to_null(old_owner
);
3182 new_owner
= empty_to_null(new_owner
);
3184 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3185 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
3190 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3197 if (u
->match_bus_slot
)
3200 match
= strjoina("type='signal',"
3201 "sender='org.freedesktop.DBus',"
3202 "path='/org/freedesktop/DBus',"
3203 "interface='org.freedesktop.DBus',"
3204 "member='NameOwnerChanged',"
3205 "arg0='", name
, "'");
3207 return sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
3210 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3216 /* Watch a specific name on the bus. We only support one unit
3217 * watching each name for now. */
3219 if (u
->manager
->api_bus
) {
3220 /* If the bus is already available, install the match directly.
3221 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3222 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3224 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3227 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3229 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3230 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3236 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3240 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3241 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3244 bool unit_can_serialize(Unit
*u
) {
3247 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
3250 static int serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3251 _cleanup_free_
char *s
= NULL
;
3260 r
= cg_mask_to_string(mask
, &s
);
3262 return log_error_errno(r
, "Failed to format cgroup mask: %m");
3264 return serialize_item(f
, key
, s
);
3267 static const char *const ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3268 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3269 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3270 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3271 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
3274 static const char *const io_accounting_metric_field_base
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
3275 [CGROUP_IO_READ_BYTES
] = "io-accounting-read-bytes-base",
3276 [CGROUP_IO_WRITE_BYTES
] = "io-accounting-write-bytes-base",
3277 [CGROUP_IO_READ_OPERATIONS
] = "io-accounting-read-operations-base",
3278 [CGROUP_IO_WRITE_OPERATIONS
] = "io-accounting-write-operations-base",
3281 static const char *const io_accounting_metric_field_last
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
3282 [CGROUP_IO_READ_BYTES
] = "io-accounting-read-bytes-last",
3283 [CGROUP_IO_WRITE_BYTES
] = "io-accounting-write-bytes-last",
3284 [CGROUP_IO_READ_OPERATIONS
] = "io-accounting-read-operations-last",
3285 [CGROUP_IO_WRITE_OPERATIONS
] = "io-accounting-write-operations-last",
3288 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3289 CGroupIPAccountingMetric m
;
3296 if (unit_can_serialize(u
)) {
3297 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3302 (void) serialize_dual_timestamp(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3304 (void) serialize_dual_timestamp(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3305 (void) serialize_dual_timestamp(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3306 (void) serialize_dual_timestamp(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3307 (void) serialize_dual_timestamp(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3309 (void) serialize_dual_timestamp(f
, "condition-timestamp", &u
->condition_timestamp
);
3310 (void) serialize_dual_timestamp(f
, "assert-timestamp", &u
->assert_timestamp
);
3312 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3313 (void) serialize_bool(f
, "condition-result", u
->condition_result
);
3315 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3316 (void) serialize_bool(f
, "assert-result", u
->assert_result
);
3318 (void) serialize_bool(f
, "transient", u
->transient
);
3319 (void) serialize_bool(f
, "in-audit", u
->in_audit
);
3321 (void) serialize_bool(f
, "exported-invocation-id", u
->exported_invocation_id
);
3322 (void) serialize_bool(f
, "exported-log-level-max", u
->exported_log_level_max
);
3323 (void) serialize_bool(f
, "exported-log-extra-fields", u
->exported_log_extra_fields
);
3324 (void) serialize_bool(f
, "exported-log-rate-limit-interval", u
->exported_log_rate_limit_interval
);
3325 (void) serialize_bool(f
, "exported-log-rate-limit-burst", u
->exported_log_rate_limit_burst
);
3327 (void) serialize_item_format(f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3328 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3329 (void) serialize_item_format(f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3331 if (u
->oom_kill_last
> 0)
3332 (void) serialize_item_format(f
, "oom-kill-last", "%" PRIu64
, u
->oom_kill_last
);
3334 for (CGroupIOAccountingMetric im
= 0; im
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; im
++) {
3335 (void) serialize_item_format(f
, io_accounting_metric_field_base
[im
], "%" PRIu64
, u
->io_accounting_base
[im
]);
3337 if (u
->io_accounting_last
[im
] != UINT64_MAX
)
3338 (void) serialize_item_format(f
, io_accounting_metric_field_last
[im
], "%" PRIu64
, u
->io_accounting_last
[im
]);
3342 (void) serialize_item(f
, "cgroup", u
->cgroup_path
);
3344 (void) serialize_bool(f
, "cgroup-realized", u
->cgroup_realized
);
3345 (void) serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3346 (void) serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3347 (void) serialize_cgroup_mask(f
, "cgroup-invalidated-mask", u
->cgroup_invalidated_mask
);
3349 if (uid_is_valid(u
->ref_uid
))
3350 (void) serialize_item_format(f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3351 if (gid_is_valid(u
->ref_gid
))
3352 (void) serialize_item_format(f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3354 if (!sd_id128_is_null(u
->invocation_id
))
3355 (void) serialize_item_format(f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3357 bus_track_serialize(u
->bus_track
, f
, "ref");
3359 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3362 r
= unit_get_ip_accounting(u
, m
, &v
);
3364 (void) serialize_item_format(f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3367 if (serialize_jobs
) {
3370 job_serialize(u
->job
, f
);
3375 job_serialize(u
->nop_job
, f
);
3384 static int unit_deserialize_job(Unit
*u
, FILE *f
) {
3385 _cleanup_(job_freep
) Job
*j
= NULL
;
3395 r
= job_deserialize(j
, f
);
3399 r
= job_install_deserialized(j
);
3407 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3415 _cleanup_free_
char *line
= NULL
;
3420 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3422 return log_error_errno(r
, "Failed to read serialization line: %m");
3423 if (r
== 0) /* eof */
3427 if (isempty(l
)) /* End marker */
3430 k
= strcspn(l
, "=");
3438 if (streq(l
, "job")) {
3440 /* New-style serialized job */
3441 r
= unit_deserialize_job(u
, f
);
3444 } else /* Legacy for pre-44 */
3445 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3447 } else if (streq(l
, "state-change-timestamp")) {
3448 (void) deserialize_dual_timestamp(v
, &u
->state_change_timestamp
);
3450 } else if (streq(l
, "inactive-exit-timestamp")) {
3451 (void) deserialize_dual_timestamp(v
, &u
->inactive_exit_timestamp
);
3453 } else if (streq(l
, "active-enter-timestamp")) {
3454 (void) deserialize_dual_timestamp(v
, &u
->active_enter_timestamp
);
3456 } else if (streq(l
, "active-exit-timestamp")) {
3457 (void) deserialize_dual_timestamp(v
, &u
->active_exit_timestamp
);
3459 } else if (streq(l
, "inactive-enter-timestamp")) {
3460 (void) deserialize_dual_timestamp(v
, &u
->inactive_enter_timestamp
);
3462 } else if (streq(l
, "condition-timestamp")) {
3463 (void) deserialize_dual_timestamp(v
, &u
->condition_timestamp
);
3465 } else if (streq(l
, "assert-timestamp")) {
3466 (void) deserialize_dual_timestamp(v
, &u
->assert_timestamp
);
3468 } else if (streq(l
, "condition-result")) {
3470 r
= parse_boolean(v
);
3472 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3474 u
->condition_result
= r
;
3478 } else if (streq(l
, "assert-result")) {
3480 r
= parse_boolean(v
);
3482 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3484 u
->assert_result
= r
;
3488 } else if (streq(l
, "transient")) {
3490 r
= parse_boolean(v
);
3492 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3498 } else if (streq(l
, "in-audit")) {
3500 r
= parse_boolean(v
);
3502 log_unit_debug(u
, "Failed to parse in-audit bool %s, ignoring.", v
);
3508 } else if (streq(l
, "exported-invocation-id")) {
3510 r
= parse_boolean(v
);
3512 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3514 u
->exported_invocation_id
= r
;
3518 } else if (streq(l
, "exported-log-level-max")) {
3520 r
= parse_boolean(v
);
3522 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3524 u
->exported_log_level_max
= r
;
3528 } else if (streq(l
, "exported-log-extra-fields")) {
3530 r
= parse_boolean(v
);
3532 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3534 u
->exported_log_extra_fields
= r
;
3538 } else if (streq(l
, "exported-log-rate-limit-interval")) {
3540 r
= parse_boolean(v
);
3542 log_unit_debug(u
, "Failed to parse exported log rate limit interval %s, ignoring.", v
);
3544 u
->exported_log_rate_limit_interval
= r
;
3548 } else if (streq(l
, "exported-log-rate-limit-burst")) {
3550 r
= parse_boolean(v
);
3552 log_unit_debug(u
, "Failed to parse exported log rate limit burst %s, ignoring.", v
);
3554 u
->exported_log_rate_limit_burst
= r
;
3558 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3560 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3562 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3566 } else if (streq(l
, "cpu-usage-last")) {
3568 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3570 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3574 } else if (streq(l
, "oom-kill-last")) {
3576 r
= safe_atou64(v
, &u
->oom_kill_last
);
3578 log_unit_debug(u
, "Failed to read OOM kill last %s, ignoring.", v
);
3582 } else if (streq(l
, "cgroup")) {
3584 r
= unit_set_cgroup_path(u
, v
);
3586 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3588 (void) unit_watch_cgroup(u
);
3589 (void) unit_watch_cgroup_memory(u
);
3592 } else if (streq(l
, "cgroup-realized")) {
3595 b
= parse_boolean(v
);
3597 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3599 u
->cgroup_realized
= b
;
3603 } else if (streq(l
, "cgroup-realized-mask")) {
3605 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3607 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3610 } else if (streq(l
, "cgroup-enabled-mask")) {
3612 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3614 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3617 } else if (streq(l
, "cgroup-invalidated-mask")) {
3619 r
= cg_mask_from_string(v
, &u
->cgroup_invalidated_mask
);
3621 log_unit_debug(u
, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v
);
3624 } else if (streq(l
, "ref-uid")) {
3627 r
= parse_uid(v
, &uid
);
3629 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3631 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3635 } else if (streq(l
, "ref-gid")) {
3638 r
= parse_gid(v
, &gid
);
3640 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3642 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3646 } else if (streq(l
, "ref")) {
3648 r
= strv_extend(&u
->deserialized_refs
, v
);
3653 } else if (streq(l
, "invocation-id")) {
3656 r
= sd_id128_from_string(v
, &id
);
3658 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3660 r
= unit_set_invocation_id(u
, id
);
3662 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3668 /* Check if this is an IP accounting metric serialization field */
3669 m
= string_table_lookup(ip_accounting_metric_field
, ELEMENTSOF(ip_accounting_metric_field
), l
);
3673 r
= safe_atou64(v
, &c
);
3675 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3677 u
->ip_accounting_extra
[m
] = c
;
3681 m
= string_table_lookup(io_accounting_metric_field_base
, ELEMENTSOF(io_accounting_metric_field_base
), l
);
3685 r
= safe_atou64(v
, &c
);
3687 log_unit_debug(u
, "Failed to parse IO accounting base value %s, ignoring.", v
);
3689 u
->io_accounting_base
[m
] = c
;
3693 m
= string_table_lookup(io_accounting_metric_field_last
, ELEMENTSOF(io_accounting_metric_field_last
), l
);
3697 r
= safe_atou64(v
, &c
);
3699 log_unit_debug(u
, "Failed to parse IO accounting last value %s, ignoring.", v
);
3701 u
->io_accounting_last
[m
] = c
;
3705 if (unit_can_serialize(u
)) {
3706 r
= exec_runtime_deserialize_compat(u
, l
, v
, fds
);
3708 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3712 /* Returns positive if key was handled by the call */
3716 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3718 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3722 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3723 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3724 * before 228 where the base for timeouts was not persistent across reboots. */
3726 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3727 dual_timestamp_get(&u
->state_change_timestamp
);
3729 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3730 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3731 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3732 unit_invalidate_cgroup_bpf(u
);
3737 int unit_deserialize_skip(FILE *f
) {
3741 /* Skip serialized data for this unit. We don't know what it is. */
3744 _cleanup_free_
char *line
= NULL
;
3747 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3749 return log_error_errno(r
, "Failed to read serialization line: %m");
3761 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3763 _cleanup_free_
char *e
= NULL
;
3768 /* Adds in links to the device node that this unit is based on */
3772 if (!is_device_path(what
))
3775 /* When device units aren't supported (such as in a
3776 * container), don't create dependencies on them. */
3777 if (!unit_type_supported(UNIT_DEVICE
))
3780 r
= unit_name_from_path(what
, ".device", &e
);
3784 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3788 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3789 dep
= UNIT_BINDS_TO
;
3791 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3792 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3793 device
, true, mask
);
3798 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
3806 int unit_coldplug(Unit
*u
) {
3812 /* Make sure we don't enter a loop, when coldplugging recursively. */
3816 u
->coldplugged
= true;
3818 STRV_FOREACH(i
, u
->deserialized_refs
) {
3819 q
= bus_unit_track_add_name(u
, *i
);
3820 if (q
< 0 && r
>= 0)
3823 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3825 if (UNIT_VTABLE(u
)->coldplug
) {
3826 q
= UNIT_VTABLE(u
)->coldplug(u
);
3827 if (q
< 0 && r
>= 0)
3832 q
= job_coldplug(u
->job
);
3833 if (q
< 0 && r
>= 0)
3840 void unit_catchup(Unit
*u
) {
3843 if (UNIT_VTABLE(u
)->catchup
)
3844 UNIT_VTABLE(u
)->catchup(u
);
3847 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3853 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3854 * are never out-of-date. */
3855 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3858 if (stat(path
, &st
) < 0)
3859 /* What, cannot access this anymore? */
3863 /* For masked files check if they are still so */
3864 return !null_or_empty(&st
);
3866 /* For non-empty files check the mtime */
3867 return timespec_load(&st
.st_mtim
) > mtime
;
3872 bool unit_need_daemon_reload(Unit
*u
) {
3873 _cleanup_strv_free_
char **t
= NULL
;
3878 /* For unit files, we allow masking… */
3879 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3880 u
->load_state
== UNIT_MASKED
))
3883 /* Source paths should not be masked… */
3884 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3887 if (u
->load_state
== UNIT_LOADED
)
3888 (void) unit_find_dropin_paths(u
, &t
);
3889 if (!strv_equal(u
->dropin_paths
, t
))
3892 /* … any drop-ins that are masked are simply omitted from the list. */
3893 STRV_FOREACH(path
, u
->dropin_paths
)
3894 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3900 void unit_reset_failed(Unit
*u
) {
3903 if (UNIT_VTABLE(u
)->reset_failed
)
3904 UNIT_VTABLE(u
)->reset_failed(u
);
3906 RATELIMIT_RESET(u
->start_limit
);
3907 u
->start_limit_hit
= false;
3910 Unit
*unit_following(Unit
*u
) {
3913 if (UNIT_VTABLE(u
)->following
)
3914 return UNIT_VTABLE(u
)->following(u
);
3919 bool unit_stop_pending(Unit
*u
) {
3922 /* This call does check the current state of the unit. It's
3923 * hence useful to be called from state change calls of the
3924 * unit itself, where the state isn't updated yet. This is
3925 * different from unit_inactive_or_pending() which checks both
3926 * the current state and for a queued job. */
3928 return u
->job
&& u
->job
->type
== JOB_STOP
;
3931 bool unit_inactive_or_pending(Unit
*u
) {
3934 /* Returns true if the unit is inactive or going down */
3936 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3939 if (unit_stop_pending(u
))
3945 bool unit_active_or_pending(Unit
*u
) {
3948 /* Returns true if the unit is active or going up */
3950 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3954 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3960 bool unit_will_restart(Unit
*u
) {
3963 if (!UNIT_VTABLE(u
)->will_restart
)
3966 return UNIT_VTABLE(u
)->will_restart(u
);
3969 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3971 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3972 assert(SIGNAL_VALID(signo
));
3974 if (!UNIT_VTABLE(u
)->kill
)
3977 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3980 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3981 _cleanup_set_free_ Set
*pid_set
= NULL
;
3984 pid_set
= set_new(NULL
);
3988 /* Exclude the main/control pids from being killed via the cgroup */
3990 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3995 if (control_pid
> 0) {
3996 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
4001 return TAKE_PTR(pid_set
);
4004 int unit_kill_common(
4010 sd_bus_error
*error
) {
4013 bool killed
= false;
4015 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
4017 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
4018 else if (main_pid
== 0)
4019 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
4022 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
4023 if (control_pid
< 0)
4024 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
4025 else if (control_pid
== 0)
4026 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
4029 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
4030 if (control_pid
> 0) {
4031 if (kill(control_pid
, signo
) < 0)
4037 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
4039 if (kill(main_pid
, signo
) < 0)
4045 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
4046 _cleanup_set_free_ Set
*pid_set
= NULL
;
4049 /* Exclude the main/control pids from being killed via the cgroup */
4050 pid_set
= unit_pid_set(main_pid
, control_pid
);
4054 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
4055 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
4061 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
4067 int unit_following_set(Unit
*u
, Set
**s
) {
4071 if (UNIT_VTABLE(u
)->following_set
)
4072 return UNIT_VTABLE(u
)->following_set(u
, s
);
4078 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4083 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4084 r
= unit_file_get_state(
4085 u
->manager
->unit_file_scope
,
4088 &u
->unit_file_state
);
4090 u
->unit_file_state
= UNIT_FILE_BAD
;
4093 return u
->unit_file_state
;
4096 int unit_get_unit_file_preset(Unit
*u
) {
4099 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
4100 u
->unit_file_preset
= unit_file_query_preset(
4101 u
->manager
->unit_file_scope
,
4103 basename(u
->fragment_path
));
4105 return u
->unit_file_preset
;
4108 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4114 unit_ref_unset(ref
);
4116 ref
->source
= source
;
4117 ref
->target
= target
;
4118 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
4122 void unit_ref_unset(UnitRef
*ref
) {
4128 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4129 * be unreferenced now. */
4130 unit_add_to_gc_queue(ref
->target
);
4132 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4133 ref
->source
= ref
->target
= NULL
;
4136 static int user_from_unit_name(Unit
*u
, char **ret
) {
4138 static const uint8_t hash_key
[] = {
4139 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4140 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4143 _cleanup_free_
char *n
= NULL
;
4146 r
= unit_name_to_prefix(u
->id
, &n
);
4150 if (valid_user_group_name(n
)) {
4155 /* If we can't use the unit name as a user name, then let's hash it and use that */
4156 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4162 int unit_patch_contexts(Unit
*u
) {
4170 /* Patch in the manager defaults into the exec and cgroup
4171 * contexts, _after_ the rest of the settings have been
4174 ec
= unit_get_exec_context(u
);
4176 /* This only copies in the ones that need memory */
4177 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4178 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4179 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4184 if (MANAGER_IS_USER(u
->manager
) &&
4185 !ec
->working_directory
) {
4187 r
= get_home_dir(&ec
->working_directory
);
4191 /* Allow user services to run, even if the
4192 * home directory is missing */
4193 ec
->working_directory_missing_ok
= true;
4196 if (ec
->private_devices
)
4197 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4199 if (ec
->protect_kernel_modules
)
4200 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4202 if (ec
->dynamic_user
) {
4204 r
= user_from_unit_name(u
, &ec
->user
);
4210 ec
->group
= strdup(ec
->user
);
4215 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4216 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4219 ec
->private_tmp
= true;
4220 ec
->remove_ipc
= true;
4221 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4222 if (ec
->protect_home
== PROTECT_HOME_NO
)
4223 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4225 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4227 ec
->no_new_privileges
= true;
4228 ec
->restrict_suid_sgid
= true;
4232 cc
= unit_get_cgroup_context(u
);
4235 if (ec
->private_devices
&&
4236 cc
->device_policy
== CGROUP_AUTO
)
4237 cc
->device_policy
= CGROUP_CLOSED
;
4239 if (ec
->root_image
&&
4240 (cc
->device_policy
!= CGROUP_AUTO
|| cc
->device_allow
)) {
4242 /* When RootImage= is specified, the following devices are touched. */
4243 r
= cgroup_add_device_allow(cc
, "/dev/loop-control", "rw");
4247 r
= cgroup_add_device_allow(cc
, "block-loop", "rwm");
4251 r
= cgroup_add_device_allow(cc
, "block-blkext", "rwm");
4260 ExecContext
*unit_get_exec_context(Unit
*u
) {
4267 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4271 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4274 KillContext
*unit_get_kill_context(Unit
*u
) {
4281 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4285 return (KillContext
*) ((uint8_t*) u
+ offset
);
4288 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4294 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4298 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4301 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4307 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4311 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4314 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4317 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4320 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4321 return u
->manager
->lookup_paths
.transient
;
4323 if (flags
& UNIT_PERSISTENT
)
4324 return u
->manager
->lookup_paths
.persistent_control
;
4326 if (flags
& UNIT_RUNTIME
)
4327 return u
->manager
->lookup_paths
.runtime_control
;
4332 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4338 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4339 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4340 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4341 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4342 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4345 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4346 ret
= specifier_escape(s
);
4353 if (flags
& UNIT_ESCAPE_C
) {
4366 return ret
?: (char*) s
;
4369 return ret
?: strdup(s
);
4372 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4373 _cleanup_free_
char *result
= NULL
;
4374 size_t n
= 0, allocated
= 0;
4377 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4378 * way suitable for ExecStart= stanzas */
4380 STRV_FOREACH(i
, l
) {
4381 _cleanup_free_
char *buf
= NULL
;
4386 p
= unit_escape_setting(*i
, flags
, &buf
);
4390 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4391 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4405 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
4410 return TAKE_PTR(result
);
4413 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4414 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4415 const char *dir
, *wrapped
;
4422 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4425 data
= unit_escape_setting(data
, flags
, &escaped
);
4429 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4430 * previous section header is the same */
4432 if (flags
& UNIT_PRIVATE
) {
4433 if (!UNIT_VTABLE(u
)->private_section
)
4436 if (!u
->transient_file
|| u
->last_section_private
< 0)
4437 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4438 else if (u
->last_section_private
== 0)
4439 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4441 if (!u
->transient_file
|| u
->last_section_private
< 0)
4442 data
= strjoina("[Unit]\n", data
);
4443 else if (u
->last_section_private
> 0)
4444 data
= strjoina("\n[Unit]\n", data
);
4447 if (u
->transient_file
) {
4448 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4449 * write to the transient unit file. */
4450 fputs(data
, u
->transient_file
);
4452 if (!endswith(data
, "\n"))
4453 fputc('\n', u
->transient_file
);
4455 /* Remember which section we wrote this entry to */
4456 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4460 dir
= unit_drop_in_dir(u
, flags
);
4464 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4465 "# or an equivalent operation. Do not edit.\n",
4469 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4473 (void) mkdir_p_label(p
, 0755);
4474 r
= write_string_file_atomic_label(q
, wrapped
);
4478 r
= strv_push(&u
->dropin_paths
, q
);
4483 strv_uniq(u
->dropin_paths
);
4485 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4490 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4491 _cleanup_free_
char *p
= NULL
;
4499 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4502 va_start(ap
, format
);
4503 r
= vasprintf(&p
, format
, ap
);
4509 return unit_write_setting(u
, flags
, name
, p
);
4512 int unit_make_transient(Unit
*u
) {
4513 _cleanup_free_
char *path
= NULL
;
4518 if (!UNIT_VTABLE(u
)->can_transient
)
4521 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4523 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4527 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4528 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4530 RUN_WITH_UMASK(0022) {
4531 f
= fopen(path
, "we");
4536 safe_fclose(u
->transient_file
);
4537 u
->transient_file
= f
;
4539 free_and_replace(u
->fragment_path
, path
);
4541 u
->source_path
= mfree(u
->source_path
);
4542 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4543 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4545 u
->load_state
= UNIT_STUB
;
4547 u
->transient
= true;
4549 unit_add_to_dbus_queue(u
);
4550 unit_add_to_gc_queue(u
);
4552 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4558 static int log_kill(pid_t pid
, int sig
, void *userdata
) {
4559 _cleanup_free_
char *comm
= NULL
;
4561 (void) get_process_comm(pid
, &comm
);
4563 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4564 only, like for example systemd's own PAM stub process. */
4565 if (comm
&& comm
[0] == '(')
4568 log_unit_notice(userdata
,
4569 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4572 signal_to_string(sig
));
4577 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
4582 case KILL_TERMINATE
:
4583 case KILL_TERMINATE_AND_LOG
:
4584 return c
->kill_signal
;
4587 return c
->final_kill_signal
;
4590 return c
->watchdog_signal
;
4593 assert_not_reached("KillOperation unknown");
4597 int unit_kill_context(
4603 bool main_pid_alien
) {
4605 bool wait_for_exit
= false, send_sighup
;
4606 cg_kill_log_func_t log_func
= NULL
;
4612 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4613 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4615 if (c
->kill_mode
== KILL_NONE
)
4618 sig
= operation_to_signal(c
, k
);
4622 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4625 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
4626 log_func
= log_kill
;
4630 log_func(main_pid
, sig
, u
);
4632 r
= kill_and_sigcont(main_pid
, sig
);
4633 if (r
< 0 && r
!= -ESRCH
) {
4634 _cleanup_free_
char *comm
= NULL
;
4635 (void) get_process_comm(main_pid
, &comm
);
4637 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
4639 if (!main_pid_alien
)
4640 wait_for_exit
= true;
4642 if (r
!= -ESRCH
&& send_sighup
)
4643 (void) kill(main_pid
, SIGHUP
);
4647 if (control_pid
> 0) {
4649 log_func(control_pid
, sig
, u
);
4651 r
= kill_and_sigcont(control_pid
, sig
);
4652 if (r
< 0 && r
!= -ESRCH
) {
4653 _cleanup_free_
char *comm
= NULL
;
4654 (void) get_process_comm(control_pid
, &comm
);
4656 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4658 wait_for_exit
= true;
4660 if (r
!= -ESRCH
&& send_sighup
)
4661 (void) kill(control_pid
, SIGHUP
);
4665 if (u
->cgroup_path
&&
4666 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4667 _cleanup_set_free_ Set
*pid_set
= NULL
;
4669 /* Exclude the main/control pids from being killed via the cgroup */
4670 pid_set
= unit_pid_set(main_pid
, control_pid
);
4674 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4676 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4680 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4681 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
4685 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4686 * we are running in a container or if this is a delegation unit, simply because cgroup
4687 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4688 * of containers it can be confused easily by left-over directories in the cgroup — which
4689 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4690 * there we get proper events. Hence rely on them. */
4692 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4693 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4694 wait_for_exit
= true;
4699 pid_set
= unit_pid_set(main_pid
, control_pid
);
4703 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4712 return wait_for_exit
;
4715 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4716 _cleanup_free_
char *p
= NULL
;
4717 UnitDependencyInfo di
;
4723 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4724 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4725 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4726 * determine which units to make themselves a dependency of. */
4728 if (!path_is_absolute(path
))
4731 r
= hashmap_ensure_allocated(&u
->requires_mounts_for
, &path_hash_ops
);
4739 path
= path_simplify(p
, true);
4741 if (!path_is_normalized(path
))
4744 if (hashmap_contains(u
->requires_mounts_for
, path
))
4747 di
= (UnitDependencyInfo
) {
4751 r
= hashmap_put(u
->requires_mounts_for
, path
, di
.data
);
4756 char prefix
[strlen(path
) + 1];
4757 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4760 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4762 _cleanup_free_
char *q
= NULL
;
4764 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4776 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
4792 int unit_setup_exec_runtime(Unit
*u
) {
4800 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4803 /* Check if there already is an ExecRuntime for this unit? */
4804 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4808 /* Try to get it from somebody else */
4809 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4810 r
= exec_runtime_acquire(u
->manager
, NULL
, other
->id
, false, rt
);
4815 return exec_runtime_acquire(u
->manager
, unit_get_exec_context(u
), u
->id
, true, rt
);
4818 int unit_setup_dynamic_creds(Unit
*u
) {
4820 DynamicCreds
*dcreds
;
4825 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4827 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4829 ec
= unit_get_exec_context(u
);
4832 if (!ec
->dynamic_user
)
4835 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
4838 bool unit_type_supported(UnitType t
) {
4839 if (_unlikely_(t
< 0))
4841 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4844 if (!unit_vtable
[t
]->supported
)
4847 return unit_vtable
[t
]->supported();
4850 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4856 r
= dir_is_empty(where
);
4857 if (r
> 0 || r
== -ENOTDIR
)
4860 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4864 log_struct(LOG_NOTICE
,
4865 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4867 LOG_UNIT_INVOCATION_ID(u
),
4868 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
4872 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4873 _cleanup_free_
char *canonical_where
= NULL
;
4879 r
= chase_symlinks(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
);
4881 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4885 /* We will happily ignore a trailing slash (or any redundant slashes) */
4886 if (path_equal(where
, canonical_where
))
4889 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4891 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4893 LOG_UNIT_INVOCATION_ID(u
),
4894 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
4900 bool unit_is_pristine(Unit
*u
) {
4903 /* Check if the unit already exists or is already around,
4904 * in a number of different ways. Note that to cater for unit
4905 * types such as slice, we are generally fine with units that
4906 * are marked UNIT_LOADED even though nothing was actually
4907 * loaded, as those unit types don't require a file on disk. */
4909 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4912 !strv_isempty(u
->dropin_paths
) ||
4917 pid_t
unit_control_pid(Unit
*u
) {
4920 if (UNIT_VTABLE(u
)->control_pid
)
4921 return UNIT_VTABLE(u
)->control_pid(u
);
4926 pid_t
unit_main_pid(Unit
*u
) {
4929 if (UNIT_VTABLE(u
)->main_pid
)
4930 return UNIT_VTABLE(u
)->main_pid(u
);
4935 static void unit_unref_uid_internal(
4939 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4943 assert(_manager_unref_uid
);
4945 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4946 * gid_t are actually the same time, with the same validity rules.
4948 * Drops a reference to UID/GID from a unit. */
4950 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4951 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4953 if (!uid_is_valid(*ref_uid
))
4956 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4957 *ref_uid
= UID_INVALID
;
4960 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4961 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4964 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4965 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4968 static int unit_ref_uid_internal(
4973 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4979 assert(uid_is_valid(uid
));
4980 assert(_manager_ref_uid
);
4982 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4983 * are actually the same type, and have the same validity rules.
4985 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4986 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4989 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4990 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4992 if (*ref_uid
== uid
)
4995 if (uid_is_valid(*ref_uid
)) /* Already set? */
4998 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
5006 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
5007 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
5010 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
5011 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
5014 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
5019 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5021 if (uid_is_valid(uid
)) {
5022 r
= unit_ref_uid(u
, uid
, clean_ipc
);
5027 if (gid_is_valid(gid
)) {
5028 q
= unit_ref_gid(u
, gid
, clean_ipc
);
5031 unit_unref_uid(u
, false);
5037 return r
> 0 || q
> 0;
5040 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
5046 c
= unit_get_exec_context(u
);
5048 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
5050 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5055 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
5058 unit_unref_uid(u
, destroy_now
);
5059 unit_unref_gid(u
, destroy_now
);
5062 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
5067 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5068 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5069 * objects when no service references the UID/GID anymore. */
5071 r
= unit_ref_uid_gid(u
, uid
, gid
);
5073 unit_add_to_dbus_queue(u
);
5076 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
5081 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5083 if (sd_id128_equal(u
->invocation_id
, id
))
5086 if (!sd_id128_is_null(u
->invocation_id
))
5087 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
5089 if (sd_id128_is_null(id
)) {
5094 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
5098 u
->invocation_id
= id
;
5099 sd_id128_to_string(id
, u
->invocation_id_string
);
5101 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
5108 u
->invocation_id
= SD_ID128_NULL
;
5109 u
->invocation_id_string
[0] = 0;
5113 int unit_acquire_invocation_id(Unit
*u
) {
5119 r
= sd_id128_randomize(&id
);
5121 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5123 r
= unit_set_invocation_id(u
, id
);
5125 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5127 unit_add_to_dbus_queue(u
);
5131 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5137 /* Copy parameters from manager */
5138 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5142 p
->confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5143 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5144 p
->prefix
= u
->manager
->prefix
;
5145 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5147 /* Copy parameters from unit */
5148 p
->cgroup_path
= u
->cgroup_path
;
5149 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5154 int unit_fork_helper_process(Unit
*u
, const char *name
, pid_t
*ret
) {
5160 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5161 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5163 (void) unit_realize_cgroup(u
);
5165 r
= safe_fork(name
, FORK_REOPEN_LOG
, ret
);
5169 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
, -1);
5170 (void) ignore_signals(SIGPIPE
, -1);
5172 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
5174 if (u
->cgroup_path
) {
5175 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5177 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", u
->cgroup_path
);
5185 static void unit_update_dependency_mask(Unit
*u
, UnitDependency d
, Unit
*other
, UnitDependencyInfo di
) {
5188 assert(d
< _UNIT_DEPENDENCY_MAX
);
5191 if (di
.origin_mask
== 0 && di
.destination_mask
== 0) {
5192 /* No bit set anymore, let's drop the whole entry */
5193 assert_se(hashmap_remove(u
->dependencies
[d
], other
));
5194 log_unit_debug(u
, "%s lost dependency %s=%s", u
->id
, unit_dependency_to_string(d
), other
->id
);
5196 /* Mask was reduced, let's update the entry */
5197 assert_se(hashmap_update(u
->dependencies
[d
], other
, di
.data
) == 0);
5200 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5205 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5210 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
5214 UnitDependencyInfo di
;
5220 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
5223 if ((di
.origin_mask
& ~mask
) == di
.origin_mask
)
5225 di
.origin_mask
&= ~mask
;
5226 unit_update_dependency_mask(u
, d
, other
, di
);
5228 /* We updated the dependency from our unit to the other unit now. But most dependencies
5229 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5230 * all dependency types on the other unit and delete all those which point to us and
5231 * have the right mask set. */
5233 for (q
= 0; q
< _UNIT_DEPENDENCY_MAX
; q
++) {
5234 UnitDependencyInfo dj
;
5236 dj
.data
= hashmap_get(other
->dependencies
[q
], u
);
5237 if ((dj
.destination_mask
& ~mask
) == dj
.destination_mask
)
5239 dj
.destination_mask
&= ~mask
;
5241 unit_update_dependency_mask(other
, q
, u
, dj
);
5244 unit_add_to_gc_queue(other
);
5254 static int unit_export_invocation_id(Unit
*u
) {
5260 if (u
->exported_invocation_id
)
5263 if (sd_id128_is_null(u
->invocation_id
))
5266 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5267 r
= symlink_atomic(u
->invocation_id_string
, p
);
5269 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5271 u
->exported_invocation_id
= true;
5275 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5283 if (u
->exported_log_level_max
)
5286 if (c
->log_level_max
< 0)
5289 assert(c
->log_level_max
<= 7);
5291 buf
[0] = '0' + c
->log_level_max
;
5294 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5295 r
= symlink_atomic(buf
, p
);
5297 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5299 u
->exported_log_level_max
= true;
5303 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5304 _cleanup_close_
int fd
= -1;
5305 struct iovec
*iovec
;
5313 if (u
->exported_log_extra_fields
)
5316 if (c
->n_log_extra_fields
<= 0)
5319 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5320 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5322 for (i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5323 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5325 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5326 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5329 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5330 pattern
= strjoina(p
, ".XXXXXX");
5332 fd
= mkostemp_safe(pattern
);
5334 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5336 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5338 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5342 (void) fchmod(fd
, 0644);
5344 if (rename(pattern
, p
) < 0) {
5345 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5349 u
->exported_log_extra_fields
= true;
5353 (void) unlink(pattern
);
5357 static int unit_export_log_rate_limit_interval(Unit
*u
, const ExecContext
*c
) {
5358 _cleanup_free_
char *buf
= NULL
;
5365 if (u
->exported_log_rate_limit_interval
)
5368 if (c
->log_rate_limit_interval_usec
== 0)
5371 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5373 if (asprintf(&buf
, "%" PRIu64
, c
->log_rate_limit_interval_usec
) < 0)
5376 r
= symlink_atomic(buf
, p
);
5378 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5380 u
->exported_log_rate_limit_interval
= true;
5384 static int unit_export_log_rate_limit_burst(Unit
*u
, const ExecContext
*c
) {
5385 _cleanup_free_
char *buf
= NULL
;
5392 if (u
->exported_log_rate_limit_burst
)
5395 if (c
->log_rate_limit_burst
== 0)
5398 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5400 if (asprintf(&buf
, "%u", c
->log_rate_limit_burst
) < 0)
5403 r
= symlink_atomic(buf
, p
);
5405 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5407 u
->exported_log_rate_limit_burst
= true;
5411 void unit_export_state_files(Unit
*u
) {
5412 const ExecContext
*c
;
5419 if (!MANAGER_IS_SYSTEM(u
->manager
))
5422 if (MANAGER_IS_TEST_RUN(u
->manager
))
5425 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5426 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5427 * the IPC system itself and PID 1 also log to the journal.
5429 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5430 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5431 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5432 * namespace at least.
5434 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5435 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5438 (void) unit_export_invocation_id(u
);
5440 c
= unit_get_exec_context(u
);
5442 (void) unit_export_log_level_max(u
, c
);
5443 (void) unit_export_log_extra_fields(u
, c
);
5444 (void) unit_export_log_rate_limit_interval(u
, c
);
5445 (void) unit_export_log_rate_limit_burst(u
, c
);
5449 void unit_unlink_state_files(Unit
*u
) {
5457 if (!MANAGER_IS_SYSTEM(u
->manager
))
5460 /* Undoes the effect of unit_export_state() */
5462 if (u
->exported_invocation_id
) {
5463 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5466 u
->exported_invocation_id
= false;
5469 if (u
->exported_log_level_max
) {
5470 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5473 u
->exported_log_level_max
= false;
5476 if (u
->exported_log_extra_fields
) {
5477 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5480 u
->exported_log_extra_fields
= false;
5483 if (u
->exported_log_rate_limit_interval
) {
5484 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5487 u
->exported_log_rate_limit_interval
= false;
5490 if (u
->exported_log_rate_limit_burst
) {
5491 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5494 u
->exported_log_rate_limit_burst
= false;
5498 int unit_prepare_exec(Unit
*u
) {
5503 /* Prepares everything so that we can fork of a process for this unit */
5505 (void) unit_realize_cgroup(u
);
5507 if (u
->reset_accounting
) {
5508 (void) unit_reset_accounting(u
);
5509 u
->reset_accounting
= false;
5512 unit_export_state_files(u
);
5514 r
= unit_setup_exec_runtime(u
);
5518 r
= unit_setup_dynamic_creds(u
);
5525 static int log_leftover(pid_t pid
, int sig
, void *userdata
) {
5526 _cleanup_free_
char *comm
= NULL
;
5528 (void) get_process_comm(pid
, &comm
);
5530 if (comm
&& comm
[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5533 log_unit_warning(userdata
,
5534 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5535 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5541 int unit_warn_leftover_processes(Unit
*u
) {
5544 (void) unit_pick_cgroup_path(u
);
5546 if (!u
->cgroup_path
)
5549 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_leftover
, u
);
5552 bool unit_needs_console(Unit
*u
) {
5554 UnitActiveState state
;
5558 state
= unit_active_state(u
);
5560 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5563 if (UNIT_VTABLE(u
)->needs_console
)
5564 return UNIT_VTABLE(u
)->needs_console(u
);
5566 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5567 ec
= unit_get_exec_context(u
);
5571 return exec_context_may_touch_console(ec
);
5574 const char *unit_label_path(Unit
*u
) {
5577 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5578 * when validating access checks. */
5580 p
= u
->source_path
?: u
->fragment_path
;
5584 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5585 if (path_equal(p
, "/dev/null"))
5591 int unit_pid_attachable(Unit
*u
, pid_t pid
, sd_bus_error
*error
) {
5596 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5597 * and not a kernel thread either */
5599 /* First, a simple range check */
5600 if (!pid_is_valid(pid
))
5601 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier " PID_FMT
" is not valid.", pid
);
5603 /* Some extra safety check */
5604 if (pid
== 1 || pid
== getpid_cached())
5605 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
);
5607 /* Don't even begin to bother with kernel threads */
5608 r
= is_kernel_thread(pid
);
5610 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
);
5612 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
);
5614 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
);
5619 void unit_log_success(Unit
*u
) {
5622 log_struct(LOG_INFO
,
5623 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5625 LOG_UNIT_INVOCATION_ID(u
),
5626 LOG_UNIT_MESSAGE(u
, "Succeeded."));
5629 void unit_log_failure(Unit
*u
, const char *result
) {
5633 log_struct(LOG_WARNING
,
5634 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5636 LOG_UNIT_INVOCATION_ID(u
),
5637 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5638 "UNIT_RESULT=%s", result
);
5641 void unit_log_process_exit(
5645 const char *command
,
5652 if (code
!= CLD_EXITED
)
5653 level
= LOG_WARNING
;
5656 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
5657 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s",
5659 sigchld_code_to_string(code
), status
,
5660 strna(code
== CLD_EXITED
5661 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
5662 : signal_to_string(status
))),
5663 "EXIT_CODE=%s", sigchld_code_to_string(code
),
5664 "EXIT_STATUS=%i", status
,
5665 "COMMAND=%s", strna(command
),
5667 LOG_UNIT_INVOCATION_ID(u
));
5670 int unit_exit_status(Unit
*u
) {
5673 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5674 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5675 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5676 * service process has exited abnormally (signal/coredump). */
5678 if (!UNIT_VTABLE(u
)->exit_status
)
5681 return UNIT_VTABLE(u
)->exit_status(u
);
5684 int unit_failure_action_exit_status(Unit
*u
) {
5689 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5691 if (u
->failure_action_exit_status
>= 0)
5692 return u
->failure_action_exit_status
;
5694 r
= unit_exit_status(u
);
5695 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5701 int unit_success_action_exit_status(Unit
*u
) {
5706 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5708 if (u
->success_action_exit_status
>= 0)
5709 return u
->success_action_exit_status
;
5711 r
= unit_exit_status(u
);
5712 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5718 int unit_test_trigger_loaded(Unit
*u
) {
5721 /* Tests whether the unit to trigger is loaded */
5723 trigger
= UNIT_TRIGGER(u
);
5725 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
), "Refusing to start, unit to trigger not loaded.");
5726 if (trigger
->load_state
!= UNIT_LOADED
)
5727 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
), "Refusing to start, unit %s to trigger not loaded.", u
->id
);
5732 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5733 [COLLECT_INACTIVE
] = "inactive",
5734 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5737 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);