1 /* SPDX-License-Identifier: LGPL-2.1+ */
11 #include "sd-messages.h"
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
27 #include "id128-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
49 #include "umask-util.h"
50 #include "unit-name.h"
52 #include "user-util.h"
55 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
56 [UNIT_SERVICE
] = &service_vtable
,
57 [UNIT_SOCKET
] = &socket_vtable
,
58 [UNIT_TARGET
] = &target_vtable
,
59 [UNIT_DEVICE
] = &device_vtable
,
60 [UNIT_MOUNT
] = &mount_vtable
,
61 [UNIT_AUTOMOUNT
] = &automount_vtable
,
62 [UNIT_SWAP
] = &swap_vtable
,
63 [UNIT_TIMER
] = &timer_vtable
,
64 [UNIT_PATH
] = &path_vtable
,
65 [UNIT_SLICE
] = &slice_vtable
,
66 [UNIT_SCOPE
] = &scope_vtable
,
69 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
71 Unit
*unit_new(Manager
*m
, size_t size
) {
75 assert(size
>= sizeof(Unit
));
81 u
->names
= set_new(&string_hash_ops
);
86 u
->type
= _UNIT_TYPE_INVALID
;
87 u
->default_dependencies
= true;
88 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
89 u
->unit_file_preset
= -1;
90 u
->on_failure_job_mode
= JOB_REPLACE
;
91 u
->cgroup_inotify_wd
= -1;
92 u
->job_timeout
= USEC_INFINITY
;
93 u
->job_running_timeout
= USEC_INFINITY
;
94 u
->ref_uid
= UID_INVALID
;
95 u
->ref_gid
= GID_INVALID
;
96 u
->cpu_usage_last
= NSEC_INFINITY
;
97 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
99 u
->ip_accounting_ingress_map_fd
= -1;
100 u
->ip_accounting_egress_map_fd
= -1;
101 u
->ipv4_allow_map_fd
= -1;
102 u
->ipv6_allow_map_fd
= -1;
103 u
->ipv4_deny_map_fd
= -1;
104 u
->ipv6_deny_map_fd
= -1;
106 u
->last_section_private
= -1;
108 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
109 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
114 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
115 _cleanup_(unit_freep
) Unit
*u
= NULL
;
118 u
= unit_new(m
, size
);
122 r
= unit_add_name(u
, name
);
131 bool unit_has_name(Unit
*u
, const char *name
) {
135 return set_contains(u
->names
, (char*) name
);
138 static void unit_init(Unit
*u
) {
145 assert(u
->type
>= 0);
147 cc
= unit_get_cgroup_context(u
);
149 cgroup_context_init(cc
);
151 /* Copy in the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
155 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
156 cc
->io_accounting
= u
->manager
->default_io_accounting
;
157 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
158 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
159 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
160 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
161 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
163 if (u
->type
!= UNIT_SLICE
)
164 cc
->tasks_max
= u
->manager
->default_tasks_max
;
167 ec
= unit_get_exec_context(u
);
169 exec_context_init(ec
);
171 ec
->keyring_mode
= MANAGER_IS_SYSTEM(u
->manager
) ?
172 EXEC_KEYRING_SHARED
: EXEC_KEYRING_INHERIT
;
175 kc
= unit_get_kill_context(u
);
177 kill_context_init(kc
);
179 if (UNIT_VTABLE(u
)->init
)
180 UNIT_VTABLE(u
)->init(u
);
183 int unit_add_name(Unit
*u
, const char *text
) {
184 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
191 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
196 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
205 if (set_contains(u
->names
, s
))
207 if (hashmap_contains(u
->manager
->units
, s
))
210 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
213 t
= unit_name_to_type(s
);
217 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
220 r
= unit_name_to_instance(s
, &i
);
224 if (i
&& !unit_type_may_template(t
))
227 /* Ensure that this unit is either instanced or not instanced,
228 * but not both. Note that we do allow names with different
229 * instance names however! */
230 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
233 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
236 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
239 r
= set_put(u
->names
, s
);
244 r
= hashmap_put(u
->manager
->units
, s
, u
);
246 (void) set_remove(u
->names
, s
);
250 if (u
->type
== _UNIT_TYPE_INVALID
) {
253 u
->instance
= TAKE_PTR(i
);
255 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
262 unit_add_to_dbus_queue(u
);
266 int unit_choose_id(Unit
*u
, const char *name
) {
267 _cleanup_free_
char *t
= NULL
;
274 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
279 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
286 /* Selects one of the names of this unit as the id */
287 s
= set_get(u
->names
, (char*) name
);
291 /* Determine the new instance from the new id */
292 r
= unit_name_to_instance(s
, &i
);
301 unit_add_to_dbus_queue(u
);
306 int unit_set_description(Unit
*u
, const char *description
) {
311 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
315 unit_add_to_dbus_queue(u
);
320 bool unit_may_gc(Unit
*u
) {
321 UnitActiveState state
;
326 /* Checks whether the unit is ready to be unloaded for garbage collection.
327 * Returns true when the unit may be collected, and false if there's some
328 * reason to keep it loaded.
330 * References from other units are *not* checked here. Instead, this is done
331 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
340 state
= unit_active_state(u
);
342 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
343 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
344 UNIT_VTABLE(u
)->release_resources
)
345 UNIT_VTABLE(u
)->release_resources(u
);
350 if (sd_bus_track_count(u
->bus_track
) > 0)
353 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
354 switch (u
->collect_mode
) {
356 case COLLECT_INACTIVE
:
357 if (state
!= UNIT_INACTIVE
)
362 case COLLECT_INACTIVE_OR_FAILED
:
363 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
369 assert_not_reached("Unknown garbage collection mode");
372 if (u
->cgroup_path
) {
373 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
374 * around. Units with active processes should never be collected. */
376 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
378 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", u
->cgroup_path
);
383 if (UNIT_VTABLE(u
)->may_gc
&& !UNIT_VTABLE(u
)->may_gc(u
))
389 void unit_add_to_load_queue(Unit
*u
) {
391 assert(u
->type
!= _UNIT_TYPE_INVALID
);
393 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
396 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
397 u
->in_load_queue
= true;
400 void unit_add_to_cleanup_queue(Unit
*u
) {
403 if (u
->in_cleanup_queue
)
406 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
407 u
->in_cleanup_queue
= true;
410 void unit_add_to_gc_queue(Unit
*u
) {
413 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
419 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
420 u
->in_gc_queue
= true;
423 void unit_add_to_dbus_queue(Unit
*u
) {
425 assert(u
->type
!= _UNIT_TYPE_INVALID
);
427 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
430 /* Shortcut things if nobody cares */
431 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
432 sd_bus_track_count(u
->bus_track
) <= 0 &&
433 set_isempty(u
->manager
->private_buses
)) {
434 u
->sent_dbus_new_signal
= true;
438 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
439 u
->in_dbus_queue
= true;
442 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
445 if (u
->in_stop_when_unneeded_queue
)
448 if (!u
->stop_when_unneeded
)
451 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
454 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
455 u
->in_stop_when_unneeded_queue
= true;
458 static void bidi_set_free(Unit
*u
, Hashmap
*h
) {
465 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
467 HASHMAP_FOREACH_KEY(v
, other
, h
, i
) {
470 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
471 hashmap_remove(other
->dependencies
[d
], u
);
473 unit_add_to_gc_queue(other
);
479 static void unit_remove_transient(Unit
*u
) {
487 if (u
->fragment_path
)
488 (void) unlink(u
->fragment_path
);
490 STRV_FOREACH(i
, u
->dropin_paths
) {
491 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
493 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
497 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
501 /* Only drop transient drop-ins */
502 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
510 static void unit_free_requires_mounts_for(Unit
*u
) {
514 _cleanup_free_
char *path
;
516 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
520 char s
[strlen(path
) + 1];
522 PATH_FOREACH_PREFIX_MORE(s
, path
) {
526 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
530 (void) set_remove(x
, u
);
532 if (set_isempty(x
)) {
533 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
541 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
544 static void unit_done(Unit
*u
) {
553 if (UNIT_VTABLE(u
)->done
)
554 UNIT_VTABLE(u
)->done(u
);
556 ec
= unit_get_exec_context(u
);
558 exec_context_done(ec
);
560 cc
= unit_get_cgroup_context(u
);
562 cgroup_context_done(cc
);
565 void unit_free(Unit
*u
) {
573 u
->transient_file
= safe_fclose(u
->transient_file
);
575 if (!MANAGER_IS_RELOADING(u
->manager
))
576 unit_remove_transient(u
);
578 bus_unit_send_removed_signal(u
);
582 unit_dequeue_rewatch_pids(u
);
584 sd_bus_slot_unref(u
->match_bus_slot
);
585 sd_bus_track_unref(u
->bus_track
);
586 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
588 unit_free_requires_mounts_for(u
);
590 SET_FOREACH(t
, u
->names
, i
)
591 hashmap_remove_value(u
->manager
->units
, t
, u
);
593 if (!sd_id128_is_null(u
->invocation_id
))
594 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
608 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
609 bidi_set_free(u
, u
->dependencies
[d
]);
612 manager_unref_console(u
->manager
);
614 unit_release_cgroup(u
);
616 if (!MANAGER_IS_RELOADING(u
->manager
))
617 unit_unlink_state_files(u
);
619 unit_unref_uid_gid(u
, false);
621 (void) manager_update_failed_units(u
->manager
, u
, false);
622 set_remove(u
->manager
->startup_units
, u
);
624 unit_unwatch_all_pids(u
);
626 unit_ref_unset(&u
->slice
);
627 while (u
->refs_by_target
)
628 unit_ref_unset(u
->refs_by_target
);
630 if (u
->type
!= _UNIT_TYPE_INVALID
)
631 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
633 if (u
->in_load_queue
)
634 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
636 if (u
->in_dbus_queue
)
637 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
640 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
642 if (u
->in_cgroup_realize_queue
)
643 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
645 if (u
->in_cgroup_empty_queue
)
646 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
648 if (u
->in_cleanup_queue
)
649 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
651 if (u
->in_target_deps_queue
)
652 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
654 if (u
->in_stop_when_unneeded_queue
)
655 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
657 safe_close(u
->ip_accounting_ingress_map_fd
);
658 safe_close(u
->ip_accounting_egress_map_fd
);
660 safe_close(u
->ipv4_allow_map_fd
);
661 safe_close(u
->ipv6_allow_map_fd
);
662 safe_close(u
->ipv4_deny_map_fd
);
663 safe_close(u
->ipv6_deny_map_fd
);
665 bpf_program_unref(u
->ip_bpf_ingress
);
666 bpf_program_unref(u
->ip_bpf_ingress_installed
);
667 bpf_program_unref(u
->ip_bpf_egress
);
668 bpf_program_unref(u
->ip_bpf_egress_installed
);
670 bpf_program_unref(u
->bpf_device_control_installed
);
672 condition_free_list(u
->conditions
);
673 condition_free_list(u
->asserts
);
675 free(u
->description
);
676 strv_free(u
->documentation
);
677 free(u
->fragment_path
);
678 free(u
->source_path
);
679 strv_free(u
->dropin_paths
);
682 free(u
->job_timeout_reboot_arg
);
684 set_free_free(u
->names
);
691 UnitActiveState
unit_active_state(Unit
*u
) {
694 if (u
->load_state
== UNIT_MERGED
)
695 return unit_active_state(unit_follow_merge(u
));
697 /* After a reload it might happen that a unit is not correctly
698 * loaded but still has a process around. That's why we won't
699 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
701 return UNIT_VTABLE(u
)->active_state(u
);
704 const char* unit_sub_state_to_string(Unit
*u
) {
707 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
710 static int set_complete_move(Set
**s
, Set
**other
) {
718 return set_move(*s
, *other
);
720 *s
= TAKE_PTR(*other
);
725 static int hashmap_complete_move(Hashmap
**s
, Hashmap
**other
) {
733 return hashmap_move(*s
, *other
);
735 *s
= TAKE_PTR(*other
);
740 static int merge_names(Unit
*u
, Unit
*other
) {
748 r
= set_complete_move(&u
->names
, &other
->names
);
752 set_free_free(other
->names
);
756 SET_FOREACH(t
, u
->names
, i
)
757 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
762 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
767 assert(d
< _UNIT_DEPENDENCY_MAX
);
770 * If u does not have this dependency set allocated, there is no need
771 * to reserve anything. In that case other's set will be transferred
772 * as a whole to u by complete_move().
774 if (!u
->dependencies
[d
])
777 /* merge_dependencies() will skip a u-on-u dependency */
778 n_reserve
= hashmap_size(other
->dependencies
[d
]) - !!hashmap_get(other
->dependencies
[d
], u
);
780 return hashmap_reserve(u
->dependencies
[d
], n_reserve
);
783 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
789 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
793 assert(d
< _UNIT_DEPENDENCY_MAX
);
795 /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
796 HASHMAP_FOREACH_KEY(v
, back
, other
->dependencies
[d
], i
) {
799 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
800 * pointers back, and let's fix them up, to instead point to 'u'. */
802 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
804 /* Do not add dependencies between u and itself. */
805 if (hashmap_remove(back
->dependencies
[k
], other
))
806 maybe_warn_about_dependency(u
, other_id
, k
);
808 UnitDependencyInfo di_u
, di_other
, di_merged
;
810 /* Let's drop this dependency between "back" and "other", and let's create it between
811 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
812 * and any such dependency which might already exist */
814 di_other
.data
= hashmap_get(back
->dependencies
[k
], other
);
816 continue; /* dependency isn't set, let's try the next one */
818 di_u
.data
= hashmap_get(back
->dependencies
[k
], u
);
820 di_merged
= (UnitDependencyInfo
) {
821 .origin_mask
= di_u
.origin_mask
| di_other
.origin_mask
,
822 .destination_mask
= di_u
.destination_mask
| di_other
.destination_mask
,
825 r
= hashmap_remove_and_replace(back
->dependencies
[k
], other
, u
, di_merged
.data
);
827 log_warning_errno(r
, "Failed to remove/replace: back=%s other=%s u=%s: %m", back
->id
, other_id
, u
->id
);
830 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
836 /* Also do not move dependencies on u to itself */
837 back
= hashmap_remove(other
->dependencies
[d
], u
);
839 maybe_warn_about_dependency(u
, other_id
, d
);
841 /* The move cannot fail. The caller must have performed a reservation. */
842 assert_se(hashmap_complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
844 other
->dependencies
[d
] = hashmap_free(other
->dependencies
[d
]);
847 int unit_merge(Unit
*u
, Unit
*other
) {
849 const char *other_id
= NULL
;
854 assert(u
->manager
== other
->manager
);
855 assert(u
->type
!= _UNIT_TYPE_INVALID
);
857 other
= unit_follow_merge(other
);
862 if (u
->type
!= other
->type
)
865 if (!u
->instance
!= !other
->instance
)
868 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
871 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
880 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
884 other_id
= strdupa(other
->id
);
886 /* Make reservations to ensure merge_dependencies() won't fail */
887 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
888 r
= reserve_dependencies(u
, other
, d
);
890 * We don't rollback reservations if we fail. We don't have
891 * a way to undo reservations. A reservation is not a leak.
898 r
= merge_names(u
, other
);
902 /* Redirect all references */
903 while (other
->refs_by_target
)
904 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
906 /* Merge dependencies */
907 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
908 merge_dependencies(u
, other
, other_id
, d
);
910 other
->load_state
= UNIT_MERGED
;
911 other
->merged_into
= u
;
913 /* If there is still some data attached to the other node, we
914 * don't need it anymore, and can free it. */
915 if (other
->load_state
!= UNIT_STUB
)
916 if (UNIT_VTABLE(other
)->done
)
917 UNIT_VTABLE(other
)->done(other
);
919 unit_add_to_dbus_queue(u
);
920 unit_add_to_cleanup_queue(other
);
925 int unit_merge_by_name(Unit
*u
, const char *name
) {
926 _cleanup_free_
char *s
= NULL
;
933 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
937 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
944 other
= manager_get_unit(u
->manager
, name
);
946 return unit_merge(u
, other
);
948 return unit_add_name(u
, name
);
951 Unit
* unit_follow_merge(Unit
*u
) {
954 while (u
->load_state
== UNIT_MERGED
)
955 assert_se(u
= u
->merged_into
);
960 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
961 ExecDirectoryType dt
;
968 if (c
->working_directory
&& !c
->working_directory_missing_ok
) {
969 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
974 if (c
->root_directory
) {
975 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
981 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
986 for (dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
987 if (!u
->manager
->prefix
[dt
])
990 STRV_FOREACH(dp
, c
->directories
[dt
].paths
) {
991 _cleanup_free_
char *p
;
993 p
= strjoin(u
->manager
->prefix
[dt
], "/", *dp
);
997 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1003 if (!MANAGER_IS_SYSTEM(u
->manager
))
1006 if (c
->private_tmp
) {
1009 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
1010 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1015 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1020 if (!IN_SET(c
->std_output
,
1021 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1022 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1023 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
1024 !IN_SET(c
->std_error
,
1025 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1026 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1027 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
1030 /* If syslog or kernel logging is requested, make sure our own
1031 * logging daemon is run first. */
1033 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1040 const char *unit_description(Unit
*u
) {
1044 return u
->description
;
1046 return strna(u
->id
);
1049 static void print_unit_dependency_mask(FILE *f
, const char *kind
, UnitDependencyMask mask
, bool *space
) {
1051 UnitDependencyMask mask
;
1054 { UNIT_DEPENDENCY_FILE
, "file" },
1055 { UNIT_DEPENDENCY_IMPLICIT
, "implicit" },
1056 { UNIT_DEPENDENCY_DEFAULT
, "default" },
1057 { UNIT_DEPENDENCY_UDEV
, "udev" },
1058 { UNIT_DEPENDENCY_PATH
, "path" },
1059 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT
, "mountinfo-implicit" },
1060 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT
, "mountinfo-default" },
1061 { UNIT_DEPENDENCY_PROC_SWAP
, "proc-swap" },
1069 for (i
= 0; i
< ELEMENTSOF(table
); i
++) {
1074 if (FLAGS_SET(mask
, table
[i
].mask
)) {
1082 fputs(table
[i
].name
, f
);
1084 mask
&= ~table
[i
].mask
;
1091 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
1095 const char *prefix2
;
1097 timestamp0
[FORMAT_TIMESTAMP_MAX
],
1098 timestamp1
[FORMAT_TIMESTAMP_MAX
],
1099 timestamp2
[FORMAT_TIMESTAMP_MAX
],
1100 timestamp3
[FORMAT_TIMESTAMP_MAX
],
1101 timestamp4
[FORMAT_TIMESTAMP_MAX
],
1102 timespan
[FORMAT_TIMESPAN_MAX
];
1104 _cleanup_set_free_ Set
*following_set
= NULL
;
1110 assert(u
->type
>= 0);
1112 prefix
= strempty(prefix
);
1113 prefix2
= strjoina(prefix
, "\t");
1117 "%s\tDescription: %s\n"
1118 "%s\tInstance: %s\n"
1119 "%s\tUnit Load State: %s\n"
1120 "%s\tUnit Active State: %s\n"
1121 "%s\tState Change Timestamp: %s\n"
1122 "%s\tInactive Exit Timestamp: %s\n"
1123 "%s\tActive Enter Timestamp: %s\n"
1124 "%s\tActive Exit Timestamp: %s\n"
1125 "%s\tInactive Enter Timestamp: %s\n"
1127 "%s\tNeed Daemon Reload: %s\n"
1128 "%s\tTransient: %s\n"
1129 "%s\tPerpetual: %s\n"
1130 "%s\tGarbage Collection Mode: %s\n"
1133 "%s\tCGroup realized: %s\n",
1135 prefix
, unit_description(u
),
1136 prefix
, strna(u
->instance
),
1137 prefix
, unit_load_state_to_string(u
->load_state
),
1138 prefix
, unit_active_state_to_string(unit_active_state(u
)),
1139 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
1140 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
1141 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
1142 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
1143 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
1144 prefix
, yes_no(unit_may_gc(u
)),
1145 prefix
, yes_no(unit_need_daemon_reload(u
)),
1146 prefix
, yes_no(u
->transient
),
1147 prefix
, yes_no(u
->perpetual
),
1148 prefix
, collect_mode_to_string(u
->collect_mode
),
1149 prefix
, strna(unit_slice_name(u
)),
1150 prefix
, strna(u
->cgroup_path
),
1151 prefix
, yes_no(u
->cgroup_realized
));
1153 if (u
->cgroup_realized_mask
!= 0) {
1154 _cleanup_free_
char *s
= NULL
;
1155 (void) cg_mask_to_string(u
->cgroup_realized_mask
, &s
);
1156 fprintf(f
, "%s\tCGroup realized mask: %s\n", prefix
, strnull(s
));
1158 if (u
->cgroup_enabled_mask
!= 0) {
1159 _cleanup_free_
char *s
= NULL
;
1160 (void) cg_mask_to_string(u
->cgroup_enabled_mask
, &s
);
1161 fprintf(f
, "%s\tCGroup enabled mask: %s\n", prefix
, strnull(s
));
1163 m
= unit_get_own_mask(u
);
1165 _cleanup_free_
char *s
= NULL
;
1166 (void) cg_mask_to_string(m
, &s
);
1167 fprintf(f
, "%s\tCGroup own mask: %s\n", prefix
, strnull(s
));
1169 m
= unit_get_members_mask(u
);
1171 _cleanup_free_
char *s
= NULL
;
1172 (void) cg_mask_to_string(m
, &s
);
1173 fprintf(f
, "%s\tCGroup members mask: %s\n", prefix
, strnull(s
));
1176 SET_FOREACH(t
, u
->names
, i
)
1177 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
1179 if (!sd_id128_is_null(u
->invocation_id
))
1180 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
1181 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
1183 STRV_FOREACH(j
, u
->documentation
)
1184 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
1186 following
= unit_following(u
);
1188 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
1190 r
= unit_following_set(u
, &following_set
);
1194 SET_FOREACH(other
, following_set
, i
)
1195 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1198 if (u
->fragment_path
)
1199 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1202 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1204 STRV_FOREACH(j
, u
->dropin_paths
)
1205 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1207 if (u
->failure_action
!= EMERGENCY_ACTION_NONE
)
1208 fprintf(f
, "%s\tFailure Action: %s\n", prefix
, emergency_action_to_string(u
->failure_action
));
1209 if (u
->success_action
!= EMERGENCY_ACTION_NONE
)
1210 fprintf(f
, "%s\tSuccess Action: %s\n", prefix
, emergency_action_to_string(u
->success_action
));
1212 if (u
->job_timeout
!= USEC_INFINITY
)
1213 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1215 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1216 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1218 if (u
->job_timeout_reboot_arg
)
1219 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1221 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1222 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1224 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1226 "%s\tCondition Timestamp: %s\n"
1227 "%s\tCondition Result: %s\n",
1228 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1229 prefix
, yes_no(u
->condition_result
));
1231 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1233 "%s\tAssert Timestamp: %s\n"
1234 "%s\tAssert Result: %s\n",
1235 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1236 prefix
, yes_no(u
->assert_result
));
1238 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1239 UnitDependencyInfo di
;
1242 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
1245 fprintf(f
, "%s\t%s: %s (", prefix
, unit_dependency_to_string(d
), other
->id
);
1247 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1248 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1254 if (!hashmap_isempty(u
->requires_mounts_for
)) {
1255 UnitDependencyInfo di
;
1258 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1261 fprintf(f
, "%s\tRequiresMountsFor: %s (", prefix
, path
);
1263 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1264 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1270 if (u
->load_state
== UNIT_LOADED
) {
1273 "%s\tStopWhenUnneeded: %s\n"
1274 "%s\tRefuseManualStart: %s\n"
1275 "%s\tRefuseManualStop: %s\n"
1276 "%s\tDefaultDependencies: %s\n"
1277 "%s\tOnFailureJobMode: %s\n"
1278 "%s\tIgnoreOnIsolate: %s\n",
1279 prefix
, yes_no(u
->stop_when_unneeded
),
1280 prefix
, yes_no(u
->refuse_manual_start
),
1281 prefix
, yes_no(u
->refuse_manual_stop
),
1282 prefix
, yes_no(u
->default_dependencies
),
1283 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1284 prefix
, yes_no(u
->ignore_on_isolate
));
1286 if (UNIT_VTABLE(u
)->dump
)
1287 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1289 } else if (u
->load_state
== UNIT_MERGED
)
1291 "%s\tMerged into: %s\n",
1292 prefix
, u
->merged_into
->id
);
1293 else if (u
->load_state
== UNIT_ERROR
)
1294 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1296 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1297 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1300 job_dump(u
->job
, f
, prefix2
);
1303 job_dump(u
->nop_job
, f
, prefix2
);
1306 /* Common implementation for multiple backends */
1307 int unit_load_fragment_and_dropin(Unit
*u
) {
1312 /* Load a .{service,socket,...} file */
1313 r
= unit_load_fragment(u
);
1317 if (u
->load_state
== UNIT_STUB
)
1320 /* Load drop-in directory data. If u is an alias, we might be reloading the
1321 * target unit needlessly. But we cannot be sure which drops-ins have already
1322 * been loaded and which not, at least without doing complicated book-keeping,
1323 * so let's always reread all drop-ins. */
1324 return unit_load_dropin(unit_follow_merge(u
));
1327 /* Common implementation for multiple backends */
1328 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1333 /* Same as unit_load_fragment_and_dropin(), but whether
1334 * something can be loaded or not doesn't matter. */
1336 /* Load a .service/.socket/.slice/… file */
1337 r
= unit_load_fragment(u
);
1341 if (u
->load_state
== UNIT_STUB
)
1342 u
->load_state
= UNIT_LOADED
;
1344 /* Load drop-in directory data */
1345 return unit_load_dropin(unit_follow_merge(u
));
1348 void unit_add_to_target_deps_queue(Unit
*u
) {
1349 Manager
*m
= u
->manager
;
1353 if (u
->in_target_deps_queue
)
1356 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1357 u
->in_target_deps_queue
= true;
1360 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1364 if (target
->type
!= UNIT_TARGET
)
1367 /* Only add the dependency if both units are loaded, so that
1368 * that loop check below is reliable */
1369 if (u
->load_state
!= UNIT_LOADED
||
1370 target
->load_state
!= UNIT_LOADED
)
1373 /* If either side wants no automatic dependencies, then let's
1375 if (!u
->default_dependencies
||
1376 !target
->default_dependencies
)
1379 /* Don't create loops */
1380 if (hashmap_get(target
->dependencies
[UNIT_BEFORE
], u
))
1383 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1386 static int unit_add_slice_dependencies(Unit
*u
) {
1387 UnitDependencyMask mask
;
1390 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1393 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1394 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1396 mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1398 if (UNIT_ISSET(u
->slice
))
1399 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true, mask
);
1401 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1404 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1407 static int unit_add_mount_dependencies(Unit
*u
) {
1408 UnitDependencyInfo di
;
1415 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1416 char prefix
[strlen(path
) + 1];
1418 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1419 _cleanup_free_
char *p
= NULL
;
1422 r
= unit_name_from_path(prefix
, ".mount", &p
);
1426 m
= manager_get_unit(u
->manager
, p
);
1428 /* Make sure to load the mount unit if
1429 * it exists. If so the dependencies
1430 * on this unit will be added later
1431 * during the loading of the mount
1433 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1439 if (m
->load_state
!= UNIT_LOADED
)
1442 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1446 if (m
->fragment_path
) {
1447 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
1457 static int unit_add_startup_units(Unit
*u
) {
1461 c
= unit_get_cgroup_context(u
);
1465 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1466 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1467 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1470 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1474 return set_put(u
->manager
->startup_units
, u
);
1477 int unit_load(Unit
*u
) {
1482 if (u
->in_load_queue
) {
1483 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1484 u
->in_load_queue
= false;
1487 if (u
->type
== _UNIT_TYPE_INVALID
)
1490 if (u
->load_state
!= UNIT_STUB
)
1493 if (u
->transient_file
) {
1494 r
= fflush_and_check(u
->transient_file
);
1498 u
->transient_file
= safe_fclose(u
->transient_file
);
1499 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1502 if (UNIT_VTABLE(u
)->load
) {
1503 r
= UNIT_VTABLE(u
)->load(u
);
1508 if (u
->load_state
== UNIT_STUB
) {
1513 if (u
->load_state
== UNIT_LOADED
) {
1514 unit_add_to_target_deps_queue(u
);
1516 r
= unit_add_slice_dependencies(u
);
1520 r
= unit_add_mount_dependencies(u
);
1524 r
= unit_add_startup_units(u
);
1528 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1529 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1534 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1535 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1537 unit_update_cgroup_members_masks(u
);
1540 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1542 unit_add_to_dbus_queue(unit_follow_merge(u
));
1543 unit_add_to_gc_queue(u
);
1548 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1549 * return ENOEXEC to ensure units are placed in this state after loading */
1551 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1552 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1556 unit_add_to_dbus_queue(u
);
1557 unit_add_to_gc_queue(u
);
1559 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1562 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1569 /* If the condition list is empty, then it is true */
1573 /* Otherwise, if all of the non-trigger conditions apply and
1574 * if any of the trigger conditions apply (unless there are
1575 * none) we return true */
1576 LIST_FOREACH(conditions
, c
, first
) {
1579 r
= condition_test(c
);
1582 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1584 c
->trigger
? "|" : "",
1585 c
->negate
? "!" : "",
1591 c
->trigger
? "|" : "",
1592 c
->negate
? "!" : "",
1594 condition_result_to_string(c
->result
));
1596 if (!c
->trigger
&& r
<= 0)
1599 if (c
->trigger
&& triggered
<= 0)
1603 return triggered
!= 0;
1606 static bool unit_condition_test(Unit
*u
) {
1609 dual_timestamp_get(&u
->condition_timestamp
);
1610 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1612 return u
->condition_result
;
1615 static bool unit_assert_test(Unit
*u
) {
1618 dual_timestamp_get(&u
->assert_timestamp
);
1619 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1621 return u
->assert_result
;
1624 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1625 DISABLE_WARNING_FORMAT_NONLITERAL
;
1626 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1630 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1632 const UnitStatusMessageFormats
*format_table
;
1635 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1637 if (t
!= JOB_RELOAD
) {
1638 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1640 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1646 /* Return generic strings */
1648 return "Starting %s.";
1649 else if (t
== JOB_STOP
)
1650 return "Stopping %s.";
1652 return "Reloading %s.";
1655 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1660 /* Reload status messages have traditionally not been printed to console. */
1661 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1664 format
= unit_get_status_message_format(u
, t
);
1666 DISABLE_WARNING_FORMAT_NONLITERAL
;
1667 unit_status_printf(u
, "", format
);
1671 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1672 const char *format
, *mid
;
1677 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1680 if (log_on_console())
1683 /* We log status messages for all units and all operations. */
1685 format
= unit_get_status_message_format(u
, t
);
1687 DISABLE_WARNING_FORMAT_NONLITERAL
;
1688 (void) snprintf(buf
, sizeof buf
, format
, unit_description(u
));
1691 mid
= t
== JOB_START
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR
:
1692 t
== JOB_STOP
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR
:
1693 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR
;
1695 /* Note that we deliberately use LOG_MESSAGE() instead of
1696 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1697 * closely what is written to screen using the status output,
1698 * which is supposed the highest level, friendliest output
1699 * possible, which means we should avoid the low-level unit
1701 log_struct(LOG_INFO
,
1702 LOG_MESSAGE("%s", buf
),
1704 LOG_UNIT_INVOCATION_ID(u
),
1708 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1711 assert(t
< _JOB_TYPE_MAX
);
1713 unit_status_log_starting_stopping_reloading(u
, t
);
1714 unit_status_print_starting_stopping(u
, t
);
1717 int unit_start_limit_test(Unit
*u
) {
1722 if (ratelimit_below(&u
->start_limit
)) {
1723 u
->start_limit_hit
= false;
1727 log_unit_warning(u
, "Start request repeated too quickly.");
1728 u
->start_limit_hit
= true;
1730 reason
= strjoina("unit ", u
->id
, " failed");
1732 return emergency_action(u
->manager
, u
->start_limit_action
,
1733 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1734 u
->reboot_arg
, reason
);
1737 bool unit_shall_confirm_spawn(Unit
*u
) {
1740 if (manager_is_confirm_spawn_disabled(u
->manager
))
1743 /* For some reasons units remaining in the same process group
1744 * as PID 1 fail to acquire the console even if it's not used
1745 * by any process. So skip the confirmation question for them. */
1746 return !unit_get_exec_context(u
)->same_pgrp
;
1749 static bool unit_verify_deps(Unit
*u
) {
1756 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1757 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1758 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1759 * conjunction with After= as for them any such check would make things entirely racy. */
1761 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1763 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1766 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1767 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1776 * -EBADR: This unit type does not support starting.
1777 * -EALREADY: Unit is already started.
1778 * -EAGAIN: An operation is already in progress. Retry later.
1779 * -ECANCELED: Too many requests for now.
1780 * -EPROTO: Assert failed
1781 * -EINVAL: Unit not loaded
1782 * -EOPNOTSUPP: Unit type not supported
1783 * -ENOLINK: The necessary dependencies are not fulfilled.
1784 * -ESTALE: This unit has been started before and can't be started a second time
1786 int unit_start(Unit
*u
) {
1787 UnitActiveState state
;
1792 /* If this is already started, then this will succeed. Note
1793 * that this will even succeed if this unit is not startable
1794 * by the user. This is relied on to detect when we need to
1795 * wait for units and when waiting is finished. */
1796 state
= unit_active_state(u
);
1797 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1800 /* Units that aren't loaded cannot be started */
1801 if (u
->load_state
!= UNIT_LOADED
)
1804 /* Refuse starting scope units more than once */
1805 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1808 /* If the conditions failed, don't do anything at all. If we
1809 * already are activating this call might still be useful to
1810 * speed up activation in case there is some hold-off time,
1811 * but we don't want to recheck the condition in that case. */
1812 if (state
!= UNIT_ACTIVATING
&&
1813 !unit_condition_test(u
)) {
1814 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1818 /* If the asserts failed, fail the entire job */
1819 if (state
!= UNIT_ACTIVATING
&&
1820 !unit_assert_test(u
)) {
1821 log_unit_notice(u
, "Starting requested but asserts failed.");
1825 /* Units of types that aren't supported cannot be
1826 * started. Note that we do this test only after the condition
1827 * checks, so that we rather return condition check errors
1828 * (which are usually not considered a true failure) than "not
1829 * supported" errors (which are considered a failure).
1831 if (!unit_supported(u
))
1834 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1835 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1836 * effect anymore, due to a reload or due to a failed condition. */
1837 if (!unit_verify_deps(u
))
1840 /* Forward to the main object, if we aren't it. */
1841 following
= unit_following(u
);
1843 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1844 return unit_start(following
);
1847 /* If it is stopped, but we cannot start it, then fail */
1848 if (!UNIT_VTABLE(u
)->start
)
1851 /* We don't suppress calls to ->start() here when we are
1852 * already starting, to allow this request to be used as a
1853 * "hurry up" call, for example when the unit is in some "auto
1854 * restart" state where it waits for a holdoff timer to elapse
1855 * before it will start again. */
1857 unit_add_to_dbus_queue(u
);
1859 return UNIT_VTABLE(u
)->start(u
);
1862 bool unit_can_start(Unit
*u
) {
1865 if (u
->load_state
!= UNIT_LOADED
)
1868 if (!unit_supported(u
))
1871 /* Scope units may be started only once */
1872 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1875 return !!UNIT_VTABLE(u
)->start
;
1878 bool unit_can_isolate(Unit
*u
) {
1881 return unit_can_start(u
) &&
1886 * -EBADR: This unit type does not support stopping.
1887 * -EALREADY: Unit is already stopped.
1888 * -EAGAIN: An operation is already in progress. Retry later.
1890 int unit_stop(Unit
*u
) {
1891 UnitActiveState state
;
1896 state
= unit_active_state(u
);
1897 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1900 following
= unit_following(u
);
1902 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1903 return unit_stop(following
);
1906 if (!UNIT_VTABLE(u
)->stop
)
1909 unit_add_to_dbus_queue(u
);
1911 return UNIT_VTABLE(u
)->stop(u
);
1914 bool unit_can_stop(Unit
*u
) {
1917 if (!unit_supported(u
))
1923 return !!UNIT_VTABLE(u
)->stop
;
1927 * -EBADR: This unit type does not support reloading.
1928 * -ENOEXEC: Unit is not started.
1929 * -EAGAIN: An operation is already in progress. Retry later.
1931 int unit_reload(Unit
*u
) {
1932 UnitActiveState state
;
1937 if (u
->load_state
!= UNIT_LOADED
)
1940 if (!unit_can_reload(u
))
1943 state
= unit_active_state(u
);
1944 if (state
== UNIT_RELOADING
)
1947 if (state
!= UNIT_ACTIVE
) {
1948 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1952 following
= unit_following(u
);
1954 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1955 return unit_reload(following
);
1958 unit_add_to_dbus_queue(u
);
1960 if (!UNIT_VTABLE(u
)->reload
) {
1961 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1962 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
1966 return UNIT_VTABLE(u
)->reload(u
);
1969 bool unit_can_reload(Unit
*u
) {
1972 if (UNIT_VTABLE(u
)->can_reload
)
1973 return UNIT_VTABLE(u
)->can_reload(u
);
1975 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1978 return UNIT_VTABLE(u
)->reload
;
1981 bool unit_is_unneeded(Unit
*u
) {
1982 static const UnitDependency deps
[] = {
1992 if (!u
->stop_when_unneeded
)
1995 /* Don't clean up while the unit is transitioning or is even inactive. */
1996 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
2001 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
2006 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2007 * restart, then don't clean this one up. */
2009 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
) {
2013 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2016 if (unit_will_restart(other
))
2024 static void check_unneeded_dependencies(Unit
*u
) {
2026 static const UnitDependency deps
[] = {
2036 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2038 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
2043 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
)
2044 unit_submit_to_stop_when_unneeded_queue(other
);
2048 static void unit_check_binds_to(Unit
*u
) {
2049 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2061 if (unit_active_state(u
) != UNIT_ACTIVE
)
2064 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
2068 if (!other
->coldplugged
)
2069 /* We might yet create a job for the other unit… */
2072 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2082 /* If stopping a unit fails continuously we might enter a stop
2083 * loop here, hence stop acting on the service being
2084 * unnecessary after a while. */
2085 if (!ratelimit_below(&u
->auto_stop_ratelimit
)) {
2086 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2091 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2093 /* A unit we need to run is gone. Sniff. Let's stop this. */
2094 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2096 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2099 static void retroactively_start_dependencies(Unit
*u
) {
2105 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2107 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2108 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2109 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2110 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2112 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2113 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2114 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2115 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2117 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2118 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2119 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2120 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
2122 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2123 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2124 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2126 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2127 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2128 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2131 static void retroactively_stop_dependencies(Unit
*u
) {
2137 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2139 /* Pull down units which are bound to us recursively if enabled */
2140 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2141 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2142 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2145 void unit_start_on_failure(Unit
*u
) {
2153 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2156 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2158 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2159 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2161 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, &error
, NULL
);
2163 log_unit_warning_errno(u
, r
, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error
, r
));
2167 void unit_trigger_notify(Unit
*u
) {
2174 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2175 if (UNIT_VTABLE(other
)->trigger_notify
)
2176 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2179 static int unit_log_resources(Unit
*u
) {
2180 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ 4];
2181 bool any_traffic
= false, have_ip_accounting
= false;
2182 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
;
2183 size_t n_message_parts
= 0, n_iovec
= 0;
2184 char* message_parts
[3 + 1], *t
;
2185 nsec_t nsec
= NSEC_INFINITY
;
2186 CGroupIPAccountingMetric m
;
2189 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2190 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2191 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2192 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2193 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2198 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2199 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2200 * information and the complete data in structured fields. */
2202 (void) unit_get_cpu_usage(u
, &nsec
);
2203 if (nsec
!= NSEC_INFINITY
) {
2204 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2206 /* Format the CPU time for inclusion in the structured log message */
2207 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2211 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2213 /* Format the CPU time for inclusion in the human language message string */
2214 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2215 t
= strjoin("consumed ", buf
, " CPU time");
2221 message_parts
[n_message_parts
++] = t
;
2224 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2225 char buf
[FORMAT_BYTES_MAX
] = "";
2226 uint64_t value
= UINT64_MAX
;
2228 assert(ip_fields
[m
]);
2230 (void) unit_get_ip_accounting(u
, m
, &value
);
2231 if (value
== UINT64_MAX
)
2234 have_ip_accounting
= true;
2238 /* Format IP accounting data for inclusion in the structured log message */
2239 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2243 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2245 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2246 * bytes counters (and not for the packets counters) */
2247 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2249 igress
= strjoin("received ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2254 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2256 egress
= strjoin("sent ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2264 if (have_ip_accounting
) {
2267 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2269 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2274 k
= strdup("no IP traffic");
2280 message_parts
[n_message_parts
++] = k
;
2284 /* Is there any accounting data available at all? */
2290 if (n_message_parts
== 0)
2291 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2293 _cleanup_free_
char *joined
;
2295 message_parts
[n_message_parts
] = NULL
;
2297 joined
= strv_join(message_parts
, ", ");
2303 joined
[0] = ascii_toupper(joined
[0]);
2304 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2307 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2308 * and hence don't increase n_iovec for them */
2309 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2310 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2312 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2313 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2315 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2316 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2318 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2322 for (i
= 0; i
< n_message_parts
; i
++)
2323 free(message_parts
[i
]);
2325 for (i
= 0; i
< n_iovec
; i
++)
2326 free(iovec
[i
].iov_base
);
2332 static void unit_update_on_console(Unit
*u
) {
2337 b
= unit_needs_console(u
);
2338 if (u
->on_console
== b
)
2343 manager_ref_console(u
->manager
);
2345 manager_unref_console(u
->manager
);
2348 static void unit_emit_audit_start(Unit
*u
) {
2351 if (u
->type
!= UNIT_SERVICE
)
2354 /* Write audit record if we have just finished starting up */
2355 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, true);
2359 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2362 if (u
->type
!= UNIT_SERVICE
)
2366 /* Write audit record if we have just finished shutting down */
2367 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, state
== UNIT_INACTIVE
);
2368 u
->in_audit
= false;
2370 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2371 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, state
== UNIT_INACTIVE
);
2373 if (state
== UNIT_INACTIVE
)
2374 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, true);
2378 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2384 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2385 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2387 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2388 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2389 * remounted this function will be called too! */
2393 /* Update timestamps for state changes */
2394 if (!MANAGER_IS_RELOADING(m
)) {
2395 dual_timestamp_get(&u
->state_change_timestamp
);
2397 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2398 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2399 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2400 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2402 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2403 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2404 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2405 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2408 /* Keep track of failed units */
2409 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2411 /* Make sure the cgroup and state files are always removed when we become inactive */
2412 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2413 unit_prune_cgroup(u
);
2414 unit_unlink_state_files(u
);
2417 unit_update_on_console(u
);
2422 if (u
->job
->state
== JOB_WAITING
)
2424 /* So we reached a different state for this
2425 * job. Let's see if we can run it now if it
2426 * failed previously due to EAGAIN. */
2427 job_add_to_run_queue(u
->job
);
2429 /* Let's check whether this state change constitutes a
2430 * finished job, or maybe contradicts a running job and
2431 * hence needs to invalidate jobs. */
2433 switch (u
->job
->type
) {
2436 case JOB_VERIFY_ACTIVE
:
2438 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2439 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2440 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2443 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2444 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2450 case JOB_RELOAD_OR_START
:
2451 case JOB_TRY_RELOAD
:
2453 if (u
->job
->state
== JOB_RUNNING
) {
2454 if (ns
== UNIT_ACTIVE
)
2455 job_finish_and_invalidate(u
->job
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2456 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2459 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2460 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2468 case JOB_TRY_RESTART
:
2470 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2471 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2472 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2474 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2480 assert_not_reached("Job type unknown");
2486 if (!MANAGER_IS_RELOADING(m
)) {
2488 /* If this state change happened without being
2489 * requested by a job, then let's retroactively start
2490 * or stop dependencies. We skip that step when
2491 * deserializing, since we don't want to create any
2492 * additional jobs just because something is already
2496 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2497 retroactively_start_dependencies(u
);
2498 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2499 retroactively_stop_dependencies(u
);
2502 /* stop unneeded units regardless if going down was expected or not */
2503 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2504 check_unneeded_dependencies(u
);
2506 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2507 log_unit_debug(u
, "Unit entered failed state.");
2509 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2510 unit_start_on_failure(u
);
2513 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2514 /* This unit just finished starting up */
2516 unit_emit_audit_start(u
);
2517 manager_send_unit_plymouth(m
, u
);
2520 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2521 /* This unit just stopped/failed. */
2523 unit_emit_audit_stop(u
, ns
);
2524 unit_log_resources(u
);
2528 manager_recheck_journal(m
);
2529 manager_recheck_dbus(m
);
2531 unit_trigger_notify(u
);
2533 if (!MANAGER_IS_RELOADING(m
)) {
2534 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2535 unit_submit_to_stop_when_unneeded_queue(u
);
2537 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2538 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2539 * without ever entering started.) */
2540 unit_check_binds_to(u
);
2542 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2543 reason
= strjoina("unit ", u
->id
, " failed");
2544 (void) emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, reason
);
2545 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2546 reason
= strjoina("unit ", u
->id
, " succeeded");
2547 (void) emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, reason
);
2551 unit_add_to_dbus_queue(u
);
2552 unit_add_to_gc_queue(u
);
2555 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2559 assert(pid_is_valid(pid
));
2561 /* Watch a specific PID */
2563 r
= set_ensure_allocated(&u
->pids
, NULL
);
2567 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2571 /* First try, let's add the unit keyed by "pid". */
2572 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2578 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2579 * to an array of Units rather than just a Unit), lists us already. */
2581 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2583 for (; array
[n
]; n
++)
2587 if (found
) /* Found it already? if so, do nothing */
2592 /* Allocate a new array */
2593 new_array
= new(Unit
*, n
+ 2);
2597 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2599 new_array
[n
+1] = NULL
;
2601 /* Add or replace the old array */
2602 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2613 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
2620 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2624 assert(pid_is_valid(pid
));
2626 /* First let's drop the unit in case it's keyed as "pid". */
2627 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2629 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2630 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2634 /* Let's iterate through the array, dropping our own entry */
2635 for (n
= 0; array
[n
]; n
++)
2637 array
[m
++] = array
[n
];
2641 /* The array is now empty, remove the entire entry */
2642 assert(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2647 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2650 void unit_unwatch_all_pids(Unit
*u
) {
2653 while (!set_isempty(u
->pids
))
2654 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2656 u
->pids
= set_free(u
->pids
);
2659 static void unit_tidy_watch_pids(Unit
*u
) {
2660 pid_t except1
, except2
;
2666 /* Cleans dead PIDs from our list */
2668 except1
= unit_main_pid(u
);
2669 except2
= unit_control_pid(u
);
2671 SET_FOREACH(e
, u
->pids
, i
) {
2672 pid_t pid
= PTR_TO_PID(e
);
2674 if (pid
== except1
|| pid
== except2
)
2677 if (!pid_is_unwaited(pid
))
2678 unit_unwatch_pid(u
, pid
);
2682 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2688 unit_tidy_watch_pids(u
);
2689 unit_watch_all_pids(u
);
2691 /* If the PID set is empty now, then let's finish this off. */
2692 unit_synthesize_cgroup_empty_event(u
);
2697 int unit_enqueue_rewatch_pids(Unit
*u
) {
2702 if (!u
->cgroup_path
)
2705 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2708 if (r
> 0) /* On unified we can use proper notifications */
2711 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2712 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2713 * involves issuing kill(pid, 0) on all processes we watch. */
2715 if (!u
->rewatch_pids_event_source
) {
2716 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2718 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2720 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2722 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
2724 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: m");
2726 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2728 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2731 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2733 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
2738 void unit_dequeue_rewatch_pids(Unit
*u
) {
2742 if (!u
->rewatch_pids_event_source
)
2745 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2747 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2749 u
->rewatch_pids_event_source
= sd_event_source_unref(u
->rewatch_pids_event_source
);
2752 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2754 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2758 case JOB_VERIFY_ACTIVE
:
2761 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2762 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2767 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2768 * external events), hence it makes no sense to permit enqueing such a request either. */
2769 return !u
->perpetual
;
2772 case JOB_TRY_RESTART
:
2773 return unit_can_stop(u
) && unit_can_start(u
);
2776 case JOB_TRY_RELOAD
:
2777 return unit_can_reload(u
);
2779 case JOB_RELOAD_OR_START
:
2780 return unit_can_reload(u
) && unit_can_start(u
);
2783 assert_not_reached("Invalid job type");
2787 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2790 /* Only warn about some unit types */
2791 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2794 if (streq_ptr(u
->id
, other
))
2795 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2797 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2800 static int unit_add_dependency_hashmap(
2803 UnitDependencyMask origin_mask
,
2804 UnitDependencyMask destination_mask
) {
2806 UnitDependencyInfo info
;
2811 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2812 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2813 assert(origin_mask
> 0 || destination_mask
> 0);
2815 r
= hashmap_ensure_allocated(h
, NULL
);
2819 assert_cc(sizeof(void*) == sizeof(info
));
2821 info
.data
= hashmap_get(*h
, other
);
2823 /* Entry already exists. Add in our mask. */
2825 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
2826 FLAGS_SET(destination_mask
, info
.destination_mask
))
2829 info
.origin_mask
|= origin_mask
;
2830 info
.destination_mask
|= destination_mask
;
2832 r
= hashmap_update(*h
, other
, info
.data
);
2834 info
= (UnitDependencyInfo
) {
2835 .origin_mask
= origin_mask
,
2836 .destination_mask
= destination_mask
,
2839 r
= hashmap_put(*h
, other
, info
.data
);
2847 int unit_add_dependency(
2852 UnitDependencyMask mask
) {
2854 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2855 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2856 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2857 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2858 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2859 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2860 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2861 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2862 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2863 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2864 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2865 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2866 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2867 [UNIT_BEFORE
] = UNIT_AFTER
,
2868 [UNIT_AFTER
] = UNIT_BEFORE
,
2869 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2870 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2871 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2872 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2873 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2874 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2875 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2876 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2878 Unit
*original_u
= u
, *original_other
= other
;
2882 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2885 u
= unit_follow_merge(u
);
2886 other
= unit_follow_merge(other
);
2888 /* We won't allow dependencies on ourselves. We will not
2889 * consider them an error however. */
2891 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2895 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2896 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2897 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2901 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2905 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2906 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2911 if (add_reference
) {
2912 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2916 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2921 unit_add_to_dbus_queue(u
);
2925 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2930 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2934 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
2937 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
2945 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2952 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2954 _cleanup_free_
char *i
= NULL
;
2956 r
= unit_name_to_prefix(u
->id
, &i
);
2960 r
= unit_name_replace_instance(name
, i
, buf
);
2969 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2970 _cleanup_free_
char *buf
= NULL
;
2977 r
= resolve_template(u
, name
, &buf
, &name
);
2981 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2985 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2988 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2989 _cleanup_free_
char *buf
= NULL
;
2996 r
= resolve_template(u
, name
, &buf
, &name
);
3000 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3004 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* Overrides the unit search path via the environment. This is mostly for
 * debug purposes. Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3015 char *unit_dbus_path(Unit
*u
) {
3021 return unit_dbus_path_from_name(u
->id
);
3024 char *unit_dbus_path_invocation_id(Unit
*u
) {
3027 if (sd_id128_is_null(u
->invocation_id
))
3030 return unit_dbus_path_from_name(u
->invocation_id_string
);
3033 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3037 /* Sets the unit slice if it has not been set before. Is extra
3038 * careful, to only allow this for units that actually have a
3039 * cgroup context. Also, we don't allow to set this for slices
3040 * (since the parent slice is derived from the name). Make
3041 * sure the unit we set is actually a slice. */
3043 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3046 if (u
->type
== UNIT_SLICE
)
3049 if (unit_active_state(u
) != UNIT_INACTIVE
)
3052 if (slice
->type
!= UNIT_SLICE
)
3055 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3056 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3059 if (UNIT_DEREF(u
->slice
) == slice
)
3062 /* Disallow slice changes if @u is already bound to cgroups */
3063 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
3066 unit_ref_set(&u
->slice
, u
, slice
);
3070 int unit_set_default_slice(Unit
*u
) {
3071 _cleanup_free_
char *b
= NULL
;
3072 const char *slice_name
;
3078 if (UNIT_ISSET(u
->slice
))
3082 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3084 /* Implicitly place all instantiated units in their
3085 * own per-template slice */
3087 r
= unit_name_to_prefix(u
->id
, &prefix
);
3091 /* The prefix is already escaped, but it might include
3092 * "-" which has a special meaning for slice units,
3093 * hence escape it here extra. */
3094 escaped
= unit_name_escape(prefix
);
3098 if (MANAGER_IS_SYSTEM(u
->manager
))
3099 b
= strjoin("system-", escaped
, ".slice");
3101 b
= strappend(escaped
, ".slice");
3108 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
3109 ? SPECIAL_SYSTEM_SLICE
3110 : SPECIAL_ROOT_SLICE
;
3112 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3116 return unit_set_slice(u
, slice
);
3119 const char *unit_slice_name(Unit
*u
) {
3122 if (!UNIT_ISSET(u
->slice
))
3125 return UNIT_DEREF(u
->slice
)->id
;
3128 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3129 _cleanup_free_
char *t
= NULL
;
3136 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3139 if (unit_has_name(u
, t
))
3142 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3143 assert(r
< 0 || *_found
!= u
);
3147 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3148 const char *name
, *old_owner
, *new_owner
;
3155 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3157 bus_log_parse_error(r
);
3161 old_owner
= empty_to_null(old_owner
);
3162 new_owner
= empty_to_null(new_owner
);
3164 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3165 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
3170 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3177 if (u
->match_bus_slot
)
3180 match
= strjoina("type='signal',"
3181 "sender='org.freedesktop.DBus',"
3182 "path='/org/freedesktop/DBus',"
3183 "interface='org.freedesktop.DBus',"
3184 "member='NameOwnerChanged',"
3185 "arg0='", name
, "'");
3187 return sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
3190 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3196 /* Watch a specific name on the bus. We only support one unit
3197 * watching each name for now. */
3199 if (u
->manager
->api_bus
) {
3200 /* If the bus is already available, install the match directly.
3201 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3202 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3204 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3207 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3209 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3210 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3216 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3220 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3221 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3224 bool unit_can_serialize(Unit
*u
) {
3227 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
3230 static int serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3231 _cleanup_free_
char *s
= NULL
;
3240 r
= cg_mask_to_string(mask
, &s
);
3242 return log_error_errno(r
, "Failed to format cgroup mask: %m");
3244 return serialize_item(f
, key
, s
);
3247 static const char *ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3248 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3249 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3250 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3251 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
3254 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3255 CGroupIPAccountingMetric m
;
3262 if (unit_can_serialize(u
)) {
3263 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3268 (void) serialize_dual_timestamp(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3270 (void) serialize_dual_timestamp(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3271 (void) serialize_dual_timestamp(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3272 (void) serialize_dual_timestamp(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3273 (void) serialize_dual_timestamp(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3275 (void) serialize_dual_timestamp(f
, "condition-timestamp", &u
->condition_timestamp
);
3276 (void) serialize_dual_timestamp(f
, "assert-timestamp", &u
->assert_timestamp
);
3278 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3279 (void) serialize_bool(f
, "condition-result", u
->condition_result
);
3281 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3282 (void) serialize_bool(f
, "assert-result", u
->assert_result
);
3284 (void) serialize_bool(f
, "transient", u
->transient
);
3285 (void) serialize_bool(f
, "in-audit", u
->in_audit
);
3287 (void) serialize_bool(f
, "exported-invocation-id", u
->exported_invocation_id
);
3288 (void) serialize_bool(f
, "exported-log-level-max", u
->exported_log_level_max
);
3289 (void) serialize_bool(f
, "exported-log-extra-fields", u
->exported_log_extra_fields
);
3290 (void) serialize_bool(f
, "exported-log-rate-limit-interval", u
->exported_log_rate_limit_interval
);
3291 (void) serialize_bool(f
, "exported-log-rate-limit-burst", u
->exported_log_rate_limit_burst
);
3293 (void) serialize_item_format(f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3294 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3295 (void) serialize_item_format(f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3298 (void) serialize_item(f
, "cgroup", u
->cgroup_path
);
3300 (void) serialize_bool(f
, "cgroup-realized", u
->cgroup_realized
);
3301 (void) serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3302 (void) serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3303 (void) serialize_cgroup_mask(f
, "cgroup-invalidated-mask", u
->cgroup_invalidated_mask
);
3305 if (uid_is_valid(u
->ref_uid
))
3306 (void) serialize_item_format(f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3307 if (gid_is_valid(u
->ref_gid
))
3308 (void) serialize_item_format(f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3310 if (!sd_id128_is_null(u
->invocation_id
))
3311 (void) serialize_item_format(f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3313 bus_track_serialize(u
->bus_track
, f
, "ref");
3315 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3318 r
= unit_get_ip_accounting(u
, m
, &v
);
3320 (void) serialize_item_format(f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3323 if (serialize_jobs
) {
3326 job_serialize(u
->job
, f
);
3331 job_serialize(u
->nop_job
, f
);
3340 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3348 _cleanup_free_
char *line
= NULL
;
3349 CGroupIPAccountingMetric m
;
3353 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3355 return log_error_errno(r
, "Failed to read serialization line: %m");
3356 if (r
== 0) /* eof */
3360 if (isempty(l
)) /* End marker */
3363 k
= strcspn(l
, "=");
3371 if (streq(l
, "job")) {
3373 /* new-style serialized job */
3380 r
= job_deserialize(j
, f
);
3386 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
3392 r
= job_install_deserialized(j
);
3394 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
3398 } else /* legacy for pre-44 */
3399 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3401 } else if (streq(l
, "state-change-timestamp")) {
3402 (void) deserialize_dual_timestamp(v
, &u
->state_change_timestamp
);
3404 } else if (streq(l
, "inactive-exit-timestamp")) {
3405 (void) deserialize_dual_timestamp(v
, &u
->inactive_exit_timestamp
);
3407 } else if (streq(l
, "active-enter-timestamp")) {
3408 (void) deserialize_dual_timestamp(v
, &u
->active_enter_timestamp
);
3410 } else if (streq(l
, "active-exit-timestamp")) {
3411 (void) deserialize_dual_timestamp(v
, &u
->active_exit_timestamp
);
3413 } else if (streq(l
, "inactive-enter-timestamp")) {
3414 (void) deserialize_dual_timestamp(v
, &u
->inactive_enter_timestamp
);
3416 } else if (streq(l
, "condition-timestamp")) {
3417 (void) deserialize_dual_timestamp(v
, &u
->condition_timestamp
);
3419 } else if (streq(l
, "assert-timestamp")) {
3420 (void) deserialize_dual_timestamp(v
, &u
->assert_timestamp
);
3422 } else if (streq(l
, "condition-result")) {
3424 r
= parse_boolean(v
);
3426 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3428 u
->condition_result
= r
;
3432 } else if (streq(l
, "assert-result")) {
3434 r
= parse_boolean(v
);
3436 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3438 u
->assert_result
= r
;
3442 } else if (streq(l
, "transient")) {
3444 r
= parse_boolean(v
);
3446 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3452 } else if (streq(l
, "in-audit")) {
3454 r
= parse_boolean(v
);
3456 log_unit_debug(u
, "Failed to parse in-audit bool %s, ignoring.", v
);
3462 } else if (streq(l
, "exported-invocation-id")) {
3464 r
= parse_boolean(v
);
3466 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3468 u
->exported_invocation_id
= r
;
3472 } else if (streq(l
, "exported-log-level-max")) {
3474 r
= parse_boolean(v
);
3476 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3478 u
->exported_log_level_max
= r
;
3482 } else if (streq(l
, "exported-log-extra-fields")) {
3484 r
= parse_boolean(v
);
3486 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3488 u
->exported_log_extra_fields
= r
;
3492 } else if (streq(l
, "exported-log-rate-limit-interval")) {
3494 r
= parse_boolean(v
);
3496 log_unit_debug(u
, "Failed to parse exported log rate limit interval %s, ignoring.", v
);
3498 u
->exported_log_rate_limit_interval
= r
;
3502 } else if (streq(l
, "exported-log-rate-limit-burst")) {
3504 r
= parse_boolean(v
);
3506 log_unit_debug(u
, "Failed to parse exported log rate limit burst %s, ignoring.", v
);
3508 u
->exported_log_rate_limit_burst
= r
;
3512 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3514 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3516 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3520 } else if (streq(l
, "cpu-usage-last")) {
3522 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3524 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3528 } else if (streq(l
, "cgroup")) {
3530 r
= unit_set_cgroup_path(u
, v
);
3532 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3534 (void) unit_watch_cgroup(u
);
3537 } else if (streq(l
, "cgroup-realized")) {
3540 b
= parse_boolean(v
);
3542 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3544 u
->cgroup_realized
= b
;
3548 } else if (streq(l
, "cgroup-realized-mask")) {
3550 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3552 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3555 } else if (streq(l
, "cgroup-enabled-mask")) {
3557 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3559 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3562 } else if (streq(l
, "cgroup-invalidated-mask")) {
3564 r
= cg_mask_from_string(v
, &u
->cgroup_invalidated_mask
);
3566 log_unit_debug(u
, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v
);
3569 } else if (streq(l
, "ref-uid")) {
3572 r
= parse_uid(v
, &uid
);
3574 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3576 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3580 } else if (streq(l
, "ref-gid")) {
3583 r
= parse_gid(v
, &gid
);
3585 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3587 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3591 } else if (streq(l
, "ref")) {
3593 r
= strv_extend(&u
->deserialized_refs
, v
);
3598 } else if (streq(l
, "invocation-id")) {
3601 r
= sd_id128_from_string(v
, &id
);
3603 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3605 r
= unit_set_invocation_id(u
, id
);
3607 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3613 /* Check if this is an IP accounting metric serialization field */
3614 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++)
3615 if (streq(l
, ip_accounting_metric_field
[m
]))
3617 if (m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
) {
3620 r
= safe_atou64(v
, &c
);
3622 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3624 u
->ip_accounting_extra
[m
] = c
;
3628 if (unit_can_serialize(u
)) {
3629 r
= exec_runtime_deserialize_compat(u
, l
, v
, fds
);
3631 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3635 /* Returns positive if key was handled by the call */
3639 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3641 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3645 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3646 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3647 * before 228 where the base for timeouts was not persistent across reboots. */
3649 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3650 dual_timestamp_get(&u
->state_change_timestamp
);
3652 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3653 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3654 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3655 unit_invalidate_cgroup_bpf(u
);
3660 int unit_deserialize_skip(FILE *f
) {
3664 /* Skip serialized data for this unit. We don't know what it is. */
3667 _cleanup_free_
char *line
= NULL
;
3670 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3672 return log_error_errno(r
, "Failed to read serialization line: %m");
3684 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3686 _cleanup_free_
char *e
= NULL
;
3691 /* Adds in links to the device node that this unit is based on */
3695 if (!is_device_path(what
))
3698 /* When device units aren't supported (such as in a
3699 * container), don't create dependencies on them. */
3700 if (!unit_type_supported(UNIT_DEVICE
))
3703 r
= unit_name_from_path(what
, ".device", &e
);
3707 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3711 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3712 dep
= UNIT_BINDS_TO
;
3714 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3715 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3716 device
, true, mask
);
3721 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
3729 int unit_coldplug(Unit
*u
) {
3735 /* Make sure we don't enter a loop, when coldplugging recursively. */
3739 u
->coldplugged
= true;
3741 STRV_FOREACH(i
, u
->deserialized_refs
) {
3742 q
= bus_unit_track_add_name(u
, *i
);
3743 if (q
< 0 && r
>= 0)
3746 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3748 if (UNIT_VTABLE(u
)->coldplug
) {
3749 q
= UNIT_VTABLE(u
)->coldplug(u
);
3750 if (q
< 0 && r
>= 0)
3755 q
= job_coldplug(u
->job
);
3756 if (q
< 0 && r
>= 0)
3763 void unit_catchup(Unit
*u
) {
3766 if (UNIT_VTABLE(u
)->catchup
)
3767 UNIT_VTABLE(u
)->catchup(u
);
3770 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3776 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3777 * are never out-of-date. */
3778 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3781 if (stat(path
, &st
) < 0)
3782 /* What, cannot access this anymore? */
3786 /* For masked files check if they are still so */
3787 return !null_or_empty(&st
);
3789 /* For non-empty files check the mtime */
3790 return timespec_load(&st
.st_mtim
) > mtime
;
3795 bool unit_need_daemon_reload(Unit
*u
) {
3796 _cleanup_strv_free_
char **t
= NULL
;
3801 /* For unit files, we allow masking… */
3802 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3803 u
->load_state
== UNIT_MASKED
))
3806 /* Source paths should not be masked… */
3807 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3810 if (u
->load_state
== UNIT_LOADED
)
3811 (void) unit_find_dropin_paths(u
, &t
);
3812 if (!strv_equal(u
->dropin_paths
, t
))
3815 /* … any drop-ins that are masked are simply omitted from the list. */
3816 STRV_FOREACH(path
, u
->dropin_paths
)
3817 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3823 void unit_reset_failed(Unit
*u
) {
3826 if (UNIT_VTABLE(u
)->reset_failed
)
3827 UNIT_VTABLE(u
)->reset_failed(u
);
3829 RATELIMIT_RESET(u
->start_limit
);
3830 u
->start_limit_hit
= false;
3833 Unit
*unit_following(Unit
*u
) {
3836 if (UNIT_VTABLE(u
)->following
)
3837 return UNIT_VTABLE(u
)->following(u
);
3842 bool unit_stop_pending(Unit
*u
) {
3845 /* This call does check the current state of the unit. It's
3846 * hence useful to be called from state change calls of the
3847 * unit itself, where the state isn't updated yet. This is
3848 * different from unit_inactive_or_pending() which checks both
3849 * the current state and for a queued job. */
3851 return u
->job
&& u
->job
->type
== JOB_STOP
;
3854 bool unit_inactive_or_pending(Unit
*u
) {
3857 /* Returns true if the unit is inactive or going down */
3859 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3862 if (unit_stop_pending(u
))
3868 bool unit_active_or_pending(Unit
*u
) {
3871 /* Returns true if the unit is active or going up */
3873 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3877 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3883 bool unit_will_restart(Unit
*u
) {
3886 if (!UNIT_VTABLE(u
)->will_restart
)
3889 return UNIT_VTABLE(u
)->will_restart(u
);
3892 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3894 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3895 assert(SIGNAL_VALID(signo
));
3897 if (!UNIT_VTABLE(u
)->kill
)
3900 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3903 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3904 _cleanup_set_free_ Set
*pid_set
= NULL
;
3907 pid_set
= set_new(NULL
);
3911 /* Exclude the main/control pids from being killed via the cgroup */
3913 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3918 if (control_pid
> 0) {
3919 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3924 return TAKE_PTR(pid_set
);
3927 int unit_kill_common(
3933 sd_bus_error
*error
) {
3936 bool killed
= false;
3938 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3940 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3941 else if (main_pid
== 0)
3942 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3945 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3946 if (control_pid
< 0)
3947 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3948 else if (control_pid
== 0)
3949 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3952 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3953 if (control_pid
> 0) {
3954 if (kill(control_pid
, signo
) < 0)
3960 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3962 if (kill(main_pid
, signo
) < 0)
3968 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3969 _cleanup_set_free_ Set
*pid_set
= NULL
;
3972 /* Exclude the main/control pids from being killed via the cgroup */
3973 pid_set
= unit_pid_set(main_pid
, control_pid
);
3977 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3978 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
3984 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
3990 int unit_following_set(Unit
*u
, Set
**s
) {
3994 if (UNIT_VTABLE(u
)->following_set
)
3995 return UNIT_VTABLE(u
)->following_set(u
, s
);
4001 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4006 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4007 r
= unit_file_get_state(
4008 u
->manager
->unit_file_scope
,
4011 &u
->unit_file_state
);
4013 u
->unit_file_state
= UNIT_FILE_BAD
;
4016 return u
->unit_file_state
;
4019 int unit_get_unit_file_preset(Unit
*u
) {
4022 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
4023 u
->unit_file_preset
= unit_file_query_preset(
4024 u
->manager
->unit_file_scope
,
4026 basename(u
->fragment_path
));
4028 return u
->unit_file_preset
;
4031 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4037 unit_ref_unset(ref
);
4039 ref
->source
= source
;
4040 ref
->target
= target
;
4041 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
4045 void unit_ref_unset(UnitRef
*ref
) {
4051 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4052 * be unreferenced now. */
4053 unit_add_to_gc_queue(ref
->target
);
4055 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4056 ref
->source
= ref
->target
= NULL
;
4059 static int user_from_unit_name(Unit
*u
, char **ret
) {
4061 static const uint8_t hash_key
[] = {
4062 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4063 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4066 _cleanup_free_
char *n
= NULL
;
4069 r
= unit_name_to_prefix(u
->id
, &n
);
4073 if (valid_user_group_name(n
)) {
4078 /* If we can't use the unit name as a user name, then let's hash it and use that */
4079 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4085 int unit_patch_contexts(Unit
*u
) {
4093 /* Patch in the manager defaults into the exec and cgroup
4094 * contexts, _after_ the rest of the settings have been
4097 ec
= unit_get_exec_context(u
);
4099 /* This only copies in the ones that need memory */
4100 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4101 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4102 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4107 if (MANAGER_IS_USER(u
->manager
) &&
4108 !ec
->working_directory
) {
4110 r
= get_home_dir(&ec
->working_directory
);
4114 /* Allow user services to run, even if the
4115 * home directory is missing */
4116 ec
->working_directory_missing_ok
= true;
4119 if (ec
->private_devices
)
4120 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4122 if (ec
->protect_kernel_modules
)
4123 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4125 if (ec
->dynamic_user
) {
4127 r
= user_from_unit_name(u
, &ec
->user
);
4133 ec
->group
= strdup(ec
->user
);
4138 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4139 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4141 ec
->private_tmp
= true;
4142 ec
->remove_ipc
= true;
4143 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4144 if (ec
->protect_home
== PROTECT_HOME_NO
)
4145 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4149 cc
= unit_get_cgroup_context(u
);
4152 if (ec
->private_devices
&&
4153 cc
->device_policy
== CGROUP_AUTO
)
4154 cc
->device_policy
= CGROUP_CLOSED
;
4156 if (ec
->root_image
&&
4157 (cc
->device_policy
!= CGROUP_AUTO
|| cc
->device_allow
)) {
4159 /* When RootImage= is specified, the following devices are touched. */
4160 r
= cgroup_add_device_allow(cc
, "/dev/loop-control", "rw");
4164 r
= cgroup_add_device_allow(cc
, "block-loop", "rwm");
4168 r
= cgroup_add_device_allow(cc
, "block-blkext", "rwm");
4177 ExecContext
*unit_get_exec_context(Unit
*u
) {
4184 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4188 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4191 KillContext
*unit_get_kill_context(Unit
*u
) {
4198 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4202 return (KillContext
*) ((uint8_t*) u
+ offset
);
4205 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4211 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4215 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4218 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4224 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4228 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4231 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4234 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4237 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4238 return u
->manager
->lookup_paths
.transient
;
4240 if (flags
& UNIT_PERSISTENT
)
4241 return u
->manager
->lookup_paths
.persistent_control
;
4243 if (flags
& UNIT_RUNTIME
)
4244 return u
->manager
->lookup_paths
.runtime_control
;
4249 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4255 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4256 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4257 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4258 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4259 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4262 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4263 ret
= specifier_escape(s
);
4270 if (flags
& UNIT_ESCAPE_C
) {
4283 return ret
?: (char*) s
;
4286 return ret
?: strdup(s
);
/* NOTE(review): extraction gaps here; quoting/append logic between the visible
 * GREEDY_REALLOC calls is not shown. */
4289 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4290 _cleanup_free_
char *result
= NULL
;
4291 size_t n
= 0, allocated
= 0;
4294 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4295 * way suitable for ExecStart= stanzas */
4297 STRV_FOREACH(i
, l
) {
4298 _cleanup_free_
char *buf
= NULL
;
4303 p
= unit_escape_setting(*i
, flags
, &buf
);
/* Worst-case growth for this entry: optional separating space, quotes, the text, NUL. */
4307 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4308 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4322 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
/* Ownership of the buffer transfers to the caller. */
4327 return TAKE_PTR(result
);
/* NOTE(review): extraction gaps here (error checks and returns between the visible
 * statements are missing from this view). */
/* Writes a single setting for the unit: either appended to the in-creation transient
 * unit file, or as a drop-in file under the directory chosen by unit_drop_in_dir(). */
4330 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4331 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4332 const char *dir
, *wrapped
;
4339 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4342 data
= unit_escape_setting(data
, flags
, &escaped
);
4346 /* Prefix the section header. If we are writing this out as a transient file, then let's suppress this if the
4347 * previous section header is the same */
4349 if (flags
& UNIT_PRIVATE
) {
4350 if (!UNIT_VTABLE(u
)->private_section
)
4353 if (!u
->transient_file
|| u
->last_section_private
< 0)
4354 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4355 else if (u
->last_section_private
== 0)
4356 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4358 if (!u
->transient_file
|| u
->last_section_private
< 0)
4359 data
= strjoina("[Unit]\n", data
);
4360 else if (u
->last_section_private
> 0)
4361 data
= strjoina("\n[Unit]\n", data
);
4364 if (u
->transient_file
) {
4365 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4366 * write to the transient unit file. */
4367 fputs(data
, u
->transient_file
);
4369 if (!endswith(data
, "\n"))
4370 fputc('\n', u
->transient_file
);
4372 /* Remember which section we wrote this entry to */
4373 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4377 dir
= unit_drop_in_dir(u
, flags
);
4381 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4382 "# or an equivalent operation. Do not edit.\n",
/* 50 is the numeric prefix of the drop-in file name (ordering among drop-ins). */
4386 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4390 (void) mkdir_p_label(p
, 0755);
4391 r
= write_string_file_atomic_label(q
, wrapped
);
4395 r
= strv_push(&u
->dropin_paths
, q
);
4400 strv_uniq(u
->dropin_paths
);
4402 u
->dropin_mtime
= now(CLOCK_REALTIME
);
/* printf-style convenience wrapper around unit_write_setting(). */
4407 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4408 _cleanup_free_
char *p
= NULL
;
4416 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4419 va_start(ap
, format
);
4420 r
= vasprintf(&p
, format
, ap
);
4426 return unit_write_setting(u
, flags
, name
, p
);
/* NOTE(review): extraction gaps here; error-handling lines are missing from this view. */
/* Converts the unit into a transient one: opens a fresh transient unit file for writing,
 * drops any on-disk fragment/source/drop-in state, and resets the load state to stub. */
4429 int unit_make_transient(Unit
*u
) {
4430 _cleanup_free_
char *path
= NULL
;
4435 if (!UNIT_VTABLE(u
)->can_transient
)
4438 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4440 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4444 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4445 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4447 RUN_WITH_UMASK(0022) {
/* "we": write mode plus O_CLOEXEC, so the fd doesn't leak into forked children. */
4448 f
= fopen(path
, "we");
4453 safe_fclose(u
->transient_file
);
4454 u
->transient_file
= f
;
4456 free_and_replace(u
->fragment_path
, path
);
4458 u
->source_path
= mfree(u
->source_path
);
4459 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4460 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4462 u
->load_state
= UNIT_STUB
;
4464 u
->transient
= true;
4466 unit_add_to_dbus_queue(u
);
4467 unit_add_to_gc_queue(u
);
4469 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
/* NOTE(review): extraction gaps here. */
/* cg_kill callback: logs (at notice level, on the unit passed as userdata) which
 * process is about to be killed with which signal. */
4475 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
4476 _cleanup_free_
char *comm
= NULL
;
4478 (void) get_process_comm(pid
, &comm
);
4480 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4481 only, like for example systemd's own PAM stub process. */
4482 if (comm
&& comm
[0] == '(')
4485 log_unit_notice(userdata
,
4486 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4489 signal_to_string(sig
));
/* Maps a kill operation to the signal configured in the KillContext
 * (SIGTERM-ish for terminate, final kill signal, or watchdog signal). */
4492 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
4497 case KILL_TERMINATE
:
4498 case KILL_TERMINATE_AND_LOG
:
4499 return c
->kill_signal
;
4502 return c
->final_kill_signal
;
4505 return c
->watchdog_signal
;
4508 assert_not_reached("KillOperation unknown");
/* NOTE(review): this function's extract is missing many lines (parameter list,
 * send_sighup computation, several branches); comments cover only what is visible. */
4512 int unit_kill_context(
4518 bool main_pid_alien
) {
4520 bool wait_for_exit
= false, send_sighup
;
4521 cg_kill_log_func_t log_func
= NULL
;
4527 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4528 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4530 if (c
->kill_mode
== KILL_NONE
)
4533 sig
= operation_to_signal(c
, k
);
4537 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
/* Only log individual kills for the "loud" cases: explicit logging ops or hard signals. */
4540 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
4541 log_func
= log_kill
;
4545 log_func(main_pid
, sig
, u
);
4547 r
= kill_and_sigcont(main_pid
, sig
);
4548 if (r
< 0 && r
!= -ESRCH
) {
4549 _cleanup_free_
char *comm
= NULL
;
4550 (void) get_process_comm(main_pid
, &comm
);
4552 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
/* Only wait for the main PID if it is actually our child (not adopted/alien). */
4554 if (!main_pid_alien
)
4555 wait_for_exit
= true;
4557 if (r
!= -ESRCH
&& send_sighup
)
4558 (void) kill(main_pid
, SIGHUP
);
4562 if (control_pid
> 0) {
4564 log_func(control_pid
, sig
, u
);
4566 r
= kill_and_sigcont(control_pid
, sig
);
4567 if (r
< 0 && r
!= -ESRCH
) {
4568 _cleanup_free_
char *comm
= NULL
;
4569 (void) get_process_comm(control_pid
, &comm
);
4571 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4573 wait_for_exit
= true;
4575 if (r
!= -ESRCH
&& send_sighup
)
4576 (void) kill(control_pid
, SIGHUP
);
/* Whole-cgroup kill: for KILL_CONTROL_GROUP always, for KILL_MIXED only on the final kill. */
4580 if (u
->cgroup_path
&&
4581 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4582 _cleanup_set_free_ Set
*pid_set
= NULL
;
4584 /* Exclude the main/control pids from being killed via the cgroup */
4585 pid_set
= unit_pid_set(main_pid
, control_pid
);
4589 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4591 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4595 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4596 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
4600 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4601 * we are running in a container or if this is a delegation unit, simply because cgroup
4602 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4603 * of containers it can be confused easily by left-over directories in the cgroup — which
4604 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4605 * there we get proper events. Hence rely on them. */
4607 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4608 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4609 wait_for_exit
= true;
4614 pid_set
= unit_pid_set(main_pid
, control_pid
);
4618 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4627 return wait_for_exit
;
/* NOTE(review): extraction gaps here (error checks, di initializer contents, strdup of
 * the prefix are missing from this view). */
4630 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4631 _cleanup_free_
char *p
= NULL
;
4633 UnitDependencyInfo di
;
4639 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4640 * the unit (from the path to the UnitDependencyInfo structure indicating how the dependency came to
4641 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4642 * determine which units to make themselves a dependency of. */
4644 if (!path_is_absolute(path
))
4647 r
= hashmap_ensure_allocated(&u
->requires_mounts_for
, &path_hash_ops
);
4655 path
= path_simplify(p
, false);
4657 if (!path_is_normalized(path
))
4660 if (hashmap_contains(u
->requires_mounts_for
, path
))
4663 di
= (UnitDependencyInfo
) {
4667 r
= hashmap_put(u
->requires_mounts_for
, path
, di
.data
);
/* Stack-allocated scratch buffer for iterating over all path prefixes. */
4672 prefix
= alloca(strlen(path
) + 1);
4673 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4676 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4678 _cleanup_free_
char *q
= NULL
;
4680 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4692 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
/* NOTE(review): extraction gaps here. */
/* Ensures the unit has an ExecRuntime: reuses one from a JoinsNamespaceOf= peer if
 * available, otherwise acquires a fresh one. */
4708 int unit_setup_exec_runtime(Unit
*u
) {
4716 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4719 /* Check if there already is an ExecRuntime for this unit? */
4720 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4724 /* Try to get it from somebody else */
4725 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4726 r
= exec_runtime_acquire(u
->manager
, NULL
, other
->id
, false, rt
);
4731 return exec_runtime_acquire(u
->manager
, unit_get_exec_context(u
), u
->id
, true, rt
);
/* Acquires dynamic user/group credentials for the unit, if DynamicUser= is enabled
 * in its ExecContext. */
4734 int unit_setup_dynamic_creds(Unit
*u
) {
4736 DynamicCreds
*dcreds
;
4741 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4743 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4745 ec
= unit_get_exec_context(u
);
4748 if (!ec
->dynamic_user
)
4751 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
/* Whether the given unit type is supported on this system; types without a
 * supported() hook are considered supported unconditionally. */
4754 bool unit_type_supported(UnitType t
) {
4755 if (_unlikely_(t
< 0))
4757 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4760 if (!unit_vtable
[t
]->supported
)
4763 return unit_vtable
[t
]->supported();
/* NOTE(review): extraction gaps here. */
/* Logs a notice if the mount point directory is non-empty (over-mounting would hide
 * its contents). */
4766 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4772 r
= dir_is_empty(where
);
4773 if (r
> 0 || r
== -ENOTDIR
)
4776 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4780 log_struct(LOG_NOTICE
,
4781 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4783 LOG_UNIT_INVOCATION_ID(u
),
4784 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
/* Fails the unit if the mount path resolves through a symlink (i.e. is not canonical). */
4788 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4789 _cleanup_free_
char *canonical_where
;
4795 r
= chase_symlinks(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
);
4797 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4801 /* We will happily ignore a trailing slash (or any redundant slashes) */
4802 if (path_equal(where
, canonical_where
))
4805 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4807 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4809 LOG_UNIT_INVOCATION_ID(u
),
4810 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
4816 bool unit_is_pristine(Unit
*u
) {
4819 /* Check if the unit already exists or is already around,
4820 * in a number of different ways. Note that to cater for unit
4821 * types such as slice, we are generally fine with units that
4822 * are marked UNIT_LOADED even though nothing was actually
4823 * loaded, as those unit types don't require a file on disk. */
4825 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4828 !strv_isempty(u
->dropin_paths
) ||
/* NOTE(review): extraction gaps here (fallback return not visible). */
/* Returns the unit's control PID via the type-specific vtable hook, if implemented. */
4833 pid_t
unit_control_pid(Unit
*u
) {
4836 if (UNIT_VTABLE(u
)->control_pid
)
4837 return UNIT_VTABLE(u
)->control_pid(u
);
/* Returns the unit's main PID via the type-specific vtable hook, if implemented. */
4842 pid_t
unit_main_pid(Unit
*u
) {
4845 if (UNIT_VTABLE(u
)->main_pid
)
4846 return UNIT_VTABLE(u
)->main_pid(u
);
/* NOTE(review): extraction gaps here (parameter lists and some returns are missing
 * from this view). */
4851 static void unit_unref_uid_internal(
4855 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4859 assert(_manager_unref_uid
);
4861 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4862 * gid_t are actually the same type, with the same validity rules.
4864 * Drops a reference to UID/GID from a unit. */
4866 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4867 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4869 if (!uid_is_valid(*ref_uid
))
4872 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4873 *ref_uid
= UID_INVALID
;
4876 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4877 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4880 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
/* The uid_t* cast is valid because uid_t and gid_t are asserted to be the same size above. */
4881 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4884 static int unit_ref_uid_internal(
4889 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4895 assert(uid_is_valid(uid
));
4896 assert(_manager_ref_uid
);
4898 /* Generic implementation of both unit_ref_uid() and unit_ref_gid(), under the assumption that uid_t and gid_t
4899 * are actually the same type, and have the same validity rules.
4901 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4902 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4905 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4906 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4908 if (*ref_uid
== uid
)
4911 if (uid_is_valid(*ref_uid
)) /* Already set? */
4914 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
4922 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4923 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
4926 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4927 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
4930 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4935 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4937 if (uid_is_valid(uid
)) {
4938 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4943 if (gid_is_valid(gid
)) {
4944 q
= unit_ref_gid(u
, gid
, clean_ipc
);
/* GID ref failed: roll back the UID ref so we keep the both-or-neither invariant. */
4947 unit_unref_uid(u
, false);
4953 return r
> 0 || q
> 0;
4956 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4962 c
= unit_get_exec_context(u
);
4964 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4966 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4971 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4974 unit_unref_uid(u
, destroy_now
);
4975 unit_unref_gid(u
, destroy_now
);
4978 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4983 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
4984 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4985 * objects when no service references the UID/GID anymore. */
4987 r
= unit_ref_uid_gid(u
, uid
, gid
);
4989 bus_unit_send_change_signal(u
);
/* NOTE(review): extraction gaps here (returns and rollback labels are missing from
 * this view). */
4992 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4997 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4999 if (sd_id128_equal(u
->invocation_id
, id
))
/* Drop the old mapping from the manager's by-invocation-id table first. */
5002 if (!sd_id128_is_null(u
->invocation_id
))
5003 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
5005 if (sd_id128_is_null(id
)) {
5010 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
5014 u
->invocation_id
= id
;
5015 sd_id128_to_string(id
, u
->invocation_id_string
);
5017 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
/* On failure: reset both the binary and string forms of the ID. */
5024 u
->invocation_id
= SD_ID128_NULL
;
5025 u
->invocation_id_string
[0] = 0;
/* Generates a fresh random invocation ID and installs it on the unit. */
5029 int unit_acquire_invocation_id(Unit
*u
) {
5035 r
= sd_id128_randomize(&id
);
5037 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5039 r
= unit_set_invocation_id(u
, id
);
5041 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
/* NOTE(review): extraction gaps here. */
/* Fills in an ExecParameters structure from manager-level and unit-level state before
 * spawning processes for this unit. */
5046 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5052 /* Copy parameters from manager */
5053 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5057 p
->confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5058 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5059 p
->prefix
= u
->manager
->prefix
;
5060 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5062 /* Copy parameters from unit */
5063 p
->cgroup_path
= u
->cgroup_path
;
5064 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5069 int unit_fork_helper_process(Unit
*u
, const char *name
, pid_t
*ret
) {
5075 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5076 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5078 (void) unit_realize_cgroup(u
);
5080 r
= safe_fork(name
, FORK_REOPEN_LOG
, ret
);
/* Child side: restore default signal dispositions, ignore SIGPIPE, die with the manager. */
5084 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
, -1);
5085 (void) ignore_signals(SIGPIPE
, -1);
5087 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
5089 if (u
->cgroup_path
) {
5090 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5092 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", u
->cgroup_path
);
/* NOTE(review): extraction gaps here. */
/* Applies an updated (possibly zero) dependency-ownership mask: drops the hashmap entry
 * entirely when no origin/destination bits remain, otherwise stores the reduced mask. */
5100 static void unit_update_dependency_mask(Unit
*u
, UnitDependency d
, Unit
*other
, UnitDependencyInfo di
) {
5103 assert(d
< _UNIT_DEPENDENCY_MAX
);
5106 if (di
.origin_mask
== 0 && di
.destination_mask
== 0) {
5107 /* No bit set anymore, let's drop the whole entry */
5108 assert_se(hashmap_remove(u
->dependencies
[d
], other
));
5109 log_unit_debug(u
, "%s lost dependency %s=%s", u
->id
, unit_dependency_to_string(d
), other
->id
);
5111 /* Mask was reduced, let's update the entry */
5112 assert_se(hashmap_update(u
->dependencies
[d
], other
, di
.data
) == 0);
5115 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5120 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5125 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
5129 UnitDependencyInfo di
;
5135 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
/* Skip entries whose origin mask has no bits in 'mask' (nothing to remove). */
5138 if ((di
.origin_mask
& ~mask
) == di
.origin_mask
)
5140 di
.origin_mask
&= ~mask
;
5141 unit_update_dependency_mask(u
, d
, other
, di
);
5143 /* We updated the dependency from our unit to the other unit now. But most dependencies
5144 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5145 * all dependency types on the other unit and delete all those which point to us and
5146 * have the right mask set. */
5148 for (q
= 0; q
< _UNIT_DEPENDENCY_MAX
; q
++) {
5149 UnitDependencyInfo dj
;
5151 dj
.data
= hashmap_get(other
->dependencies
[q
], u
);
5152 if ((dj
.destination_mask
& ~mask
) == dj
.destination_mask
)
5154 dj
.destination_mask
&= ~mask
;
5156 unit_update_dependency_mask(other
, q
, u
, dj
);
5159 unit_add_to_gc_queue(other
);
/* NOTE(review): extraction gaps throughout these exporters (early returns, buffers,
 * some error paths are missing from this view). Each exporter publishes one unit
 * property under /run/systemd/units/ for journald to read, guarded by an
 * exported_* flag so it happens at most once. */
5169 static int unit_export_invocation_id(Unit
*u
) {
5175 if (u
->exported_invocation_id
)
5178 if (sd_id128_is_null(u
->invocation_id
))
5181 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5182 r
= symlink_atomic(u
->invocation_id_string
, p
);
5184 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5186 u
->exported_invocation_id
= true;
5190 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5198 if (u
->exported_log_level_max
)
5201 if (c
->log_level_max
< 0)
/* Log levels are 0..7 (syslog priorities), so a single digit suffices. */
5204 assert(c
->log_level_max
<= 7);
5206 buf
[0] = '0' + c
->log_level_max
;
5209 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5210 r
= symlink_atomic(buf
, p
);
5212 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5214 u
->exported_log_level_max
= true;
/* Serializes the extra journal fields as (le64 length, payload) pairs into a regular
 * file, written via a temp file + rename for atomicity. */
5218 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5219 _cleanup_close_
int fd
= -1;
5220 struct iovec
*iovec
;
5228 if (u
->exported_log_extra_fields
)
5231 if (c
->n_log_extra_fields
<= 0)
5234 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5235 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5237 for (i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5238 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5240 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5241 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5244 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5245 pattern
= strjoina(p
, ".XXXXXX");
5247 fd
= mkostemp_safe(pattern
);
5249 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5251 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5253 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5257 (void) fchmod(fd
, 0644);
5259 if (rename(pattern
, p
) < 0) {
5260 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5264 u
->exported_log_extra_fields
= true;
/* Cleanup path: remove the temp file if we didn't rename it into place. */
5268 (void) unlink(pattern
);
5272 static int unit_export_log_rate_limit_interval(Unit
*u
, const ExecContext
*c
) {
5273 _cleanup_free_
char *buf
= NULL
;
5280 if (u
->exported_log_rate_limit_interval
)
5283 if (c
->log_rate_limit_interval_usec
== 0)
5286 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5288 if (asprintf(&buf
, "%" PRIu64
, c
->log_rate_limit_interval_usec
) < 0)
5291 r
= symlink_atomic(buf
, p
);
5293 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5295 u
->exported_log_rate_limit_interval
= true;
5299 static int unit_export_log_rate_limit_burst(Unit
*u
, const ExecContext
*c
) {
5300 _cleanup_free_
char *buf
= NULL
;
5307 if (u
->exported_log_rate_limit_burst
)
5310 if (c
->log_rate_limit_burst
== 0)
5313 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5315 if (asprintf(&buf
, "%u", c
->log_rate_limit_burst
) < 0)
5318 r
= symlink_atomic(buf
, p
);
5320 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5322 u
->exported_log_rate_limit_burst
= true;
/* NOTE(review): extraction gaps here. Drives all the unit_export_* helpers above,
 * system-manager only, and skipped during test runs. */
5326 void unit_export_state_files(Unit
*u
) {
5327 const ExecContext
*c
;
5334 if (!MANAGER_IS_SYSTEM(u
->manager
))
5337 if (MANAGER_IS_TEST_RUN(u
->manager
))
5340 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5341 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5342 * the IPC system itself and PID 1 also log to the journal.
5344 * Note that these files really shouldn't be considered API for anyone else, as using a runtime file system as
5345 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5346 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5347 * namespace at least.
5349 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5350 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5353 (void) unit_export_invocation_id(u
);
5355 c
= unit_get_exec_context(u
);
5357 (void) unit_export_log_level_max(u
, c
);
5358 (void) unit_export_log_extra_fields(u
, c
);
5359 (void) unit_export_log_rate_limit_interval(u
, c
);
5360 (void) unit_export_log_rate_limit_burst(u
, c
);
5364 void unit_unlink_state_files(Unit
*u
) {
5372 if (!MANAGER_IS_SYSTEM(u
->manager
))
5375 /* Undoes the effect of unit_export_state() */
5377 if (u
->exported_invocation_id
) {
5378 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5381 u
->exported_invocation_id
= false;
5384 if (u
->exported_log_level_max
) {
5385 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5388 u
->exported_log_level_max
= false;
5391 if (u
->exported_log_extra_fields
) {
5392 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5395 u
->exported_log_extra_fields
= false;
5398 if (u
->exported_log_rate_limit_interval
) {
5399 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5402 u
->exported_log_rate_limit_interval
= false;
5405 if (u
->exported_log_rate_limit_burst
) {
5406 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5409 u
->exported_log_rate_limit_burst
= false;
/* NOTE(review): extraction gaps here. */
5413 int unit_prepare_exec(Unit
*u
) {
5418 /* Prepares everything so that we can fork off a process for this unit */
5420 (void) unit_realize_cgroup(u
);
/* One-shot accounting reset requested earlier; consume the flag here. */
5422 if (u
->reset_accounting
) {
5423 (void) unit_reset_cpu_accounting(u
);
5424 (void) unit_reset_ip_accounting(u
);
5425 u
->reset_accounting
= false;
5428 unit_export_state_files(u
);
5430 r
= unit_setup_exec_runtime(u
);
5434 r
= unit_setup_dynamic_creds(u
);
/* cg_kill callback used in "warn-only" mode: logs processes found in the cgroup
 * at startup without killing them (see unit_warn_leftover_processes() below). */
5441 static void log_leftover(pid_t pid
, int sig
, void *userdata
) {
5442 _cleanup_free_
char *comm
= NULL
;
5444 (void) get_process_comm(pid
, &comm
);
5446 if (comm
&& comm
[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5449 log_unit_warning(userdata
,
5450 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5451 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
/* Enumerates the unit's cgroup with signal 0 (no actual kill) purely to log stragglers. */
5455 void unit_warn_leftover_processes(Unit
*u
) {
5458 (void) unit_pick_cgroup_path(u
);
5460 if (!u
->cgroup_path
)
5463 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_leftover
, u
);
/* NOTE(review): extraction gaps here. */
/* Whether the unit currently needs the console: delegates to the type hook if present,
 * otherwise falls back to inspecting the ExecContext. */
5466 bool unit_needs_console(Unit
*u
) {
5468 UnitActiveState state
;
5472 state
= unit_active_state(u
);
5474 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5477 if (UNIT_VTABLE(u
)->needs_console
)
5478 return UNIT_VTABLE(u
)->needs_console(u
);
5480 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5481 ec
= unit_get_exec_context(u
);
5485 return exec_context_may_touch_console(ec
);
5488 const char *unit_label_path(Unit
*u
) {
5491 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5492 * when validating access checks. */
5494 p
= u
->source_path
?: u
->fragment_path
;
5498 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5499 if (path_equal(p
, "/dev/null"))
/* Validates that a PID may be attached to this unit: in range, not PID 1 / ourselves,
 * and not a kernel thread. Errors are reported via the bus error. */
5505 int unit_pid_attachable(Unit
*u
, pid_t pid
, sd_bus_error
*error
) {
5510 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5511 * and not a kernel thread either */
5513 /* First, a simple range check */
5514 if (!pid_is_valid(pid
))
5515 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier " PID_FMT
" is not valid.", pid
);
5517 /* Some extra safety check */
5518 if (pid
== 1 || pid
== getpid_cached())
5519 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
);
5521 /* Don't even begin to bother with kernel threads */
5522 r
= is_kernel_thread(pid
);
5524 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
);
5526 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
);
5528 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
);
/* String table for CollectMode= values, with the usual to/from-string lookup
 * functions generated below. */
5533 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5534 [COLLECT_INACTIVE
] = "inactive",
5535 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5538 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);