1 /* SPDX-License-Identifier: LGPL-2.1+ */
11 #include "sd-messages.h"
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
27 #include "id128-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
49 #include "umask-util.h"
50 #include "unit-name.h"
52 #include "user-util.h"
55 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
56 [UNIT_SERVICE
] = &service_vtable
,
57 [UNIT_SOCKET
] = &socket_vtable
,
58 [UNIT_TARGET
] = &target_vtable
,
59 [UNIT_DEVICE
] = &device_vtable
,
60 [UNIT_MOUNT
] = &mount_vtable
,
61 [UNIT_AUTOMOUNT
] = &automount_vtable
,
62 [UNIT_SWAP
] = &swap_vtable
,
63 [UNIT_TIMER
] = &timer_vtable
,
64 [UNIT_PATH
] = &path_vtable
,
65 [UNIT_SLICE
] = &slice_vtable
,
66 [UNIT_SCOPE
] = &scope_vtable
,
69 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
71 Unit
*unit_new(Manager
*m
, size_t size
) {
75 assert(size
>= sizeof(Unit
));
81 u
->names
= set_new(&string_hash_ops
);
86 u
->type
= _UNIT_TYPE_INVALID
;
87 u
->default_dependencies
= true;
88 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
89 u
->unit_file_preset
= -1;
90 u
->on_failure_job_mode
= JOB_REPLACE
;
91 u
->cgroup_inotify_wd
= -1;
92 u
->job_timeout
= USEC_INFINITY
;
93 u
->job_running_timeout
= USEC_INFINITY
;
94 u
->ref_uid
= UID_INVALID
;
95 u
->ref_gid
= GID_INVALID
;
96 u
->cpu_usage_last
= NSEC_INFINITY
;
97 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
99 u
->ip_accounting_ingress_map_fd
= -1;
100 u
->ip_accounting_egress_map_fd
= -1;
101 u
->ipv4_allow_map_fd
= -1;
102 u
->ipv6_allow_map_fd
= -1;
103 u
->ipv4_deny_map_fd
= -1;
104 u
->ipv6_deny_map_fd
= -1;
106 u
->last_section_private
= -1;
108 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
109 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
114 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
115 _cleanup_(unit_freep
) Unit
*u
= NULL
;
118 u
= unit_new(m
, size
);
122 r
= unit_add_name(u
, name
);
131 bool unit_has_name(Unit
*u
, const char *name
) {
135 return set_contains(u
->names
, (char*) name
);
138 static void unit_init(Unit
*u
) {
145 assert(u
->type
>= 0);
147 cc
= unit_get_cgroup_context(u
);
149 cgroup_context_init(cc
);
151 /* Copy in the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
155 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
156 cc
->io_accounting
= u
->manager
->default_io_accounting
;
157 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
158 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
159 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
160 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
161 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
163 if (u
->type
!= UNIT_SLICE
)
164 cc
->tasks_max
= u
->manager
->default_tasks_max
;
167 ec
= unit_get_exec_context(u
);
169 exec_context_init(ec
);
171 ec
->keyring_mode
= MANAGER_IS_SYSTEM(u
->manager
) ?
172 EXEC_KEYRING_SHARED
: EXEC_KEYRING_INHERIT
;
175 kc
= unit_get_kill_context(u
);
177 kill_context_init(kc
);
179 if (UNIT_VTABLE(u
)->init
)
180 UNIT_VTABLE(u
)->init(u
);
183 int unit_add_name(Unit
*u
, const char *text
) {
184 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
191 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
196 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
205 if (set_contains(u
->names
, s
))
207 if (hashmap_contains(u
->manager
->units
, s
))
210 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
213 t
= unit_name_to_type(s
);
217 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
220 r
= unit_name_to_instance(s
, &i
);
224 if (i
&& !unit_type_may_template(t
))
227 /* Ensure that this unit is either instanced or not instanced,
228 * but not both. Note that we do allow names with different
229 * instance names however! */
230 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
233 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
236 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
239 r
= set_put(u
->names
, s
);
244 r
= hashmap_put(u
->manager
->units
, s
, u
);
246 (void) set_remove(u
->names
, s
);
250 if (u
->type
== _UNIT_TYPE_INVALID
) {
253 u
->instance
= TAKE_PTR(i
);
255 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
262 unit_add_to_dbus_queue(u
);
266 int unit_choose_id(Unit
*u
, const char *name
) {
267 _cleanup_free_
char *t
= NULL
;
274 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
279 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
286 /* Selects one of the names of this unit as the id */
287 s
= set_get(u
->names
, (char*) name
);
291 /* Determine the new instance from the new id */
292 r
= unit_name_to_instance(s
, &i
);
301 unit_add_to_dbus_queue(u
);
306 int unit_set_description(Unit
*u
, const char *description
) {
311 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
315 unit_add_to_dbus_queue(u
);
320 bool unit_may_gc(Unit
*u
) {
321 UnitActiveState state
;
326 /* Checks whether the unit is ready to be unloaded for garbage collection.
327 * Returns true when the unit may be collected, and false if there's some
328 * reason to keep it loaded.
330 * References from other units are *not* checked here. Instead, this is done
331 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
340 state
= unit_active_state(u
);
342 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
343 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
344 UNIT_VTABLE(u
)->release_resources
)
345 UNIT_VTABLE(u
)->release_resources(u
);
350 if (sd_bus_track_count(u
->bus_track
) > 0)
353 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
354 switch (u
->collect_mode
) {
356 case COLLECT_INACTIVE
:
357 if (state
!= UNIT_INACTIVE
)
362 case COLLECT_INACTIVE_OR_FAILED
:
363 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
369 assert_not_reached("Unknown garbage collection mode");
372 if (u
->cgroup_path
) {
373 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
374 * around. Units with active processes should never be collected. */
376 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
378 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", u
->cgroup_path
);
383 if (UNIT_VTABLE(u
)->may_gc
&& !UNIT_VTABLE(u
)->may_gc(u
))
389 void unit_add_to_load_queue(Unit
*u
) {
391 assert(u
->type
!= _UNIT_TYPE_INVALID
);
393 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
396 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
397 u
->in_load_queue
= true;
400 void unit_add_to_cleanup_queue(Unit
*u
) {
403 if (u
->in_cleanup_queue
)
406 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
407 u
->in_cleanup_queue
= true;
410 void unit_add_to_gc_queue(Unit
*u
) {
413 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
419 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
420 u
->in_gc_queue
= true;
423 void unit_add_to_dbus_queue(Unit
*u
) {
425 assert(u
->type
!= _UNIT_TYPE_INVALID
);
427 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
430 /* Shortcut things if nobody cares */
431 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
432 sd_bus_track_count(u
->bus_track
) <= 0 &&
433 set_isempty(u
->manager
->private_buses
)) {
434 u
->sent_dbus_new_signal
= true;
438 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
439 u
->in_dbus_queue
= true;
442 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
445 if (u
->in_stop_when_unneeded_queue
)
448 if (!u
->stop_when_unneeded
)
451 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
454 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
455 u
->in_stop_when_unneeded_queue
= true;
458 static void bidi_set_free(Unit
*u
, Hashmap
*h
) {
465 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
467 HASHMAP_FOREACH_KEY(v
, other
, h
, i
) {
470 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
471 hashmap_remove(other
->dependencies
[d
], u
);
473 unit_add_to_gc_queue(other
);
479 static void unit_remove_transient(Unit
*u
) {
487 if (u
->fragment_path
)
488 (void) unlink(u
->fragment_path
);
490 STRV_FOREACH(i
, u
->dropin_paths
) {
491 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
493 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
497 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
501 /* Only drop transient drop-ins */
502 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
510 static void unit_free_requires_mounts_for(Unit
*u
) {
514 _cleanup_free_
char *path
;
516 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
520 char s
[strlen(path
) + 1];
522 PATH_FOREACH_PREFIX_MORE(s
, path
) {
526 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
530 (void) set_remove(x
, u
);
532 if (set_isempty(x
)) {
533 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
541 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
544 static void unit_done(Unit
*u
) {
553 if (UNIT_VTABLE(u
)->done
)
554 UNIT_VTABLE(u
)->done(u
);
556 ec
= unit_get_exec_context(u
);
558 exec_context_done(ec
);
560 cc
= unit_get_cgroup_context(u
);
562 cgroup_context_done(cc
);
565 void unit_free(Unit
*u
) {
573 u
->transient_file
= safe_fclose(u
->transient_file
);
575 if (!MANAGER_IS_RELOADING(u
->manager
))
576 unit_remove_transient(u
);
578 bus_unit_send_removed_signal(u
);
582 unit_dequeue_rewatch_pids(u
);
584 sd_bus_slot_unref(u
->match_bus_slot
);
585 sd_bus_track_unref(u
->bus_track
);
586 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
588 unit_free_requires_mounts_for(u
);
590 SET_FOREACH(t
, u
->names
, i
)
591 hashmap_remove_value(u
->manager
->units
, t
, u
);
593 if (!sd_id128_is_null(u
->invocation_id
))
594 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
608 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
609 bidi_set_free(u
, u
->dependencies
[d
]);
612 manager_unref_console(u
->manager
);
614 unit_release_cgroup(u
);
616 if (!MANAGER_IS_RELOADING(u
->manager
))
617 unit_unlink_state_files(u
);
619 unit_unref_uid_gid(u
, false);
621 (void) manager_update_failed_units(u
->manager
, u
, false);
622 set_remove(u
->manager
->startup_units
, u
);
624 unit_unwatch_all_pids(u
);
626 unit_ref_unset(&u
->slice
);
627 while (u
->refs_by_target
)
628 unit_ref_unset(u
->refs_by_target
);
630 if (u
->type
!= _UNIT_TYPE_INVALID
)
631 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
633 if (u
->in_load_queue
)
634 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
636 if (u
->in_dbus_queue
)
637 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
640 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
642 if (u
->in_cgroup_realize_queue
)
643 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
645 if (u
->in_cgroup_empty_queue
)
646 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
648 if (u
->in_cleanup_queue
)
649 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
651 if (u
->in_target_deps_queue
)
652 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
654 if (u
->in_stop_when_unneeded_queue
)
655 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
657 safe_close(u
->ip_accounting_ingress_map_fd
);
658 safe_close(u
->ip_accounting_egress_map_fd
);
660 safe_close(u
->ipv4_allow_map_fd
);
661 safe_close(u
->ipv6_allow_map_fd
);
662 safe_close(u
->ipv4_deny_map_fd
);
663 safe_close(u
->ipv6_deny_map_fd
);
665 bpf_program_unref(u
->ip_bpf_ingress
);
666 bpf_program_unref(u
->ip_bpf_ingress_installed
);
667 bpf_program_unref(u
->ip_bpf_egress
);
668 bpf_program_unref(u
->ip_bpf_egress_installed
);
670 bpf_program_unref(u
->bpf_device_control_installed
);
672 condition_free_list(u
->conditions
);
673 condition_free_list(u
->asserts
);
675 free(u
->description
);
676 strv_free(u
->documentation
);
677 free(u
->fragment_path
);
678 free(u
->source_path
);
679 strv_free(u
->dropin_paths
);
682 free(u
->job_timeout_reboot_arg
);
684 set_free_free(u
->names
);
691 UnitActiveState
unit_active_state(Unit
*u
) {
694 if (u
->load_state
== UNIT_MERGED
)
695 return unit_active_state(unit_follow_merge(u
));
697 /* After a reload it might happen that a unit is not correctly
698 * loaded but still has a process around. That's why we won't
699 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
701 return UNIT_VTABLE(u
)->active_state(u
);
704 const char* unit_sub_state_to_string(Unit
*u
) {
707 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
710 static int set_complete_move(Set
**s
, Set
**other
) {
718 return set_move(*s
, *other
);
720 *s
= TAKE_PTR(*other
);
725 static int hashmap_complete_move(Hashmap
**s
, Hashmap
**other
) {
733 return hashmap_move(*s
, *other
);
735 *s
= TAKE_PTR(*other
);
740 static int merge_names(Unit
*u
, Unit
*other
) {
748 r
= set_complete_move(&u
->names
, &other
->names
);
752 set_free_free(other
->names
);
756 SET_FOREACH(t
, u
->names
, i
)
757 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
762 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
767 assert(d
< _UNIT_DEPENDENCY_MAX
);
770 * If u does not have this dependency set allocated, there is no need
771 * to reserve anything. In that case other's set will be transferred
772 * as a whole to u by complete_move().
774 if (!u
->dependencies
[d
])
777 /* merge_dependencies() will skip a u-on-u dependency */
778 n_reserve
= hashmap_size(other
->dependencies
[d
]) - !!hashmap_get(other
->dependencies
[d
], u
);
780 return hashmap_reserve(u
->dependencies
[d
], n_reserve
);
783 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
789 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
793 assert(d
< _UNIT_DEPENDENCY_MAX
);
795 /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
796 HASHMAP_FOREACH_KEY(v
, back
, other
->dependencies
[d
], i
) {
799 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
800 * pointers back, and let's fix them up, to instead point to 'u'. */
802 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
804 /* Do not add dependencies between u and itself. */
805 if (hashmap_remove(back
->dependencies
[k
], other
))
806 maybe_warn_about_dependency(u
, other_id
, k
);
808 UnitDependencyInfo di_u
, di_other
, di_merged
;
810 /* Let's drop this dependency between "back" and "other", and let's create it between
811 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
812 * and any such dependency which might already exist */
814 di_other
.data
= hashmap_get(back
->dependencies
[k
], other
);
816 continue; /* dependency isn't set, let's try the next one */
818 di_u
.data
= hashmap_get(back
->dependencies
[k
], u
);
820 di_merged
= (UnitDependencyInfo
) {
821 .origin_mask
= di_u
.origin_mask
| di_other
.origin_mask
,
822 .destination_mask
= di_u
.destination_mask
| di_other
.destination_mask
,
825 r
= hashmap_remove_and_replace(back
->dependencies
[k
], other
, u
, di_merged
.data
);
827 log_warning_errno(r
, "Failed to remove/replace: back=%s other=%s u=%s: %m", back
->id
, other_id
, u
->id
);
830 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
836 /* Also do not move dependencies on u to itself */
837 back
= hashmap_remove(other
->dependencies
[d
], u
);
839 maybe_warn_about_dependency(u
, other_id
, d
);
841 /* The move cannot fail. The caller must have performed a reservation. */
842 assert_se(hashmap_complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
844 other
->dependencies
[d
] = hashmap_free(other
->dependencies
[d
]);
847 int unit_merge(Unit
*u
, Unit
*other
) {
849 const char *other_id
= NULL
;
854 assert(u
->manager
== other
->manager
);
855 assert(u
->type
!= _UNIT_TYPE_INVALID
);
857 other
= unit_follow_merge(other
);
862 if (u
->type
!= other
->type
)
865 if (!u
->instance
!= !other
->instance
)
868 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
871 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
880 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
884 other_id
= strdupa(other
->id
);
886 /* Make reservations to ensure merge_dependencies() won't fail */
887 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
888 r
= reserve_dependencies(u
, other
, d
);
890 * We don't rollback reservations if we fail. We don't have
891 * a way to undo reservations. A reservation is not a leak.
898 r
= merge_names(u
, other
);
902 /* Redirect all references */
903 while (other
->refs_by_target
)
904 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
906 /* Merge dependencies */
907 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
908 merge_dependencies(u
, other
, other_id
, d
);
910 other
->load_state
= UNIT_MERGED
;
911 other
->merged_into
= u
;
913 /* If there is still some data attached to the other node, we
914 * don't need it anymore, and can free it. */
915 if (other
->load_state
!= UNIT_STUB
)
916 if (UNIT_VTABLE(other
)->done
)
917 UNIT_VTABLE(other
)->done(other
);
919 unit_add_to_dbus_queue(u
);
920 unit_add_to_cleanup_queue(other
);
925 int unit_merge_by_name(Unit
*u
, const char *name
) {
926 _cleanup_free_
char *s
= NULL
;
933 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
937 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
944 other
= manager_get_unit(u
->manager
, name
);
946 return unit_merge(u
, other
);
948 return unit_add_name(u
, name
);
951 Unit
* unit_follow_merge(Unit
*u
) {
954 while (u
->load_state
== UNIT_MERGED
)
955 assert_se(u
= u
->merged_into
);
960 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
961 ExecDirectoryType dt
;
968 if (c
->working_directory
&& !c
->working_directory_missing_ok
) {
969 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
974 if (c
->root_directory
) {
975 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
981 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
986 for (dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
987 if (!u
->manager
->prefix
[dt
])
990 STRV_FOREACH(dp
, c
->directories
[dt
].paths
) {
991 _cleanup_free_
char *p
;
993 p
= strjoin(u
->manager
->prefix
[dt
], "/", *dp
);
997 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1003 if (!MANAGER_IS_SYSTEM(u
->manager
))
1006 if (c
->private_tmp
) {
1009 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
1010 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1015 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1020 if (!IN_SET(c
->std_output
,
1021 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1022 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1023 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
1024 !IN_SET(c
->std_error
,
1025 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1026 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1027 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
1030 /* If syslog or kernel logging is requested, make sure our own
1031 * logging daemon is run first. */
1033 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1040 const char *unit_description(Unit
*u
) {
1044 return u
->description
;
1046 return strna(u
->id
);
1049 static void print_unit_dependency_mask(FILE *f
, const char *kind
, UnitDependencyMask mask
, bool *space
) {
1051 UnitDependencyMask mask
;
1054 { UNIT_DEPENDENCY_FILE
, "file" },
1055 { UNIT_DEPENDENCY_IMPLICIT
, "implicit" },
1056 { UNIT_DEPENDENCY_DEFAULT
, "default" },
1057 { UNIT_DEPENDENCY_UDEV
, "udev" },
1058 { UNIT_DEPENDENCY_PATH
, "path" },
1059 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT
, "mountinfo-implicit" },
1060 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT
, "mountinfo-default" },
1061 { UNIT_DEPENDENCY_PROC_SWAP
, "proc-swap" },
1069 for (i
= 0; i
< ELEMENTSOF(table
); i
++) {
1074 if (FLAGS_SET(mask
, table
[i
].mask
)) {
1082 fputs(table
[i
].name
, f
);
1084 mask
&= ~table
[i
].mask
;
1091 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
1095 const char *prefix2
;
1097 timestamp0
[FORMAT_TIMESTAMP_MAX
],
1098 timestamp1
[FORMAT_TIMESTAMP_MAX
],
1099 timestamp2
[FORMAT_TIMESTAMP_MAX
],
1100 timestamp3
[FORMAT_TIMESTAMP_MAX
],
1101 timestamp4
[FORMAT_TIMESTAMP_MAX
],
1102 timespan
[FORMAT_TIMESPAN_MAX
];
1104 _cleanup_set_free_ Set
*following_set
= NULL
;
1110 assert(u
->type
>= 0);
1112 prefix
= strempty(prefix
);
1113 prefix2
= strjoina(prefix
, "\t");
1117 "%s\tDescription: %s\n"
1118 "%s\tInstance: %s\n"
1119 "%s\tUnit Load State: %s\n"
1120 "%s\tUnit Active State: %s\n"
1121 "%s\tState Change Timestamp: %s\n"
1122 "%s\tInactive Exit Timestamp: %s\n"
1123 "%s\tActive Enter Timestamp: %s\n"
1124 "%s\tActive Exit Timestamp: %s\n"
1125 "%s\tInactive Enter Timestamp: %s\n"
1127 "%s\tNeed Daemon Reload: %s\n"
1128 "%s\tTransient: %s\n"
1129 "%s\tPerpetual: %s\n"
1130 "%s\tGarbage Collection Mode: %s\n"
1133 "%s\tCGroup realized: %s\n",
1135 prefix
, unit_description(u
),
1136 prefix
, strna(u
->instance
),
1137 prefix
, unit_load_state_to_string(u
->load_state
),
1138 prefix
, unit_active_state_to_string(unit_active_state(u
)),
1139 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
1140 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
1141 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
1142 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
1143 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
1144 prefix
, yes_no(unit_may_gc(u
)),
1145 prefix
, yes_no(unit_need_daemon_reload(u
)),
1146 prefix
, yes_no(u
->transient
),
1147 prefix
, yes_no(u
->perpetual
),
1148 prefix
, collect_mode_to_string(u
->collect_mode
),
1149 prefix
, strna(unit_slice_name(u
)),
1150 prefix
, strna(u
->cgroup_path
),
1151 prefix
, yes_no(u
->cgroup_realized
));
1153 if (u
->cgroup_realized_mask
!= 0) {
1154 _cleanup_free_
char *s
= NULL
;
1155 (void) cg_mask_to_string(u
->cgroup_realized_mask
, &s
);
1156 fprintf(f
, "%s\tCGroup realized mask: %s\n", prefix
, strnull(s
));
1158 if (u
->cgroup_enabled_mask
!= 0) {
1159 _cleanup_free_
char *s
= NULL
;
1160 (void) cg_mask_to_string(u
->cgroup_enabled_mask
, &s
);
1161 fprintf(f
, "%s\tCGroup enabled mask: %s\n", prefix
, strnull(s
));
1163 m
= unit_get_own_mask(u
);
1165 _cleanup_free_
char *s
= NULL
;
1166 (void) cg_mask_to_string(m
, &s
);
1167 fprintf(f
, "%s\tCGroup own mask: %s\n", prefix
, strnull(s
));
1169 m
= unit_get_members_mask(u
);
1171 _cleanup_free_
char *s
= NULL
;
1172 (void) cg_mask_to_string(m
, &s
);
1173 fprintf(f
, "%s\tCGroup members mask: %s\n", prefix
, strnull(s
));
1176 SET_FOREACH(t
, u
->names
, i
)
1177 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
1179 if (!sd_id128_is_null(u
->invocation_id
))
1180 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
1181 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
1183 STRV_FOREACH(j
, u
->documentation
)
1184 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
1186 following
= unit_following(u
);
1188 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
1190 r
= unit_following_set(u
, &following_set
);
1194 SET_FOREACH(other
, following_set
, i
)
1195 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1198 if (u
->fragment_path
)
1199 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1202 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1204 STRV_FOREACH(j
, u
->dropin_paths
)
1205 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1207 if (u
->failure_action
!= EMERGENCY_ACTION_NONE
)
1208 fprintf(f
, "%s\tFailure Action: %s\n", prefix
, emergency_action_to_string(u
->failure_action
));
1209 if (u
->success_action
!= EMERGENCY_ACTION_NONE
)
1210 fprintf(f
, "%s\tSuccess Action: %s\n", prefix
, emergency_action_to_string(u
->success_action
));
1212 if (u
->job_timeout
!= USEC_INFINITY
)
1213 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1215 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1216 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1218 if (u
->job_timeout_reboot_arg
)
1219 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1221 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1222 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1224 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1226 "%s\tCondition Timestamp: %s\n"
1227 "%s\tCondition Result: %s\n",
1228 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1229 prefix
, yes_no(u
->condition_result
));
1231 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1233 "%s\tAssert Timestamp: %s\n"
1234 "%s\tAssert Result: %s\n",
1235 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1236 prefix
, yes_no(u
->assert_result
));
1238 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1239 UnitDependencyInfo di
;
1242 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
1245 fprintf(f
, "%s\t%s: %s (", prefix
, unit_dependency_to_string(d
), other
->id
);
1247 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1248 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1254 if (!hashmap_isempty(u
->requires_mounts_for
)) {
1255 UnitDependencyInfo di
;
1258 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1261 fprintf(f
, "%s\tRequiresMountsFor: %s (", prefix
, path
);
1263 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1264 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1270 if (u
->load_state
== UNIT_LOADED
) {
1273 "%s\tStopWhenUnneeded: %s\n"
1274 "%s\tRefuseManualStart: %s\n"
1275 "%s\tRefuseManualStop: %s\n"
1276 "%s\tDefaultDependencies: %s\n"
1277 "%s\tOnFailureJobMode: %s\n"
1278 "%s\tIgnoreOnIsolate: %s\n",
1279 prefix
, yes_no(u
->stop_when_unneeded
),
1280 prefix
, yes_no(u
->refuse_manual_start
),
1281 prefix
, yes_no(u
->refuse_manual_stop
),
1282 prefix
, yes_no(u
->default_dependencies
),
1283 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1284 prefix
, yes_no(u
->ignore_on_isolate
));
1286 if (UNIT_VTABLE(u
)->dump
)
1287 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1289 } else if (u
->load_state
== UNIT_MERGED
)
1291 "%s\tMerged into: %s\n",
1292 prefix
, u
->merged_into
->id
);
1293 else if (u
->load_state
== UNIT_ERROR
)
1294 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1296 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1297 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1300 job_dump(u
->job
, f
, prefix2
);
1303 job_dump(u
->nop_job
, f
, prefix2
);
1306 /* Common implementation for multiple backends */
1307 int unit_load_fragment_and_dropin(Unit
*u
) {
1312 /* Load a .{service,socket,...} file */
1313 r
= unit_load_fragment(u
);
1317 if (u
->load_state
== UNIT_STUB
)
1320 /* Load drop-in directory data. If u is an alias, we might be reloading the
1321 * target unit needlessly. But we cannot be sure which drops-ins have already
1322 * been loaded and which not, at least without doing complicated book-keeping,
1323 * so let's always reread all drop-ins. */
1324 return unit_load_dropin(unit_follow_merge(u
));
1327 /* Common implementation for multiple backends */
1328 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1333 /* Same as unit_load_fragment_and_dropin(), but whether
1334 * something can be loaded or not doesn't matter. */
1336 /* Load a .service/.socket/.slice/… file */
1337 r
= unit_load_fragment(u
);
1341 if (u
->load_state
== UNIT_STUB
)
1342 u
->load_state
= UNIT_LOADED
;
1344 /* Load drop-in directory data */
1345 return unit_load_dropin(unit_follow_merge(u
));
1348 void unit_add_to_target_deps_queue(Unit
*u
) {
1349 Manager
*m
= u
->manager
;
1353 if (u
->in_target_deps_queue
)
1356 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1357 u
->in_target_deps_queue
= true;
1360 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1364 if (target
->type
!= UNIT_TARGET
)
1367 /* Only add the dependency if both units are loaded, so that
1368 * that loop check below is reliable */
1369 if (u
->load_state
!= UNIT_LOADED
||
1370 target
->load_state
!= UNIT_LOADED
)
1373 /* If either side wants no automatic dependencies, then let's
1375 if (!u
->default_dependencies
||
1376 !target
->default_dependencies
)
1379 /* Don't create loops */
1380 if (hashmap_get(target
->dependencies
[UNIT_BEFORE
], u
))
1383 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1386 static int unit_add_slice_dependencies(Unit
*u
) {
1387 UnitDependencyMask mask
;
1390 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1393 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1394 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1396 mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1398 if (UNIT_ISSET(u
->slice
))
1399 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true, mask
);
1401 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1404 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1407 static int unit_add_mount_dependencies(Unit
*u
) {
1408 UnitDependencyInfo di
;
1415 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1416 char prefix
[strlen(path
) + 1];
1418 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1419 _cleanup_free_
char *p
= NULL
;
1422 r
= unit_name_from_path(prefix
, ".mount", &p
);
1426 m
= manager_get_unit(u
->manager
, p
);
1428 /* Make sure to load the mount unit if
1429 * it exists. If so the dependencies
1430 * on this unit will be added later
1431 * during the loading of the mount
1433 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1439 if (m
->load_state
!= UNIT_LOADED
)
1442 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1446 if (m
->fragment_path
) {
1447 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
1457 static int unit_add_startup_units(Unit
*u
) {
1461 c
= unit_get_cgroup_context(u
);
1465 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1466 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1467 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1470 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1474 return set_put(u
->manager
->startup_units
, u
);
1477 int unit_load(Unit
*u
) {
1482 if (u
->in_load_queue
) {
1483 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1484 u
->in_load_queue
= false;
1487 if (u
->type
== _UNIT_TYPE_INVALID
)
1490 if (u
->load_state
!= UNIT_STUB
)
1493 if (u
->transient_file
) {
1494 r
= fflush_and_check(u
->transient_file
);
1498 u
->transient_file
= safe_fclose(u
->transient_file
);
1499 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1502 if (UNIT_VTABLE(u
)->load
) {
1503 r
= UNIT_VTABLE(u
)->load(u
);
1508 if (u
->load_state
== UNIT_STUB
) {
1513 if (u
->load_state
== UNIT_LOADED
) {
1514 unit_add_to_target_deps_queue(u
);
1516 r
= unit_add_slice_dependencies(u
);
1520 r
= unit_add_mount_dependencies(u
);
1524 r
= unit_add_startup_units(u
);
1528 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1529 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1534 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1535 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1537 unit_update_cgroup_members_masks(u
);
1540 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1542 unit_add_to_dbus_queue(unit_follow_merge(u
));
1543 unit_add_to_gc_queue(u
);
1548 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1549 * return ENOEXEC to ensure units are placed in this state after loading */
1551 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1552 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1556 unit_add_to_dbus_queue(u
);
1557 unit_add_to_gc_queue(u
);
1559 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1562 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1569 /* If the condition list is empty, then it is true */
1573 /* Otherwise, if all of the non-trigger conditions apply and
1574 * if any of the trigger conditions apply (unless there are
1575 * none) we return true */
1576 LIST_FOREACH(conditions
, c
, first
) {
1579 r
= condition_test(c
);
1582 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1584 c
->trigger
? "|" : "",
1585 c
->negate
? "!" : "",
1591 c
->trigger
? "|" : "",
1592 c
->negate
? "!" : "",
1594 condition_result_to_string(c
->result
));
1596 if (!c
->trigger
&& r
<= 0)
1599 if (c
->trigger
&& triggered
<= 0)
1603 return triggered
!= 0;
1606 static bool unit_condition_test(Unit
*u
) {
1609 dual_timestamp_get(&u
->condition_timestamp
);
1610 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1612 return u
->condition_result
;
1615 static bool unit_assert_test(Unit
*u
) {
1618 dual_timestamp_get(&u
->assert_timestamp
);
1619 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1621 return u
->assert_result
;
1624 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1625 DISABLE_WARNING_FORMAT_NONLITERAL
;
1626 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1630 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1632 const UnitStatusMessageFormats
*format_table
;
1635 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1637 if (t
!= JOB_RELOAD
) {
1638 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1640 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1646 /* Return generic strings */
1648 return "Starting %s.";
1649 else if (t
== JOB_STOP
)
1650 return "Stopping %s.";
1652 return "Reloading %s.";
1655 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1660 /* Reload status messages have traditionally not been printed to console. */
1661 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1664 format
= unit_get_status_message_format(u
, t
);
1666 DISABLE_WARNING_FORMAT_NONLITERAL
;
1667 unit_status_printf(u
, "", format
);
1671 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1672 const char *format
, *mid
;
1677 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1680 if (log_on_console())
1683 /* We log status messages for all units and all operations. */
1685 format
= unit_get_status_message_format(u
, t
);
1687 DISABLE_WARNING_FORMAT_NONLITERAL
;
1688 (void) snprintf(buf
, sizeof buf
, format
, unit_description(u
));
1691 mid
= t
== JOB_START
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR
:
1692 t
== JOB_STOP
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR
:
1693 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR
;
1695 /* Note that we deliberately use LOG_MESSAGE() instead of
1696 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1697 * closely what is written to screen using the status output,
1698 * which is supposed the highest level, friendliest output
1699 * possible, which means we should avoid the low-level unit
1701 log_struct(LOG_INFO
,
1702 LOG_MESSAGE("%s", buf
),
1704 LOG_UNIT_INVOCATION_ID(u
),
1708 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1711 assert(t
< _JOB_TYPE_MAX
);
1713 unit_status_log_starting_stopping_reloading(u
, t
);
1714 unit_status_print_starting_stopping(u
, t
);
1717 int unit_start_limit_test(Unit
*u
) {
1722 if (ratelimit_below(&u
->start_limit
)) {
1723 u
->start_limit_hit
= false;
1727 log_unit_warning(u
, "Start request repeated too quickly.");
1728 u
->start_limit_hit
= true;
1730 reason
= strjoina("unit ", u
->id
, " failed");
1732 return emergency_action(u
->manager
, u
->start_limit_action
,
1733 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1734 u
->reboot_arg
, reason
);
1737 bool unit_shall_confirm_spawn(Unit
*u
) {
1740 if (manager_is_confirm_spawn_disabled(u
->manager
))
1743 /* For some reasons units remaining in the same process group
1744 * as PID 1 fail to acquire the console even if it's not used
1745 * by any process. So skip the confirmation question for them. */
1746 return !unit_get_exec_context(u
)->same_pgrp
;
1749 static bool unit_verify_deps(Unit
*u
) {
1756 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1757 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1758 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1759 * conjunction with After= as for them any such check would make things entirely racy. */
1761 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1763 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1766 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1767 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1776 * -EBADR: This unit type does not support starting.
1777 * -EALREADY: Unit is already started.
1778 * -EAGAIN: An operation is already in progress. Retry later.
1779 * -ECANCELED: Too many requests for now.
1780 * -EPROTO: Assert failed
1781 * -EINVAL: Unit not loaded
1782 * -EOPNOTSUPP: Unit type not supported
1783 * -ENOLINK: The necessary dependencies are not fulfilled.
1784 * -ESTALE: This unit has been started before and can't be started a second time
1786 int unit_start(Unit
*u
) {
1787 UnitActiveState state
;
1792 /* If this is already started, then this will succeed. Note
1793 * that this will even succeed if this unit is not startable
1794 * by the user. This is relied on to detect when we need to
1795 * wait for units and when waiting is finished. */
1796 state
= unit_active_state(u
);
1797 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1800 /* Units that aren't loaded cannot be started */
1801 if (u
->load_state
!= UNIT_LOADED
)
1804 /* Refuse starting scope units more than once */
1805 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1808 /* If the conditions failed, don't do anything at all. If we
1809 * already are activating this call might still be useful to
1810 * speed up activation in case there is some hold-off time,
1811 * but we don't want to recheck the condition in that case. */
1812 if (state
!= UNIT_ACTIVATING
&&
1813 !unit_condition_test(u
)) {
1814 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1818 /* If the asserts failed, fail the entire job */
1819 if (state
!= UNIT_ACTIVATING
&&
1820 !unit_assert_test(u
)) {
1821 log_unit_notice(u
, "Starting requested but asserts failed.");
1825 /* Units of types that aren't supported cannot be
1826 * started. Note that we do this test only after the condition
1827 * checks, so that we rather return condition check errors
1828 * (which are usually not considered a true failure) than "not
1829 * supported" errors (which are considered a failure).
1831 if (!unit_supported(u
))
1834 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1835 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1836 * effect anymore, due to a reload or due to a failed condition. */
1837 if (!unit_verify_deps(u
))
1840 /* Forward to the main object, if we aren't it. */
1841 following
= unit_following(u
);
1843 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1844 return unit_start(following
);
1847 /* If it is stopped, but we cannot start it, then fail */
1848 if (!UNIT_VTABLE(u
)->start
)
1851 /* We don't suppress calls to ->start() here when we are
1852 * already starting, to allow this request to be used as a
1853 * "hurry up" call, for example when the unit is in some "auto
1854 * restart" state where it waits for a holdoff timer to elapse
1855 * before it will start again. */
1857 unit_add_to_dbus_queue(u
);
1859 return UNIT_VTABLE(u
)->start(u
);
1862 bool unit_can_start(Unit
*u
) {
1865 if (u
->load_state
!= UNIT_LOADED
)
1868 if (!unit_supported(u
))
1871 /* Scope units may be started only once */
1872 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1875 return !!UNIT_VTABLE(u
)->start
;
1878 bool unit_can_isolate(Unit
*u
) {
1881 return unit_can_start(u
) &&
1886 * -EBADR: This unit type does not support stopping.
1887 * -EALREADY: Unit is already stopped.
1888 * -EAGAIN: An operation is already in progress. Retry later.
1890 int unit_stop(Unit
*u
) {
1891 UnitActiveState state
;
1896 state
= unit_active_state(u
);
1897 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1900 following
= unit_following(u
);
1902 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1903 return unit_stop(following
);
1906 if (!UNIT_VTABLE(u
)->stop
)
1909 unit_add_to_dbus_queue(u
);
1911 return UNIT_VTABLE(u
)->stop(u
);
1914 bool unit_can_stop(Unit
*u
) {
1917 if (!unit_supported(u
))
1923 return !!UNIT_VTABLE(u
)->stop
;
1927 * -EBADR: This unit type does not support reloading.
1928 * -ENOEXEC: Unit is not started.
1929 * -EAGAIN: An operation is already in progress. Retry later.
1931 int unit_reload(Unit
*u
) {
1932 UnitActiveState state
;
1937 if (u
->load_state
!= UNIT_LOADED
)
1940 if (!unit_can_reload(u
))
1943 state
= unit_active_state(u
);
1944 if (state
== UNIT_RELOADING
)
1947 if (state
!= UNIT_ACTIVE
) {
1948 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1952 following
= unit_following(u
);
1954 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1955 return unit_reload(following
);
1958 unit_add_to_dbus_queue(u
);
1960 if (!UNIT_VTABLE(u
)->reload
) {
1961 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1962 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
1966 return UNIT_VTABLE(u
)->reload(u
);
1969 bool unit_can_reload(Unit
*u
) {
1972 if (UNIT_VTABLE(u
)->can_reload
)
1973 return UNIT_VTABLE(u
)->can_reload(u
);
1975 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1978 return UNIT_VTABLE(u
)->reload
;
1981 bool unit_is_unneeded(Unit
*u
) {
1982 static const UnitDependency deps
[] = {
1992 if (!u
->stop_when_unneeded
)
1995 /* Don't clean up while the unit is transitioning or is even inactive. */
1996 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
2001 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
2006 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2007 * restart, then don't clean this one up. */
2009 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
) {
2013 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2016 if (unit_will_restart(other
))
2024 static void check_unneeded_dependencies(Unit
*u
) {
2026 static const UnitDependency deps
[] = {
2036 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2038 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
2043 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
)
2044 unit_submit_to_stop_when_unneeded_queue(other
);
2048 static void unit_check_binds_to(Unit
*u
) {
2049 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2061 if (unit_active_state(u
) != UNIT_ACTIVE
)
2064 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
2068 if (!other
->coldplugged
)
2069 /* We might yet create a job for the other unit… */
2072 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2082 /* If stopping a unit fails continuously we might enter a stop
2083 * loop here, hence stop acting on the service being
2084 * unnecessary after a while. */
2085 if (!ratelimit_below(&u
->auto_stop_ratelimit
)) {
2086 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2091 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2093 /* A unit we need to run is gone. Sniff. Let's stop this. */
2094 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2096 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2099 static void retroactively_start_dependencies(Unit
*u
) {
2105 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2107 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2108 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2109 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2110 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2112 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2113 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2114 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2115 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2117 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2118 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2119 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2120 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
2122 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2123 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2124 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2126 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2127 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2128 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2131 static void retroactively_stop_dependencies(Unit
*u
) {
2137 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2139 /* Pull down units which are bound to us recursively if enabled */
2140 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2141 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2142 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2145 void unit_start_on_failure(Unit
*u
) {
2153 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2156 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2158 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2159 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2161 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, &error
, NULL
);
2163 log_unit_warning_errno(u
, r
, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error
, r
));
2167 void unit_trigger_notify(Unit
*u
) {
2174 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2175 if (UNIT_VTABLE(other
)->trigger_notify
)
2176 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2179 static int unit_log_resources(Unit
*u
) {
2180 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ 4];
2181 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
;
2182 size_t n_message_parts
= 0, n_iovec
= 0;
2183 char* message_parts
[3 + 1], *t
;
2184 nsec_t nsec
= NSEC_INFINITY
;
2185 CGroupIPAccountingMetric m
;
2186 bool any_traffic
= false;
2189 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2190 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2191 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2192 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2193 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2198 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2199 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2200 * information and the complete data in structured fields. */
2202 (void) unit_get_cpu_usage(u
, &nsec
);
2203 if (nsec
!= NSEC_INFINITY
) {
2204 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2206 /* Format the CPU time for inclusion in the structured log message */
2207 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2211 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2213 /* Format the CPU time for inclusion in the human language message string */
2214 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2215 t
= strjoin("consumed ", buf
, " CPU time");
2221 message_parts
[n_message_parts
++] = t
;
2224 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2225 char buf
[FORMAT_BYTES_MAX
] = "";
2226 uint64_t value
= UINT64_MAX
;
2228 assert(ip_fields
[m
]);
2230 (void) unit_get_ip_accounting(u
, m
, &value
);
2231 if (value
== UINT64_MAX
)
2236 /* Format IP accounting data for inclusion in the structured log message */
2237 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2241 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2243 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2244 * bytes counters (and not for the packets counters) */
2245 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2247 igress
= strjoin("received ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2252 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2254 egress
= strjoin("sent ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2264 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2266 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2270 k
= strdup("no IP traffic");
2276 message_parts
[n_message_parts
++] = k
;
2279 /* Is there any accounting data available at all? */
2285 if (n_message_parts
== 0)
2286 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2288 _cleanup_free_
char *joined
;
2290 message_parts
[n_message_parts
] = NULL
;
2292 joined
= strv_join(message_parts
, ", ");
2298 joined
[0] = ascii_toupper(joined
[0]);
2299 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2302 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2303 * and hence don't increase n_iovec for them */
2304 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2305 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2307 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2308 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2310 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2311 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2313 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2317 for (i
= 0; i
< n_message_parts
; i
++)
2318 free(message_parts
[i
]);
2320 for (i
= 0; i
< n_iovec
; i
++)
2321 free(iovec
[i
].iov_base
);
2327 static void unit_update_on_console(Unit
*u
) {
2332 b
= unit_needs_console(u
);
2333 if (u
->on_console
== b
)
2338 manager_ref_console(u
->manager
);
2340 manager_unref_console(u
->manager
);
2343 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2349 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2350 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2352 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2353 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2354 * remounted this function will be called too! */
2358 /* Update timestamps for state changes */
2359 if (!MANAGER_IS_RELOADING(m
)) {
2360 dual_timestamp_get(&u
->state_change_timestamp
);
2362 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2363 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2364 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2365 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2367 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2368 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2369 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2370 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2373 /* Keep track of failed units */
2374 (void) manager_update_failed_units(u
->manager
, u
, ns
== UNIT_FAILED
);
2376 /* Make sure the cgroup and state files are always removed when we become inactive */
2377 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2378 unit_prune_cgroup(u
);
2379 unit_unlink_state_files(u
);
2382 unit_update_on_console(u
);
2387 if (u
->job
->state
== JOB_WAITING
)
2389 /* So we reached a different state for this
2390 * job. Let's see if we can run it now if it
2391 * failed previously due to EAGAIN. */
2392 job_add_to_run_queue(u
->job
);
2394 /* Let's check whether this state change constitutes a
2395 * finished job, or maybe contradicts a running job and
2396 * hence needs to invalidate jobs. */
2398 switch (u
->job
->type
) {
2401 case JOB_VERIFY_ACTIVE
:
2403 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2404 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2405 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2408 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2409 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2415 case JOB_RELOAD_OR_START
:
2416 case JOB_TRY_RELOAD
:
2418 if (u
->job
->state
== JOB_RUNNING
) {
2419 if (ns
== UNIT_ACTIVE
)
2420 job_finish_and_invalidate(u
->job
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2421 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2424 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2425 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2433 case JOB_TRY_RESTART
:
2435 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2436 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2437 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2439 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2445 assert_not_reached("Job type unknown");
2451 if (!MANAGER_IS_RELOADING(m
)) {
2453 /* If this state change happened without being
2454 * requested by a job, then let's retroactively start
2455 * or stop dependencies. We skip that step when
2456 * deserializing, since we don't want to create any
2457 * additional jobs just because something is already
2461 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2462 retroactively_start_dependencies(u
);
2463 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2464 retroactively_stop_dependencies(u
);
2467 /* stop unneeded units regardless if going down was expected or not */
2468 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2469 check_unneeded_dependencies(u
);
2471 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2472 log_unit_debug(u
, "Unit entered failed state.");
2474 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2475 unit_start_on_failure(u
);
2478 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2479 /* This unit just finished starting up */
2481 if (u
->type
== UNIT_SERVICE
) {
2482 /* Write audit record if we have just finished starting up */
2483 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, true);
2487 manager_send_unit_plymouth(m
, u
);
2490 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2491 /* This unit just stopped/failed. */
2493 if (u
->type
== UNIT_SERVICE
) {
2496 /* Write audit record if we have just finished shutting down */
2497 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, ns
== UNIT_INACTIVE
);
2498 u
->in_audit
= false;
2500 /* Hmm, if there was no start record written write it now, so that we always
2501 * have a nice pair */
2502 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, ns
== UNIT_INACTIVE
);
2504 if (ns
== UNIT_INACTIVE
)
2505 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, true);
2509 /* Write a log message about consumed resources */
2510 unit_log_resources(u
);
2514 manager_recheck_journal(m
);
2515 manager_recheck_dbus(m
);
2517 unit_trigger_notify(u
);
2519 if (!MANAGER_IS_RELOADING(u
->manager
)) {
2520 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2521 unit_submit_to_stop_when_unneeded_queue(u
);
2523 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2524 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2525 * without ever entering started.) */
2526 unit_check_binds_to(u
);
2528 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2529 reason
= strjoina("unit ", u
->id
, " failed");
2530 (void) emergency_action(u
->manager
, u
->failure_action
, 0,
2531 u
->reboot_arg
, reason
);
2532 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2533 reason
= strjoina("unit ", u
->id
, " succeeded");
2534 (void) emergency_action(u
->manager
, u
->success_action
, 0,
2535 u
->reboot_arg
, reason
);
2539 unit_add_to_dbus_queue(u
);
2540 unit_add_to_gc_queue(u
);
2543 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2547 assert(pid_is_valid(pid
));
2549 /* Watch a specific PID */
2551 r
= set_ensure_allocated(&u
->pids
, NULL
);
2555 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2559 /* First try, let's add the unit keyed by "pid". */
2560 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2566 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2567 * to an array of Units rather than just a Unit), lists us already. */
2569 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2571 for (; array
[n
]; n
++)
2575 if (found
) /* Found it already? if so, do nothing */
2580 /* Allocate a new array */
2581 new_array
= new(Unit
*, n
+ 2);
2585 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2587 new_array
[n
+1] = NULL
;
2589 /* Add or replace the old array */
2590 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2601 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
2608 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2612 assert(pid_is_valid(pid
));
2614 /* First let's drop the unit in case it's keyed as "pid". */
2615 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2617 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2618 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2622 /* Let's iterate through the array, dropping our own entry */
2623 for (n
= 0; array
[n
]; n
++)
2625 array
[m
++] = array
[n
];
2629 /* The array is now empty, remove the entire entry */
2630 assert(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2635 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2638 void unit_unwatch_all_pids(Unit
*u
) {
2641 while (!set_isempty(u
->pids
))
2642 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2644 u
->pids
= set_free(u
->pids
);
2647 static void unit_tidy_watch_pids(Unit
*u
) {
2648 pid_t except1
, except2
;
2654 /* Cleans dead PIDs from our list */
2656 except1
= unit_main_pid(u
);
2657 except2
= unit_control_pid(u
);
2659 SET_FOREACH(e
, u
->pids
, i
) {
2660 pid_t pid
= PTR_TO_PID(e
);
2662 if (pid
== except1
|| pid
== except2
)
2665 if (!pid_is_unwaited(pid
))
2666 unit_unwatch_pid(u
, pid
);
2670 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2676 unit_tidy_watch_pids(u
);
2677 unit_watch_all_pids(u
);
2679 /* If the PID set is empty now, then let's finish this off. */
2680 unit_synthesize_cgroup_empty_event(u
);
2685 int unit_enqueue_rewatch_pids(Unit
*u
) {
2690 if (!u
->cgroup_path
)
2693 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2696 if (r
> 0) /* On unified we can use proper notifications */
2699 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2700 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2701 * involves issuing kill(pid, 0) on all processes we watch. */
2703 if (!u
->rewatch_pids_event_source
) {
2704 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2706 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2708 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2710 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
2712 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: m");
2714 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2716 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2719 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2721 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
/* Cancels a previously enqueued PID-rewatch request: turns the defer event
 * source off (best-effort — failure is only logged, not propagated) and drops
 * our reference to it. No-op when nothing was enqueued. */
2726 void unit_dequeue_rewatch_pids(Unit
*u
) {
2730 if (!u
->rewatch_pids_event_source
)
2733 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2735 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2737 u
->rewatch_pids_event_source
= sd_event_source_unref(u
->rewatch_pids_event_source
);
/* Returns whether enqueueing a job of type 'j' makes sense for unit 'u'.
 * Dispatches on the job type; several case labels and the surrounding switch
 * scaffolding are elided in this excerpt. */
2740 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2742 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2746 case JOB_VERIFY_ACTIVE
:
2749 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2750 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2755 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2756 * external events), hence it makes no sense to permit enqueing such a request either. */
2757 return !u
->perpetual
;
2760 case JOB_TRY_RESTART
:
2761 return unit_can_stop(u
) && unit_can_start(u
);
2764 case JOB_TRY_RELOAD
:
2765 return unit_can_reload(u
);
2767 case JOB_RELOAD_OR_START
:
2768 return unit_can_reload(u
) && unit_can_start(u
);
2771 assert_not_reached("Invalid job type");
/* Logs a warning when a dependency is dropped during unit merging — either a
 * self-dependency ("Dependency X=Y dropped") or one redirected into a merged
 * unit. Only dependency types where dropping is surprising are reported. */
2775 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2778 /* Only warn about some unit types */
2779 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2782 if (streq_ptr(u
->id
, other
))
2783 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2785 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
/* Records a dependency edge in hashmap 'h' keyed by the peer unit 'other',
 * where the value is a UnitDependencyInfo packed into the hashmap's void*
 * slot (the assert_cc below guarantees the sizes match). If an entry already
 * exists, the origin/destination mask bits are OR-ed in and the entry is
 * updated; otherwise a fresh entry is inserted.
 * NOTE(review): the parameter list's first entries (the hashmap and 'other')
 * and several error-return lines are elided from this excerpt. */
2788 static int unit_add_dependency_hashmap(
2791 UnitDependencyMask origin_mask
,
2792 UnitDependencyMask destination_mask
) {
2794 UnitDependencyInfo info
;
2799 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2800 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2801 assert(origin_mask
> 0 || destination_mask
> 0);
2803 r
= hashmap_ensure_allocated(h
, NULL
);
2807 assert_cc(sizeof(void*) == sizeof(info
));
2809 info
.data
= hashmap_get(*h
, other
);
2811 /* Entry already exists. Add in our mask. */
2813 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
2814 FLAGS_SET(destination_mask
, info
.destination_mask
))
2817 info
.origin_mask
|= origin_mask
;
2818 info
.destination_mask
|= destination_mask
;
2820 r
= hashmap_update(*h
, other
, info
.data
);
2822 info
= (UnitDependencyInfo
) {
2823 .origin_mask
= origin_mask
,
2824 .destination_mask
= destination_mask
,
2827 r
= hashmap_put(*h
, other
, info
.data
);
2835 int unit_add_dependency(
2840 UnitDependencyMask mask
) {
2842 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2843 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2844 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2845 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2846 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2847 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2848 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2849 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2850 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2851 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2852 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2853 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2854 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2855 [UNIT_BEFORE
] = UNIT_AFTER
,
2856 [UNIT_AFTER
] = UNIT_BEFORE
,
2857 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2858 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2859 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2860 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2861 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2862 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2863 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2864 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2866 Unit
*original_u
= u
, *original_other
= other
;
2870 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2873 u
= unit_follow_merge(u
);
2874 other
= unit_follow_merge(other
);
2876 /* We won't allow dependencies on ourselves. We will not
2877 * consider them an error however. */
2879 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2883 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2884 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2885 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2889 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2893 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2894 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2899 if (add_reference
) {
2900 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2904 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2909 unit_add_to_dbus_queue(u
);
/* Convenience wrapper: registers both dependency types 'd' and 'e' from 'u' to
 * 'other' with the same reference/mask settings. The error check between the
 * two calls is elided in this excerpt. */
2913 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2918 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2922 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
/* Resolves a template unit name (e.g. "foo@.service") against 'u': if 'u' has
 * an instance string, that is inserted; otherwise u's own prefix is used as
 * the instance. Non-template names are passed through. Any allocation ends up
 * in *buf; the name to use is returned via *ret (assignment of *ret happens in
 * lines elided from this excerpt — confirm upstream). */
2925 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
2933 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2940 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2942 _cleanup_free_
char *i
= NULL
;
2944 r
= unit_name_to_prefix(u
->id
, &i
);
2948 r
= unit_name_replace_instance(name
, i
, buf
);
/* Like unit_add_dependency(), but takes the peer by name: resolves template
 * names against 'u', loads (or looks up) the named unit via the manager, then
 * delegates to unit_add_dependency(). */
2957 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2958 _cleanup_free_
char *buf
= NULL
;
2965 r
= resolve_template(u
, name
, &buf
, &name
);
2969 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2973 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
/* Same as unit_add_dependency_by_name() but registers two dependency types at
 * once via unit_add_two_dependencies(). */
2976 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2977 _cleanup_free_
char *buf
= NULL
;
2984 r
= resolve_template(u
, name
, &buf
, &name
);
2988 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2992 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* Overrides the unit search path by exporting $SYSTEMD_UNIT_PATH. */
2995 int set_unit_path(const char *p
) {
2996 /* This is mostly for debug purposes */
2997 if (setenv("SYSTEMD_UNIT_PATH", p
, 1) < 0)
/* Returns a newly allocated D-Bus object path for the unit, derived from its
 * primary name. Caller frees. */
3003 char *unit_dbus_path(Unit
*u
) {
3009 return unit_dbus_path_from_name(u
->id
);
/* Returns a newly allocated D-Bus object path derived from the unit's
 * invocation ID string; bails out early when no invocation ID is set. */
3012 char *unit_dbus_path_invocation_id(Unit
*u
) {
3015 if (sd_id128_is_null(u
->invocation_id
))
3018 return unit_dbus_path_from_name(u
->invocation_id_string
);
/* Assigns 'slice' as u's parent slice, after a series of sanity checks:
 * u must have a cgroup context, must not itself be a slice, must be inactive,
 * 'slice' must really be a slice unit, init.scope may only sit in the root
 * slice, and a slice already realized in the cgroup tree cannot be changed. */
3021 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3025 /* Sets the unit slice if it has not been set before. Is extra
3026 * careful, to only allow this for units that actually have a
3027 * cgroup context. Also, we don't allow to set this for slices
3028 * (since the parent slice is derived from the name). Make
3029 * sure the unit we set is actually a slice. */
3031 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3034 if (u
->type
== UNIT_SLICE
)
3037 if (unit_active_state(u
) != UNIT_INACTIVE
)
3040 if (slice
->type
!= UNIT_SLICE
)
3043 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3044 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3047 if (UNIT_DEREF(u
->slice
) == slice
)
3050 /* Disallow slice changes if @u is already bound to cgroups */
3051 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
3054 unit_ref_set(&u
->slice
, u
, slice
);
/* Picks and sets a default slice for 'u' when none is configured:
 * instantiated units get a per-template "system-<prefix>.slice" (system
 * manager) or "<prefix>.slice" (user manager); otherwise system.slice or,
 * for init.scope / user managers, the root slice is used. The chosen slice
 * unit is loaded through the manager and applied via unit_set_slice(). */
3058 int unit_set_default_slice(Unit
*u
) {
3059 _cleanup_free_
char *b
= NULL
;
3060 const char *slice_name
;
3066 if (UNIT_ISSET(u
->slice
))
3070 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3072 /* Implicitly place all instantiated units in their
3073 * own per-template slice */
3075 r
= unit_name_to_prefix(u
->id
, &prefix
);
3079 /* The prefix is already escaped, but it might include
3080 * "-" which has a special meaning for slice units,
3081 * hence escape it here extra. */
3082 escaped
= unit_name_escape(prefix
);
3086 if (MANAGER_IS_SYSTEM(u
->manager
))
3087 b
= strjoin("system-", escaped
, ".slice");
3089 b
= strappend(escaped
, ".slice");
3096 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
3097 ? SPECIAL_SYSTEM_SLICE
3098 : SPECIAL_ROOT_SLICE
;
3100 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3104 return unit_set_slice(u
, slice
);
/* Returns the name of the unit's parent slice, or bails out early when no
 * slice reference is set. */
3107 const char *unit_slice_name(Unit
*u
) {
3110 if (!UNIT_ISSET(u
->slice
))
3113 return UNIT_DEREF(u
->slice
)->id
;
/* Loads the unit that shares u's name but carries suffix 'type' (e.g. the
 * ".socket" counterpart of a ".service"), returning it in *_found. Refuses
 * (in elided lines) when the renamed unit would be 'u' itself — the trailing
 * assert documents that invariant. */
3116 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3117 _cleanup_free_
char *t
= NULL
;
3124 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3127 if (unit_has_name(u
, t
))
3130 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3131 assert(r
< 0 || *_found
!= u
);
/* D-Bus match callback for org.freedesktop.DBus.NameOwnerChanged: parses the
 * (name, old_owner, new_owner) triple, normalizes empty owners to NULL, and
 * forwards the change to the unit type's bus_name_owner_change() vtable hook
 * if one is provided.
 * NOTE(review): 'u' is presumably recovered from 'userdata' in elided lines. */
3135 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3136 const char *name
, *old_owner
, *new_owner
;
3143 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3145 bus_log_parse_error(r
);
3149 old_owner
= empty_to_null(old_owner
);
3150 new_owner
= empty_to_null(new_owner
);
3152 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3153 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
/* Installs an asynchronous bus match for NameOwnerChanged signals concerning
 * 'name', dispatching to signal_name_owner_changed(). The resulting slot is
 * stored in u->match_bus_slot; bails out early when a match already exists. */
3158 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3165 if (u
->match_bus_slot
)
3168 match
= strjoina("type='signal',"
3169 "sender='org.freedesktop.DBus',"
3170 "path='/org/freedesktop/DBus',"
3171 "interface='org.freedesktop.DBus',"
3172 "member='NameOwnerChanged',"
3173 "arg0='", name
, "'");
3175 return sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
/* Starts watching a bus name for this unit: installs the NameOwnerChanged
 * match immediately when the API bus is up, and records the name in the
 * manager's watch_bus hashmap. On hashmap failure the just-installed match
 * slot is released again so no stale subscription remains. */
3178 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3184 /* Watch a specific name on the bus. We only support one unit
3185 * watching each name for now. */
3187 if (u
->manager
->api_bus
) {
3188 /* If the bus is already available, install the match directly.
3189 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3190 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3192 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3195 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3197 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3198 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
/* Reverses unit_watch_bus_name(): drops the watch_bus hashmap entry for this
 * unit/name pair and releases the bus match slot. */
3204 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3208 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3209 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
/* A unit type supports (de)serialization iff its vtable provides both the
 * serialize and deserialize_item hooks. */
3212 bool unit_can_serialize(Unit
*u
) {
3215 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
/* Serializes a CGroupMask as "key=string" by first formatting the mask into a
 * controller-name string. */
3218 static int serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3219 _cleanup_free_
char *s
= NULL
;
3228 r
= cg_mask_to_string(mask
, &s
);
3230 return log_error_errno(r
, "Failed to format cgroup mask: %m");
3232 return serialize_item(f
, key
, s
);
/* Serialization field names for the per-unit IP accounting counters, indexed
 * by CGroupIPAccountingMetric. Used by unit_serialize()/unit_deserialize(). */
3235 static const char *ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3236 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3237 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3238 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3239 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
3242 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3243 CGroupIPAccountingMetric m
;
3250 if (unit_can_serialize(u
)) {
3251 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3256 (void) serialize_dual_timestamp(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3258 (void) serialize_dual_timestamp(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3259 (void) serialize_dual_timestamp(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3260 (void) serialize_dual_timestamp(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3261 (void) serialize_dual_timestamp(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3263 (void) serialize_dual_timestamp(f
, "condition-timestamp", &u
->condition_timestamp
);
3264 (void) serialize_dual_timestamp(f
, "assert-timestamp", &u
->assert_timestamp
);
3266 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3267 (void) serialize_bool(f
, "condition-result", u
->condition_result
);
3269 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3270 (void) serialize_bool(f
, "assert-result", u
->assert_result
);
3272 (void) serialize_bool(f
, "transient", u
->transient
);
3273 (void) serialize_bool(f
, "in-audit", u
->in_audit
);
3275 (void) serialize_bool(f
, "exported-invocation-id", u
->exported_invocation_id
);
3276 (void) serialize_bool(f
, "exported-log-level-max", u
->exported_log_level_max
);
3277 (void) serialize_bool(f
, "exported-log-extra-fields", u
->exported_log_extra_fields
);
3278 (void) serialize_bool(f
, "exported-log-rate-limit-interval", u
->exported_log_rate_limit_interval
);
3279 (void) serialize_bool(f
, "exported-log-rate-limit-burst", u
->exported_log_rate_limit_burst
);
3281 (void) serialize_item_format(f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3282 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3283 (void) serialize_item_format(f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3286 (void) serialize_item(f
, "cgroup", u
->cgroup_path
);
3288 (void) serialize_bool(f
, "cgroup-realized", u
->cgroup_realized
);
3289 (void) serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3290 (void) serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3291 (void) serialize_cgroup_mask(f
, "cgroup-invalidated-mask", u
->cgroup_invalidated_mask
);
3293 if (uid_is_valid(u
->ref_uid
))
3294 (void) serialize_item_format(f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3295 if (gid_is_valid(u
->ref_gid
))
3296 (void) serialize_item_format(f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3298 if (!sd_id128_is_null(u
->invocation_id
))
3299 (void) serialize_item_format(f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3301 bus_track_serialize(u
->bus_track
, f
, "ref");
3303 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3306 r
= unit_get_ip_accounting(u
, m
, &v
);
3308 (void) serialize_item_format(f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3311 if (serialize_jobs
) {
3314 job_serialize(u
->job
, f
);
3319 job_serialize(u
->nop_job
, f
);
3328 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3336 _cleanup_free_
char *line
= NULL
;
3337 CGroupIPAccountingMetric m
;
3341 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3343 return log_error_errno(r
, "Failed to read serialization line: %m");
3344 if (r
== 0) /* eof */
3348 if (isempty(l
)) /* End marker */
3351 k
= strcspn(l
, "=");
3359 if (streq(l
, "job")) {
3361 /* new-style serialized job */
3368 r
= job_deserialize(j
, f
);
3374 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
3380 r
= job_install_deserialized(j
);
3382 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
3386 } else /* legacy for pre-44 */
3387 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3389 } else if (streq(l
, "state-change-timestamp")) {
3390 (void) deserialize_dual_timestamp(v
, &u
->state_change_timestamp
);
3392 } else if (streq(l
, "inactive-exit-timestamp")) {
3393 (void) deserialize_dual_timestamp(v
, &u
->inactive_exit_timestamp
);
3395 } else if (streq(l
, "active-enter-timestamp")) {
3396 (void) deserialize_dual_timestamp(v
, &u
->active_enter_timestamp
);
3398 } else if (streq(l
, "active-exit-timestamp")) {
3399 (void) deserialize_dual_timestamp(v
, &u
->active_exit_timestamp
);
3401 } else if (streq(l
, "inactive-enter-timestamp")) {
3402 (void) deserialize_dual_timestamp(v
, &u
->inactive_enter_timestamp
);
3404 } else if (streq(l
, "condition-timestamp")) {
3405 (void) deserialize_dual_timestamp(v
, &u
->condition_timestamp
);
3407 } else if (streq(l
, "assert-timestamp")) {
3408 (void) deserialize_dual_timestamp(v
, &u
->assert_timestamp
);
3410 } else if (streq(l
, "condition-result")) {
3412 r
= parse_boolean(v
);
3414 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3416 u
->condition_result
= r
;
3420 } else if (streq(l
, "assert-result")) {
3422 r
= parse_boolean(v
);
3424 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3426 u
->assert_result
= r
;
3430 } else if (streq(l
, "transient")) {
3432 r
= parse_boolean(v
);
3434 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3440 } else if (streq(l
, "in-audit")) {
3442 r
= parse_boolean(v
);
3444 log_unit_debug(u
, "Failed to parse in-audit bool %s, ignoring.", v
);
3450 } else if (streq(l
, "exported-invocation-id")) {
3452 r
= parse_boolean(v
);
3454 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3456 u
->exported_invocation_id
= r
;
3460 } else if (streq(l
, "exported-log-level-max")) {
3462 r
= parse_boolean(v
);
3464 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3466 u
->exported_log_level_max
= r
;
3470 } else if (streq(l
, "exported-log-extra-fields")) {
3472 r
= parse_boolean(v
);
3474 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3476 u
->exported_log_extra_fields
= r
;
3480 } else if (streq(l
, "exported-log-rate-limit-interval")) {
3482 r
= parse_boolean(v
);
3484 log_unit_debug(u
, "Failed to parse exported log rate limit interval %s, ignoring.", v
);
3486 u
->exported_log_rate_limit_interval
= r
;
3490 } else if (streq(l
, "exported-log-rate-limit-burst")) {
3492 r
= parse_boolean(v
);
3494 log_unit_debug(u
, "Failed to parse exported log rate limit burst %s, ignoring.", v
);
3496 u
->exported_log_rate_limit_burst
= r
;
3500 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3502 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3504 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3508 } else if (streq(l
, "cpu-usage-last")) {
3510 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3512 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3516 } else if (streq(l
, "cgroup")) {
3518 r
= unit_set_cgroup_path(u
, v
);
3520 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3522 (void) unit_watch_cgroup(u
);
3525 } else if (streq(l
, "cgroup-realized")) {
3528 b
= parse_boolean(v
);
3530 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3532 u
->cgroup_realized
= b
;
3536 } else if (streq(l
, "cgroup-realized-mask")) {
3538 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3540 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3543 } else if (streq(l
, "cgroup-enabled-mask")) {
3545 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3547 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3550 } else if (streq(l
, "cgroup-invalidated-mask")) {
3552 r
= cg_mask_from_string(v
, &u
->cgroup_invalidated_mask
);
3554 log_unit_debug(u
, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v
);
3557 } else if (streq(l
, "ref-uid")) {
3560 r
= parse_uid(v
, &uid
);
3562 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3564 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3568 } else if (streq(l
, "ref-gid")) {
3571 r
= parse_gid(v
, &gid
);
3573 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3575 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3579 } else if (streq(l
, "ref")) {
3581 r
= strv_extend(&u
->deserialized_refs
, v
);
3586 } else if (streq(l
, "invocation-id")) {
3589 r
= sd_id128_from_string(v
, &id
);
3591 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3593 r
= unit_set_invocation_id(u
, id
);
3595 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3601 /* Check if this is an IP accounting metric serialization field */
3602 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++)
3603 if (streq(l
, ip_accounting_metric_field
[m
]))
3605 if (m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
) {
3608 r
= safe_atou64(v
, &c
);
3610 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3612 u
->ip_accounting_extra
[m
] = c
;
3616 if (unit_can_serialize(u
)) {
3617 r
= exec_runtime_deserialize_compat(u
, l
, v
, fds
);
3619 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3623 /* Returns positive if key was handled by the call */
3627 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3629 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3633 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3634 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3635 * before 228 where the base for timeouts was not persistent across reboots. */
3637 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3638 dual_timestamp_get(&u
->state_change_timestamp
);
3640 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3641 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3642 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3643 unit_invalidate_cgroup_bpf(u
);
/* Consumes and discards one unit's serialized data from 'f' (used when the
 * unit is unknown to this manager); reads line by line until the end marker
 * (loop scaffolding elided in this excerpt). */
3648 int unit_deserialize_skip(FILE *f
) {
3652 /* Skip serialized data for this unit. We don't know what it is. */
3655 _cleanup_free_
char *line
= NULL
;
3658 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3660 return log_error_errno(r
, "Failed to read serialization line: %m");
/* Adds dependencies from 'u' to the .device unit backing device node 'what':
 * loads the device unit, upgrades Requires= to BindsTo= when the device is
 * configured to bind its consumers, adds After= plus the requested dependency
 * (downgraded to Wants= for user managers), and finally makes the device want
 * 'u' back (conditions on 'wants' are partially elided in this excerpt). */
3672 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3674 _cleanup_free_
char *e
= NULL
;
3679 /* Adds in links to the device node that this unit is based on */
3683 if (!is_device_path(what
))
3686 /* When device units aren't supported (such as in a
3687 * container), don't create dependencies on them. */
3688 if (!unit_type_supported(UNIT_DEVICE
))
3691 r
= unit_name_from_path(what
, ".device", &e
);
3695 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3699 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3700 dep
= UNIT_BINDS_TO
;
3702 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3703 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3704 device
, true, mask
);
3709 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
/* Coldplug after deserialization: re-adds deserialized bus-track references,
 * runs the unit type's coldplug hook, and coldplugs any pending job. The
 * first error is remembered in 'r' while later steps still run (the "q < 0 &&
 * r >= 0" pattern); the coldplugged guard prevents recursion loops. */
3717 int unit_coldplug(Unit
*u
) {
3723 /* Make sure we don't enter a loop, when coldplugging recursively. */
3727 u
->coldplugged
= true;
3729 STRV_FOREACH(i
, u
->deserialized_refs
) {
3730 q
= bus_unit_track_add_name(u
, *i
);
3731 if (q
< 0 && r
>= 0)
3734 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3736 if (UNIT_VTABLE(u
)->coldplug
) {
3737 q
= UNIT_VTABLE(u
)->coldplug(u
);
3738 if (q
< 0 && r
>= 0)
3743 q
= job_coldplug(u
->job
);
3744 if (q
< 0 && r
>= 0)
/* Invokes the unit type's optional catchup() hook (processing of events missed
 * while the manager was restarting). */
3751 void unit_catchup(Unit
*u
) {
3754 if (UNIT_VTABLE(u
)->catchup
)
3755 UNIT_VTABLE(u
)->catchup(u
);
/* Returns true when the file at 'path' changed relative to the recorded
 * 'mtime' — for masked files ('path_masked') the check is instead whether the
 * file stopped being empty/ /dev/null. Virtual kernel filesystems are always
 * considered up to date. */
3758 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3764 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3765 * are never out-of-date. */
3766 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3769 if (stat(path
, &st
) < 0)
3770 /* What, cannot access this anymore? */
3774 /* For masked files check if they are still so */
3775 return !null_or_empty(&st
);
3777 /* For non-empty files check the mtime */
3778 return timespec_load(&st
.st_mtim
) > mtime
;
/* Returns true when the on-disk configuration of 'u' changed since it was
 * loaded — fragment file (masking-aware), source path, the set of drop-in
 * paths, or any individual drop-in's mtime. */
3783 bool unit_need_daemon_reload(Unit
*u
) {
3784 _cleanup_strv_free_
char **t
= NULL
;
3789 /* For unit files, we allow masking… */
3790 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3791 u
->load_state
== UNIT_MASKED
))
3794 /* Source paths should not be masked… */
3795 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3798 if (u
->load_state
== UNIT_LOADED
)
3799 (void) unit_find_dropin_paths(u
, &t
);
3800 if (!strv_equal(u
->dropin_paths
, t
))
3803 /* … any drop-ins that are masked are simply omitted from the list. */
3804 STRV_FOREACH(path
, u
->dropin_paths
)
3805 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
/* Clears the unit's failure state: delegates to the type-specific
 * reset_failed() hook and resets the start rate limiter and its hit flag. */
3811 void unit_reset_failed(Unit
*u
) {
3814 if (UNIT_VTABLE(u
)->reset_failed
)
3815 UNIT_VTABLE(u
)->reset_failed(u
);
3817 RATELIMIT_RESET(u
->start_limit
);
3818 u
->start_limit_hit
= false;
/* Returns the unit whose state this unit follows (per the type's following()
 * hook), when the type defines one. */
3821 Unit
*unit_following(Unit
*u
) {
3824 if (UNIT_VTABLE(u
)->following
)
3825 return UNIT_VTABLE(u
)->following(u
);
/* Returns true when a stop job is queued for this unit (current job only — it
 * does not inspect the active state itself). */
3830 bool unit_stop_pending(Unit
*u
) {
3833 /* This call does check the current state of the unit. It's
3834 * hence useful to be called from state change calls of the
3835 * unit itself, where the state isn't updated yet. This is
3836 * different from unit_inactive_or_pending() which checks both
3837 * the current state and for a queued job. */
3839 return u
->job
&& u
->job
->type
== JOB_STOP
;
/* True when the unit is inactive/deactivating or a stop job is queued. */
3842 bool unit_inactive_or_pending(Unit
*u
) {
3845 /* Returns true if the unit is inactive or going down */
3847 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3850 if (unit_stop_pending(u
))
/* True when the unit is active/activating or a start-like job (start,
 * reload-or-start, restart) is queued (the job null-check is elided here). */
3856 bool unit_active_or_pending(Unit
*u
) {
3859 /* Returns true if the unit is active or going up */
3861 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3865 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
/* Asks the unit type whether an automatic restart is pending; false when the
 * type provides no will_restart() hook. */
3871 bool unit_will_restart(Unit
*u
) {
3874 if (!UNIT_VTABLE(u
)->will_restart
)
3877 return UNIT_VTABLE(u
)->will_restart(u
);
/* Sends signal 'signo' to the unit's processes as selected by 'w', by
 * delegating to the type-specific kill() hook; validates who/signal first. */
3880 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3882 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3883 assert(SIGNAL_VALID(signo
));
3885 if (!UNIT_VTABLE(u
)->kill
)
3888 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
/* Builds a Set containing main_pid/control_pid (when > 0), used as the
 * exclusion list for cgroup-wide kills. Returns the set (ownership moves to
 * the caller via TAKE_PTR) — allocation-failure returns are elided here. */
3891 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3892 _cleanup_set_free_ Set
*pid_set
= NULL
;
3895 pid_set
= set_new(NULL
);
3899 /* Exclude the main/control pids from being killed via the cgroup */
3901 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3906 if (control_pid
> 0) {
3907 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3912 return TAKE_PTR(pid_set
);
3915 int unit_kill_common(
3921 sd_bus_error
*error
) {
3924 bool killed
= false;
3926 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3928 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3929 else if (main_pid
== 0)
3930 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3933 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3934 if (control_pid
< 0)
3935 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3936 else if (control_pid
== 0)
3937 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3940 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3941 if (control_pid
> 0) {
3942 if (kill(control_pid
, signo
) < 0)
3948 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3950 if (kill(main_pid
, signo
) < 0)
3956 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3957 _cleanup_set_free_ Set
*pid_set
= NULL
;
3960 /* Exclude the main/control pids from being killed via the cgroup */
3961 pid_set
= unit_pid_set(main_pid
, control_pid
);
3965 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3966 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
3972 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
/* Collects the set of units following this one, via the type's optional
 * following_set() hook. */
3978 int unit_following_set(Unit
*u
, Set
**s
) {
3982 if (UNIT_VTABLE(u
)->following_set
)
3983 return UNIT_VTABLE(u
)->following_set(u
, s
);
/* Lazily determines and caches the unit file enablement state; on lookup
 * failure the cache falls back to UNIT_FILE_BAD. Only queried when a fragment
 * path exists and the state hasn't been resolved yet. */
3989 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3994 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3995 r
= unit_file_get_state(
3996 u
->manager
->unit_file_scope
,
3999 &u
->unit_file_state
);
4001 u
->unit_file_state
= UNIT_FILE_BAD
;
4004 return u
->unit_file_state
;
/* Lazily determines and caches whether presets would enable this unit,
 * keyed on the fragment file's basename. */
4007 int unit_get_unit_file_preset(Unit
*u
) {
4010 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
4011 u
->unit_file_preset
= unit_file_query_preset(
4012 u
->manager
->unit_file_scope
,
4014 basename(u
->fragment_path
));
4016 return u
->unit_file_preset
;
/* Points 'ref' (owned by 'source') at 'target', first dropping any previous
 * target, and links the ref into the target's refs_by_target list so the
 * target knows who references it. */
4019 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4025 unit_ref_unset(ref
);
4027 ref
->source
= source
;
4028 ref
->target
= target
;
4029 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
/* Clears a UnitRef: queues the old target for GC (it may now be unreferenced),
 * unlinks the ref from the target's list, and nulls both pointers. */
4033 void unit_ref_unset(UnitRef
*ref
) {
4039 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4040 * be unreferenced now. */
4041 unit_add_to_gc_queue(ref
->target
);
4043 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4044 ref
->source
= ref
->target
= NULL
;
/* Derives a DynamicUser= user name from the unit's name prefix; when the
 * prefix is not a valid user/group name, falls back to "_du" + a 16-hex-digit
 * siphash24 of the prefix (keyed with the fixed hash_key below) so the result
 * is stable per unit name. Result is returned via *ret (caller frees). */
4047 static int user_from_unit_name(Unit
*u
, char **ret
) {
4049 static const uint8_t hash_key
[] = {
4050 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4051 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4054 _cleanup_free_
char *n
= NULL
;
4057 r
= unit_name_to_prefix(u
->id
, &n
);
4061 if (valid_user_group_name(n
)) {
4066 /* If we can't use the unit name as a user name, then let's hash it and use that */
4067 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4073 int unit_patch_contexts(Unit
*u
) {
4081 /* Patch in the manager defaults into the exec and cgroup
4082 * contexts, _after_ the rest of the settings have been
4085 ec
= unit_get_exec_context(u
);
4087 /* This only copies in the ones that need memory */
4088 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4089 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4090 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4095 if (MANAGER_IS_USER(u
->manager
) &&
4096 !ec
->working_directory
) {
4098 r
= get_home_dir(&ec
->working_directory
);
4102 /* Allow user services to run, even if the
4103 * home directory is missing */
4104 ec
->working_directory_missing_ok
= true;
4107 if (ec
->private_devices
)
4108 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4110 if (ec
->protect_kernel_modules
)
4111 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4113 if (ec
->dynamic_user
) {
4115 r
= user_from_unit_name(u
, &ec
->user
);
4121 ec
->group
= strdup(ec
->user
);
4126 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4127 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4129 ec
->private_tmp
= true;
4130 ec
->remove_ipc
= true;
4131 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4132 if (ec
->protect_home
== PROTECT_HOME_NO
)
4133 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4137 cc
= unit_get_cgroup_context(u
);
4140 if (ec
->private_devices
&&
4141 cc
->device_policy
== CGROUP_AUTO
)
4142 cc
->device_policy
= CGROUP_CLOSED
;
4144 if (ec
->root_image
&&
4145 (cc
->device_policy
!= CGROUP_AUTO
|| cc
->device_allow
)) {
4147 /* When RootImage= is specified, the following devices are touched. */
4148 r
= cgroup_add_device_allow(cc
, "/dev/loop-control", "rw");
4152 r
= cgroup_add_device_allow(cc
, "block-loop", "rwm");
4156 r
= cgroup_add_device_allow(cc
, "block-blkext", "rwm");
/* Returns the unit's embedded ExecContext by adding the type-specific byte
 * offset from the vtable to the unit pointer (NULL-offset check elided). */
4165 ExecContext
*unit_get_exec_context(Unit
*u
) {
4172 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4176 return (ExecContext
*) ((uint8_t*) u
+ offset
);
/* Returns the unit's embedded KillContext via the vtable byte offset, like
 * unit_get_exec_context() above. */
4179 KillContext
*unit_get_kill_context(Unit
*u
) {
4186 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4190 return (KillContext
*) ((uint8_t*) u
+ offset
);
/* Returns the unit's embedded CGroupContext via the vtable byte offset. */
4193 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4199 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4203 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
/* Returns the unit's ExecRuntime — note the extra dereference: the vtable
 * offset points at an ExecRuntime* field, not an embedded struct. */
4206 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4212 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4216 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
/* Chooses the directory a drop-in for this unit should be written to, based on
 * the write flags: transient units always use the transient dir, otherwise
 * UNIT_PERSISTENT/UNIT_RUNTIME select the persistent/runtime control dirs. */
4219 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4222 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4225 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4226 return u
->manager
->lookup_paths
.transient
;
4228 if (flags
& UNIT_PERSISTENT
)
4229 return u
->manager
->lookup_paths
.persistent_control
;
4231 if (flags
& UNIT_RUNTIME
)
4232 return u
->manager
->lookup_paths
.runtime_control
;
/* Escapes 's' per the UNIT_ESCAPE_* bits in 'flags' (specifier escaping and/or
 * C escaping). See the embedded comment for the *buf ownership contract. */
4237 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4243 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4244 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4245 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4246 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4247 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4250 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4251 ret
= specifier_escape(s
);
4258 if (flags
& UNIT_ESCAPE_C
) {
4271 return ret
?: (char*) s
;
4274 return ret
?: strdup(s
);
/* Escapes each entry of 'l' via unit_escape_setting() and joins the results
 * (space-separated, quoted — see the size computation comment) into one newly
 * allocated string, grown with GREEDY_REALLOC. Caller frees. */
4277 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4278 _cleanup_free_
char *result
= NULL
;
4279 size_t n
= 0, allocated
= 0;
4282 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4283 * way suitable for ExecStart= stanzas */
4285 STRV_FOREACH(i
, l
) {
4286 _cleanup_free_
char *buf
= NULL
;
4291 p
= unit_escape_setting(*i
, flags
, &buf
);
4295 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4296 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4310 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
4315 return TAKE_PTR(result
);
4318 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4319 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4320 const char *dir
, *wrapped
;
4327 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4330 data
= unit_escape_setting(data
, flags
, &escaped
);
4334 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4335 * previous section header is the same */
4337 if (flags
& UNIT_PRIVATE
) {
4338 if (!UNIT_VTABLE(u
)->private_section
)
4341 if (!u
->transient_file
|| u
->last_section_private
< 0)
4342 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4343 else if (u
->last_section_private
== 0)
4344 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4346 if (!u
->transient_file
|| u
->last_section_private
< 0)
4347 data
= strjoina("[Unit]\n", data
);
4348 else if (u
->last_section_private
> 0)
4349 data
= strjoina("\n[Unit]\n", data
);
4352 if (u
->transient_file
) {
4353 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4354 * write to the transient unit file. */
4355 fputs(data
, u
->transient_file
);
4357 if (!endswith(data
, "\n"))
4358 fputc('\n', u
->transient_file
);
4360 /* Remember which section we wrote this entry to */
4361 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4365 dir
= unit_drop_in_dir(u
, flags
);
4369 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4370 "# or an equivalent operation. Do not edit.\n",
4374 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4378 (void) mkdir_p_label(p
, 0755);
4379 r
= write_string_file_atomic_label(q
, wrapped
);
4383 r
= strv_push(&u
->dropin_paths
, q
);
4388 strv_uniq(u
->dropin_paths
);
4390 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4395 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4396 _cleanup_free_
char *p
= NULL
;
4404 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4407 va_start(ap
, format
);
4408 r
= vasprintf(&p
, format
, ap
);
4414 return unit_write_setting(u
, flags
, name
, p
);
4417 int unit_make_transient(Unit
*u
) {
4418 _cleanup_free_
char *path
= NULL
;
4423 if (!UNIT_VTABLE(u
)->can_transient
)
4426 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4428 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4432 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4433 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4435 RUN_WITH_UMASK(0022) {
4436 f
= fopen(path
, "we");
4441 safe_fclose(u
->transient_file
);
4442 u
->transient_file
= f
;
4444 free_and_replace(u
->fragment_path
, path
);
4446 u
->source_path
= mfree(u
->source_path
);
4447 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4448 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4450 u
->load_state
= UNIT_STUB
;
4452 u
->transient
= true;
4454 unit_add_to_dbus_queue(u
);
4455 unit_add_to_gc_queue(u
);
4457 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4463 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
4464 _cleanup_free_
char *comm
= NULL
;
4466 (void) get_process_comm(pid
, &comm
);
4468 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4469 only, like for example systemd's own PAM stub process. */
4470 if (comm
&& comm
[0] == '(')
4473 log_unit_notice(userdata
,
4474 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4477 signal_to_string(sig
));
4480 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
4485 case KILL_TERMINATE
:
4486 case KILL_TERMINATE_AND_LOG
:
4487 return c
->kill_signal
;
4490 return c
->final_kill_signal
;
4493 return c
->watchdog_signal
;
4496 assert_not_reached("KillOperation unknown");
4500 int unit_kill_context(
4506 bool main_pid_alien
) {
4508 bool wait_for_exit
= false, send_sighup
;
4509 cg_kill_log_func_t log_func
= NULL
;
4515 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4516 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4518 if (c
->kill_mode
== KILL_NONE
)
4521 sig
= operation_to_signal(c
, k
);
4525 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4528 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
4529 log_func
= log_kill
;
4533 log_func(main_pid
, sig
, u
);
4535 r
= kill_and_sigcont(main_pid
, sig
);
4536 if (r
< 0 && r
!= -ESRCH
) {
4537 _cleanup_free_
char *comm
= NULL
;
4538 (void) get_process_comm(main_pid
, &comm
);
4540 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
4542 if (!main_pid_alien
)
4543 wait_for_exit
= true;
4545 if (r
!= -ESRCH
&& send_sighup
)
4546 (void) kill(main_pid
, SIGHUP
);
4550 if (control_pid
> 0) {
4552 log_func(control_pid
, sig
, u
);
4554 r
= kill_and_sigcont(control_pid
, sig
);
4555 if (r
< 0 && r
!= -ESRCH
) {
4556 _cleanup_free_
char *comm
= NULL
;
4557 (void) get_process_comm(control_pid
, &comm
);
4559 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4561 wait_for_exit
= true;
4563 if (r
!= -ESRCH
&& send_sighup
)
4564 (void) kill(control_pid
, SIGHUP
);
4568 if (u
->cgroup_path
&&
4569 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4570 _cleanup_set_free_ Set
*pid_set
= NULL
;
4572 /* Exclude the main/control pids from being killed via the cgroup */
4573 pid_set
= unit_pid_set(main_pid
, control_pid
);
4577 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4579 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4583 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4584 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
4588 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4589 * we are running in a container or if this is a delegation unit, simply because cgroup
4590 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4591 * of containers it can be confused easily by left-over directories in the cgroup — which
4592 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4593 * there we get proper events. Hence rely on them. */
4595 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4596 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4597 wait_for_exit
= true;
4602 pid_set
= unit_pid_set(main_pid
, control_pid
);
4606 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4615 return wait_for_exit
;
4618 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4619 _cleanup_free_
char *p
= NULL
;
4621 UnitDependencyInfo di
;
4627 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4628 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4629 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4630 * determine which units to make themselves a dependency of. */
4632 if (!path_is_absolute(path
))
4635 r
= hashmap_ensure_allocated(&u
->requires_mounts_for
, &path_hash_ops
);
4643 path
= path_simplify(p
, false);
4645 if (!path_is_normalized(path
))
4648 if (hashmap_contains(u
->requires_mounts_for
, path
))
4651 di
= (UnitDependencyInfo
) {
4655 r
= hashmap_put(u
->requires_mounts_for
, path
, di
.data
);
4660 prefix
= alloca(strlen(path
) + 1);
4661 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4664 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4666 _cleanup_free_
char *q
= NULL
;
4668 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4680 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
4696 int unit_setup_exec_runtime(Unit
*u
) {
4704 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4707 /* Check if there already is an ExecRuntime for this unit? */
4708 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4712 /* Try to get it from somebody else */
4713 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4714 r
= exec_runtime_acquire(u
->manager
, NULL
, other
->id
, false, rt
);
4719 return exec_runtime_acquire(u
->manager
, unit_get_exec_context(u
), u
->id
, true, rt
);
4722 int unit_setup_dynamic_creds(Unit
*u
) {
4724 DynamicCreds
*dcreds
;
4729 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4731 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4733 ec
= unit_get_exec_context(u
);
4736 if (!ec
->dynamic_user
)
4739 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
4742 bool unit_type_supported(UnitType t
) {
4743 if (_unlikely_(t
< 0))
4745 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4748 if (!unit_vtable
[t
]->supported
)
4751 return unit_vtable
[t
]->supported();
4754 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4760 r
= dir_is_empty(where
);
4761 if (r
> 0 || r
== -ENOTDIR
)
4764 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4768 log_struct(LOG_NOTICE
,
4769 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4771 LOG_UNIT_INVOCATION_ID(u
),
4772 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
4776 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4777 _cleanup_free_
char *canonical_where
;
4783 r
= chase_symlinks(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
);
4785 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4789 /* We will happily ignore a trailing slash (or any redundant slashes) */
4790 if (path_equal(where
, canonical_where
))
4793 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4795 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4797 LOG_UNIT_INVOCATION_ID(u
),
4798 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
4804 bool unit_is_pristine(Unit
*u
) {
4807 /* Check if the unit already exists or is already around,
4808 * in a number of different ways. Note that to cater for unit
4809 * types such as slice, we are generally fine with units that
4810 * are marked UNIT_LOADED even though nothing was actually
4811 * loaded, as those unit types don't require a file on disk. */
4813 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4816 !strv_isempty(u
->dropin_paths
) ||
4821 pid_t
unit_control_pid(Unit
*u
) {
4824 if (UNIT_VTABLE(u
)->control_pid
)
4825 return UNIT_VTABLE(u
)->control_pid(u
);
4830 pid_t
unit_main_pid(Unit
*u
) {
4833 if (UNIT_VTABLE(u
)->main_pid
)
4834 return UNIT_VTABLE(u
)->main_pid(u
);
4839 static void unit_unref_uid_internal(
4843 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4847 assert(_manager_unref_uid
);
4849 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4850 * gid_t are actually the same time, with the same validity rules.
4852 * Drops a reference to UID/GID from a unit. */
4854 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4855 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4857 if (!uid_is_valid(*ref_uid
))
4860 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4861 *ref_uid
= UID_INVALID
;
4864 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4865 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4868 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4869 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4872 static int unit_ref_uid_internal(
4877 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4883 assert(uid_is_valid(uid
));
4884 assert(_manager_ref_uid
);
4886 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4887 * are actually the same type, and have the same validity rules.
4889 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4890 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4893 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4894 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4896 if (*ref_uid
== uid
)
4899 if (uid_is_valid(*ref_uid
)) /* Already set? */
4902 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
4910 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4911 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
4914 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4915 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
4918 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4923 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4925 if (uid_is_valid(uid
)) {
4926 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4931 if (gid_is_valid(gid
)) {
4932 q
= unit_ref_gid(u
, gid
, clean_ipc
);
4935 unit_unref_uid(u
, false);
4941 return r
> 0 || q
> 0;
4944 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4950 c
= unit_get_exec_context(u
);
4952 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4954 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4959 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4962 unit_unref_uid(u
, destroy_now
);
4963 unit_unref_gid(u
, destroy_now
);
4966 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4971 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4972 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4973 * objects when no service references the UID/GID anymore. */
4975 r
= unit_ref_uid_gid(u
, uid
, gid
);
4977 bus_unit_send_change_signal(u
);
4980 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4985 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4987 if (sd_id128_equal(u
->invocation_id
, id
))
4990 if (!sd_id128_is_null(u
->invocation_id
))
4991 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4993 if (sd_id128_is_null(id
)) {
4998 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
5002 u
->invocation_id
= id
;
5003 sd_id128_to_string(id
, u
->invocation_id_string
);
5005 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
5012 u
->invocation_id
= SD_ID128_NULL
;
5013 u
->invocation_id_string
[0] = 0;
5017 int unit_acquire_invocation_id(Unit
*u
) {
5023 r
= sd_id128_randomize(&id
);
5025 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5027 r
= unit_set_invocation_id(u
, id
);
5029 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5034 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5040 /* Copy parameters from manager */
5041 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5045 p
->confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5046 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5047 p
->prefix
= u
->manager
->prefix
;
5048 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5050 /* Copy paramaters from unit */
5051 p
->cgroup_path
= u
->cgroup_path
;
5052 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5057 int unit_fork_helper_process(Unit
*u
, const char *name
, pid_t
*ret
) {
5063 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5064 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5066 (void) unit_realize_cgroup(u
);
5068 r
= safe_fork(name
, FORK_REOPEN_LOG
, ret
);
5072 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
, -1);
5073 (void) ignore_signals(SIGPIPE
, -1);
5075 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
5077 if (u
->cgroup_path
) {
5078 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5080 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", u
->cgroup_path
);
5088 static void unit_update_dependency_mask(Unit
*u
, UnitDependency d
, Unit
*other
, UnitDependencyInfo di
) {
5091 assert(d
< _UNIT_DEPENDENCY_MAX
);
5094 if (di
.origin_mask
== 0 && di
.destination_mask
== 0) {
5095 /* No bit set anymore, let's drop the whole entry */
5096 assert_se(hashmap_remove(u
->dependencies
[d
], other
));
5097 log_unit_debug(u
, "%s lost dependency %s=%s", u
->id
, unit_dependency_to_string(d
), other
->id
);
5099 /* Mask was reduced, let's update the entry */
5100 assert_se(hashmap_update(u
->dependencies
[d
], other
, di
.data
) == 0);
5103 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5108 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5113 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
5117 UnitDependencyInfo di
;
5123 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
5126 if ((di
.origin_mask
& ~mask
) == di
.origin_mask
)
5128 di
.origin_mask
&= ~mask
;
5129 unit_update_dependency_mask(u
, d
, other
, di
);
5131 /* We updated the dependency from our unit to the other unit now. But most dependencies
5132 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5133 * all dependency types on the other unit and delete all those which point to us and
5134 * have the right mask set. */
5136 for (q
= 0; q
< _UNIT_DEPENDENCY_MAX
; q
++) {
5137 UnitDependencyInfo dj
;
5139 dj
.data
= hashmap_get(other
->dependencies
[q
], u
);
5140 if ((dj
.destination_mask
& ~mask
) == dj
.destination_mask
)
5142 dj
.destination_mask
&= ~mask
;
5144 unit_update_dependency_mask(other
, q
, u
, dj
);
5147 unit_add_to_gc_queue(other
);
5157 static int unit_export_invocation_id(Unit
*u
) {
5163 if (u
->exported_invocation_id
)
5166 if (sd_id128_is_null(u
->invocation_id
))
5169 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5170 r
= symlink_atomic(u
->invocation_id_string
, p
);
5172 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5174 u
->exported_invocation_id
= true;
5178 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5186 if (u
->exported_log_level_max
)
5189 if (c
->log_level_max
< 0)
5192 assert(c
->log_level_max
<= 7);
5194 buf
[0] = '0' + c
->log_level_max
;
5197 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5198 r
= symlink_atomic(buf
, p
);
5200 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5202 u
->exported_log_level_max
= true;
5206 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5207 _cleanup_close_
int fd
= -1;
5208 struct iovec
*iovec
;
5216 if (u
->exported_log_extra_fields
)
5219 if (c
->n_log_extra_fields
<= 0)
5222 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5223 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5225 for (i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5226 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5228 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5229 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5232 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5233 pattern
= strjoina(p
, ".XXXXXX");
5235 fd
= mkostemp_safe(pattern
);
5237 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5239 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5241 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5245 (void) fchmod(fd
, 0644);
5247 if (rename(pattern
, p
) < 0) {
5248 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5252 u
->exported_log_extra_fields
= true;
5256 (void) unlink(pattern
);
5260 static int unit_export_log_rate_limit_interval(Unit
*u
, const ExecContext
*c
) {
5261 _cleanup_free_
char *buf
= NULL
;
5268 if (u
->exported_log_rate_limit_interval
)
5271 if (c
->log_rate_limit_interval_usec
== 0)
5274 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5276 if (asprintf(&buf
, "%" PRIu64
, c
->log_rate_limit_interval_usec
) < 0)
5279 r
= symlink_atomic(buf
, p
);
5281 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5283 u
->exported_log_rate_limit_interval
= true;
5287 static int unit_export_log_rate_limit_burst(Unit
*u
, const ExecContext
*c
) {
5288 _cleanup_free_
char *buf
= NULL
;
5295 if (u
->exported_log_rate_limit_burst
)
5298 if (c
->log_rate_limit_burst
== 0)
5301 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5303 if (asprintf(&buf
, "%u", c
->log_rate_limit_burst
) < 0)
5306 r
= symlink_atomic(buf
, p
);
5308 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5310 u
->exported_log_rate_limit_burst
= true;
5314 void unit_export_state_files(Unit
*u
) {
5315 const ExecContext
*c
;
5322 if (!MANAGER_IS_SYSTEM(u
->manager
))
5325 if (MANAGER_IS_TEST_RUN(u
->manager
))
5328 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5329 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5330 * the IPC system itself and PID 1 also log to the journal.
5332 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5333 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5334 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5335 * namespace at least.
5337 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5338 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5341 (void) unit_export_invocation_id(u
);
5343 c
= unit_get_exec_context(u
);
5345 (void) unit_export_log_level_max(u
, c
);
5346 (void) unit_export_log_extra_fields(u
, c
);
5347 (void) unit_export_log_rate_limit_interval(u
, c
);
5348 (void) unit_export_log_rate_limit_burst(u
, c
);
5352 void unit_unlink_state_files(Unit
*u
) {
5360 if (!MANAGER_IS_SYSTEM(u
->manager
))
5363 /* Undoes the effect of unit_export_state() */
5365 if (u
->exported_invocation_id
) {
5366 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5369 u
->exported_invocation_id
= false;
5372 if (u
->exported_log_level_max
) {
5373 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5376 u
->exported_log_level_max
= false;
5379 if (u
->exported_log_extra_fields
) {
5380 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5383 u
->exported_log_extra_fields
= false;
5386 if (u
->exported_log_rate_limit_interval
) {
5387 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5390 u
->exported_log_rate_limit_interval
= false;
5393 if (u
->exported_log_rate_limit_burst
) {
5394 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5397 u
->exported_log_rate_limit_burst
= false;
5401 int unit_prepare_exec(Unit
*u
) {
5406 /* Prepares everything so that we can fork of a process for this unit */
5408 (void) unit_realize_cgroup(u
);
5410 if (u
->reset_accounting
) {
5411 (void) unit_reset_cpu_accounting(u
);
5412 (void) unit_reset_ip_accounting(u
);
5413 u
->reset_accounting
= false;
5416 unit_export_state_files(u
);
5418 r
= unit_setup_exec_runtime(u
);
5422 r
= unit_setup_dynamic_creds(u
);
5429 static void log_leftover(pid_t pid
, int sig
, void *userdata
) {
5430 _cleanup_free_
char *comm
= NULL
;
5432 (void) get_process_comm(pid
, &comm
);
5434 if (comm
&& comm
[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5437 log_unit_warning(userdata
,
5438 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5439 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5443 void unit_warn_leftover_processes(Unit
*u
) {
5446 (void) unit_pick_cgroup_path(u
);
5448 if (!u
->cgroup_path
)
5451 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_leftover
, u
);
5454 bool unit_needs_console(Unit
*u
) {
5456 UnitActiveState state
;
5460 state
= unit_active_state(u
);
5462 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5465 if (UNIT_VTABLE(u
)->needs_console
)
5466 return UNIT_VTABLE(u
)->needs_console(u
);
5468 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5469 ec
= unit_get_exec_context(u
);
5473 return exec_context_may_touch_console(ec
);
5476 const char *unit_label_path(Unit
*u
) {
5479 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5480 * when validating access checks. */
5482 p
= u
->source_path
?: u
->fragment_path
;
5486 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5487 if (path_equal(p
, "/dev/null"))
5493 int unit_pid_attachable(Unit
*u
, pid_t pid
, sd_bus_error
*error
) {
5498 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5499 * and not a kernel thread either */
5501 /* First, a simple range check */
5502 if (!pid_is_valid(pid
))
5503 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier " PID_FMT
" is not valid.", pid
);
5505 /* Some extra safety check */
5506 if (pid
== 1 || pid
== getpid_cached())
5507 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
);
5509 /* Don't even begin to bother with kernel threads */
5510 r
= is_kernel_thread(pid
);
5512 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
);
5514 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
);
5516 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
);
5521 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5522 [COLLECT_INACTIVE
] = "inactive",
5523 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5526 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);