1 /* SPDX-License-Identifier: LGPL-2.1+ */
11 #include "sd-messages.h"
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
24 #include "fileio-label.h"
25 #include "format-util.h"
27 #include "id128-util.h"
29 #include "load-dropin.h"
30 #include "load-fragment.h"
35 #include "parse-util.h"
36 #include "path-util.h"
37 #include "process-util.h"
38 #include "serialize.h"
40 #include "signal-util.h"
41 #include "sparse-endian.h"
43 #include "specifier.h"
44 #include "stat-util.h"
45 #include "stdio-util.h"
46 #include "string-table.h"
47 #include "string-util.h"
49 #include "umask-util.h"
50 #include "unit-name.h"
52 #include "user-util.h"
55 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
56 [UNIT_SERVICE
] = &service_vtable
,
57 [UNIT_SOCKET
] = &socket_vtable
,
58 [UNIT_TARGET
] = &target_vtable
,
59 [UNIT_DEVICE
] = &device_vtable
,
60 [UNIT_MOUNT
] = &mount_vtable
,
61 [UNIT_AUTOMOUNT
] = &automount_vtable
,
62 [UNIT_SWAP
] = &swap_vtable
,
63 [UNIT_TIMER
] = &timer_vtable
,
64 [UNIT_PATH
] = &path_vtable
,
65 [UNIT_SLICE
] = &slice_vtable
,
66 [UNIT_SCOPE
] = &scope_vtable
,
69 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
71 Unit
*unit_new(Manager
*m
, size_t size
) {
75 assert(size
>= sizeof(Unit
));
81 u
->names
= set_new(&string_hash_ops
);
86 u
->type
= _UNIT_TYPE_INVALID
;
87 u
->default_dependencies
= true;
88 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
89 u
->unit_file_preset
= -1;
90 u
->on_failure_job_mode
= JOB_REPLACE
;
91 u
->cgroup_inotify_wd
= -1;
92 u
->job_timeout
= USEC_INFINITY
;
93 u
->job_running_timeout
= USEC_INFINITY
;
94 u
->ref_uid
= UID_INVALID
;
95 u
->ref_gid
= GID_INVALID
;
96 u
->cpu_usage_last
= NSEC_INFINITY
;
97 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
99 u
->ip_accounting_ingress_map_fd
= -1;
100 u
->ip_accounting_egress_map_fd
= -1;
101 u
->ipv4_allow_map_fd
= -1;
102 u
->ipv6_allow_map_fd
= -1;
103 u
->ipv4_deny_map_fd
= -1;
104 u
->ipv6_deny_map_fd
= -1;
106 u
->last_section_private
= -1;
108 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
109 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
114 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
115 _cleanup_(unit_freep
) Unit
*u
= NULL
;
118 u
= unit_new(m
, size
);
122 r
= unit_add_name(u
, name
);
131 bool unit_has_name(Unit
*u
, const char *name
) {
135 return set_contains(u
->names
, (char*) name
);
138 static void unit_init(Unit
*u
) {
145 assert(u
->type
>= 0);
147 cc
= unit_get_cgroup_context(u
);
149 cgroup_context_init(cc
);
151 /* Copy in the manager defaults into the cgroup
152 * context, _before_ the rest of the settings have
153 * been initialized */
155 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
156 cc
->io_accounting
= u
->manager
->default_io_accounting
;
157 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
158 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
159 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
160 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
161 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
163 if (u
->type
!= UNIT_SLICE
)
164 cc
->tasks_max
= u
->manager
->default_tasks_max
;
167 ec
= unit_get_exec_context(u
);
169 exec_context_init(ec
);
171 ec
->keyring_mode
= MANAGER_IS_SYSTEM(u
->manager
) ?
172 EXEC_KEYRING_SHARED
: EXEC_KEYRING_INHERIT
;
175 kc
= unit_get_kill_context(u
);
177 kill_context_init(kc
);
179 if (UNIT_VTABLE(u
)->init
)
180 UNIT_VTABLE(u
)->init(u
);
183 int unit_add_name(Unit
*u
, const char *text
) {
184 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
191 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
196 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
205 if (set_contains(u
->names
, s
))
207 if (hashmap_contains(u
->manager
->units
, s
))
210 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
213 t
= unit_name_to_type(s
);
217 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
220 r
= unit_name_to_instance(s
, &i
);
224 if (i
&& !unit_type_may_template(t
))
227 /* Ensure that this unit is either instanced or not instanced,
228 * but not both. Note that we do allow names with different
229 * instance names however! */
230 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
233 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
236 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
239 r
= set_put(u
->names
, s
);
244 r
= hashmap_put(u
->manager
->units
, s
, u
);
246 (void) set_remove(u
->names
, s
);
250 if (u
->type
== _UNIT_TYPE_INVALID
) {
253 u
->instance
= TAKE_PTR(i
);
255 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
262 unit_add_to_dbus_queue(u
);
266 int unit_choose_id(Unit
*u
, const char *name
) {
267 _cleanup_free_
char *t
= NULL
;
274 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
279 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
286 /* Selects one of the names of this unit as the id */
287 s
= set_get(u
->names
, (char*) name
);
291 /* Determine the new instance from the new id */
292 r
= unit_name_to_instance(s
, &i
);
301 unit_add_to_dbus_queue(u
);
306 int unit_set_description(Unit
*u
, const char *description
) {
311 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
315 unit_add_to_dbus_queue(u
);
320 bool unit_may_gc(Unit
*u
) {
321 UnitActiveState state
;
326 /* Checks whether the unit is ready to be unloaded for garbage collection.
327 * Returns true when the unit may be collected, and false if there's some
328 * reason to keep it loaded.
330 * References from other units are *not* checked here. Instead, this is done
331 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
340 state
= unit_active_state(u
);
342 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
343 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
344 UNIT_VTABLE(u
)->release_resources
)
345 UNIT_VTABLE(u
)->release_resources(u
);
350 if (sd_bus_track_count(u
->bus_track
) > 0)
353 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
354 switch (u
->collect_mode
) {
356 case COLLECT_INACTIVE
:
357 if (state
!= UNIT_INACTIVE
)
362 case COLLECT_INACTIVE_OR_FAILED
:
363 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
369 assert_not_reached("Unknown garbage collection mode");
372 if (u
->cgroup_path
) {
373 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
374 * around. Units with active processes should never be collected. */
376 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
378 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", u
->cgroup_path
);
383 if (UNIT_VTABLE(u
)->may_gc
&& !UNIT_VTABLE(u
)->may_gc(u
))
389 void unit_add_to_load_queue(Unit
*u
) {
391 assert(u
->type
!= _UNIT_TYPE_INVALID
);
393 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
396 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
397 u
->in_load_queue
= true;
400 void unit_add_to_cleanup_queue(Unit
*u
) {
403 if (u
->in_cleanup_queue
)
406 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
407 u
->in_cleanup_queue
= true;
410 void unit_add_to_gc_queue(Unit
*u
) {
413 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
419 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
420 u
->in_gc_queue
= true;
423 void unit_add_to_dbus_queue(Unit
*u
) {
425 assert(u
->type
!= _UNIT_TYPE_INVALID
);
427 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
430 /* Shortcut things if nobody cares */
431 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
432 sd_bus_track_count(u
->bus_track
) <= 0 &&
433 set_isempty(u
->manager
->private_buses
)) {
434 u
->sent_dbus_new_signal
= true;
438 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
439 u
->in_dbus_queue
= true;
442 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
445 if (u
->in_stop_when_unneeded_queue
)
448 if (!u
->stop_when_unneeded
)
451 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
454 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
455 u
->in_stop_when_unneeded_queue
= true;
458 static void bidi_set_free(Unit
*u
, Hashmap
*h
) {
465 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
467 HASHMAP_FOREACH_KEY(v
, other
, h
, i
) {
470 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
471 hashmap_remove(other
->dependencies
[d
], u
);
473 unit_add_to_gc_queue(other
);
479 static void unit_remove_transient(Unit
*u
) {
487 if (u
->fragment_path
)
488 (void) unlink(u
->fragment_path
);
490 STRV_FOREACH(i
, u
->dropin_paths
) {
491 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
493 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
497 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
501 /* Only drop transient drop-ins */
502 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
510 static void unit_free_requires_mounts_for(Unit
*u
) {
514 _cleanup_free_
char *path
;
516 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
520 char s
[strlen(path
) + 1];
522 PATH_FOREACH_PREFIX_MORE(s
, path
) {
526 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
530 (void) set_remove(x
, u
);
532 if (set_isempty(x
)) {
533 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
541 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
544 static void unit_done(Unit
*u
) {
553 if (UNIT_VTABLE(u
)->done
)
554 UNIT_VTABLE(u
)->done(u
);
556 ec
= unit_get_exec_context(u
);
558 exec_context_done(ec
);
560 cc
= unit_get_cgroup_context(u
);
562 cgroup_context_done(cc
);
565 void unit_free(Unit
*u
) {
573 u
->transient_file
= safe_fclose(u
->transient_file
);
575 if (!MANAGER_IS_RELOADING(u
->manager
))
576 unit_remove_transient(u
);
578 bus_unit_send_removed_signal(u
);
582 unit_dequeue_rewatch_pids(u
);
584 sd_bus_slot_unref(u
->match_bus_slot
);
585 sd_bus_track_unref(u
->bus_track
);
586 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
588 unit_free_requires_mounts_for(u
);
590 SET_FOREACH(t
, u
->names
, i
)
591 hashmap_remove_value(u
->manager
->units
, t
, u
);
593 if (!sd_id128_is_null(u
->invocation_id
))
594 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
608 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
609 bidi_set_free(u
, u
->dependencies
[d
]);
612 manager_unref_console(u
->manager
);
614 unit_release_cgroup(u
);
616 if (!MANAGER_IS_RELOADING(u
->manager
))
617 unit_unlink_state_files(u
);
619 unit_unref_uid_gid(u
, false);
621 (void) manager_update_failed_units(u
->manager
, u
, false);
622 set_remove(u
->manager
->startup_units
, u
);
624 unit_unwatch_all_pids(u
);
626 unit_ref_unset(&u
->slice
);
627 while (u
->refs_by_target
)
628 unit_ref_unset(u
->refs_by_target
);
630 if (u
->type
!= _UNIT_TYPE_INVALID
)
631 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
633 if (u
->in_load_queue
)
634 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
636 if (u
->in_dbus_queue
)
637 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
640 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
642 if (u
->in_cgroup_realize_queue
)
643 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
645 if (u
->in_cgroup_empty_queue
)
646 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
648 if (u
->in_cleanup_queue
)
649 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
651 if (u
->in_target_deps_queue
)
652 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
654 if (u
->in_stop_when_unneeded_queue
)
655 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
657 safe_close(u
->ip_accounting_ingress_map_fd
);
658 safe_close(u
->ip_accounting_egress_map_fd
);
660 safe_close(u
->ipv4_allow_map_fd
);
661 safe_close(u
->ipv6_allow_map_fd
);
662 safe_close(u
->ipv4_deny_map_fd
);
663 safe_close(u
->ipv6_deny_map_fd
);
665 bpf_program_unref(u
->ip_bpf_ingress
);
666 bpf_program_unref(u
->ip_bpf_ingress_installed
);
667 bpf_program_unref(u
->ip_bpf_egress
);
668 bpf_program_unref(u
->ip_bpf_egress_installed
);
670 bpf_program_unref(u
->bpf_device_control_installed
);
672 condition_free_list(u
->conditions
);
673 condition_free_list(u
->asserts
);
675 free(u
->description
);
676 strv_free(u
->documentation
);
677 free(u
->fragment_path
);
678 free(u
->source_path
);
679 strv_free(u
->dropin_paths
);
682 free(u
->job_timeout_reboot_arg
);
684 set_free_free(u
->names
);
691 UnitActiveState
unit_active_state(Unit
*u
) {
694 if (u
->load_state
== UNIT_MERGED
)
695 return unit_active_state(unit_follow_merge(u
));
697 /* After a reload it might happen that a unit is not correctly
698 * loaded but still has a process around. That's why we won't
699 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
701 return UNIT_VTABLE(u
)->active_state(u
);
704 const char* unit_sub_state_to_string(Unit
*u
) {
707 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
710 static int set_complete_move(Set
**s
, Set
**other
) {
718 return set_move(*s
, *other
);
720 *s
= TAKE_PTR(*other
);
725 static int hashmap_complete_move(Hashmap
**s
, Hashmap
**other
) {
733 return hashmap_move(*s
, *other
);
735 *s
= TAKE_PTR(*other
);
740 static int merge_names(Unit
*u
, Unit
*other
) {
748 r
= set_complete_move(&u
->names
, &other
->names
);
752 set_free_free(other
->names
);
756 SET_FOREACH(t
, u
->names
, i
)
757 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
762 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
767 assert(d
< _UNIT_DEPENDENCY_MAX
);
770 * If u does not have this dependency set allocated, there is no need
771 * to reserve anything. In that case other's set will be transferred
772 * as a whole to u by complete_move().
774 if (!u
->dependencies
[d
])
777 /* merge_dependencies() will skip a u-on-u dependency */
778 n_reserve
= hashmap_size(other
->dependencies
[d
]) - !!hashmap_get(other
->dependencies
[d
], u
);
780 return hashmap_reserve(u
->dependencies
[d
], n_reserve
);
783 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
789 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
793 assert(d
< _UNIT_DEPENDENCY_MAX
);
795 /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
796 HASHMAP_FOREACH_KEY(v
, back
, other
->dependencies
[d
], i
) {
799 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
800 * pointers back, and let's fix them up, to instead point to 'u'. */
802 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
804 /* Do not add dependencies between u and itself. */
805 if (hashmap_remove(back
->dependencies
[k
], other
))
806 maybe_warn_about_dependency(u
, other_id
, k
);
808 UnitDependencyInfo di_u
, di_other
, di_merged
;
810 /* Let's drop this dependency between "back" and "other", and let's create it between
811 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
812 * and any such dependency which might already exist */
814 di_other
.data
= hashmap_get(back
->dependencies
[k
], other
);
816 continue; /* dependency isn't set, let's try the next one */
818 di_u
.data
= hashmap_get(back
->dependencies
[k
], u
);
820 di_merged
= (UnitDependencyInfo
) {
821 .origin_mask
= di_u
.origin_mask
| di_other
.origin_mask
,
822 .destination_mask
= di_u
.destination_mask
| di_other
.destination_mask
,
825 r
= hashmap_remove_and_replace(back
->dependencies
[k
], other
, u
, di_merged
.data
);
827 log_warning_errno(r
, "Failed to remove/replace: back=%s other=%s u=%s: %m", back
->id
, other_id
, u
->id
);
830 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
836 /* Also do not move dependencies on u to itself */
837 back
= hashmap_remove(other
->dependencies
[d
], u
);
839 maybe_warn_about_dependency(u
, other_id
, d
);
841 /* The move cannot fail. The caller must have performed a reservation. */
842 assert_se(hashmap_complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
844 other
->dependencies
[d
] = hashmap_free(other
->dependencies
[d
]);
847 int unit_merge(Unit
*u
, Unit
*other
) {
849 const char *other_id
= NULL
;
854 assert(u
->manager
== other
->manager
);
855 assert(u
->type
!= _UNIT_TYPE_INVALID
);
857 other
= unit_follow_merge(other
);
862 if (u
->type
!= other
->type
)
865 if (!u
->instance
!= !other
->instance
)
868 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
871 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
880 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
884 other_id
= strdupa(other
->id
);
886 /* Make reservations to ensure merge_dependencies() won't fail */
887 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
888 r
= reserve_dependencies(u
, other
, d
);
890 * We don't rollback reservations if we fail. We don't have
891 * a way to undo reservations. A reservation is not a leak.
898 r
= merge_names(u
, other
);
902 /* Redirect all references */
903 while (other
->refs_by_target
)
904 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
906 /* Merge dependencies */
907 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
908 merge_dependencies(u
, other
, other_id
, d
);
910 other
->load_state
= UNIT_MERGED
;
911 other
->merged_into
= u
;
913 /* If there is still some data attached to the other node, we
914 * don't need it anymore, and can free it. */
915 if (other
->load_state
!= UNIT_STUB
)
916 if (UNIT_VTABLE(other
)->done
)
917 UNIT_VTABLE(other
)->done(other
);
919 unit_add_to_dbus_queue(u
);
920 unit_add_to_cleanup_queue(other
);
925 int unit_merge_by_name(Unit
*u
, const char *name
) {
926 _cleanup_free_
char *s
= NULL
;
933 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
937 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
944 other
= manager_get_unit(u
->manager
, name
);
946 return unit_merge(u
, other
);
948 return unit_add_name(u
, name
);
951 Unit
* unit_follow_merge(Unit
*u
) {
954 while (u
->load_state
== UNIT_MERGED
)
955 assert_se(u
= u
->merged_into
);
960 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
961 ExecDirectoryType dt
;
968 if (c
->working_directory
&& !c
->working_directory_missing_ok
) {
969 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
974 if (c
->root_directory
) {
975 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
981 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
986 for (dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
987 if (!u
->manager
->prefix
[dt
])
990 STRV_FOREACH(dp
, c
->directories
[dt
].paths
) {
991 _cleanup_free_
char *p
;
993 p
= strjoin(u
->manager
->prefix
[dt
], "/", *dp
);
997 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1003 if (!MANAGER_IS_SYSTEM(u
->manager
))
1006 if (c
->private_tmp
) {
1009 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
1010 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1015 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1020 if (!IN_SET(c
->std_output
,
1021 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1022 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1023 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
1024 !IN_SET(c
->std_error
,
1025 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1026 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1027 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
1030 /* If syslog or kernel logging is requested, make sure our own
1031 * logging daemon is run first. */
1033 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1040 const char *unit_description(Unit
*u
) {
1044 return u
->description
;
1046 return strna(u
->id
);
1049 static void print_unit_dependency_mask(FILE *f
, const char *kind
, UnitDependencyMask mask
, bool *space
) {
1051 UnitDependencyMask mask
;
1054 { UNIT_DEPENDENCY_FILE
, "file" },
1055 { UNIT_DEPENDENCY_IMPLICIT
, "implicit" },
1056 { UNIT_DEPENDENCY_DEFAULT
, "default" },
1057 { UNIT_DEPENDENCY_UDEV
, "udev" },
1058 { UNIT_DEPENDENCY_PATH
, "path" },
1059 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT
, "mountinfo-implicit" },
1060 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT
, "mountinfo-default" },
1061 { UNIT_DEPENDENCY_PROC_SWAP
, "proc-swap" },
1069 for (i
= 0; i
< ELEMENTSOF(table
); i
++) {
1074 if (FLAGS_SET(mask
, table
[i
].mask
)) {
1082 fputs(table
[i
].name
, f
);
1084 mask
&= ~table
[i
].mask
;
1091 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
1095 const char *prefix2
;
1097 timestamp0
[FORMAT_TIMESTAMP_MAX
],
1098 timestamp1
[FORMAT_TIMESTAMP_MAX
],
1099 timestamp2
[FORMAT_TIMESTAMP_MAX
],
1100 timestamp3
[FORMAT_TIMESTAMP_MAX
],
1101 timestamp4
[FORMAT_TIMESTAMP_MAX
],
1102 timespan
[FORMAT_TIMESPAN_MAX
];
1104 _cleanup_set_free_ Set
*following_set
= NULL
;
1110 assert(u
->type
>= 0);
1112 prefix
= strempty(prefix
);
1113 prefix2
= strjoina(prefix
, "\t");
1117 "%s\tDescription: %s\n"
1118 "%s\tInstance: %s\n"
1119 "%s\tUnit Load State: %s\n"
1120 "%s\tUnit Active State: %s\n"
1121 "%s\tState Change Timestamp: %s\n"
1122 "%s\tInactive Exit Timestamp: %s\n"
1123 "%s\tActive Enter Timestamp: %s\n"
1124 "%s\tActive Exit Timestamp: %s\n"
1125 "%s\tInactive Enter Timestamp: %s\n"
1127 "%s\tNeed Daemon Reload: %s\n"
1128 "%s\tTransient: %s\n"
1129 "%s\tPerpetual: %s\n"
1130 "%s\tGarbage Collection Mode: %s\n"
1133 "%s\tCGroup realized: %s\n",
1135 prefix
, unit_description(u
),
1136 prefix
, strna(u
->instance
),
1137 prefix
, unit_load_state_to_string(u
->load_state
),
1138 prefix
, unit_active_state_to_string(unit_active_state(u
)),
1139 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
1140 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
1141 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
1142 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
1143 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
1144 prefix
, yes_no(unit_may_gc(u
)),
1145 prefix
, yes_no(unit_need_daemon_reload(u
)),
1146 prefix
, yes_no(u
->transient
),
1147 prefix
, yes_no(u
->perpetual
),
1148 prefix
, collect_mode_to_string(u
->collect_mode
),
1149 prefix
, strna(unit_slice_name(u
)),
1150 prefix
, strna(u
->cgroup_path
),
1151 prefix
, yes_no(u
->cgroup_realized
));
1153 if (u
->cgroup_realized_mask
!= 0) {
1154 _cleanup_free_
char *s
= NULL
;
1155 (void) cg_mask_to_string(u
->cgroup_realized_mask
, &s
);
1156 fprintf(f
, "%s\tCGroup realized mask: %s\n", prefix
, strnull(s
));
1158 if (u
->cgroup_enabled_mask
!= 0) {
1159 _cleanup_free_
char *s
= NULL
;
1160 (void) cg_mask_to_string(u
->cgroup_enabled_mask
, &s
);
1161 fprintf(f
, "%s\tCGroup enabled mask: %s\n", prefix
, strnull(s
));
1163 m
= unit_get_own_mask(u
);
1165 _cleanup_free_
char *s
= NULL
;
1166 (void) cg_mask_to_string(m
, &s
);
1167 fprintf(f
, "%s\tCGroup own mask: %s\n", prefix
, strnull(s
));
1169 m
= unit_get_members_mask(u
);
1171 _cleanup_free_
char *s
= NULL
;
1172 (void) cg_mask_to_string(m
, &s
);
1173 fprintf(f
, "%s\tCGroup members mask: %s\n", prefix
, strnull(s
));
1176 SET_FOREACH(t
, u
->names
, i
)
1177 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
1179 if (!sd_id128_is_null(u
->invocation_id
))
1180 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
1181 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
1183 STRV_FOREACH(j
, u
->documentation
)
1184 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
1186 following
= unit_following(u
);
1188 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
1190 r
= unit_following_set(u
, &following_set
);
1194 SET_FOREACH(other
, following_set
, i
)
1195 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1198 if (u
->fragment_path
)
1199 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1202 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1204 STRV_FOREACH(j
, u
->dropin_paths
)
1205 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1207 if (u
->failure_action
!= EMERGENCY_ACTION_NONE
)
1208 fprintf(f
, "%s\tFailure Action: %s\n", prefix
, emergency_action_to_string(u
->failure_action
));
1209 if (u
->success_action
!= EMERGENCY_ACTION_NONE
)
1210 fprintf(f
, "%s\tSuccess Action: %s\n", prefix
, emergency_action_to_string(u
->success_action
));
1212 if (u
->job_timeout
!= USEC_INFINITY
)
1213 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1215 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1216 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1218 if (u
->job_timeout_reboot_arg
)
1219 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1221 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1222 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1224 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1226 "%s\tCondition Timestamp: %s\n"
1227 "%s\tCondition Result: %s\n",
1228 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1229 prefix
, yes_no(u
->condition_result
));
1231 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1233 "%s\tAssert Timestamp: %s\n"
1234 "%s\tAssert Result: %s\n",
1235 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1236 prefix
, yes_no(u
->assert_result
));
1238 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1239 UnitDependencyInfo di
;
1242 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
1245 fprintf(f
, "%s\t%s: %s (", prefix
, unit_dependency_to_string(d
), other
->id
);
1247 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1248 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1254 if (!hashmap_isempty(u
->requires_mounts_for
)) {
1255 UnitDependencyInfo di
;
1258 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1261 fprintf(f
, "%s\tRequiresMountsFor: %s (", prefix
, path
);
1263 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1264 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1270 if (u
->load_state
== UNIT_LOADED
) {
1273 "%s\tStopWhenUnneeded: %s\n"
1274 "%s\tRefuseManualStart: %s\n"
1275 "%s\tRefuseManualStop: %s\n"
1276 "%s\tDefaultDependencies: %s\n"
1277 "%s\tOnFailureJobMode: %s\n"
1278 "%s\tIgnoreOnIsolate: %s\n",
1279 prefix
, yes_no(u
->stop_when_unneeded
),
1280 prefix
, yes_no(u
->refuse_manual_start
),
1281 prefix
, yes_no(u
->refuse_manual_stop
),
1282 prefix
, yes_no(u
->default_dependencies
),
1283 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1284 prefix
, yes_no(u
->ignore_on_isolate
));
1286 if (UNIT_VTABLE(u
)->dump
)
1287 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1289 } else if (u
->load_state
== UNIT_MERGED
)
1291 "%s\tMerged into: %s\n",
1292 prefix
, u
->merged_into
->id
);
1293 else if (u
->load_state
== UNIT_ERROR
)
1294 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1296 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1297 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1300 job_dump(u
->job
, f
, prefix2
);
1303 job_dump(u
->nop_job
, f
, prefix2
);
1306 /* Common implementation for multiple backends */
1307 int unit_load_fragment_and_dropin(Unit
*u
) {
1312 /* Load a .{service,socket,...} file */
1313 r
= unit_load_fragment(u
);
1317 if (u
->load_state
== UNIT_STUB
)
1320 /* Load drop-in directory data. If u is an alias, we might be reloading the
1321 * target unit needlessly. But we cannot be sure which drops-ins have already
1322 * been loaded and which not, at least without doing complicated book-keeping,
1323 * so let's always reread all drop-ins. */
1324 return unit_load_dropin(unit_follow_merge(u
));
1327 /* Common implementation for multiple backends */
1328 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1333 /* Same as unit_load_fragment_and_dropin(), but whether
1334 * something can be loaded or not doesn't matter. */
1336 /* Load a .service/.socket/.slice/… file */
1337 r
= unit_load_fragment(u
);
1341 if (u
->load_state
== UNIT_STUB
)
1342 u
->load_state
= UNIT_LOADED
;
1344 /* Load drop-in directory data */
1345 return unit_load_dropin(unit_follow_merge(u
));
1348 void unit_add_to_target_deps_queue(Unit
*u
) {
1349 Manager
*m
= u
->manager
;
1353 if (u
->in_target_deps_queue
)
1356 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1357 u
->in_target_deps_queue
= true;
1360 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1364 if (target
->type
!= UNIT_TARGET
)
1367 /* Only add the dependency if both units are loaded, so that
1368 * that loop check below is reliable */
1369 if (u
->load_state
!= UNIT_LOADED
||
1370 target
->load_state
!= UNIT_LOADED
)
1373 /* If either side wants no automatic dependencies, then let's
1375 if (!u
->default_dependencies
||
1376 !target
->default_dependencies
)
1379 /* Don't create loops */
1380 if (hashmap_get(target
->dependencies
[UNIT_BEFORE
], u
))
1383 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1386 static int unit_add_slice_dependencies(Unit
*u
) {
1387 UnitDependencyMask mask
;
1390 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1393 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1394 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1396 mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1398 if (UNIT_ISSET(u
->slice
))
1399 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true, mask
);
1401 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1404 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
/* For each path in RequiresMountsFor=, add After= (and, if the mount unit has a
 * fragment, Requires=) dependencies on the mount units covering every prefix of
 * the path. */
1407 static int unit_add_mount_dependencies(Unit
*u
) {
1408 UnitDependencyInfo di
;
1415 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1416 char prefix
[strlen(path
) + 1];
1418 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1419 _cleanup_free_
char *p
= NULL
;
1422 r
= unit_name_from_path(prefix
, ".mount", &p
);
1426 m
= manager_get_unit(u
->manager
, p
);
1428 /* Make sure to load the mount unit if
1429 * it exists. If so the dependencies
1430 * on this unit will be added later
1431 * during the loading of the mount unit. */
1433 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1439 if (m
->load_state
!= UNIT_LOADED
)
1442 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1446 if (m
->fragment_path
) {
1447 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
/* Register @u in the manager's startup_units set if any Startup*= cgroup
 * property (CPU shares, IO weight, blockio weight) is configured. */
1457 static int unit_add_startup_units(Unit
*u
) {
1461 c
= unit_get_cgroup_context(u
);
1465 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1466 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1467 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1470 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1474 return set_put(u
->manager
->startup_units
, u
);
/* Main unit loading entry point: dequeues the unit from the load queue,
 * flushes a pending transient file, invokes the per-type ->load() hook, and on
 * success sets up slice/mount/startup dependencies and sanity-checks the
 * configuration. On failure maps the error to a load state (UNIT_NOT_FOUND /
 * UNIT_BAD_SETTING / other). */
1477 int unit_load(Unit
*u
) {
1482 if (u
->in_load_queue
) {
1483 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1484 u
->in_load_queue
= false;
1487 if (u
->type
== _UNIT_TYPE_INVALID
)
1490 if (u
->load_state
!= UNIT_STUB
)
1493 if (u
->transient_file
) {
1494 r
= fflush_and_check(u
->transient_file
);
1498 u
->transient_file
= safe_fclose(u
->transient_file
);
1499 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1502 if (UNIT_VTABLE(u
)->load
) {
1503 r
= UNIT_VTABLE(u
)->load(u
);
1508 if (u
->load_state
== UNIT_STUB
) {
1513 if (u
->load_state
== UNIT_LOADED
) {
1514 unit_add_to_target_deps_queue(u
);
1516 r
= unit_add_slice_dependencies(u
);
1520 r
= unit_add_mount_dependencies(u
);
1524 r
= unit_add_startup_units(u
);
1528 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1529 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1534 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1535 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1537 unit_update_cgroup_members_masks(u
);
1540 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1542 unit_add_to_dbus_queue(unit_follow_merge(u
));
1543 unit_add_to_gc_queue(u
);
1548 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1549 * return ENOEXEC to ensure units are placed in this state after loading */
1551 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1552 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1556 unit_add_to_dbus_queue(u
);
1557 unit_add_to_gc_queue(u
);
1559 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
/* Evaluate a Condition*= / Assert*= list: all non-trigger entries must hold
 * and, if any trigger ("|") entries exist, at least one of them must hold.
 * Conditions whose result cannot be determined are logged and treated as
 * failed. */
1562 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1569 /* If the condition list is empty, then it is true */
1573 /* Otherwise, if all of the non-trigger conditions apply and
1574 * if any of the trigger conditions apply (unless there are
1575 * none) we return true */
1576 LIST_FOREACH(conditions
, c
, first
) {
1579 r
= condition_test(c
);
1582 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1584 c
->trigger
? "|" : "",
1585 c
->negate
? "!" : "",
1591 c
->trigger
? "|" : "",
1592 c
->negate
? "!" : "",
1594 condition_result_to_string(c
->result
));
1596 if (!c
->trigger
&& r
<= 0)
1599 if (c
->trigger
&& triggered
<= 0)
1603 return triggered
!= 0;
/* Run the unit's ConditionXyz= checks, recording the result and a timestamp on
 * the unit. */
1606 static bool unit_condition_test(Unit
*u
) {
1609 dual_timestamp_get(&u
->condition_timestamp
);
1610 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1612 return u
->condition_result
;
/* Run the unit's AssertXyz= checks, recording the result and a timestamp on
 * the unit. Unlike conditions, a failed assert fails the start job. */
1615 static bool unit_assert_test(Unit
*u
) {
1618 dual_timestamp_get(&u
->assert_timestamp
);
1619 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1621 return u
->assert_result
;
/* Print a console status line for @u, formatting the (non-literal) message
 * format with the unit's description. */
1624 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1625 DISABLE_WARNING_FORMAT_NONLITERAL
;
1626 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
/* Check the unit's start rate limit. Below the limit, clears the hit flag;
 * once hit, marks the unit and triggers the configured StartLimitAction= via
 * emergency_action(). */
1631 int unit_start_limit_test(Unit
*u
) {
1636 if (ratelimit_below(&u
->start_limit
)) {
1637 u
->start_limit_hit
= false;
1641 log_unit_warning(u
, "Start request repeated too quickly.");
1642 u
->start_limit_hit
= true;
1644 reason
= strjoina("unit ", u
->id
, " failed");
1646 return emergency_action(u
->manager
, u
->start_limit_action
,
1647 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1648 u
->reboot_arg
, reason
);
/* Whether to ask for interactive confirmation before spawning processes for
 * @u; skipped entirely when the manager has confirm-spawn disabled. */
1651 bool unit_shall_confirm_spawn(Unit
*u
) {
1654 if (manager_is_confirm_spawn_disabled(u
->manager
))
1657 /* For some reasons units remaining in the same process group
1658 * as PID 1 fail to acquire the console even if it's not used
1659 * by any process. So skip the confirmation question for them. */
1660 return !unit_get_exec_context(u
)->same_pgrp
;
/* Verify that every BindsTo= peer that is also ordered via After= is active
 * (or reloading); logs and reports failure otherwise. */
1663 static bool unit_verify_deps(Unit
*u
) {
1670 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1671 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1672 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1673 * conjunction with After= as for them any such check would make things entirely racy. */
1675 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1677 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1680 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1681 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
/* Errors:
1690 * -EBADR: This unit type does not support starting.
1691 * -EALREADY: Unit is already started.
1692 * -EAGAIN: An operation is already in progress. Retry later.
1693 * -ECANCELED: Too many requests for now.
1694 * -EPROTO: Assert failed
1695 * -EINVAL: Unit not loaded
1696 * -EOPNOTSUPP: Unit type not supported
1697 * -ENOLINK: The necessary dependencies are not fulfilled.
1698 * -ESTALE: This unit has been started before and can't be started a second time
 */
1700 int unit_start(Unit
*u
) {
1701 UnitActiveState state
;
1706 /* If this is already started, then this will succeed. Note
1707 * that this will even succeed if this unit is not startable
1708 * by the user. This is relied on to detect when we need to
1709 * wait for units and when waiting is finished. */
1710 state
= unit_active_state(u
);
1711 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1714 /* Units that aren't loaded cannot be started */
1715 if (u
->load_state
!= UNIT_LOADED
)
1718 /* Refuse starting scope units more than once */
1719 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1722 /* If the conditions failed, don't do anything at all. If we
1723 * already are activating this call might still be useful to
1724 * speed up activation in case there is some hold-off time,
1725 * but we don't want to recheck the condition in that case. */
1726 if (state
!= UNIT_ACTIVATING
&&
1727 !unit_condition_test(u
)) {
1728 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1732 /* If the asserts failed, fail the entire job */
1733 if (state
!= UNIT_ACTIVATING
&&
1734 !unit_assert_test(u
)) {
1735 log_unit_notice(u
, "Starting requested but asserts failed.");
1739 /* Units of types that aren't supported cannot be
1740 * started. Note that we do this test only after the condition
1741 * checks, so that we rather return condition check errors
1742 * (which are usually not considered a true failure) than "not
1743 * supported" errors (which are considered a failure).
1745 if (!unit_supported(u
))
1748 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1749 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1750 * effect anymore, due to a reload or due to a failed condition. */
1751 if (!unit_verify_deps(u
))
1754 /* Forward to the main object, if we aren't it. */
1755 following
= unit_following(u
);
1757 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1758 return unit_start(following
);
1761 /* If it is stopped, but we cannot start it, then fail */
1762 if (!UNIT_VTABLE(u
)->start
)
1765 /* We don't suppress calls to ->start() here when we are
1766 * already starting, to allow this request to be used as a
1767 * "hurry up" call, for example when the unit is in some "auto
1768 * restart" state where it waits for a holdoff timer to elapse
1769 * before it will start again. */
1771 unit_add_to_dbus_queue(u
);
1773 return UNIT_VTABLE(u
)->start(u
);
/* Whether @u can be started: loaded, supported, not an already-run once-only
 * unit, and its type implements ->start(). */
1776 bool unit_can_start(Unit
*u
) {
1779 if (u
->load_state
!= UNIT_LOADED
)
1782 if (!unit_supported(u
))
1785 /* Scope units may be started only once */
1786 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1789 return !!UNIT_VTABLE(u
)->start
;
/* Whether @u may be the target of an isolate job. NOTE(review): the second
 * operand of the && is on an elided line — presumably the unit's
 * allow_isolate setting; confirm against the full source. */
1792 bool unit_can_isolate(Unit
*u
) {
1795 return unit_can_start(u
) &&
/* Errors:
1800 * -EBADR: This unit type does not support stopping.
1801 * -EALREADY: Unit is already stopped.
1802 * -EAGAIN: An operation is already in progress. Retry later.
 */
1804 int unit_stop(Unit
*u
) {
1805 UnitActiveState state
;
1810 state
= unit_active_state(u
);
1811 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1814 following
= unit_following(u
);
1816 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1817 return unit_stop(following
);
1820 if (!UNIT_VTABLE(u
)->stop
)
1823 unit_add_to_dbus_queue(u
);
1825 return UNIT_VTABLE(u
)->stop(u
);
/* Whether @u can be stopped: supported and its type implements ->stop(). */
1828 bool unit_can_stop(Unit
*u
) {
1831 if (!unit_supported(u
))
1837 return !!UNIT_VTABLE(u
)->stop
;
/* Errors:
1841 * -EBADR: This unit type does not support reloading.
1842 * -ENOEXEC: Unit is not started.
1843 * -EAGAIN: An operation is already in progress. Retry later.
 */
1845 int unit_reload(Unit
*u
) {
1846 UnitActiveState state
;
1851 if (u
->load_state
!= UNIT_LOADED
)
1854 if (!unit_can_reload(u
))
1857 state
= unit_active_state(u
);
1858 if (state
== UNIT_RELOADING
)
1861 if (state
!= UNIT_ACTIVE
) {
1862 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1866 following
= unit_following(u
);
1868 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1869 return unit_reload(following
);
1872 unit_add_to_dbus_queue(u
);
1874 if (!UNIT_VTABLE(u
)->reload
) {
1875 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1876 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
1880 return UNIT_VTABLE(u
)->reload(u
);
/* Whether @u can be reloaded: defers to the type's ->can_reload() hook if
 * present, otherwise true if there is anything to propagate a reload to or the
 * type implements ->reload(). */
1883 bool unit_can_reload(Unit
*u
) {
1886 if (UNIT_VTABLE(u
)->can_reload
)
1887 return UNIT_VTABLE(u
)->can_reload(u
);
1889 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1892 return UNIT_VTABLE(u
)->reload
;
/* Whether a StopWhenUnneeded= unit may now be stopped: it is active and no
 * dependent unit (across the deps[] dependency types) is active, transitioning
 * or marked for restart. */
1895 bool unit_is_unneeded(Unit
*u
) {
1896 static const UnitDependency deps
[] = {
1906 if (!u
->stop_when_unneeded
)
1909 /* Don't clean up while the unit is transitioning or is even inactive. */
1910 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
1915 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1920 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1921 * restart, then don't clean this one up. */
1923 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
) {
1927 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1930 if (unit_will_restart(other
))
/* Submit every unit @u depends on (across the deps[] dependency types) to the
 * StopWhenUnneeded= processing queue. */
1938 static void check_unneeded_dependencies(Unit
*u
) {
1940 static const UnitDependency deps
[] = {
1950 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1952 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1957 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
)
1958 unit_submit_to_stop_when_unneeded_queue(other
);
/* If an active unit has a BindsTo= peer that is inactive/failed, enqueue a
 * stop job for the unit — rate limited so a failing stop can't loop forever. */
1962 static void unit_check_binds_to(Unit
*u
) {
1963 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1975 if (unit_active_state(u
) != UNIT_ACTIVE
)
1978 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
1982 if (!other
->coldplugged
)
1983 /* We might yet create a job for the other unit… */
1986 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1996 /* If stopping a unit fails continuously we might enter a stop
1997 * loop here, hence stop acting on the service being
1998 * unnecessary after a while. */
1999 if (!ratelimit_below(&u
->auto_stop_ratelimit
)) {
2000 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2005 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2007 /* A unit we need to run is gone. Sniff. Let's stop this. */
2008 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2010 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
/* A unit became active without a job driving it: start its Requires=/BindsTo=
 * (replace mode) and Wants= (fail mode) peers that are not ordered After= us,
 * and stop anything it Conflicts= with (or that conflicts with it). */
2013 static void retroactively_start_dependencies(Unit
*u
) {
2019 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2021 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2022 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2023 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2024 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2026 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2027 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2028 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2029 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2031 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2032 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2033 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2034 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
2036 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2037 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2038 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2040 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2041 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2042 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
/* A unit went down without a job driving it: stop every unit bound to it. */
2045 static void retroactively_stop_dependencies(Unit
*u
) {
2051 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2053 /* Pull down units which are bound to us recursively if enabled */
2054 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2055 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2056 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
/* Enqueue start jobs for all OnFailure= peers, using the unit's
 * OnFailureJobMode=. Enqueue failures are logged and ignored. */
2059 void unit_start_on_failure(Unit
*u
) {
2067 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2070 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2072 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2073 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2075 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, &error
, NULL
);
2077 log_unit_warning_errno(u
, r
, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error
, r
));
/* Notify every unit triggered by @u (TriggeredBy=) that implements the
 * ->trigger_notify() hook. */
2081 void unit_trigger_notify(Unit
*u
) {
2088 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2089 if (UNIT_VTABLE(other
)->trigger_notify
)
2090 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
/* Log CPU and IP-accounting resource consumption for a unit that just entered
 * a dead or failed state, both as a human-readable MESSAGE= and as structured
 * journal fields. */
2093 static int unit_log_resources(Unit
*u
) {
2094 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ 4];
2095 bool any_traffic
= false, have_ip_accounting
= false;
2096 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
;
2097 size_t n_message_parts
= 0, n_iovec
= 0;
2098 char* message_parts
[3 + 1], *t
;
2099 nsec_t nsec
= NSEC_INFINITY
;
2100 CGroupIPAccountingMetric m
;
2103 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2104 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2105 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2106 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2107 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2112 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2113 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2114 * information and the complete data in structured fields. */
2116 (void) unit_get_cpu_usage(u
, &nsec
);
2117 if (nsec
!= NSEC_INFINITY
) {
2118 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2120 /* Format the CPU time for inclusion in the structured log message */
2121 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2125 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2127 /* Format the CPU time for inclusion in the human language message string */
2128 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2129 t
= strjoin("consumed ", buf
, " CPU time");
2135 message_parts
[n_message_parts
++] = t
;
2138 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2139 char buf
[FORMAT_BYTES_MAX
] = "";
2140 uint64_t value
= UINT64_MAX
;
2142 assert(ip_fields
[m
]);
2144 (void) unit_get_ip_accounting(u
, m
, &value
);
2145 if (value
== UINT64_MAX
)
2148 have_ip_accounting
= true;
2152 /* Format IP accounting data for inclusion in the structured log message */
2153 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2157 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2159 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2160 * bytes counters (and not for the packets counters) */
2161 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2163 igress
= strjoin("received ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2168 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2170 egress
= strjoin("sent ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2178 if (have_ip_accounting
) {
2181 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2183 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2188 k
= strdup("no IP traffic");
2194 message_parts
[n_message_parts
++] = k
;
2198 /* Is there any accounting data available at all? */
2204 if (n_message_parts
== 0)
2205 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2207 _cleanup_free_
char *joined
;
2209 message_parts
[n_message_parts
] = NULL
;
2211 joined
= strv_join(message_parts
, ", ");
2217 joined
[0] = ascii_toupper(joined
[0]);
2218 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2221 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2222 * and hence don't increase n_iovec for them */
2223 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2224 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2226 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2227 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2229 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2230 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2232 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2236 for (i
= 0; i
< n_message_parts
; i
++)
2237 free(message_parts
[i
]);
2239 for (i
= 0; i
< n_iovec
; i
++)
2240 free(iovec
[i
].iov_base
);
/* Track whether the unit needs the console, ref/unref'ing the manager's
 * console use count when the state flips. */
2246 static void unit_update_on_console(Unit
*u
) {
2251 b
= unit_needs_console(u
);
2252 if (u
->on_console
== b
)
2257 manager_ref_console(u
->manager
);
2259 manager_unref_console(u
->manager
);
/* Emit an audit start record for service units that just finished starting. */
2262 static void unit_emit_audit_start(Unit
*u
) {
2265 if (u
->type
!= UNIT_SERVICE
)
2268 /* Write audit record if we have just finished starting up */
2269 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, true);
/* Emit an audit stop record for service units that just shut down, pairing it
 * with a synthesized start record if none was written earlier. */
2273 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2276 if (u
->type
!= UNIT_SERVICE
)
2280 /* Write audit record if we have just finished shutting down */
2281 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, state
== UNIT_INACTIVE
);
2282 u
->in_audit
= false;
2284 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2285 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, state
== UNIT_INACTIVE
);
2287 if (state
== UNIT_INACTIVE
)
2288 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, true);
/* Central state-change notification, called by unit type implementations on
 * every low-level state change (@os -> @ns, possibly identical). Updates
 * timestamps, finishes/invalidates jobs, retroactively starts/stops
 * dependencies, fires OnFailure=, audit/plymouth/resource logging, and
 * success/failure emergency actions. */
2292 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2298 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2299 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2301 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2302 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2303 * remounted this function will be called too! */
2307 /* Update timestamps for state changes */
2308 if (!MANAGER_IS_RELOADING(m
)) {
2309 dual_timestamp_get(&u
->state_change_timestamp
);
2311 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2312 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2313 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2314 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2316 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2317 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2318 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2319 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2322 /* Keep track of failed units */
2323 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2325 /* Make sure the cgroup and state files are always removed when we become inactive */
2326 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2327 unit_prune_cgroup(u
);
2328 unit_unlink_state_files(u
);
2331 unit_update_on_console(u
);
2336 if (u
->job
->state
== JOB_WAITING
)
2338 /* So we reached a different state for this
2339 * job. Let's see if we can run it now if it
2340 * failed previously due to EAGAIN. */
2341 job_add_to_run_queue(u
->job
);
2343 /* Let's check whether this state change constitutes a
2344 * finished job, or maybe contradicts a running job and
2345 * hence needs to invalidate jobs. */
2347 switch (u
->job
->type
) {
2350 case JOB_VERIFY_ACTIVE
:
2352 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2353 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2354 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2357 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2358 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2364 case JOB_RELOAD_OR_START
:
2365 case JOB_TRY_RELOAD
:
2367 if (u
->job
->state
== JOB_RUNNING
) {
2368 if (ns
== UNIT_ACTIVE
)
2369 job_finish_and_invalidate(u
->job
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2370 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2373 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2374 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2382 case JOB_TRY_RESTART
:
2384 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2385 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2386 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2388 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2394 assert_not_reached("Job type unknown");
2400 if (!MANAGER_IS_RELOADING(m
)) {
2402 /* If this state change happened without being
2403 * requested by a job, then let's retroactively start
2404 * or stop dependencies. We skip that step when
2405 * deserializing, since we don't want to create any
2406 * additional jobs just because something is already
 * activated. */
2410 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2411 retroactively_start_dependencies(u
);
2412 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2413 retroactively_stop_dependencies(u
);
2416 /* stop unneeded units regardless if going down was expected or not */
2417 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2418 check_unneeded_dependencies(u
);
2420 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2421 log_unit_debug(u
, "Unit entered failed state.");
2423 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2424 unit_start_on_failure(u
);
2427 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2428 /* This unit just finished starting up */
2430 unit_emit_audit_start(u
);
2431 manager_send_unit_plymouth(m
, u
);
2434 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2435 /* This unit just stopped/failed. */
2437 unit_emit_audit_stop(u
, ns
);
2438 unit_log_resources(u
);
2442 manager_recheck_journal(m
);
2443 manager_recheck_dbus(m
);
2445 unit_trigger_notify(u
);
2447 if (!MANAGER_IS_RELOADING(m
)) {
2448 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2449 unit_submit_to_stop_when_unneeded_queue(u
);
2451 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2452 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2453 * without ever entering started.) */
2454 unit_check_binds_to(u
);
2456 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2457 reason
= strjoina("unit ", u
->id
, " failed");
2458 (void) emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, reason
);
2459 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2460 reason
= strjoina("unit ", u
->id
, " succeeded");
2461 (void) emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, reason
);
2465 unit_add_to_dbus_queue(u
);
2466 unit_add_to_gc_queue(u
);
/* Watch a PID for this unit. The manager's watch_pids hashmap keys a PID
 * either directly to one Unit (key "pid") or, on collision, to a NULL-
 * terminated Unit* array under the negated key "-pid". */
2469 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2473 assert(pid_is_valid(pid
));
2475 /* Watch a specific PID */
2477 r
= set_ensure_allocated(&u
->pids
, NULL
);
2481 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2485 /* First try, let's add the unit keyed by "pid". */
2486 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2492 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2493 * to an array of Units rather than just a Unit), lists us already. */
2495 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2497 for (; array
[n
]; n
++)
2501 if (found
) /* Found it already? if so, do nothing */
2506 /* Allocate a new array */
2507 new_array
= new(Unit
*, n
+ 2);
2511 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2513 new_array
[n
+1] = NULL
;
2515 /* Add or replace the old array */
2516 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2527 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
/* Stop watching a PID for this unit: drop it from both the direct "pid" key
 * and the "-pid" array key of the manager's watch_pids map, and from the
 * unit's own pid set. */
2534 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2538 assert(pid_is_valid(pid
));
2540 /* First let's drop the unit in case it's keyed as "pid". */
2541 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2543 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2544 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2548 /* Let's iterate through the array, dropping our own entry */
2549 for (n
= 0; array
[n
]; n
++)
2551 array
[m
++] = array
[n
];
2555 /* The array is now empty, remove the entire entry */
2556 assert(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2561 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
/* Unwatch every PID of the unit and free the pid set. */
2564 void unit_unwatch_all_pids(Unit
*u
) {
2567 while (!set_isempty(u
->pids
))
2568 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2570 u
->pids
= set_free(u
->pids
);
/* Drop already-waited-for (dead) PIDs from the watch list, keeping the main
 * and control PIDs regardless. */
2573 static void unit_tidy_watch_pids(Unit
*u
) {
2574 pid_t except1
, except2
;
2580 /* Cleans dead PIDs from our list */
2582 except1
= unit_main_pid(u
);
2583 except2
= unit_control_pid(u
);
2585 SET_FOREACH(e
, u
->pids
, i
) {
2586 pid_t pid
= PTR_TO_PID(e
);
2588 if (pid
== except1
|| pid
== except2
)
2591 if (!pid_is_unwaited(pid
))
2592 unit_unwatch_pid(u
, pid
);
/* Deferred-event callback: tidy the watched PID list, re-watch all PIDs, and
 * synthesize a cgroup-empty event if nothing is left. */
2596 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2602 unit_tidy_watch_pids(u
);
2603 unit_watch_all_pids(u
);
2605 /* If the PID set is empty now, then let's finish this off. */
2606 unit_synthesize_cgroup_empty_event(u
);
/* Schedule a low-priority deferred event that re-scans the unit's watched
 * PIDs (see on_rewatch_pids_event()). Only needed on cgroup v1, where no
 * reliable cgroup-empty notifications exist. */
2611 int unit_enqueue_rewatch_pids(Unit
*u
) {
2616 if (!u
->cgroup_path
)
2619 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2622 if (r
> 0) /* On unified we can use proper notifications */
2625 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2626 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2627 * involves issuing kill(pid, 0) on all processes we watch. */
2629 if (!u
->rewatch_pids_event_source
) {
2630 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2632 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2634 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2636 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
/* Fixed: the message below was missing the '%' of the "%m" conversion, so the
 * errno string was never included in the log output. */
2638 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2640 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2642 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2645 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2647 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
/* Cancel and release a pending rewatch-pids event source, if any. */
2652 void unit_dequeue_rewatch_pids(Unit
*u
) {
2656 if (!u
->rewatch_pids_event_source
)
2659 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2661 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2663 u
->rewatch_pids_event_source
= sd_event_source_unref(u
->rewatch_pids_event_source
);
/* Whether a job of type @j makes sense to enqueue for @u at all. */
2666 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2668 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2672 case JOB_VERIFY_ACTIVE
:
2675 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2676 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
such a request. */
2681 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2682 * external events), hence it makes no sense to permit enqueing such a request either. */
2683 return !u
->perpetual
;
2686 case JOB_TRY_RESTART
:
2687 return unit_can_stop(u
) && unit_can_start(u
);
2690 case JOB_TRY_RELOAD
:
2691 return unit_can_reload(u
);
2693 case JOB_RELOAD_OR_START
:
2694 return unit_can_reload(u
) && unit_can_start(u
);
2697 assert_not_reached("Invalid job type");
/* Warn when a dependency is dropped or merged away during unit merging, but
 * only for dependency types where the loss is interesting. */
2701 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2704 /* Only warn about some unit types */
2705 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2708 if (streq_ptr(u
->id
, other
))
2709 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2711 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
/* Insert or update one dependency edge in a dependency hashmap, OR-ing the
 * origin/destination masks into an existing entry. The UnitDependencyInfo is
 * stored bit-packed as the hashmap value (hence the size assert_cc). */
2714 static int unit_add_dependency_hashmap(
2717 UnitDependencyMask origin_mask
,
2718 UnitDependencyMask destination_mask
) {
2720 UnitDependencyInfo info
;
2725 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2726 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2727 assert(origin_mask
> 0 || destination_mask
> 0);
2729 r
= hashmap_ensure_allocated(h
, NULL
);
2733 assert_cc(sizeof(void*) == sizeof(info
));
2735 info
.data
= hashmap_get(*h
, other
);
2737 /* Entry already exists. Add in our mask. */
2739 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
2740 FLAGS_SET(destination_mask
, info
.destination_mask
))
2743 info
.origin_mask
|= origin_mask
;
2744 info
.destination_mask
|= destination_mask
;
2746 r
= hashmap_update(*h
, other
, info
.data
);
2748 info
= (UnitDependencyInfo
) {
2749 .origin_mask
= origin_mask
,
2750 .destination_mask
= destination_mask
,
2753 r
= hashmap_put(*h
, other
, info
.data
);
/* Add a dependency of type @d from @u on @other (following merges), plus the
 * inverse edge per inverse_table and, if @add_reference, References=/
 * ReferencedBy= edges. Self-dependencies are warned about and dropped; .device
 * units cannot be ordering-delayed. */
2761 int unit_add_dependency(
2766 UnitDependencyMask mask
) {
2768 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2769 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2770 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2771 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2772 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2773 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2774 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2775 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2776 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2777 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2778 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2779 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2780 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2781 [UNIT_BEFORE
] = UNIT_AFTER
,
2782 [UNIT_AFTER
] = UNIT_BEFORE
,
2783 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2784 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2785 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2786 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2787 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2788 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2789 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2790 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2792 Unit
*original_u
= u
, *original_other
= other
;
2796 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2799 u
= unit_follow_merge(u
);
2800 other
= unit_follow_merge(other
);
2802 /* We won't allow dependencies on ourselves. We will not
2803 * consider them an error however. */
2805 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2809 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2810 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2811 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2815 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2819 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2820 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2825 if (add_reference
) {
2826 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2830 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2835 unit_add_to_dbus_queue(u
);
/* Convenience wrapper: add dependencies of both type @d and @e from @u on
 * @other. */
2839 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2844 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2848 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
/* Resolve a template unit name against @u's instance (or, failing that, @u's
 * prefix), storing the allocated result in *buf and pointing *ret at the name
 * to use. */
2851 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
2859 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2866 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2868 _cleanup_free_
char *i
= NULL
;
2870 r
= unit_name_to_prefix(u
->id
, &i
);
2874 r
= unit_name_replace_instance(name
, i
, buf
);
/* unit_add_dependency_by_name: resolves 'name' (instantiating templates as
 * needed), loads the target unit through the manager, and adds dependency 'd'
 * on it. NOTE(review): fragmentary extract — error checks after each call are
 * not visible here. */
2883 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2884 _cleanup_free_
char *buf
= NULL
;
2891 r
= resolve_template(u
, name
, &buf
, &name
);
2895 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2899 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
/* unit_add_two_dependencies_by_name: like unit_add_dependency_by_name() but
 * registers both 'd' and 'e' on the loaded target unit.
 * NOTE(review): fragmentary extract — error checks are not visible here. */
2902 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2903 _cleanup_free_
char *buf
= NULL
;
2910 r
= resolve_template(u
, name
, &buf
, &name
);
2914 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2918 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* set_unit_path: overrides the unit search path via the SYSTEMD_UNIT_PATH
 * environment variable (debugging aid only). */
2921 int set_unit_path(const char *p
) {
2922 /* This is mostly for debug purposes */
2923 if (setenv("SYSTEMD_UNIT_PATH", p
, 1) < 0)
/* unit_dbus_path: returns the D-Bus object path for this unit, derived from
 * its primary name (caller frees). */
2929 char *unit_dbus_path(Unit
*u
) {
2935 return unit_dbus_path_from_name(u
->id
);
/* unit_dbus_path_invocation_id: like unit_dbus_path() but keyed by the
 * current invocation ID; bails out when no invocation ID is set. */
2938 char *unit_dbus_path_invocation_id(Unit
*u
) {
2941 if (sd_id128_is_null(u
->invocation_id
))
2944 return unit_dbus_path_from_name(u
->invocation_id_string
)
;
/* unit_set_slice: attaches 'slice' as the parent slice of u, after a series
 * of sanity checks (u must have a cgroup context, must not itself be a slice,
 * must be inactive, target must be a slice, init.scope may only live in the
 * root slice, and a realized cgroup pins the current slice).
 * NOTE(review): fragmentary extract — the return statements of each guard
 * are not visible here. */
2947 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2951 /* Sets the unit slice if it has not been set before. Is extra
2952 * careful, to only allow this for units that actually have a
2953 * cgroup context. Also, we don't allow to set this for slices
2954 * (since the parent slice is derived from the name). Make
2955 * sure the unit we set is actually a slice. */
2957 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2960 if (u
->type
== UNIT_SLICE
)
2963 if (unit_active_state(u
) != UNIT_INACTIVE
)
2966 if (slice
->type
!= UNIT_SLICE
)
2969 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
2970 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
2973 if (UNIT_DEREF(u
->slice
) == slice
)
2976 /* Disallow slice changes if @u is already bound to cgroups */
2977 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
2980 unit_ref_set(&u
->slice
, u
, slice
);
/* unit_set_default_slice: picks a slice for u when none was configured.
 * Instantiated units get a per-template "<escaped-prefix>.slice" (prefixed
 * with "system-" on the system manager); otherwise system.slice or -.slice
 * is used. NOTE(review): fragmentary extract — error checks and the
 * non-instance branch header are not visible here. */
2984 int unit_set_default_slice(Unit
*u
) {
2985 _cleanup_free_
char *b
= NULL
;
2986 const char *slice_name
;
2992 if (UNIT_ISSET(u
->slice
))
2996 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
2998 /* Implicitly place all instantiated units in their
2999 * own per-template slice */
3001 r
= unit_name_to_prefix(u
->id
, &prefix
);
3005 /* The prefix is already escaped, but it might include
3006 * "-" which has a special meaning for slice units,
3007 * hence escape it here extra. */
3008 escaped
= unit_name_escape(prefix
);
3012 if (MANAGER_IS_SYSTEM(u
->manager
))
3013 b
= strjoin("system-", escaped
, ".slice");
3015 b
= strappend(escaped
, ".slice");
3022 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
3023 ? SPECIAL_SYSTEM_SLICE
3024 : SPECIAL_ROOT_SLICE
;
3026 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3030 return unit_set_slice(u
, slice
);
/* unit_slice_name: returns the id of the unit's slice, or bails when no
 * slice reference is set. */
3033 const char *unit_slice_name(Unit
*u
) {
3036 if (!UNIT_ISSET(u
->slice
))
3039 return UNIT_DEREF(u
->slice
)->id
;
/* unit_load_related_unit: loads the unit that shares u's name but carries
 * suffix 'type' (e.g. foo.service -> foo.socket) and returns it in *_found.
 * Refuses when the renamed unit would be u itself.
 * NOTE(review): fragmentary extract — error checks are not visible here. */
3042 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3043 _cleanup_free_
char *t
= NULL
;
3050 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3053 if (unit_has_name(u
, t
))
3056 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3057 assert(r
< 0 || *_found
!= u
);
/* signal_name_owner_changed: D-Bus match callback for NameOwnerChanged;
 * parses the (name, old_owner, new_owner) triple, maps empty owner strings
 * to NULL, and forwards to the unit type's bus_name_owner_change() hook.
 * NOTE(review): fragmentary extract — the userdata->Unit assignment is not
 * visible here. */
3061 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3062 const char *name
, *old_owner
, *new_owner
;
3069 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3071 bus_log_parse_error(r
);
/* Empty strings on the wire mean "no owner" */
3075 old_owner
= empty_to_null(old_owner
);
3076 new_owner
= empty_to_null(new_owner
);
3078 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3079 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
/* unit_install_bus_match: installs an async NameOwnerChanged match for
 * 'name' on 'bus', storing the slot in u->match_bus_slot. A no-op guard
 * protects against double installation. */
3084 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3091 if (u
->match_bus_slot
)
3094 match
= strjoina("type='signal',"
3095 "sender='org.freedesktop.DBus',"
3096 "path='/org/freedesktop/DBus',"
3097 "interface='org.freedesktop.DBus',"
3098 "member='NameOwnerChanged',"
3099 "arg0='", name
, "'");
3101 return sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
/* unit_watch_bus_name: subscribes u to ownership changes of bus name 'name'.
 * Installs the match immediately when the API bus is up, and always records
 * the name in the manager's watch_bus hashmap; on hashmap failure the match
 * slot is rolled back. NOTE(review): fragmentary extract — the error-branch
 * conditionals around the log calls are not visible here. */
3104 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3110 /* Watch a specific name on the bus. We only support one unit
3111 * watching each name for now. */
3113 if (u
->manager
->api_bus
) {
3114 /* If the bus is already available, install the match directly.
3115 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3116 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3118 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3121 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
/* Roll back the match slot if we could not record the watch */
3123 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3124 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
/* unit_unwatch_bus_name: reverses unit_watch_bus_name() — drops the
 * watch_bus hashmap entry and releases the bus match slot. */
3130 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3134 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3135 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
/* unit_can_serialize: true when the unit type implements both serialize and
 * deserialize_item hooks. */
3138 bool unit_can_serialize(Unit
*u
) {
3141 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
/* serialize_cgroup_mask: formats a CGroupMask as a string and writes it as a
 * key=value serialization entry. */
3144 static int serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3145 _cleanup_free_
char *s
= NULL
;
3154 r
= cg_mask_to_string(mask
, &s
);
3156 return log_error_errno(r
, "Failed to format cgroup mask: %m");
3158 return serialize_item(f
, key
, s
);
/* Serialization key names for each IP accounting metric, indexed by
 * CGroupIPAccountingMetric. Used by unit_serialize()/unit_deserialize(). */
3161 static const char *ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3162 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3163 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3164 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3165 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
/* unit_serialize: writes the unit's runtime state (timestamps, condition and
 * assert results, export/accounting flags, cgroup state, uid/gid refs,
 * invocation id, bus track refs, IP accounting counters, and optionally the
 * pending jobs) to 'f' as key=value lines, delegating type-specific state to
 * the vtable's serialize() hook first.
 * NOTE(review): fragmentary extract — error returns and the job-section
 * markers are not visible here. */
3168 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3169 CGroupIPAccountingMetric m
;
3176 if (unit_can_serialize(u
)) {
3177 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3182 (void) serialize_dual_timestamp(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3184 (void) serialize_dual_timestamp(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3185 (void) serialize_dual_timestamp(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3186 (void) serialize_dual_timestamp(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3187 (void) serialize_dual_timestamp(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3189 (void) serialize_dual_timestamp(f
, "condition-timestamp", &u
->condition_timestamp
);
3190 (void) serialize_dual_timestamp(f
, "assert-timestamp", &u
->assert_timestamp
);
3192 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3193 (void) serialize_bool(f
, "condition-result", u
->condition_result
);
3195 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3196 (void) serialize_bool(f
, "assert-result", u
->assert_result
);
3198 (void) serialize_bool(f
, "transient", u
->transient
);
3199 (void) serialize_bool(f
, "in-audit", u
->in_audit
);
3201 (void) serialize_bool(f
, "exported-invocation-id", u
->exported_invocation_id
);
3202 (void) serialize_bool(f
, "exported-log-level-max", u
->exported_log_level_max
);
3203 (void) serialize_bool(f
, "exported-log-extra-fields", u
->exported_log_extra_fields
);
3204 (void) serialize_bool(f
, "exported-log-rate-limit-interval", u
->exported_log_rate_limit_interval
);
3205 (void) serialize_bool(f
, "exported-log-rate-limit-burst", u
->exported_log_rate_limit_burst
);
3207 (void) serialize_item_format(f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3208 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3209 (void) serialize_item_format(f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3212 (void) serialize_item(f
, "cgroup", u
->cgroup_path
);
3214 (void) serialize_bool(f
, "cgroup-realized", u
->cgroup_realized
);
3215 (void) serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3216 (void) serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3217 (void) serialize_cgroup_mask(f
, "cgroup-invalidated-mask", u
->cgroup_invalidated_mask
);
3219 if (uid_is_valid(u
->ref_uid
))
3220 (void) serialize_item_format(f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3221 if (gid_is_valid(u
->ref_gid
))
3222 (void) serialize_item_format(f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3224 if (!sd_id128_is_null(u
->invocation_id
))
3225 (void) serialize_item_format(f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3227 bus_track_serialize(u
->bus_track
, f
, "ref");
3229 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3232 r
= unit_get_ip_accounting(u
, m
, &v
);
3234 (void) serialize_item_format(f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3237 if (serialize_jobs
) {
3240 job_serialize(u
->job
, f
);
3245 job_serialize(u
->nop_job
, f
);
/* unit_deserialize: reads serialized key=value lines from 'f' until an empty
 * end marker, restoring jobs, timestamps, boolean flags, CPU/IP accounting
 * counters, cgroup state, uid/gid refs, bus-track refs and the invocation id;
 * unknown keys are passed to exec_runtime_deserialize_compat() and then to
 * the type-specific deserialize_item() hook. Finally it backfills the state
 * change timestamp (for pre-228 serializations) and invalidates the cgroup so
 * new settings get re-applied.
 * NOTE(review): fragmentary extract — the key/value split (l/v), several
 * error returns and the 'continue' statements of each branch are not visible
 * here. */
3254 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3262 _cleanup_free_
char *line
= NULL
;
3263 CGroupIPAccountingMetric m
;
3267 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3269 return log_error_errno(r
, "Failed to read serialization line: %m");
3270 if (r
== 0) /* eof */
3274 if (isempty(l
)) /* End marker */
3277 k
= strcspn(l
, "=");
3285 if (streq(l
, "job")) {
3287 /* new-style serialized job */
3294 r
= job_deserialize(j
, f
);
3300 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
3306 r
= job_install_deserialized(j
);
3308 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
3312 } else /* legacy for pre-44 */
3313 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3315 } else if (streq(l
, "state-change-timestamp")) {
3316 (void) deserialize_dual_timestamp(v
, &u
->state_change_timestamp
);
3318 } else if (streq(l
, "inactive-exit-timestamp")) {
3319 (void) deserialize_dual_timestamp(v
, &u
->inactive_exit_timestamp
);
3321 } else if (streq(l
, "active-enter-timestamp")) {
3322 (void) deserialize_dual_timestamp(v
, &u
->active_enter_timestamp
);
3324 } else if (streq(l
, "active-exit-timestamp")) {
3325 (void) deserialize_dual_timestamp(v
, &u
->active_exit_timestamp
);
3327 } else if (streq(l
, "inactive-enter-timestamp")) {
3328 (void) deserialize_dual_timestamp(v
, &u
->inactive_enter_timestamp
);
3330 } else if (streq(l
, "condition-timestamp")) {
3331 (void) deserialize_dual_timestamp(v
, &u
->condition_timestamp
);
3333 } else if (streq(l
, "assert-timestamp")) {
3334 (void) deserialize_dual_timestamp(v
, &u
->assert_timestamp
);
3336 } else if (streq(l
, "condition-result")) {
3338 r
= parse_boolean(v
);
3340 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3342 u
->condition_result
= r
;
3346 } else if (streq(l
, "assert-result")) {
3348 r
= parse_boolean(v
);
3350 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3352 u
->assert_result
= r
;
3356 } else if (streq(l
, "transient")) {
3358 r
= parse_boolean(v
);
3360 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3366 } else if (streq(l
, "in-audit")) {
3368 r
= parse_boolean(v
);
3370 log_unit_debug(u
, "Failed to parse in-audit bool %s, ignoring.", v
);
3376 } else if (streq(l
, "exported-invocation-id")) {
3378 r
= parse_boolean(v
);
3380 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3382 u
->exported_invocation_id
= r
;
3386 } else if (streq(l
, "exported-log-level-max")) {
3388 r
= parse_boolean(v
);
3390 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3392 u
->exported_log_level_max
= r
;
3396 } else if (streq(l
, "exported-log-extra-fields")) {
3398 r
= parse_boolean(v
);
3400 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3402 u
->exported_log_extra_fields
= r
;
3406 } else if (streq(l
, "exported-log-rate-limit-interval")) {
3408 r
= parse_boolean(v
);
3410 log_unit_debug(u
, "Failed to parse exported log rate limit interval %s, ignoring.", v
);
3412 u
->exported_log_rate_limit_interval
= r
;
3416 } else if (streq(l
, "exported-log-rate-limit-burst")) {
3418 r
= parse_boolean(v
);
3420 log_unit_debug(u
, "Failed to parse exported log rate limit burst %s, ignoring.", v
);
3422 u
->exported_log_rate_limit_burst
= r
;
3426 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3428 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3430 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3434 } else if (streq(l
, "cpu-usage-last")) {
3436 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3438 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3442 } else if (streq(l
, "cgroup")) {
3444 r
= unit_set_cgroup_path(u
, v
);
3446 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3448 (void) unit_watch_cgroup(u
);
3451 } else if (streq(l
, "cgroup-realized")) {
3454 b
= parse_boolean(v
);
3456 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3458 u
->cgroup_realized
= b
;
3462 } else if (streq(l
, "cgroup-realized-mask")) {
3464 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3466 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3469 } else if (streq(l
, "cgroup-enabled-mask")) {
3471 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3473 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3476 } else if (streq(l
, "cgroup-invalidated-mask")) {
3478 r
= cg_mask_from_string(v
, &u
->cgroup_invalidated_mask
);
3480 log_unit_debug(u
, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v
);
3483 } else if (streq(l
, "ref-uid")) {
3486 r
= parse_uid(v
, &uid
);
3488 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3490 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3494 } else if (streq(l
, "ref-gid")) {
3497 r
= parse_gid(v
, &gid
);
3499 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3501 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3505 } else if (streq(l
, "ref")) {
3507 r
= strv_extend(&u
->deserialized_refs
, v
);
3512 } else if (streq(l
, "invocation-id")) {
3515 r
= sd_id128_from_string(v
, &id
);
3517 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3519 r
= unit_set_invocation_id(u
, id
);
3521 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3527 /* Check if this is an IP accounting metric serialization field */
3528 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++)
3529 if (streq(l
, ip_accounting_metric_field
[m
]))
3531 if (m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
) {
3534 r
= safe_atou64(v
, &c
);
3536 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3538 u
->ip_accounting_extra
[m
] = c
;
3542 if (unit_can_serialize(u
)) {
3543 r
= exec_runtime_deserialize_compat(u
, l
, v
, fds
);
3545 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3549 /* Returns positive if key was handled by the call */
3553 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3555 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3559 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3560 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3561 * before 228 where the base for timeouts was not persistent across reboots. */
3563 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3564 dual_timestamp_get(&u
->state_change_timestamp
);
3566 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3567 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3568 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3569 unit_invalidate_cgroup_bpf(u
);
/* unit_deserialize_skip: discards the serialized section of an unknown unit,
 * reading lines until the end marker (loop condition not visible in this
 * extract). */
3574 int unit_deserialize_skip(FILE *f
) {
3578 /* Skip serialized data for this unit. We don't know what it is. */
3581 _cleanup_free_
char *line
= NULL
;
3584 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3586 return log_error_errno(r
, "Failed to read serialization line: %m");
/* unit_add_node_dependency: links u to the .device unit backing device node
 * 'what'. Skips non-device paths and systems without device-unit support,
 * upgrades Requires= to BindsTo= when the device shall be bound, adds
 * After=+dep (downgraded to Wants= for user managers), and optionally a
 * reverse Wants= when 'wants' is set.
 * NOTE(review): fragmentary extract — error checks and the 'wants' guard are
 * not visible here. */
3598 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3600 _cleanup_free_
char *e
= NULL
;
3605 /* Adds in links to the device node that this unit is based on */
3609 if (!is_device_path(what
))
3612 /* When device units aren't supported (such as in a
3613 * container), don't create dependencies on them. */
3614 if (!unit_type_supported(UNIT_DEVICE
))
3617 r
= unit_name_from_path(what
, ".device", &e
);
3621 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3625 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3626 dep
= UNIT_BINDS_TO
;
3628 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3629 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3630 device
, true, mask
);
3635 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
/* unit_coldplug: re-establishes runtime state after deserialization — replays
 * bus-track refs, invokes the type's coldplug() hook and coldplugs the
 * pending job. Collects the first error in 'r' while continuing.
 * NOTE(review): fragmentary extract — the recursion guard and 'r = q'
 * assignments are not visible here. */
3643 int unit_coldplug(Unit
*u
) {
3649 /* Make sure we don't enter a loop, when coldplugging recursively. */
3653 u
->coldplugged
= true;
3655 STRV_FOREACH(i
, u
->deserialized_refs
) {
3656 q
= bus_unit_track_add_name(u
, *i
);
3657 if (q
< 0 && r
>= 0)
3660 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3662 if (UNIT_VTABLE(u
)->coldplug
) {
3663 q
= UNIT_VTABLE(u
)->coldplug(u
);
3664 if (q
< 0 && r
>= 0)
3669 q
= job_coldplug(u
->job
);
3670 if (q
< 0 && r
>= 0)
/* unit_catchup: invokes the type-specific catchup() hook, if any, to let the
 * unit react to external state changes that happened while we were down. */
3677 void unit_catchup(Unit
*u
) {
3680 if (UNIT_VTABLE(u
)->catchup
)
3681 UNIT_VTABLE(u
)->catchup(u
);
/* fragment_mtime_newer: true when 'path' changed since 'mtime' was recorded.
 * Virtual FS paths (/proc, /sys) are treated as always up to date; for
 * masked units only the masked-ness is re-checked; otherwise the mtime is
 * compared. NOTE(review): fragmentary extract — the returns of the early
 * branches are not visible here. */
3684 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3690 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3691 * are never out-of-date. */
3692 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3695 if (stat(path
, &st
) < 0)
3696 /* What, cannot access this anymore? */
3700 /* For masked files check if they are still so */
3701 return !null_or_empty(&st
);
3703 /* For non-empty files check the mtime */
3704 return timespec_load(&st
.st_mtim
) > mtime
;
/* unit_need_daemon_reload: true when any on-disk source of this unit
 * (fragment, source path, drop-in set or drop-in contents) changed since it
 * was loaded, meaning a daemon-reload is required.
 * NOTE(review): fragmentary extract — the 'return true' statements of the
 * individual checks are not visible here. */
3709 bool unit_need_daemon_reload(Unit
*u
) {
3710 _cleanup_strv_free_
char **t
= NULL
;
3715 /* For unit files, we allow masking… */
3716 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3717 u
->load_state
== UNIT_MASKED
))
3720 /* Source paths should not be masked… */
3721 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3724 if (u
->load_state
== UNIT_LOADED
)
3725 (void) unit_find_dropin_paths(u
, &t
);
3726 if (!strv_equal(u
->dropin_paths
, t
))
3729 /* … any drop-ins that are masked are simply omitted from the list. */
3730 STRV_FOREACH(path
, u
->dropin_paths
)
3731 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
/* unit_reset_failed: clears the failure state — delegates to the type's
 * reset_failed() hook, then resets the start rate limiter and its hit flag. */
3737 void unit_reset_failed(Unit
*u
) {
3740 if (UNIT_VTABLE(u
)->reset_failed
)
3741 UNIT_VTABLE(u
)->reset_failed(u
);
3743 RATELIMIT_RESET(u
->start_limit
);
3744 u
->start_limit_hit
= false;
/* unit_following: returns the unit whose state this one follows, as reported
 * by the type's following() hook (fallback return not visible in this
 * extract). */
3747 Unit
*unit_following(Unit
*u
) {
3750 if (UNIT_VTABLE(u
)->following
)
3751 return UNIT_VTABLE(u
)->following(u
);
/* unit_stop_pending: true when a stop job is queued for this unit. */
3756 bool unit_stop_pending(Unit
*u
) {
3759 /* This call does check the current state of the unit. It's
3760 * hence useful to be called from state change calls of the
3761 * unit itself, where the state isn't updated yet. This is
3762 * different from unit_inactive_or_pending() which checks both
3763 * the current state and for a queued job. */
3765 return u
->job
&& u
->job
->type
== JOB_STOP
;
/* unit_inactive_or_pending: true when the unit is inactive/deactivating or a
 * stop job is pending. */
3768 bool unit_inactive_or_pending(Unit
*u
) {
3771 /* Returns true if the unit is inactive or going down */
3773 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3776 if (unit_stop_pending(u
))
/* unit_active_or_pending: true when the unit is active/activating or a
 * start-ish job (start, reload-or-start, restart) is queued. */
3782 bool unit_active_or_pending(Unit
*u
) {
3785 /* Returns true if the unit is active or going up */
3787 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3791 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
/* unit_will_restart: asks the type's will_restart() hook whether the unit is
 * about to restart on its own; false when the hook is absent. */
3797 bool unit_will_restart(Unit
*u
) {
3800 if (!UNIT_VTABLE(u
)->will_restart
)
3803 return UNIT_VTABLE(u
)->will_restart(u
);
/* unit_kill: sends 'signo' to the processes selected by 'w', delegating to
 * the type's kill() hook; validates 'w' and the signal number first. */
3806 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3808 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3809 assert(SIGNAL_VALID(signo
));
3811 if (!UNIT_VTABLE(u
)->kill
)
3814 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
/* unit_pid_set: builds a Set containing the main and control PIDs, used as a
 * deny-list so those processes are not killed via the cgroup sweep.
 * NOTE(review): fragmentary extract — the >0 guard on main_pid and the error
 * returns are not visible here. */
3817 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3818 _cleanup_set_free_ Set
*pid_set
= NULL
;
3821 pid_set
= set_new(NULL
);
3825 /* Exclude the main/control pids from being killed via the cgroup */
3827 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3832 if (control_pid
> 0) {
3833 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3838 return TAKE_PTR(pid_set
);
/* unit_kill_common: shared kill implementation — validates that the requested
 * target (main/control process) exists, signals the selected PIDs, and for
 * KILL_ALL* additionally sweeps the unit's cgroup excluding main/control
 * PIDs. The *_FAIL variants report an error when nothing was killed.
 * NOTE(review): fragmentary extract — errno capture, the 'killed = true'
 * assignments and the final returns are not visible here. */
3841 int unit_kill_common(
3847 sd_bus_error
*error
) {
3850 bool killed
= false;
3852 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3854 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3855 else if (main_pid
== 0)
3856 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3859 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3860 if (control_pid
< 0)
3861 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3862 else if (control_pid
== 0)
3863 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3866 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3867 if (control_pid
> 0) {
3868 if (kill(control_pid
, signo
) < 0)
3874 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3876 if (kill(main_pid
, signo
) < 0)
3882 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3883 _cleanup_set_free_ Set
*pid_set
= NULL
;
3886 /* Exclude the main/control pids from being killed via the cgroup */
3887 pid_set
= unit_pid_set(main_pid
, control_pid
);
3891 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3892 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
3898 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
/* unit_following_set: collects the units following this one into *s via the
 * type's following_set() hook (fallback when absent not visible in this
 * extract). */
3904 int unit_following_set(Unit
*u
, Set
**s
) {
3908 if (UNIT_VTABLE(u
)->following_set
)
3909 return UNIT_VTABLE(u
)->following_set(u
, s
);
/* unit_get_unit_file_state: lazily determines and caches the enable state of
 * the unit file; falls back to UNIT_FILE_BAD when the lookup fails.
 * NOTE(review): fragmentary extract — the arguments between scope and the
 * output pointer (root dir, unit name) are not visible here. */
3915 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3920 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3921 r
= unit_file_get_state(
3922 u
->manager
->unit_file_scope
,
3925 &u
->unit_file_state
);
3927 u
->unit_file_state
= UNIT_FILE_BAD
;
3930 return u
->unit_file_state
;
/* unit_get_unit_file_preset: lazily queries and caches whether presets would
 * enable this unit file, keyed by the fragment's basename. */
3933 int unit_get_unit_file_preset(Unit
*u
) {
3936 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3937 u
->unit_file_preset
= unit_file_query_preset(
3938 u
->manager
->unit_file_scope
,
3940 basename(u
->fragment_path
));
3942 return u
->unit_file_preset
;
/* unit_ref_set: points 'ref' from 'source' at 'target', unsetting any
 * previous reference first and linking the ref into the target's
 * refs_by_target list so the target knows who references it. */
3945 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
3951 unit_ref_unset(ref
);
3953 ref
->source
= source
;
3954 ref
->target
= target
;
3955 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
/* unit_ref_unset: drops the reference — queues the target for GC (it may be
 * unreferenced now), unlinks the ref from the target's list and clears both
 * pointers. */
3959 void unit_ref_unset(UnitRef
*ref
) {
3965 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3966 * be unreferenced now. */
3967 unit_add_to_gc_queue(ref
->target
);
3969 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
3970 ref
->source
= ref
->target
= NULL
;
/* user_from_unit_name: derives a DynamicUser= user name from the unit's
 * prefix; when the prefix is not a valid user/group name, a siphash24-based
 * "_duXXXXXXXXXXXXXXXX" name is synthesized instead.
 * NOTE(review): fragmentary extract — the valid-name branch body is not
 * visible here. */
3973 static int user_from_unit_name(Unit
*u
, char **ret
) {
3975 static const uint8_t hash_key
[] = {
3976 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3977 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3980 _cleanup_free_
char *n
= NULL
;
3983 r
= unit_name_to_prefix(u
->id
, &n
);
3987 if (valid_user_group_name(n
)) {
3992 /* If we can't use the unit name as a user name, then let's hash it and use that */
3993 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
/* unit_patch_contexts: after configuration is loaded, patches manager-wide
 * defaults into the exec context (rlimits, user-manager working directory)
 * and applies derived hardening: drops CAP_MKNOD/CAP_SYS_RAWIO for
 * PrivateDevices=, CAP_SYS_MODULE for ProtectKernelModules=, sets up the
 * strict DynamicUser= sandbox, and tightens the cgroup device policy
 * (including loop-device allowances required by RootImage=).
 * NOTE(review): fragmentary extract — null checks on ec/cc and several error
 * returns are not visible here. */
3999 int unit_patch_contexts(Unit
*u
) {
4007 /* Patch in the manager defaults into the exec and cgroup
4008 * contexts, _after_ the rest of the settings have been
4011 ec
= unit_get_exec_context(u
);
4013 /* This only copies in the ones that need memory */
4014 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4015 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4016 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4021 if (MANAGER_IS_USER(u
->manager
) &&
4022 !ec
->working_directory
) {
4024 r
= get_home_dir(&ec
->working_directory
);
4028 /* Allow user services to run, even if the
4029 * home directory is missing */
4030 ec
->working_directory_missing_ok
= true;
4033 if (ec
->private_devices
)
4034 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4036 if (ec
->protect_kernel_modules
)
4037 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4039 if (ec
->dynamic_user
) {
4041 r
= user_from_unit_name(u
, &ec
->user
);
4047 ec
->group
= strdup(ec
->user
);
4052 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4053 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4055 ec
->private_tmp
= true;
4056 ec
->remove_ipc
= true;
4057 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4058 if (ec
->protect_home
== PROTECT_HOME_NO
)
4059 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4063 cc
= unit_get_cgroup_context(u
);
4066 if (ec
->private_devices
&&
4067 cc
->device_policy
== CGROUP_AUTO
)
4068 cc
->device_policy
= CGROUP_CLOSED
;
4070 if (ec
->root_image
&&
4071 (cc
->device_policy
!= CGROUP_AUTO
|| cc
->device_allow
)) {
4073 /* When RootImage= is specified, the following devices are touched. */
4074 r
= cgroup_add_device_allow(cc
, "/dev/loop-control", "rw");
4078 r
= cgroup_add_device_allow(cc
, "block-loop", "rwm");
4082 r
= cgroup_add_device_allow(cc
, "block-blkext", "rwm");
/* unit_get_exec_context: returns the ExecContext embedded in the
 * type-specific unit struct, located via the vtable's byte offset. */
4091 ExecContext
*unit_get_exec_context(Unit
*u
) {
4098 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4102 return (ExecContext
*) ((uint8_t*) u
+ offset
);
/* unit_get_kill_context: returns the KillContext embedded in the
 * type-specific unit struct, located via the vtable's byte offset. */
4105 KillContext
*unit_get_kill_context(Unit
*u
) {
4112 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4116 return (KillContext
*) ((uint8_t*) u
+ offset
);
/* unit_get_cgroup_context: returns the CGroupContext embedded in the
 * type-specific unit struct, located via the vtable's byte offset. */
4119 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4125 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4129 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
/* unit_get_exec_runtime: returns the ExecRuntime pointer stored in the
 * type-specific unit struct (note: the field holds a pointer, hence the
 * extra dereference). */
4132 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4138 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4142 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
/* unit_drop_in_dir: picks the directory drop-ins should be written to for
 * the given write flags — transient units always use the transient dir,
 * otherwise persistent or runtime control dirs per flag. */
4145 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4148 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4151 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4152 return u
->manager
->lookup_paths
.transient
;
4154 if (flags
& UNIT_PERSISTENT
)
4155 return u
->manager
->lookup_paths
.persistent_control
;
4157 if (flags
& UNIT_RUNTIME
)
4158 return u
->manager
->lookup_paths
.runtime_control
;
/* unit_escape_setting: escapes 's' per the requested flags (specifier and/or
 * C escaping). With 'buf' the allocation (if any) is handed back via *buf
 * and the return value may alias the input; without 'buf' the return is
 * always freshly allocated.
 * NOTE(review): fragmentary extract — the C-escaping body is not visible
 * here. */
4163 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4169 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4170 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4171 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4172 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4173 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4176 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4177 ret
= specifier_escape(s
);
4184 if (flags
& UNIT_ESCAPE_C
) {
4197 return ret
?: (char*) s
;
4200 return ret
?: strdup(s
);
/* unit_concat_strv: escapes each entry of 'l' and joins them, space
 * separated and quoted, producing a string suitable for e.g. ExecStart=
 * display. NOTE(review): fragmentary extract — the quoting/copy loop body is
 * not visible here. */
4203 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4204 _cleanup_free_
char *result
= NULL
;
4205 size_t n
= 0, allocated
= 0;
4208 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4209 * way suitable for ExecStart= stanzas */
4211 STRV_FOREACH(i
, l
) {
4212 _cleanup_free_
char *buf
= NULL
;
4217 p
= unit_escape_setting(*i
, flags
, &buf
);
4221 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4222 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4236 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
4241 return TAKE_PTR(result
);
/* unit_write_setting: persists one configuration assignment for u. Escapes
 * 'data', prefixes the proper section header ([Unit] or the type's private
 * section, suppressing duplicates in transient files), and either appends to
 * the open transient unit file or writes a 50-<name>.conf drop-in under the
 * directory selected by the flags, remembering the path and mtime.
 * NOTE(review): fragmentary extract — error returns and part of the wrapped
 * drop-in content are not visible here. */
4244 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4245 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4246 const char *dir
, *wrapped
;
4253 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4256 data
= unit_escape_setting(data
, flags
, &escaped
);
4260 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4261 * previous section header is the same */
4263 if (flags
& UNIT_PRIVATE
) {
4264 if (!UNIT_VTABLE(u
)->private_section
)
4267 if (!u
->transient_file
|| u
->last_section_private
< 0)
4268 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4269 else if (u
->last_section_private
== 0)
4270 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4272 if (!u
->transient_file
|| u
->last_section_private
< 0)
4273 data
= strjoina("[Unit]\n", data
);
4274 else if (u
->last_section_private
> 0)
4275 data
= strjoina("\n[Unit]\n", data
);
4278 if (u
->transient_file
) {
4279 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4280 * write to the transient unit file. */
4281 fputs(data
, u
->transient_file
);
4283 if (!endswith(data
, "\n"))
4284 fputc('\n', u
->transient_file
);
4286 /* Remember which section we wrote this entry to */
4287 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4291 dir
= unit_drop_in_dir(u
, flags
);
4295 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4296 "# or an equivalent operation. Do not edit.\n",
4300 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4304 (void) mkdir_p_label(p
, 0755);
4305 r
= write_string_file_atomic_label(q
, wrapped
);
4309 r
= strv_push(&u
->dropin_paths
, q
);
4314 strv_uniq(u
->dropin_paths
);
4316 u
->dropin_mtime
= now(CLOCK_REALTIME
);
/* unit_write_settingf: printf-style convenience wrapper around
 * unit_write_setting(); formats into a temporary buffer first.
 * NOTE(review): fragmentary extract — va_end() and the vasprintf error
 * check are not visible here. */
4321 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4322 _cleanup_free_
char *p
= NULL
;
4330 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4333 va_start(ap
, format
);
4334 r
= vasprintf(&p
, format
, ap
);
4340 return unit_write_setting(u
, flags
, name
, p
);
/* unit_make_transient: converts u into a transient unit — opens a fresh unit
 * file under the transient lookup path (kept open for settings written while
 * the transient is being built, closed in unit_load()), discards any
 * previous fragment/source/drop-in state, resets load state to STUB, marks
 * the unit transient and writes the do-not-edit header.
 * NOTE(review): fragmentary extract — allocation/open error checks are not
 * visible here. */
4343 int unit_make_transient(Unit
*u
) {
4344 _cleanup_free_
char *path
= NULL
;
4349 if (!UNIT_VTABLE(u
)->can_transient
)
4352 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4354 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4358 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4359 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4361 RUN_WITH_UMASK(0022) {
4362 f
= fopen(path
, "we");
4367 safe_fclose(u
->transient_file
);
4368 u
->transient_file
= f
;
4370 free_and_replace(u
->fragment_path
, path
);
4372 u
->source_path
= mfree(u
->source_path
);
4373 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4374 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4376 u
->load_state
= UNIT_STUB
;
4378 u
->transient
= true;
4380 unit_add_to_dbus_queue(u
);
4381 unit_add_to_gc_queue(u
);
4383 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
/* log_kill: cg_kill callback that logs each killed process by PID, comm and
 * signal name; processes whose comm starts with '(' (temporary helpers like
 * the PAM stub) are silently skipped. 'userdata' is the Unit being killed. */
4389 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
4390 _cleanup_free_
char *comm
= NULL
;
4392 (void) get_process_comm(pid
, &comm
);
4394 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4395 only, like for example systemd's own PAM stub process. */
4396 if (comm
&& comm
[0] == '(')
4399 log_unit_notice(userdata
,
4400 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4403 signal_to_string(sig
));
/* Maps a KillOperation to the signal configured for it in the KillContext.
 *
 * NOTE(review): lossy excerpt — the switch (k) statement and the case labels
 * for the final-kill and watchdog branches are not shown; only the return
 * expressions survived extraction. */
static int operation_to_signal(KillContext *c, KillOperation k) {

        case KILL_TERMINATE:
        case KILL_TERMINATE_AND_LOG:
                return c->kill_signal;

                return c->final_kill_signal;

                return c->watchdog_signal;

        assert_not_reached("KillOperation unknown");
/* Kills the processes belonging to this unit according to the configured
 * KillMode: first the main and control PIDs individually, then (for
 * control-group / mixed modes) everything else in the unit's cgroup.
 * Returns > 0 if something worth waiting for was killed, 0 otherwise.
 *
 * NOTE(review): lossy excerpt — the parameter list (Unit *u, KillContext *c,
 * KillOperation k, main_pid, control_pid), the send_sighup computation, several
 * early returns, closing braces and the trailing cg_kill_recursive() SIGHUP
 * arguments are not shown. */
int unit_kill_context(
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
         * Returns > 0 if we killed something worth waiting for, 0 otherwise. */

        if (c->kill_mode == KILL_NONE)

        sig = operation_to_signal(c, k);

        /* Part of the send_sighup condition (c->send_sighup && ... && sig != SIGHUP upstream) */
        IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&

        /* Only log individual kills for forceful signals or explicit logging ops */
        if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
                log_func = log_kill;

        log_func(main_pid, sig, u);

        r = kill_and_sigcont(main_pid, sig);
        if (r < 0 && r != -ESRCH) {
                _cleanup_free_ char *comm = NULL;
                (void) get_process_comm(main_pid, &comm);

                log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));

        /* Only wait for processes we forked off ourselves */
        if (!main_pid_alien)
                wait_for_exit = true;

        if (r != -ESRCH && send_sighup)
                (void) kill(main_pid, SIGHUP);

        if (control_pid > 0) {

        log_func(control_pid, sig, u);

        r = kill_and_sigcont(control_pid, sig);
        if (r < 0 && r != -ESRCH) {
                _cleanup_free_ char *comm = NULL;
                (void) get_process_comm(control_pid, &comm);

                log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));

        wait_for_exit = true;

        if (r != -ESRCH && send_sighup)
                (void) kill(control_pid, SIGHUP);

        /* In control-group mode (or mixed mode on the final kill) take out the whole cgroup */
        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,

                if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                        log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                 * we are running in a container or if this is a delegation unit, simply because cgroup
                 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                 * of containers it can be confused easily by left-over directories in the cgroup — which
                 * however should not exist in non-delegated units. On the unified hierarchy that's different,
                 * there we get proper events. Hence rely on them. */

                if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                    (detect_container() == 0 && !unit_cgroup_delegate(u)))
                        wait_for_exit = true;

                /* Second pass: fresh PID set for the SIGHUP broadcast */
                pid_set = unit_pid_set(main_pid, control_pid);

                cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,

        return wait_for_exit;
/* Registers 'path' (and implicitly all of its prefixes) as a mount requirement
 * of this unit, both in the unit's own requires_mounts_for hashmap and in the
 * manager's reverse prefix table used by newly appearing mount units.
 *
 * NOTE(review): lossy excerpt — error returns, the strdup of path into p, the
 * UnitDependencyInfo initializer body, the Set handling inside the prefix loop
 * and closing braces are not shown. */
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        UnitDependencyInfo di;

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);

        /* Work on a simplified private copy of the path */
        path = path_simplify(p, false);

        if (!path_is_normalized(path))

        /* Already registered? Then there is nothing to do */
        if (hashmap_contains(u->requires_mounts_for, path))

        di = (UnitDependencyInfo) {

        r = hashmap_put(u->requires_mounts_for, path, di.data);

        /* Record every prefix of the path in the manager-wide reverse table */
        prefix = alloca(strlen(path) + 1);
        PATH_FOREACH_PREFIX_MORE(prefix, path) {

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);

                _cleanup_free_ char *q = NULL;

                r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);

                r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
/* Ensures this unit has an ExecRuntime: reuses the runtime of a unit we join
 * the namespace of, if one exists, otherwise acquires a fresh one.
 *
 * NOTE(review): lossy excerpt — declarations (offset, rt, other, v, i, r), the
 * "already set" early return and loop-exit conditions are not shown. */
int unit_setup_exec_runtime(Unit *u) {

        /* The ExecRuntime pointer lives at a per-unit-type offset inside the Unit object */
        offset = UNIT_VTABLE(u)->exec_runtime_offset;

        /* Check if there already is an ExecRuntime for this unit? */
        rt = (ExecRuntime**) ((uint8_t*) u + offset);

        /* Try to get it from somebody else */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
                r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);

        /* Need to create a new one */
        return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
/* Acquires DynamicUser= credentials (user + group) for this unit, if its
 * ExecContext requests them.
 *
 * NOTE(review): lossy excerpt — declarations (offset, ec), asserts and the
 * early return when dynamic_user is off are not shown. */
int unit_setup_dynamic_creds(Unit *u) {
        DynamicCreds *dcreds;

        /* Like the ExecRuntime, DynamicCreds are stored at a per-type offset in the Unit */
        offset = UNIT_VTABLE(u)->dynamic_creds_offset;

        dcreds = (DynamicCreds*) ((uint8_t*) u + offset);

        ec = unit_get_exec_context(u);

        if (!ec->dynamic_user)

        return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
/* Returns whether the given unit type is supported on this system, consulting
 * the type's optional supported() vtable hook.
 *
 * NOTE(review): lossy excerpt — the bodies of the three guard branches
 * (return false / false / true upstream-style) are not shown. */
bool unit_type_supported(UnitType t) {
        /* Reject out-of-range type values */
        if (_unlikely_(t < 0))

        if (_unlikely_(t >= _UNIT_TYPE_MAX))

        /* Types without a supported() hook are assumed supported */
        if (!unit_vtable[t]->supported)

        return unit_vtable[t]->supported();
/* Warns (via the journal, tagged with the overmounting message ID) when the
 * directory we are about to mount over is not empty.
 *
 * NOTE(review): lossy excerpt — asserts, the early returns after the emptiness
 * check, and the trailing "WHERE=%s" field of log_struct() are not shown. */
void unit_warn_if_dir_nonempty(Unit *u, const char* where) {

        r = dir_is_empty(where);
        /* Empty, or not a directory at all: nothing to warn about */
        if (r > 0 || r == -ENOTDIR)

        log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);

        log_struct(LOG_NOTICE,
                   "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4702 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4703 _cleanup_free_
char *canonical_where
;
4709 r
= chase_symlinks(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
);
4711 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4715 /* We will happily ignore a trailing slash (or any redundant slashes) */
4716 if (path_equal(where
, canonical_where
))
4719 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4721 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4723 LOG_UNIT_INVOCATION_ID(u
),
4724 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
/* Returns true if the unit carries no configuration or runtime state yet and
 * may thus be freely (re)created, e.g. as a transient unit.
 *
 * NOTE(review): lossy excerpt — the remaining disjuncts of the return
 * expression (fragment/source paths, job, merged_into upstream-style) are not
 * shown. */
bool unit_is_pristine(Unit *u) {

        /* Check if the unit already exists or is already around,
         * in a number of different ways. Note that to cater for unit
         * types such as slice, we are generally fine with units that
         * are marked UNIT_LOADED even though nothing was actually
         * loaded, as those unit types don't require a file on disk. */

        return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
                 !strv_isempty(u->dropin_paths) ||
/* Returns the unit's current control PID via the type's vtable hook, if the
 * type implements one. (The fallback return, 0 upstream-style, is not visible
 * in this excerpt.) */
pid_t unit_control_pid(Unit *u) {

        if (UNIT_VTABLE(u)->control_pid)
                return UNIT_VTABLE(u)->control_pid(u);
/* Returns the unit's current main PID via the type's vtable hook, if the type
 * implements one. (The fallback return is not visible in this excerpt.) */
pid_t unit_main_pid(Unit *u) {

        if (UNIT_VTABLE(u)->main_pid)
                return UNIT_VTABLE(u)->main_pid(u);
/* Shared implementation of unit_unref_uid() and unit_unref_gid(): drops the
 * unit's reference on *ref_uid via the supplied manager callback and
 * invalidates it. Relies on uid_t and gid_t being layout-compatible (checked
 * by the assert_cc()s below).
 *
 * NOTE(review): lossy excerpt — the leading parameters (Unit *u,
 * uid_t *ref_uid, bool destroy_now), other asserts and the early return after
 * the validity check are not shown. */
static void unit_unref_uid_internal(

                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same time, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Nothing referenced, nothing to drop */
        if (!uid_is_valid(*ref_uid))

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID;
/* Drops this unit's UID reference (destroying the UID's IPC objects now if
 * destroy_now is set). */
void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
/* Drops this unit's GID reference; the uid_t* cast is safe per the assert_cc()
 * layout checks in unit_unref_uid_internal(). */
void unit_unref_gid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
/* Shared implementation of unit_ref_uid() and unit_ref_gid(): records a
 * reference on the given UID/GID in *ref_uid via the supplied manager
 * callback, so that the UID/GID's IPC objects can be destroyed once the last
 * reference is dropped.
 *
 * NOTE(review): lossy excerpt — the leading parameters (Unit *u,
 * uid_t *ref_uid, uid_t uid, bool clean_ipc), the early-return bodies
 * (already-equal / already-set cases) and the trailing store into *ref_uid are
 * not shown. */
static int unit_ref_uid_internal(

                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        /* Same UID already referenced: no-op */
        if (*ref_uid == uid)

        if (uid_is_valid(*ref_uid)) /* Already set? */

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
/* Adds a reference on the given UID to this unit. */
int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
/* Adds a reference on the given GID to this unit; casts are safe per the
 * assert_cc() layout checks in unit_ref_uid_internal(). */
int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
/* References a UID and a GID atomically: if the GID reference fails after the
 * UID succeeded, the UID reference is rolled back. Returns > 0 when at least
 * one reference was taken.
 *
 * NOTE(review): lossy excerpt — declarations (int r, q), asserts, error
 * returns and closing braces are not shown. */
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);

                        /* Roll back the UID reference on GID failure */
                        unit_unref_uid(u, false);

        return r > 0 || q > 0;
/* References a UID/GID pair for this unit, passing the ExecContext's
 * RemoveIPC= setting through as the clean_ipc flag; a failure is logged but
 * treated as non-fatal by the caller.
 *
 * NOTE(review): lossy excerpt — declarations (c, r), asserts and the success
 * return are not shown. */
int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {

        c = unit_get_exec_context(u);

        r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);

                return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
/* Drops both the UID and the GID reference of this unit in one go. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
/* Called when a forked-off process reports the UID/GID its configured
 * user/group names resolved to; records the reference and notifies D-Bus
 * clients of the change.
 *
 * NOTE(review): lossy excerpt — asserts and the error handling between the two
 * calls are not shown. */
void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {

        /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
         * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
         * objects when no service references the UID/GID anymore. */

        r = unit_ref_uid_gid(u, uid, gid);

        bus_unit_send_change_signal(u);
/* Sets (or clears, when id is null) the unit's invocation ID and keeps the
 * manager's units_by_invocation_id index in sync. On failure the invocation ID
 * is reset entirely rather than rolled back.
 *
 * NOTE(review): lossy excerpt — early returns, error checks and the "reset:"
 * label structure around the failure path are not shown. */
int unit_set_invocation_id(Unit *u, sd_id128_t id) {

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        /* Unchanged: nothing to do */
        if (sd_id128_equal(u->invocation_id, id))

        /* Drop the old index entry before installing a new ID */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (sd_id128_is_null(id)) {

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);

        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* Failure path: reset the whole invocation-ID state */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
/* Generates a fresh random invocation ID and installs it on the unit.
 *
 * NOTE(review): lossy excerpt — declarations (sd_id128_t id; int r), the
 * "if (r < 0)" guards around the two error returns and the final return are
 * not shown. */
int unit_acquire_invocation_id(Unit *u) {

        r = sd_id128_randomize(&id);
                return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");

        r = unit_set_invocation_id(u, id);
                return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
/* Fills in an ExecParameters structure from manager-level settings
 * (environment, confirm-spawn, cgroup support, prefix dirs) and unit-level
 * settings (cgroup path, delegation flag) before spawning a process.
 *
 * NOTE(review): lossy excerpt — asserts, the error check after
 * manager_get_effective_environment() and the final return are not shown. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path;
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
/* Forks off a helper process attached to the unit's cgroup. Returns 0 in the
 * child and > 0 in the parent; *ret is filled in with the child's PID.
 *
 * NOTE(review): lossy excerpt — asserts, the parent-side early return after
 * safe_fork(), the _exit() on cgroup-attach failure and the child's final
 * "return 0" are not shown. */
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);

        /* Child: restore default signal handling and die with the manager */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
        (void) ignore_signals(SIGPIPE, -1);

        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);

                log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
/* Writes an updated UnitDependencyInfo mask back into u's dependency table for
 * 'other', dropping the hashmap entry entirely once both mask halves are zero.
 *
 * NOTE(review): lossy excerpt — other asserts, the "} else {" between the two
 * branches and closing braces are not shown. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {

        assert(d < _UNIT_DEPENDENCY_MAX);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);

                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
/* Removes from this unit every dependency owned by 'mask' — and, because most
 * dependencies imply a reverse dependency, also scrubs the matching
 * destination bits from every other unit that points back at us.
 *
 * NOTE(review): lossy excerpt — loop-variable declarations (d, q, other, i),
 * the "continue" bodies after the mask-unchanged checks, the done/rescan
 * restart logic and closing braces are not shown. */
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {

                UnitDependencyInfo di;

                HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {

                        /* No bits of ours in this entry: skip it */
                        if ((di.origin_mask & ~mask) == di.origin_mask)

                        di.origin_mask &= ~mask;
                        unit_update_dependency_mask(u, d, other, di);

                        /* We updated the dependency from our unit to the other unit now. But most dependencies
                         * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                         * all dependency types on the other unit and delete all those which point to us and
                         * have the right mask set. */

                        for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                UnitDependencyInfo dj;

                                dj.data = hashmap_get(other->dependencies[q], u);

                                /* No bits of ours in the reverse entry: skip it */
                                if ((dj.destination_mask & ~mask) == dj.destination_mask)

                                dj.destination_mask &= ~mask;
                                unit_update_dependency_mask(other, q, u, dj);

                        /* The other unit may now be eligible for garbage collection */
                        unit_add_to_gc_queue(other);
/* Publishes the unit's invocation ID as a symlink under /run/systemd/units/
 * so journald can read it without IPC. Idempotent via the
 * exported_invocation_id flag.
 *
 * NOTE(review): lossy excerpt — declarations (p, r), the early-return bodies,
 * the "if (r < 0)" guard and the final "return 0" are not shown. */
static int unit_export_invocation_id(Unit *u) {

        /* Already exported: nothing to do */
        if (u->exported_invocation_id)

        /* No invocation ID assigned yet */
        if (sd_id128_is_null(u->invocation_id))

        p = strjoina("/run/systemd/units/invocation:", u->id);
        r = symlink_atomic(u->invocation_id_string, p);
                return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);

        u->exported_invocation_id = true;
/* Publishes the unit's LogLevelMax= setting as a one-character symlink target
 * under /run/systemd/units/, for journald to consume.
 *
 * NOTE(review): lossy excerpt — declarations (buf, p, r), early-return bodies,
 * the buf NUL terminator, the "if (r < 0)" guard and the final return are not
 * shown. */
static int unit_export_log_level_max(Unit *u, const ExecContext *c) {

        if (u->exported_log_level_max)

        /* Negative means "not configured" */
        if (c->log_level_max < 0)

        assert(c->log_level_max <= 7);

        /* Encode the 0..7 syslog level as a single ASCII digit */
        buf[0] = '0' + c->log_level_max;

        p = strjoina("/run/systemd/units/log-level-max:", u->id);
        r = symlink_atomic(buf, p);
                return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);

        u->exported_log_level_max = true;
/* Serializes the unit's LogExtraFields= entries — as (le64 length, data) pairs
 * — into a temp file that is atomically renamed to
 * /run/systemd/units/log-extra-fields:<id> for journald.
 *
 * NOTE(review): lossy excerpt — declarations (sizes, p, pattern, i, n, r),
 * early-return bodies, several "if (... < 0)" guards, the "goto fail" jumps and
 * the fail: label structure around the trailing unlink() are not shown. */
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
        _cleanup_close_ int fd = -1;
        struct iovec *iovec;

        if (u->exported_log_extra_fields)

        if (c->n_log_extra_fields <= 0)

        /* One le64 size prefix plus one payload iovec per field */
        sizes = newa(le64_t, c->n_log_extra_fields);
        iovec = newa(struct iovec, c->n_log_extra_fields * 2);

        for (i = 0; i < c->n_log_extra_fields; i++) {
                sizes[i] = htole64(c->log_extra_fields[i].iov_len);

                iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
                iovec[i*2+1] = c->log_extra_fields[i];

        p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
        pattern = strjoina(p, ".XXXXXX");

        /* Write to a temp file first, then rename into place atomically */
        fd = mkostemp_safe(pattern);
                return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);

        n = writev(fd, iovec, c->n_log_extra_fields*2);
                r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");

        (void) fchmod(fd, 0644);

        if (rename(pattern, p) < 0) {
                r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");

        u->exported_log_extra_fields = true;

        /* Failure path: remove the leftover temp file */
        (void) unlink(pattern);
/* Publishes the unit's log rate-limit interval (usec) as a symlink target
 * under /run/systemd/units/, for journald.
 *
 * NOTE(review): lossy excerpt — declarations (p, r), early-return bodies, the
 * -ENOMEM return after asprintf(), the "if (r < 0)" guard and the final return
 * are not shown. */
static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;

        if (u->exported_log_rate_limit_interval)

        /* 0 means "not configured" */
        if (c->log_rate_limit_interval_usec == 0)

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);

        if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)

        r = symlink_atomic(buf, p);
                return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);

        u->exported_log_rate_limit_interval = true;
/* Publishes the unit's log rate-limit burst count as a symlink target under
 * /run/systemd/units/, for journald.
 *
 * NOTE(review): lossy excerpt — declarations (p, r), early-return bodies, the
 * -ENOMEM return after asprintf(), the "if (r < 0)" guard and the final return
 * are not shown. */
static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
        _cleanup_free_ char *buf = NULL;

        if (u->exported_log_rate_limit_burst)

        /* 0 means "not configured" */
        if (c->log_rate_limit_burst == 0)

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);

        if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)

        r = symlink_atomic(buf, p);
                return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);

        u->exported_log_rate_limit_burst = true;
/* Exports the unit's journald-relevant properties (invocation ID, max log
 * level, extra fields, rate limits) to /run/systemd/units/. System-manager
 * only; skipped in test runs.
 *
 * NOTE(review): lossy excerpt — asserts, the "if (!u->id)" guard, early-return
 * bodies and the "if (!c)" check before the four exports are not shown. */
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        if (!MANAGER_IS_SYSTEM(u->manager))

        if (MANAGER_IS_TEST_RUN(u->manager))

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read */

        (void) unit_export_invocation_id(u);

        c = unit_get_exec_context(u);

        (void) unit_export_log_level_max(u, c);
        (void) unit_export_log_extra_fields(u, c);
        (void) unit_export_log_rate_limit_interval(u, c);
        (void) unit_export_log_rate_limit_burst(u, c);
5278 void unit_unlink_state_files(Unit
*u
) {
5286 if (!MANAGER_IS_SYSTEM(u
->manager
))
5289 /* Undoes the effect of unit_export_state() */
5291 if (u
->exported_invocation_id
) {
5292 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5295 u
->exported_invocation_id
= false;
5298 if (u
->exported_log_level_max
) {
5299 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5302 u
->exported_log_level_max
= false;
5305 if (u
->exported_log_extra_fields
) {
5306 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5309 u
->exported_log_extra_fields
= false;
5312 if (u
->exported_log_rate_limit_interval
) {
5313 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5316 u
->exported_log_rate_limit_interval
= false;
5319 if (u
->exported_log_rate_limit_burst
) {
5320 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5323 u
->exported_log_rate_limit_burst
= false;
/* Pre-fork preparation for a unit: realizes its cgroup, applies any pending
 * accounting reset, exports journald state files and sets up the ExecRuntime
 * and dynamic credentials.
 *
 * NOTE(review): lossy excerpt — asserts, the "if (r < 0) return r" checks
 * after the two setup calls and the final "return 0" are not shown. */
int unit_prepare_exec(Unit *u) {

        /* Prepares everything so that we can fork of a process for this unit */

        (void) unit_realize_cgroup(u);

        /* A pending accounting reset is applied lazily, right before the next start */
        if (u->reset_accounting) {
                (void) unit_reset_cpu_accounting(u);
                (void) unit_reset_ip_accounting(u);
                u->reset_accounting = false;

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);

        r = unit_setup_dynamic_creds(u);
/* cg_kill_log_func_t callback used when scanning (not killing) the cgroup:
 * warns about processes found lingering in the unit's cgroup at start time.
 *
 * NOTE(review): lossy excerpt — the early return after the '(' check and the
 * trailing warning arguments (pid, comm) are not shown. */
static void log_leftover(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
/* Scans the unit's cgroup (signal 0 — no actual killing) and logs a warning
 * via log_leftover() for every process already present at start time.
 *
 * NOTE(review): lossy excerpt — the assert, the early-return body and the
 * closing brace are not shown. */
void unit_warn_leftover_processes(Unit *u) {

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)

        (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
/* Returns whether the unit currently needs access to the console: inactive
 * units never do, unit types may answer via their needs_console() hook, and
 * otherwise the ExecContext decides.
 *
 * NOTE(review): lossy excerpt — the ExecContext declaration, the early-return
 * bodies and the "if (!ec)" fallback are not shown. */
bool unit_needs_console(Unit *u) {
        UnitActiveState state;

        state = unit_active_state(u);

        if (UNIT_IS_INACTIVE_OR_FAILED(state))

        if (UNIT_VTABLE(u)->needs_console)
                return UNIT_VTABLE(u)->needs_console(u);

        /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
        ec = unit_get_exec_context(u);

        return exec_context_may_touch_console(ec);
/* Returns the on-disk path to use for MAC (e.g. SELinux) access decisions:
 * the source path if set, else the fragment path — but never /dev/null (a
 * masked unit).
 *
 * NOTE(review): lossy excerpt — the NULL checks/returns around both guards and
 * the final "return p" are not shown. */
const char *unit_label_path(Unit *u) {

        /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
         * when validating access checks. */

        p = u->source_path ?: u->fragment_path;

        /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
        if (path_equal(p, "/dev/null"))
/* Validates that a PID may be attached to this unit: it must be in range, not
 * the manager (PID 1 / ourselves) and not a kernel thread. Returns 0 on
 * success, or a negative errno with a populated sd_bus_error.
 *
 * NOTE(review): lossy excerpt — the "if (r == -ESRCH) / if (r < 0) / if (r > 0)"
 * guards in front of the three kernel-thread returns and the final "return 0"
 * are not shown. */
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);

                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);

                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);

                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
/* Emits the structured "Succeeded." journal record for this unit.
 * (The assert and the LOG_UNIT_ID field are not visible in this excerpt.) */
void unit_log_success(Unit *u) {

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Succeeded."));
/* Emits the structured failure journal record, recording the textual result
 * (e.g. "timeout", "exit-code") both in the message and as a UNIT_RESULT=
 * field. (The asserts and the LOG_UNIT_ID field are not visible in this
 * excerpt.) */
void unit_log_failure(Unit *u, const char *result) {

        log_struct(LOG_WARNING,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                   "UNIT_RESULT=%s", result);
/* Emits the structured per-process exit record: which process kind exited,
 * its waitid() code and status, rendered either as an exit-status name (for
 * CLD_EXITED) or a signal name. Warns for abnormal (non-CLD_EXITED) deaths.
 *
 * NOTE(review): lossy excerpt — the remaining parameters (Unit *u, int level,
 * code, status, kind), asserts and the log_struct(level, ...) opening are not
 * shown. */
void unit_log_process_exit(

                const char *command,

        /* Abnormal deaths (signals, dumps) are logged at warning level */
        if (code != CLD_EXITED)
                level = LOG_WARNING;

                   "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                   LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
                                    sigchld_code_to_string(code), status,
                                    strna(code == CLD_EXITED
                                          ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                          : signal_to_string(status))),
                   "EXIT_CODE=%s", sigchld_code_to_string(code),
                   "EXIT_STATUS=%i", status,
                   "COMMAND=%s", strna(command),
                   LOG_UNIT_INVOCATION_ID(u));
/* String table for the CollectMode enum (CollectMode= unit setting), plus the
 * generated to-string/from-string lookup helpers. (The table's closing brace
 * is not visible in this excerpt.) */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);