1 /* SPDX-License-Identifier: LGPL-2.1+ */
11 #include "sd-messages.h"
13 #include "all-units.h"
14 #include "alloc-util.h"
15 #include "bus-common-errors.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
24 #include "fileio-label.h"
26 #include "format-util.h"
28 #include "id128-util.h"
30 #include "load-dropin.h"
31 #include "load-fragment.h"
36 #include "parse-util.h"
37 #include "path-util.h"
38 #include "process-util.h"
39 #include "serialize.h"
41 #include "signal-util.h"
42 #include "sparse-endian.h"
44 #include "specifier.h"
45 #include "stat-util.h"
46 #include "stdio-util.h"
47 #include "string-table.h"
48 #include "string-util.h"
50 #include "terminal-util.h"
51 #include "umask-util.h"
52 #include "unit-name.h"
54 #include "user-util.h"
/* Table mapping each UnitType to its type-specific vtable of operations.
 * Indexed by the UNIT_* enum; the manager dispatches through this table.
 * NOTE(review): the extraction dropped the closing "};" of this initializer;
 * tokens below are kept verbatim (including stray original line numbers). */
57 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
58 [UNIT_SERVICE] = &service_vtable,
59 [UNIT_SOCKET] = &socket_vtable,
60 [UNIT_TARGET] = &target_vtable,
61 [UNIT_DEVICE] = &device_vtable,
62 [UNIT_MOUNT] = &mount_vtable,
63 [UNIT_AUTOMOUNT] = &automount_vtable,
64 [UNIT_SWAP] = &swap_vtable,
65 [UNIT_TIMER] = &timer_vtable,
66 [UNIT_PATH] = &path_vtable,
67 [UNIT_SLICE] = &slice_vtable,
68 [UNIT_SCOPE] = &scope_vtable,
71 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
/* Allocates a new Unit object of at least sizeof(Unit) bytes (type-specific
 * structs embed Unit as their first member) and initializes all fields that
 * must not start at zero: invalid/infinity sentinels for fds, uids, timeouts,
 * and the two rate limits configured from the manager's defaults.
 * NOTE(review): the extraction dropped the actual allocation (malloc0), the
 * NULL checks and the final "return u;" — comments only, tokens verbatim. */
73 Unit *unit_new(Manager *m, size_t size) {
77 assert(size >= sizeof(Unit));
/* Set of all names (aliases) this unit is known by. */
83 u->names = set_new(&string_hash_ops);
/* Type is unknown until the first name is added via unit_add_name(). */
88 u->type = _UNIT_TYPE_INVALID;
89 u->default_dependencies = true;
90 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
91 u->unit_file_preset = -1;
92 u->on_failure_job_mode = JOB_REPLACE;
/* -1 == no inotify watch installed for the cgroup yet. */
93 u->cgroup_inotify_wd = -1;
94 u->job_timeout = USEC_INFINITY;
95 u->job_running_timeout = USEC_INFINITY;
96 u->ref_uid = UID_INVALID;
97 u->ref_gid = GID_INVALID;
98 u->cpu_usage_last = NSEC_INFINITY;
99 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
/* -1 == "no explicit exit status configured". */
100 u->failure_action_exit_status = u->success_action_exit_status = -1;
/* All BPF map fds start out closed (-1). */
102 u->ip_accounting_ingress_map_fd = -1;
103 u->ip_accounting_egress_map_fd = -1;
104 u->ipv4_allow_map_fd = -1;
105 u->ipv6_allow_map_fd = -1;
106 u->ipv4_deny_map_fd = -1;
107 u->ipv6_deny_map_fd = -1;
109 u->last_section_private = -1;
/* Start limit uses the manager-wide defaults; auto-stop is fixed at 16/10s. */
111 RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
112 RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
/* Convenience wrapper: allocate a unit via unit_new() and register "name" on
 * it; on success ownership is transferred to *ret (guarded by the unit_freep
 * cleanup until then).
 * NOTE(review): error checks, "*ret = TAKE_PTR(u);" and returns were dropped
 * by the extraction — comments only, tokens verbatim. */
117 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
118 _cleanup_(unit_freep) Unit *u = NULL;
121 u = unit_new(m, size);
125 r = unit_add_name(u, name);
134 bool unit_has_name(Unit
*u
, const char *name
) {
138 return set_contains(u
->names
, (char*) name
);
/* Per-type initialization hook run once the unit's type is known: seeds the
 * cgroup/exec/kill contexts (when the type has them) with manager defaults,
 * then calls the type vtable's init().
 * NOTE(review): the "if (cc)" / "if (ec)" / "if (kc)" guards around the
 * context sections were dropped by the extraction — comments only. */
141 static void unit_init(Unit *u) {
148 assert(u->type >= 0);
150 cc = unit_get_cgroup_context(u);
152 cgroup_context_init(cc);
154 /* Copy in the manager defaults into the cgroup
155 * context, _before_ the rest of the settings have
156 * been initialized */
158 cc->cpu_accounting = u->manager->default_cpu_accounting;
159 cc->io_accounting = u->manager->default_io_accounting;
160 cc->ip_accounting = u->manager->default_ip_accounting;
161 cc->blockio_accounting = u->manager->default_blockio_accounting;
162 cc->memory_accounting = u->manager->default_memory_accounting;
163 cc->tasks_accounting = u->manager->default_tasks_accounting;
/* NOTE(review): duplicate of the assignment at line 160 above — redundant
 * (harmless) reassignment; likely an artifact worth removing upstream. */
164 cc->ip_accounting = u->manager->default_ip_accounting;
/* Slices get no default tasks limit; everything else inherits the manager's. */
166 if (u->type != UNIT_SLICE)
167 cc->tasks_max = u->manager->default_tasks_max;
170 ec = unit_get_exec_context(u);
172 exec_context_init(ec);
/* System manager gets a shared session keyring; user managers inherit. */
174 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
175 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
178 kc = unit_get_kill_context(u);
180 kill_context_init(kc);
182 if (UNIT_VTABLE(u)->init)
183 UNIT_VTABLE(u)->init(u);
/* Registers an additional name (alias) for the unit: validates the name,
 * resolves templates against the unit's instance, rejects type mismatches
 * and alias/instance inconsistencies, then inserts it into both the unit's
 * name set and the manager's global unit hashmap. On first name, also fixes
 * the unit's type, id and instance, and links it into units_by_type.
 * NOTE(review): most "if (r < 0) return ..." error paths, the early-success
 * return for duplicate names, and closing braces were dropped by the
 * extraction — comments only, tokens verbatim. */
186 int unit_add_name(Unit *u, const char *text) {
187 _cleanup_free_ char *s = NULL, *i = NULL;
/* Template names ("foo@.service") are instantiated with u->instance. */
194 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
199 r = unit_name_replace_instance(text, u->instance, &s);
/* Already one of our names → nothing to do (original returns 0 here). */
208 if (set_contains(u->names, s))
/* Name taken by some other unit → conflict (original returns -EEXIST). */
210 if (hashmap_contains(u->manager->units, s))
213 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
216 t = unit_name_to_type(s);
/* All names of one unit must map to the same unit type. */
220 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
223 r = unit_name_to_instance(s, &i);
227 if (i && !unit_type_may_template(t))
230 /* Ensure that this unit is either instanced or not instanced,
231 * but not both. Note that we do allow names with different
232 * instance names however! */
233 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
236 if (!unit_type_may_alias(t) && !set_isempty(u->names))
239 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
242 r = set_put(u->names, s);
247 r = hashmap_put(u->manager->units, s, u);
/* Rollback of the set insertion when the hashmap insertion failed. */
249 (void) set_remove(u->names, s);
/* First name added → fix type/id/instance and index by type. */
253 if (u->type == _UNIT_TYPE_INVALID) {
256 u->instance = TAKE_PTR(i);
258 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
265 unit_add_to_dbus_queue(u);
/* Switches the unit's primary id to one of its already-registered names
 * (templates are instantiated first), and re-derives u->instance from it.
 * NOTE(review): the assignments of u->id/u->instance, error returns and
 * closing braces were dropped by the extraction — comments only. */
269 int unit_choose_id(Unit *u, const char *name) {
270 _cleanup_free_ char *t = NULL;
277 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
282 r = unit_name_replace_instance(name, u->instance, &t);
289 /* Selects one of the names of this unit as the id */
290 s = set_get(u->names, (char*) name);
294 /* Determine the new instance from the new id */
295 r = unit_name_to_instance(s, &i);
304 unit_add_to_dbus_queue(u);
/* Replaces the unit's Description= string (empty string clears it) and
 * queues a D-Bus PropertiesChanged notification.
 * NOTE(review): the "if (r < 0) return r;" / "return 0;" lines were dropped
 * by the extraction — comments only, tokens verbatim. */
309 int unit_set_description(Unit *u, const char *description) {
314 r = free_and_strdup(&u->description, empty_to_null(description));
318 unit_add_to_dbus_queue(u);
/* Decides whether this unit object may be unloaded by the GC sweep: it must
 * match its CollectMode state condition, hold no bus references, have an
 * empty cgroup, and its type vtable must not veto. As a side effect, lets
 * inactive/failed units release runtime resources.
 * NOTE(review): the "return false"/"break" lines inside the switch and the
 * final "return true;" were dropped by the extraction — comments only. */
323 bool unit_may_gc(Unit *u) {
324 UnitActiveState state;
329 /* Checks whether the unit is ready to be unloaded for garbage collection.
330 * Returns true when the unit may be collected, and false if there's some
331 * reason to keep it loaded.
333 * References from other units are *not* checked here. Instead, this is done
334 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
343 state = unit_active_state(u);
345 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
346 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
347 UNIT_VTABLE(u)->release_resources)
348 UNIT_VTABLE(u)->release_resources(u);
/* Bus clients still referencing the unit keep it alive. */
353 if (sd_bus_track_count(u->bus_track) > 0)
356 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
357 switch (u->collect_mode) {
359 case COLLECT_INACTIVE:
360 if (state != UNIT_INACTIVE)
365 case COLLECT_INACTIVE_OR_FAILED:
366 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
372 assert_not_reached("Unknown garbage collection mode");
375 if (u->cgroup_path) {
376 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
377 * around. Units with active processes should never be collected. */
379 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
381 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
/* Type-specific veto, e.g. a service still watching a PID. */
386 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
/* Queues a stub unit for loading by the manager's load loop; idempotent via
 * the in_load_queue flag. NOTE(review): the "return;" after the guard and
 * the closing "}" were dropped by the extraction — comments only. */
392 void unit_add_to_load_queue(Unit *u) {
394 assert(u->type != _UNIT_TYPE_INVALID);
/* Only stub units are loadable; skip if already queued. */
396 if (u->load_state != UNIT_STUB || u->in_load_queue)
399 LIST_PREPEND(load_queue, u->manager->load_queue, u);
400 u->in_load_queue = true;
/* Queues the unit for destruction by the manager; idempotent via the
 * in_cleanup_queue flag. NOTE(review): "return;" and "}" dropped by the
 * extraction — comments only. */
403 void unit_add_to_cleanup_queue(Unit *u) {
406 if (u->in_cleanup_queue)
409 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
410 u->in_cleanup_queue = true;
/* Queues the unit for the GC sweep unless already queued for GC or already
 * condemned to cleanup. NOTE(review): "return;" and "}" dropped by the
 * extraction — comments only. */
413 void unit_add_to_gc_queue(Unit *u) {
416 if (u->in_gc_queue || u->in_cleanup_queue)
422 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
423 u->in_gc_queue = true;
/* Queues the unit for sending D-Bus change/new signals. If nobody is
 * subscribed on any bus, the signal is skipped entirely and the unit is
 * marked as if the "new" signal had been sent.
 * NOTE(review): "return;" statements and closing braces were dropped by the
 * extraction — comments only, tokens verbatim. */
426 void unit_add_to_dbus_queue(Unit *u) {
428 assert(u->type != _UNIT_TYPE_INVALID);
430 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
433 /* Shortcut things if nobody cares */
434 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
435 sd_bus_track_count(u->bus_track) <= 0 &&
436 set_isempty(u->manager->private_buses)) {
437 u->sent_dbus_new_signal = true;
441 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
442 u->in_dbus_queue = true;
/* Queues an active StopWhenUnneeded= unit for the "is it still needed?"
 * check; no-op when the option is off, the unit is not active, or it is
 * already queued. NOTE(review): "return;" lines and "}" dropped by the
 * extraction — comments only. */
445 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
448 if (u->in_stop_when_unneeded_queue)
451 if (!u->stop_when_unneeded)
454 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
457 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
458 u->in_stop_when_unneeded_queue = true;
/* Helper for unit_free(): for every unit in dependency hashmap "h", removes
 * the reverse edges pointing back at "u" (across all dependency types) and
 * re-queues the peer for GC, since losing an edge may make it collectable.
 * NOTE(review): the final hashmap_free(h) and "}" were dropped by the
 * extraction — comments only. */
461 static void bidi_set_free(Unit *u, Hashmap *h) {
468 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
470 HASHMAP_FOREACH_KEY(v, other, h, i) {
473 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
474 hashmap_remove(other->dependencies[d], u);
476 unit_add_to_gc_queue(other);
/* Deletes the on-disk artifacts of a transient unit: its fragment file and
 * any drop-ins that live under the manager's transient lookup path (other
 * drop-ins are left alone). NOTE(review): the "if (!u->transient) return;"
 * guard, "continue" statements, the unlink/rmdir of the drop-in paths and
 * closing braces were dropped by the extraction — comments only. */
482 static void unit_remove_transient(Unit *u) {
490 if (u->fragment_path)
491 (void) unlink(u->fragment_path);
493 STRV_FOREACH(i, u->dropin_paths) {
494 _cleanup_free_ char *p = NULL, *pp = NULL;
496 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
500 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
504 /* Only drop transient drop-ins */
505 if (!path_equal(u->manager->lookup_paths.transient, pp))
/* Drains u->requires_mounts_for: for each recorded path and each of its
 * prefixes, drops this unit from the manager's reverse index
 * units_requiring_mounts_for, freeing index entries that become empty, then
 * frees the hashmap itself. NOTE(review): the surrounding for(;;) loop with
 * its NULL-key break, the set_free(x)/free(y) cleanup and closing braces
 * were dropped by the extraction — comments only. */
513 static void unit_free_requires_mounts_for(Unit *u) {
517 _cleanup_free_ char *path;
519 path = hashmap_steal_first_key(u->requires_mounts_for);
/* VLA buffer for iterating over every prefix of the path. */
523 char s[strlen(path) + 1];
525 PATH_FOREACH_PREFIX_MORE(s, path) {
529 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
533 (void) set_remove(x, u);
535 if (set_isempty(x)) {
536 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
544 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
/* Tears down the type-specific parts of a unit: vtable done() hook, then the
 * exec and cgroup contexts if the type has them. NOTE(review): the early
 * "if (u->type < 0) return;" guard and the "if (ec)" / "if (cc)" guards were
 * dropped by the extraction — comments only. */
547 static void unit_done(Unit *u) {
556 if (UNIT_VTABLE(u)->done)
557 UNIT_VTABLE(u)->done(u);
559 ec = unit_get_exec_context(u);
561 exec_context_done(ec);
563 cc = unit_get_cgroup_context(u);
565 cgroup_context_done(cc);
/* Full destructor for a Unit: notifies the parent slice, removes transient
 * files, sends the D-Bus "removed" signal, unregisters every name and the
 * invocation ID from the manager, severs all dependency edges, releases the
 * cgroup, detaches from every manager queue it may sit on, closes BPF map
 * fds, unrefs BPF programs, and frees owned strings/lists.
 * NOTE(review): the "if (!u) return;" guard, the unit_done() call, job
 * freeing, the final free(u) and most closing braces were dropped by the
 * extraction — comments only, tokens verbatim. */
568 void unit_free(Unit *u) {
576 if (UNIT_ISSET(u->slice)) {
577 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
578 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
580 /* And make sure the parent is realized again, updating cgroup memberships */
581 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
584 u->transient_file = safe_fclose(u->transient_file);
586 if (!MANAGER_IS_RELOADING(u->manager))
587 unit_remove_transient(u);
589 bus_unit_send_removed_signal(u);
593 unit_dequeue_rewatch_pids(u);
595 sd_bus_slot_unref(u->match_bus_slot);
596 sd_bus_track_unref(u->bus_track);
597 u->deserialized_refs = strv_free(u->deserialized_refs);
599 unit_free_requires_mounts_for(u);
/* Drop every alias from the manager's global name table. */
601 SET_FOREACH(t, u->names, i)
602 hashmap_remove_value(u->manager->units, t, u);
604 if (!sd_id128_is_null(u->invocation_id))
605 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
/* Sever forward and reverse dependency edges of every type. */
619 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
620 bidi_set_free(u, u->dependencies[d]);
623 manager_unref_console(u->manager);
625 unit_release_cgroup(u);
627 if (!MANAGER_IS_RELOADING(u->manager))
628 unit_unlink_state_files(u);
630 unit_unref_uid_gid(u, false);
632 (void) manager_update_failed_units(u->manager, u, false);
633 set_remove(u->manager->startup_units, u);
635 unit_unwatch_all_pids(u);
637 unit_ref_unset(&u->slice);
638 while (u->refs_by_target)
639 unit_ref_unset(u->refs_by_target);
641 if (u->type != _UNIT_TYPE_INVALID)
642 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
644 if (u->in_load_queue)
645 LIST_REMOVE(load_queue, u->manager->load_queue, u);
647 if (u->in_dbus_queue)
648 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
/* NOTE(review): the "if (u->in_gc_queue)" guard (original line 650) appears
 * to have been dropped by the extraction — the LIST_REMOVE below should be
 * conditional like its siblings; verify against upstream. */
651 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
653 if (u->in_cgroup_realize_queue)
654 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
656 if (u->in_cgroup_empty_queue)
657 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
659 if (u->in_cleanup_queue)
660 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
662 if (u->in_target_deps_queue)
663 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
665 if (u->in_stop_when_unneeded_queue)
666 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
/* Close BPF accounting/filter map fds (safe_close tolerates -1). */
668 safe_close(u->ip_accounting_ingress_map_fd);
669 safe_close(u->ip_accounting_egress_map_fd);
671 safe_close(u->ipv4_allow_map_fd);
672 safe_close(u->ipv6_allow_map_fd);
673 safe_close(u->ipv4_deny_map_fd);
674 safe_close(u->ipv6_deny_map_fd);
676 bpf_program_unref(u->ip_bpf_ingress);
677 bpf_program_unref(u->ip_bpf_ingress_installed);
678 bpf_program_unref(u->ip_bpf_egress);
679 bpf_program_unref(u->ip_bpf_egress_installed);
681 bpf_program_unref(u->bpf_device_control_installed);
683 condition_free_list(u->conditions);
684 condition_free_list(u->asserts);
686 free(u->description);
687 strv_free(u->documentation);
688 free(u->fragment_path);
689 free(u->source_path);
690 strv_free(u->dropin_paths);
693 free(u->job_timeout_reboot_arg);
/* Names set owns its strings → free_free. */
695 set_free_free(u->names);
702 UnitActiveState
unit_active_state(Unit
*u
) {
705 if (u
->load_state
== UNIT_MERGED
)
706 return unit_active_state(unit_follow_merge(u
));
708 /* After a reload it might happen that a unit is not correctly
709 * loaded but still has a process around. That's why we won't
710 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
712 return UNIT_VTABLE(u
)->active_state(u
);
715 const char* unit_sub_state_to_string(Unit
*u
) {
718 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
/* Moves all entries of *other into *s; when *s is unallocated, simply steals
 * *other wholesale. NOTE(review): the guards "if (!*other) return 0;" and
 * "if (*s)" plus the trailing "return 0;" were dropped by the extraction —
 * the two visible statements below are mutually exclusive branches in the
 * original; comments only, tokens verbatim. */
721 static int set_complete_move(Set **s, Set **other) {
729 return set_move(*s, *other);
731 *s = TAKE_PTR(*other);
/* Hashmap analogue of set_complete_move(): merge *other into *s, or steal
 * it outright when *s is unallocated. NOTE(review): the conditional guards
 * and "return 0;" were dropped by the extraction — comments only. */
736 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
744 return hashmap_move(*s, *other);
746 *s = TAKE_PTR(*other);
/* Moves all names from "other" into "u" and repoints every one of u's names
 * at u in the manager's global unit table. NOTE(review): error handling
 * after set_complete_move, "other->names = NULL;" and "return 0;" were
 * dropped by the extraction — comments only. */
751 static int merge_names(Unit *u, Unit *other) {
759 r = set_complete_move(&u->names, &other->names);
763 set_free_free(other->names);
/* Every alias must now resolve to u, replacing entries pointing at other. */
767 SET_FOREACH(t, u->names, i)
768 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
/* Pre-reserves space in u's dependency hashmap of type "d" so that the later
 * merge_dependencies() move cannot fail with OOM mid-way. */
773 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
778 assert(d < _UNIT_DEPENDENCY_MAX);
781 * If u does not have this dependency set allocated, there is no need
782 * to reserve anything. In that case other's set will be transferred
783 * as a whole to u by complete_move().
785 if (!u->dependencies[d])
788 /* merge_dependencies() will skip a u-on-u dependency */
789 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
791 return hashmap_reserve(u->dependencies[d], n_reserve);
/* Merges all dependencies of type "d" from "other" into "u": first rewrites
 * the back-pointers of every dependent unit so they reference u instead of
 * other (OR-ing the origin/destination masks of any pre-existing edge), then
 * moves the forward edges wholesale (reservation made earlier guarantees the
 * move cannot fail), and frees other's now-empty map.
 * NOTE(review): several control-flow lines (the "if (back == u)" self-skip,
 * the NULL check after the first hashmap_get, the "if (back)" guard before
 * the self-dependency warning, closing braces) were dropped by the
 * extraction — comments only, tokens verbatim. */
794 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
800 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
804 assert(d < _UNIT_DEPENDENCY_MAX);
806 /* Fix backwards pointers. Let's iterate through all dependendent units of the other unit. */
807 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
810 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
811 * pointers back, and let's fix them up, to instead point to 'u'. */
813 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
815 /* Do not add dependencies between u and itself. */
816 if (hashmap_remove(back->dependencies[k], other))
817 maybe_warn_about_dependency(u, other_id, k);
819 UnitDependencyInfo di_u, di_other, di_merged;
821 /* Let's drop this dependency between "back" and "other", and let's create it between
822 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
823 * and any such dependency which might already exist */
825 di_other.data = hashmap_get(back->dependencies[k], other);
827 continue; /* dependency isn't set, let's try the next one */
829 di_u.data = hashmap_get(back->dependencies[k], u);
831 di_merged = (UnitDependencyInfo) {
832 .origin_mask = di_u.origin_mask | di_other.origin_mask,
833 .destination_mask = di_u.destination_mask | di_other.destination_mask,
836 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
838 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
841 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
847 /* Also do not move dependencies on u to itself */
848 back = hashmap_remove(other->dependencies[d], u);
850 maybe_warn_about_dependency(u, other_id, d);
852 /* The move cannot fail. The caller must have performed a reservation. */
853 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
855 other->dependencies[d] = hashmap_free(other->dependencies[d]);
/* Merges unit "other" into "u": after validating that the two are mergeable
 * (same type, matching instanced-ness, aliasable type, other still a
 * stub/not-found and inactive), moves other's names and dependencies over,
 * redirects all references, marks other as UNIT_MERGED and queues it for
 * cleanup. NOTE(review): the "if (other == u) return 0;" shortcut, the
 * -EEXIST/-EINVAL error returns after each validation, "if (r < 0) return r;"
 * checks, and the final "return 0;" were dropped by the extraction —
 * comments only, tokens verbatim. */
858 int unit_merge(Unit *u, Unit *other) {
860 const char *other_id = NULL;
865 assert(u->manager == other->manager);
866 assert(u->type != _UNIT_TYPE_INVALID);
868 other = unit_follow_merge(other);
873 if (u->type != other->type)
876 if (!u->instance != !other->instance)
879 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
882 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
891 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
/* Copy the id to the stack: merge_names() invalidates other->id. */
895 other_id = strdupa(other->id);
897 /* Make reservations to ensure merge_dependencies() won't fail */
898 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
899 r = reserve_dependencies(u, other, d);
901 * We don't rollback reservations if we fail. We don't have
902 * a way to undo reservations. A reservation is not a leak.
909 r = merge_names(u, other);
913 /* Redirect all references */
914 while (other->refs_by_target)
915 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
917 /* Merge dependencies */
918 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
919 merge_dependencies(u, other, other_id, d);
921 other->load_state = UNIT_MERGED;
922 other->merged_into = u;
924 /* If there is still some data attached to the other node, we
925 * don't need it anymore, and can free it. */
/* NOTE(review): load_state was set to UNIT_MERGED just above, so this
 * condition is always true as written — verify against upstream intent. */
926 if (other->load_state != UNIT_STUB)
927 if (UNIT_VTABLE(other)->done)
928 UNIT_VTABLE(other)->done(other);
930 unit_add_to_dbus_queue(u);
931 unit_add_to_cleanup_queue(other);
/* Merges the unit registered under "name" into u, or simply adds "name" as
 * an alias of u when no such unit exists.
 * NOTE(review): in the original, after instantiating a template the lookup
 * uses the instantiated string (a "name = s;" style line appears to have
 * been dropped by the extraction), and error returns are missing — comments
 * only, tokens verbatim. */
936 int unit_merge_by_name(Unit *u, const char *name) {
937 _cleanup_free_ char *s = NULL;
944 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
948 r = unit_name_replace_instance(name, u->instance, &s);
955 other = manager_get_unit(u->manager, name);
957 return unit_merge(u, other);
959 return unit_add_name(u, name);
962 Unit
* unit_follow_merge(Unit
*u
) {
965 while (u
->load_state
== UNIT_MERGED
)
966 assert_se(u
= u
->merged_into
);
/* Adds the implicit dependencies every unit with an ExecContext needs:
 * RequiresMountsFor= on working/root directories, root image and all
 * configured runtime/state/cache/log/config directories; for system units
 * with PrivateTmp=, mounts-for /tmp and /var/tmp plus ordering after
 * tmpfiles-setup; and ordering after journald's socket when stdout/stderr
 * go to the journal/kmsg/syslog.
 * NOTE(review): "if (r < 0) return r;" after each call, the "if (c->root_image)"
 * guard, OOM checks and "return 0;" were dropped by the extraction —
 * comments only, tokens verbatim. */
971 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
972 ExecDirectoryType dt;
979 if (c->working_directory && !c->working_directory_missing_ok) {
980 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
985 if (c->root_directory) {
986 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
992 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
997 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
998 if (!u->manager->prefix[dt])
1001 STRV_FOREACH(dp, c->directories[dt].paths) {
1002 _cleanup_free_ char *p;
1004 p = strjoin(u->manager->prefix[dt], "/", *dp);
1008 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
/* The remaining implicit deps only make sense for the system manager. */
1014 if (!MANAGER_IS_SYSTEM(u->manager))
1017 if (c->private_tmp) {
1020 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1021 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1026 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
/* Neither stream goes to the journal/kmsg/syslog → no journald ordering. */
1031 if (!IN_SET(c->std_output,
1032 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1033 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1034 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1035 !IN_SET(c->std_error,
1036 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1037 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1038 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1041 /* If syslog or kernel logging is requested, make sure our own
1042 * logging daemon is run first. */
1044 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1051 const char *unit_description(Unit
*u
) {
1055 return u
->description
;
1057 return strna(u
->id
);
/* Prints to f a human-readable rendering of a dependency mask as
 * "kind: flag-a,flag-b", used by unit_dump(); *space tracks whether a
 * separating space was already emitted between origin/destination groups.
 * NOTE(review): the extraction dropped the "const struct { ... } table[]"
 * wrapper around the entries below (the bare "UnitDependencyMask mask;" is
 * the struct's member declaration, not a shadowing local), as well as the
 * kind/comma printing and closing braces — comments only, tokens verbatim. */
1060 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1062 UnitDependencyMask mask;
/* Flag → printable-name translation table. */
1065 { UNIT_DEPENDENCY_FILE, "file" },
1066 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1067 { UNIT_DEPENDENCY_DEFAULT, "default" },
1068 { UNIT_DEPENDENCY_UDEV, "udev" },
1069 { UNIT_DEPENDENCY_PATH, "path" },
1070 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1071 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1072 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1080 for (i = 0; i < ELEMENTSOF(table); i++) {
1085 if (FLAGS_SET(mask, table[i].mask)) {
1093 fputs(table[i].name, f);
/* Clear each flag as it is printed so leftovers can be detected. */
1095 mask &= ~table[i].mask;
*u
, FILE *f
, const char *prefix
) {
1106 const char *prefix2
;
1108 timestamp0
[FORMAT_TIMESTAMP_MAX
],
1109 timestamp1
[FORMAT_TIMESTAMP_MAX
],
1110 timestamp2
[FORMAT_TIMESTAMP_MAX
],
1111 timestamp3
[FORMAT_TIMESTAMP_MAX
],
1112 timestamp4
[FORMAT_TIMESTAMP_MAX
],
1113 timespan
[FORMAT_TIMESPAN_MAX
];
1115 _cleanup_set_free_ Set
*following_set
= NULL
;
1121 assert(u
->type
>= 0);
1123 prefix
= strempty(prefix
);
1124 prefix2
= strjoina(prefix
, "\t");
1128 "%s\tDescription: %s\n"
1129 "%s\tInstance: %s\n"
1130 "%s\tUnit Load State: %s\n"
1131 "%s\tUnit Active State: %s\n"
1132 "%s\tState Change Timestamp: %s\n"
1133 "%s\tInactive Exit Timestamp: %s\n"
1134 "%s\tActive Enter Timestamp: %s\n"
1135 "%s\tActive Exit Timestamp: %s\n"
1136 "%s\tInactive Enter Timestamp: %s\n"
1138 "%s\tNeed Daemon Reload: %s\n"
1139 "%s\tTransient: %s\n"
1140 "%s\tPerpetual: %s\n"
1141 "%s\tGarbage Collection Mode: %s\n"
1144 "%s\tCGroup realized: %s\n",
1146 prefix
, unit_description(u
),
1147 prefix
, strna(u
->instance
),
1148 prefix
, unit_load_state_to_string(u
->load_state
),
1149 prefix
, unit_active_state_to_string(unit_active_state(u
)),
1150 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
1151 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
1152 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
1153 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
1154 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
1155 prefix
, yes_no(unit_may_gc(u
)),
1156 prefix
, yes_no(unit_need_daemon_reload(u
)),
1157 prefix
, yes_no(u
->transient
),
1158 prefix
, yes_no(u
->perpetual
),
1159 prefix
, collect_mode_to_string(u
->collect_mode
),
1160 prefix
, strna(unit_slice_name(u
)),
1161 prefix
, strna(u
->cgroup_path
),
1162 prefix
, yes_no(u
->cgroup_realized
));
1164 if (u
->cgroup_realized_mask
!= 0) {
1165 _cleanup_free_
char *s
= NULL
;
1166 (void) cg_mask_to_string(u
->cgroup_realized_mask
, &s
);
1167 fprintf(f
, "%s\tCGroup realized mask: %s\n", prefix
, strnull(s
));
1170 if (u
->cgroup_enabled_mask
!= 0) {
1171 _cleanup_free_
char *s
= NULL
;
1172 (void) cg_mask_to_string(u
->cgroup_enabled_mask
, &s
);
1173 fprintf(f
, "%s\tCGroup enabled mask: %s\n", prefix
, strnull(s
));
1176 m
= unit_get_own_mask(u
);
1178 _cleanup_free_
char *s
= NULL
;
1179 (void) cg_mask_to_string(m
, &s
);
1180 fprintf(f
, "%s\tCGroup own mask: %s\n", prefix
, strnull(s
));
1183 m
= unit_get_members_mask(u
);
1185 _cleanup_free_
char *s
= NULL
;
1186 (void) cg_mask_to_string(m
, &s
);
1187 fprintf(f
, "%s\tCGroup members mask: %s\n", prefix
, strnull(s
));
1190 m
= unit_get_delegate_mask(u
);
1192 _cleanup_free_
char *s
= NULL
;
1193 (void) cg_mask_to_string(m
, &s
);
1194 fprintf(f
, "%s\tCGroup delegate mask: %s\n", prefix
, strnull(s
));
1197 SET_FOREACH(t
, u
->names
, i
)
1198 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
1200 if (!sd_id128_is_null(u
->invocation_id
))
1201 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
1202 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
1204 STRV_FOREACH(j
, u
->documentation
)
1205 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
1207 following
= unit_following(u
);
1209 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
1211 r
= unit_following_set(u
, &following_set
);
1215 SET_FOREACH(other
, following_set
, i
)
1216 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1219 if (u
->fragment_path
)
1220 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1223 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1225 STRV_FOREACH(j
, u
->dropin_paths
)
1226 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1228 if (u
->failure_action
!= EMERGENCY_ACTION_NONE
)
1229 fprintf(f
, "%s\tFailure Action: %s\n", prefix
, emergency_action_to_string(u
->failure_action
));
1230 if (u
->failure_action_exit_status
>= 0)
1231 fprintf(f
, "%s\tFailure Action Exit Status: %i\n", prefix
, u
->failure_action_exit_status
);
1232 if (u
->success_action
!= EMERGENCY_ACTION_NONE
)
1233 fprintf(f
, "%s\tSuccess Action: %s\n", prefix
, emergency_action_to_string(u
->success_action
));
1234 if (u
->success_action_exit_status
>= 0)
1235 fprintf(f
, "%s\tSuccess Action Exit Status: %i\n", prefix
, u
->success_action_exit_status
);
1237 if (u
->job_timeout
!= USEC_INFINITY
)
1238 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1240 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1241 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1243 if (u
->job_timeout_reboot_arg
)
1244 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1246 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1247 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1249 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1251 "%s\tCondition Timestamp: %s\n"
1252 "%s\tCondition Result: %s\n",
1253 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1254 prefix
, yes_no(u
->condition_result
));
1256 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1258 "%s\tAssert Timestamp: %s\n"
1259 "%s\tAssert Result: %s\n",
1260 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1261 prefix
, yes_no(u
->assert_result
));
1263 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1264 UnitDependencyInfo di
;
1267 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
1270 fprintf(f
, "%s\t%s: %s (", prefix
, unit_dependency_to_string(d
), other
->id
);
1272 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1273 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1279 if (!hashmap_isempty(u
->requires_mounts_for
)) {
1280 UnitDependencyInfo di
;
1283 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1286 fprintf(f
, "%s\tRequiresMountsFor: %s (", prefix
, path
);
1288 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1289 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1295 if (u
->load_state
== UNIT_LOADED
) {
1298 "%s\tStopWhenUnneeded: %s\n"
1299 "%s\tRefuseManualStart: %s\n"
1300 "%s\tRefuseManualStop: %s\n"
1301 "%s\tDefaultDependencies: %s\n"
1302 "%s\tOnFailureJobMode: %s\n"
1303 "%s\tIgnoreOnIsolate: %s\n",
1304 prefix
, yes_no(u
->stop_when_unneeded
),
1305 prefix
, yes_no(u
->refuse_manual_start
),
1306 prefix
, yes_no(u
->refuse_manual_stop
),
1307 prefix
, yes_no(u
->default_dependencies
),
1308 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1309 prefix
, yes_no(u
->ignore_on_isolate
));
1311 if (UNIT_VTABLE(u
)->dump
)
1312 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1314 } else if (u
->load_state
== UNIT_MERGED
)
1316 "%s\tMerged into: %s\n",
1317 prefix
, u
->merged_into
->id
);
1318 else if (u
->load_state
== UNIT_ERROR
)
1319 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1321 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1322 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1325 job_dump(u
->job
, f
, prefix2
);
1328 job_dump(u
->nop_job
, f
, prefix2
);
/* Loads the unit's fragment file and drop-ins; a missing fragment is an
 * error here (contrast with the *_optional variant below).
 * NOTE(review): "if (r < 0) return r;" and the -ENOENT return for a stub
 * were dropped by the extraction — comments only. */
1331 /* Common implementation for multiple backends */
1332 int unit_load_fragment_and_dropin(Unit *u) {
1337 /* Load a .{service,socket,...} file */
1338 r = unit_load_fragment(u);
/* Still a stub after loading → fragment not found (original errors out). */
1342 if (u->load_state == UNIT_STUB)
1345 /* Load drop-in directory data. If u is an alias, we might be reloading the
1346 * target unit needlessly. But we cannot be sure which drops-ins have already
1347 * been loaded and which not, at least without doing complicated book-keeping,
1348 * so let's always reread all drop-ins. */
1349 return unit_load_dropin(unit_follow_merge(u));
/* Like unit_load_fragment_and_dropin(), but a missing fragment is fine: the
 * unit is simply marked loaded and only drop-ins are applied.
 * NOTE(review): the "if (r < 0) return r;" check was dropped by the
 * extraction — comments only. */
1352 /* Common implementation for multiple backends */
1353 int unit_load_fragment_and_dropin_optional(Unit *u) {
1358 /* Same as unit_load_fragment_and_dropin(), but whether
1359 * something can be loaded or not doesn't matter. */
1361 /* Load a .service/.socket/.slice/… file */
1362 r = unit_load_fragment(u);
1366 if (u->load_state == UNIT_STUB)
1367 u->load_state = UNIT_LOADED;
1369 /* Load drop-in directory data */
1370 return unit_load_dropin(unit_follow_merge(u));
/* Queues the unit for the pass that adds default target dependencies;
 * idempotent via the in_target_deps_queue flag. NOTE(review): "return;" and
 * "}" dropped by the extraction — comments only. */
1373 void unit_add_to_target_deps_queue(Unit *u) {
1374 Manager *m = u->manager;
1378 if (u->in_target_deps_queue)
1381 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1382 u->in_target_deps_queue = true;
/* Adds the default "target is ordered After= u" dependency, provided both
 * units are loaded, both allow default dependencies, the target is actually
 * a target unit, and no Before= loop would be created.
 * NOTE(review): the "return 0;" lines after each guard were dropped by the
 * extraction — comments only, tokens verbatim. */
1385 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1389 if (target->type != UNIT_TARGET)
1392 /* Only add the dependency if both units are loaded, so that
1393 * that loop check below is reliable */
1394 if (u->load_state != UNIT_LOADED ||
1395 target->load_state != UNIT_LOADED)
1398 /* If either side wants no automatic dependencies, then let's
1400 if (!u->default_dependencies ||
1401 !target->default_dependencies)
1404 /* Don't create loops */
1405 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1408 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
/* For units living in a cgroup: adds After=+Requires= on the configured
 * parent slice, or on -.slice when none is set (except for -.slice itself).
 * NOTE(review): "return 0;" lines after the guards were dropped by the
 * extraction — comments only. */
1411 static int unit_add_slice_dependencies(Unit *u) {
1412 UnitDependencyMask mask;
1415 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1418 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1419 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1421 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1423 if (UNIT_ISSET(u->slice))
1424 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
/* The root slice has no parent to depend on. */
1426 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1429 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
/* Translates the unit's RequiresMountsFor= data into actual dependencies:
 * for each recorded path and each path prefix, finds (or pre-loads) the
 * corresponding .mount unit and adds After= (and Requires= when the mount
 * has its own fragment) carrying the recorded origin mask.
 * NOTE(review): several guards ("if (!p)" OOM check, "if (m == u) continue;",
 * "if (r < 0) return r;"), the trailing "return 0;" and closing braces were
 * dropped by the extraction — comments only, tokens verbatim. */
1432 static int unit_add_mount_dependencies(Unit *u) {
1433 UnitDependencyInfo di;
1440 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
/* VLA scratch buffer for prefix iteration. */
1441 char prefix[strlen(path) + 1];
1443 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1444 _cleanup_free_ char *p = NULL;
1447 r = unit_name_from_path(prefix, ".mount", &p);
1451 m = manager_get_unit(u->manager, p);
1453 /* Make sure to load the mount unit if
1454 * it exists. If so the dependencies
1455 * on this unit will be added later
1456 * during the loading of the mount
1458 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1464 if (m->load_state != UNIT_LOADED)
1467 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1471 if (m->fragment_path) {
1472 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1482 static int unit_add_startup_units(Unit
*u
) {
1486 c
= unit_get_cgroup_context(u
);
1490 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1491 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1492 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1495 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1499 return set_put(u
->manager
->startup_units
, u
);
1502 int unit_load(Unit
*u
) {
1507 if (u
->in_load_queue
) {
1508 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1509 u
->in_load_queue
= false;
1512 if (u
->type
== _UNIT_TYPE_INVALID
)
1515 if (u
->load_state
!= UNIT_STUB
)
1518 if (u
->transient_file
) {
1519 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1520 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1522 r
= fflush_and_check(u
->transient_file
);
1526 u
->transient_file
= safe_fclose(u
->transient_file
);
1527 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1530 if (UNIT_VTABLE(u
)->load
) {
1531 r
= UNIT_VTABLE(u
)->load(u
);
1536 if (u
->load_state
== UNIT_STUB
) {
1541 if (u
->load_state
== UNIT_LOADED
) {
1542 unit_add_to_target_deps_queue(u
);
1544 r
= unit_add_slice_dependencies(u
);
1548 r
= unit_add_mount_dependencies(u
);
1552 r
= unit_add_startup_units(u
);
1556 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1557 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1562 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1563 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1565 /* We finished loading, let's ensure our parents recalculate the members mask */
1566 unit_invalidate_cgroup_members_masks(u
);
1569 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1571 unit_add_to_dbus_queue(unit_follow_merge(u
));
1572 unit_add_to_gc_queue(u
);
1577 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1578 * return ENOEXEC to ensure units are placed in this state after loading */
1580 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1581 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1585 unit_add_to_dbus_queue(u
);
1586 unit_add_to_gc_queue(u
);
1588 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1591 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1598 /* If the condition list is empty, then it is true */
1602 /* Otherwise, if all of the non-trigger conditions apply and
1603 * if any of the trigger conditions apply (unless there are
1604 * none) we return true */
1605 LIST_FOREACH(conditions
, c
, first
) {
1608 r
= condition_test(c
);
1611 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1613 c
->trigger
? "|" : "",
1614 c
->negate
? "!" : "",
1620 c
->trigger
? "|" : "",
1621 c
->negate
? "!" : "",
1623 condition_result_to_string(c
->result
));
1625 if (!c
->trigger
&& r
<= 0)
1628 if (c
->trigger
&& triggered
<= 0)
1632 return triggered
!= 0;
/* Evaluate all ConditionXyz= entries of @u: stamp the evaluation time,
 * cache the verdict in u->condition_result, and return it. */
1635 static bool unit_condition_test(Unit
*u
) {
1638 dual_timestamp_get(&u
->condition_timestamp
);
1639 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1641 return u
->condition_result
;
/* Evaluate all AssertXyz= entries of @u: stamp the evaluation time,
 * cache the verdict in u->assert_result, and return it. Mirrors
 * unit_condition_test() but over the asserts list. */
1644 static bool unit_assert_test(Unit
*u
) {
1647 dual_timestamp_get(&u
->assert_timestamp
);
1648 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1650 return u
->assert_result
;
1653 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1656 d
= unit_description(u
);
1657 if (log_get_show_color())
1658 d
= strjoina(ANSI_HIGHLIGHT
, d
, ANSI_NORMAL
);
1660 DISABLE_WARNING_FORMAT_NONLITERAL
;
1661 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, d
);
/* Rate-limit unit starts: while below the configured start rate limit the
 * hit flag is cleared and (per the not-visible return) starting proceeds;
 * once exceeded, the flag is set and the configured StartLimitAction= is
 * executed as an emergency action. */
1665 int unit_start_limit_test(Unit
*u
) {
1670 if (ratelimit_below(&u
->start_limit
)) {
1671 u
->start_limit_hit
= false;
1675 log_unit_warning(u
, "Start request repeated too quickly.");
1676 u
->start_limit_hit
= true;
/* strjoina() builds the string on the stack — valid only inside this call. */
1678 reason
= strjoina("unit ", u
->id
, " failed");
1680 return emergency_action(u
->manager
, u
->start_limit_action
,
1681 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1682 u
->reboot_arg
, -1, reason
);
/* Whether the interactive confirmation question should be asked before
 * spawning processes for this unit. */
1685 bool unit_shall_confirm_spawn(Unit
*u
) {
1688 if (manager_is_confirm_spawn_disabled(u
->manager
))
1691 /* For some reason units remaining in the same process group
1692 * as PID 1 fail to acquire the console even if it's not used
1693 * by any process. So skip the confirmation question for them. */
1694 return !unit_get_exec_context(u
)->same_pgrp
;
1697 static bool unit_verify_deps(Unit
*u
) {
1704 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1705 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1706 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1707 * conjunction with After= as for them any such check would make things entirely racy. */
1709 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1711 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1714 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1715 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1724 * -EBADR: This unit type does not support starting.
1725 * -EALREADY: Unit is already started.
1726 * -EAGAIN: An operation is already in progress. Retry later.
1727 * -ECANCELED: Too many requests for now.
1728 * -EPROTO: Assert failed
1729 * -EINVAL: Unit not loaded
1730 * -EOPNOTSUPP: Unit type not supported
1731 * -ENOLINK: The necessary dependencies are not fulfilled.
1732 * -ESTALE: This unit has been started before and can't be started a second time
1734 int unit_start(Unit
*u
) {
1735 UnitActiveState state
;
1740 /* If this is already started, then this will succeed. Note
1741 * that this will even succeed if this unit is not startable
1742 * by the user. This is relied on to detect when we need to
1743 * wait for units and when waiting is finished. */
1744 state
= unit_active_state(u
);
1745 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1748 /* Units that aren't loaded cannot be started */
1749 if (u
->load_state
!= UNIT_LOADED
)
1752 /* Refuse starting scope units more than once */
1753 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1756 /* If the conditions failed, don't do anything at all. If we
1757 * already are activating this call might still be useful to
1758 * speed up activation in case there is some hold-off time,
1759 * but we don't want to recheck the condition in that case. */
1760 if (state
!= UNIT_ACTIVATING
&&
1761 !unit_condition_test(u
)) {
1762 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1766 /* If the asserts failed, fail the entire job */
1767 if (state
!= UNIT_ACTIVATING
&&
1768 !unit_assert_test(u
)) {
1769 log_unit_notice(u
, "Starting requested but asserts failed.");
1773 /* Units of types that aren't supported cannot be
1774 * started. Note that we do this test only after the condition
1775 * checks, so that we rather return condition check errors
1776 * (which are usually not considered a true failure) than "not
1777 * supported" errors (which are considered a failure).
1779 if (!unit_supported(u
))
1782 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1783 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1784 * effect anymore, due to a reload or due to a failed condition. */
1785 if (!unit_verify_deps(u
))
1788 /* Forward to the main object, if we aren't it. */
1789 following
= unit_following(u
);
1791 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1792 return unit_start(following
);
1795 /* If it is stopped, but we cannot start it, then fail */
1796 if (!UNIT_VTABLE(u
)->start
)
1799 /* We don't suppress calls to ->start() here when we are
1800 * already starting, to allow this request to be used as a
1801 * "hurry up" call, for example when the unit is in some "auto
1802 * restart" state where it waits for a holdoff timer to elapse
1803 * before it will start again. */
1805 unit_add_to_dbus_queue(u
);
1807 return UNIT_VTABLE(u
)->start(u
);
/* A unit is startable when it is loaded, its type is supported on this
 * system, it is not a once-only (scope-like) unit that already ran, and
 * its vtable implements start(). */
1810 bool unit_can_start(Unit
*u
) {
1813 if (u
->load_state
!= UNIT_LOADED
)
1816 if (!unit_supported(u
))
1819 /* Scope units may be started only once */
1820 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
/* Double negation converts the function pointer to a plain bool. */
1823 return !!UNIT_VTABLE(u
)->start
;
1826 bool unit_can_isolate(Unit
*u
) {
1829 return unit_can_start(u
) &&
1834 * -EBADR: This unit type does not support stopping.
1835 * -EALREADY: Unit is already stopped.
1836 * -EAGAIN: An operation is already in progress. Retry later.
1838 int unit_stop(Unit
*u
) {
1839 UnitActiveState state
;
1844 state
= unit_active_state(u
);
1845 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1848 following
= unit_following(u
);
1850 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1851 return unit_stop(following
);
1854 if (!UNIT_VTABLE(u
)->stop
)
1857 unit_add_to_dbus_queue(u
);
1859 return UNIT_VTABLE(u
)->stop(u
);
/* A unit can be stopped when its type is supported and its vtable
 * implements stop(). NOTE(review): mangled extraction — interior lines
 * between the check and the return appear dropped. */
1862 bool unit_can_stop(Unit
*u
) {
1865 if (!unit_supported(u
))
1871 return !!UNIT_VTABLE(u
)->stop
;
1875 * -EBADR: This unit type does not support reloading.
1876 * -ENOEXEC: Unit is not started.
1877 * -EAGAIN: An operation is already in progress. Retry later.
1879 int unit_reload(Unit
*u
) {
1880 UnitActiveState state
;
1885 if (u
->load_state
!= UNIT_LOADED
)
1888 if (!unit_can_reload(u
))
1891 state
= unit_active_state(u
);
1892 if (state
== UNIT_RELOADING
)
1895 if (state
!= UNIT_ACTIVE
) {
1896 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1900 following
= unit_following(u
);
1902 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1903 return unit_reload(following
);
1906 unit_add_to_dbus_queue(u
);
1908 if (!UNIT_VTABLE(u
)->reload
) {
1909 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1910 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
1914 return UNIT_VTABLE(u
)->reload(u
);
/* A unit is reloadable if its type defines can_reload()/reload(), or if it
 * merely propagates reloads onward via PropagatesReloadTo=. */
1917 bool unit_can_reload(Unit
*u
) {
1920 if (UNIT_VTABLE(u
)->can_reload
)
1921 return UNIT_VTABLE(u
)->can_reload(u
);
/* Even without an own reload implementation a reload can be propagated. */
1923 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1926 return UNIT_VTABLE(u
)->reload
;
1929 bool unit_is_unneeded(Unit
*u
) {
1930 static const UnitDependency deps
[] = {
1940 if (!u
->stop_when_unneeded
)
1943 /* Don't clean up while the unit is transitioning or is even inactive. */
1944 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
1949 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1954 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1955 * restart, then don't clean this one up. */
1957 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
) {
1961 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1964 if (unit_will_restart(other
))
1972 static void check_unneeded_dependencies(Unit
*u
) {
1974 static const UnitDependency deps
[] = {
1984 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
1986 for (j
= 0; j
< ELEMENTSOF(deps
); j
++) {
1991 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[deps
[j
]], i
)
1992 unit_submit_to_stop_when_unneeded_queue(other
);
1996 static void unit_check_binds_to(Unit
*u
) {
1997 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2009 if (unit_active_state(u
) != UNIT_ACTIVE
)
2012 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
2016 if (!other
->coldplugged
)
2017 /* We might yet create a job for the other unit… */
2020 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2030 /* If stopping a unit fails continuously we might enter a stop
2031 * loop here, hence stop acting on the service being
2032 * unnecessary after a while. */
2033 if (!ratelimit_below(&u
->auto_stop_ratelimit
)) {
2034 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2039 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2041 /* A unit we need to run is gone. Sniff. Let's stop this. */
2042 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2044 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2047 static void retroactively_start_dependencies(Unit
*u
) {
2053 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2055 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2056 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2057 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2058 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2060 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2061 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2062 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2063 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2065 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2066 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2067 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2068 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
2070 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2071 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2072 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2074 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2075 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2076 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2079 static void retroactively_stop_dependencies(Unit
*u
) {
2085 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2087 /* Pull down units which are bound to us recursively if enabled */
2088 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2089 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2090 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2093 void unit_start_on_failure(Unit
*u
) {
2101 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2104 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2106 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2107 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2109 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, &error
, NULL
);
2111 log_unit_warning_errno(u
, r
, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error
, r
));
/* Inform every unit that triggers on us (our TRIGGERED_BY edges) about a
 * state change, via that unit type's trigger_notify() callback, if any. */
2115 void unit_trigger_notify(Unit
*u
) {
2122 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2123 if (UNIT_VTABLE(other
)->trigger_notify
)
2124 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2127 static int unit_log_resources(Unit
*u
) {
2128 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ 4];
2129 bool any_traffic
= false, have_ip_accounting
= false;
2130 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
;
2131 size_t n_message_parts
= 0, n_iovec
= 0;
2132 char* message_parts
[3 + 1], *t
;
2133 nsec_t nsec
= NSEC_INFINITY
;
2134 CGroupIPAccountingMetric m
;
2137 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2138 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2139 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2140 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2141 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2146 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2147 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2148 * information and the complete data in structured fields. */
2150 (void) unit_get_cpu_usage(u
, &nsec
);
2151 if (nsec
!= NSEC_INFINITY
) {
2152 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2154 /* Format the CPU time for inclusion in the structured log message */
2155 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2159 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2161 /* Format the CPU time for inclusion in the human language message string */
2162 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2163 t
= strjoin("consumed ", buf
, " CPU time");
2169 message_parts
[n_message_parts
++] = t
;
2172 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2173 char buf
[FORMAT_BYTES_MAX
] = "";
2174 uint64_t value
= UINT64_MAX
;
2176 assert(ip_fields
[m
]);
2178 (void) unit_get_ip_accounting(u
, m
, &value
);
2179 if (value
== UINT64_MAX
)
2182 have_ip_accounting
= true;
2186 /* Format IP accounting data for inclusion in the structured log message */
2187 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2191 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2193 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2194 * bytes counters (and not for the packets counters) */
2195 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2197 igress
= strjoin("received ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2202 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2204 egress
= strjoin("sent ", format_bytes(buf
, sizeof(buf
), value
), " IP traffic");
2212 if (have_ip_accounting
) {
2215 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2217 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2222 k
= strdup("no IP traffic");
2228 message_parts
[n_message_parts
++] = k
;
2232 /* Is there any accounting data available at all? */
2238 if (n_message_parts
== 0)
2239 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2241 _cleanup_free_
char *joined
;
2243 message_parts
[n_message_parts
] = NULL
;
2245 joined
= strv_join(message_parts
, ", ");
2251 joined
[0] = ascii_toupper(joined
[0]);
2252 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2255 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2256 * and hence don't increase n_iovec for them */
2257 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2258 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2260 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2261 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2263 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2264 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2266 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2270 for (i
= 0; i
< n_message_parts
; i
++)
2271 free(message_parts
[i
]);
2273 for (i
= 0; i
< n_iovec
; i
++)
2274 free(iovec
[i
].iov_base
);
/* Keep the manager's console refcount in sync with whether this unit
 * needs the console; no-op when the cached flag already matches.
 * NOTE(review): mangled extraction — the flag assignment and the branch
 * between ref and unref are not visible here. */
2280 static void unit_update_on_console(Unit
*u
) {
2285 b
= unit_needs_console(u
);
2286 if (u
->on_console
== b
)
2291 manager_ref_console(u
->manager
);
2293 manager_unref_console(u
->manager
);
/* Emit an audit record marking a successful service start; only service
 * units are audited. */
2296 static void unit_emit_audit_start(Unit
*u
) {
2299 if (u
->type
!= UNIT_SERVICE
)
2302 /* Write audit record if we have just finished starting up */
2303 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, true);
/* Emit audit record(s) for a service stopping. The "success" bit of each
 * record is derived from whether the final state is UNIT_INACTIVE (clean)
 * as opposed to failed. NOTE(review): mangled extraction — the branch
 * structure around in_audit is not fully visible. */
2307 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2310 if (u
->type
!= UNIT_SERVICE
)
2314 /* Write audit record if we have just finished shutting down */
2315 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, state
== UNIT_INACTIVE
);
2316 u
->in_audit
= false;
2318 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2319 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, state
== UNIT_INACTIVE
);
2321 if (state
== UNIT_INACTIVE
)
2322 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, true);
2326 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2332 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2333 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2335 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2336 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2337 * remounted this function will be called too! */
2341 /* Update timestamps for state changes */
2342 if (!MANAGER_IS_RELOADING(m
)) {
2343 dual_timestamp_get(&u
->state_change_timestamp
);
2345 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2346 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2347 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2348 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2350 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2351 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2352 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2353 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2356 /* Keep track of failed units */
2357 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2359 /* Make sure the cgroup and state files are always removed when we become inactive */
2360 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2361 unit_prune_cgroup(u
);
2362 unit_unlink_state_files(u
);
2365 unit_update_on_console(u
);
2370 if (u
->job
->state
== JOB_WAITING
)
2372 /* So we reached a different state for this
2373 * job. Let's see if we can run it now if it
2374 * failed previously due to EAGAIN. */
2375 job_add_to_run_queue(u
->job
);
2377 /* Let's check whether this state change constitutes a
2378 * finished job, or maybe contradicts a running job and
2379 * hence needs to invalidate jobs. */
2381 switch (u
->job
->type
) {
2384 case JOB_VERIFY_ACTIVE
:
2386 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2387 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2388 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2391 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2392 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2398 case JOB_RELOAD_OR_START
:
2399 case JOB_TRY_RELOAD
:
2401 if (u
->job
->state
== JOB_RUNNING
) {
2402 if (ns
== UNIT_ACTIVE
)
2403 job_finish_and_invalidate(u
->job
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2404 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2407 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2408 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2416 case JOB_TRY_RESTART
:
2418 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2419 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2420 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2422 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2428 assert_not_reached("Job type unknown");
2434 if (!MANAGER_IS_RELOADING(m
)) {
2436 /* If this state change happened without being
2437 * requested by a job, then let's retroactively start
2438 * or stop dependencies. We skip that step when
2439 * deserializing, since we don't want to create any
2440 * additional jobs just because something is already
2444 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2445 retroactively_start_dependencies(u
);
2446 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2447 retroactively_stop_dependencies(u
);
2450 /* stop unneeded units regardless if going down was expected or not */
2451 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2452 check_unneeded_dependencies(u
);
2454 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2455 log_unit_debug(u
, "Unit entered failed state.");
2457 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2458 unit_start_on_failure(u
);
2461 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2462 /* This unit just finished starting up */
2464 unit_emit_audit_start(u
);
2465 manager_send_unit_plymouth(m
, u
);
2468 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2469 /* This unit just stopped/failed. */
2471 unit_emit_audit_stop(u
, ns
);
2472 unit_log_resources(u
);
2476 manager_recheck_journal(m
);
2477 manager_recheck_dbus(m
);
2479 unit_trigger_notify(u
);
2481 if (!MANAGER_IS_RELOADING(m
)) {
2482 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2483 unit_submit_to_stop_when_unneeded_queue(u
);
2485 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2486 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2487 * without ever entering started.) */
2488 unit_check_binds_to(u
);
2490 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2491 reason
= strjoina("unit ", u
->id
, " failed");
2492 (void) emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2493 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2494 reason
= strjoina("unit ", u
->id
, " succeeded");
2495 (void) emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2499 unit_add_to_dbus_queue(u
);
2500 unit_add_to_gc_queue(u
);
2503 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2507 assert(pid_is_valid(pid
));
2509 /* Watch a specific PID */
2511 r
= set_ensure_allocated(&u
->pids
, NULL
);
2515 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2519 /* First try, let's add the unit keyed by "pid". */
2520 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2526 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2527 * to an array of Units rather than just a Unit), lists us already. */
2529 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2531 for (; array
[n
]; n
++)
2535 if (found
) /* Found it already? if so, do nothing */
2540 /* Allocate a new array */
2541 new_array
= new(Unit
*, n
+ 2);
2545 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2547 new_array
[n
+1] = NULL
;
2549 /* Add or replace the old array */
2550 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2561 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
2568 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2572 assert(pid_is_valid(pid
));
2574 /* First let's drop the unit in case it's keyed as "pid". */
2575 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2577 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2578 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2582 /* Let's iterate through the array, dropping our own entry */
2583 for (n
= 0; array
[n
]; n
++)
2585 array
[m
++] = array
[n
];
2589 /* The array is now empty, remove the entire entry */
2590 assert(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2595 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
/* Drop every watched PID of this unit, one at a time (so each removal also
 * updates the manager-wide watch maps), then free the now-empty set. */
2598 void unit_unwatch_all_pids(Unit
*u
) {
2601 while (!set_isempty(u
->pids
))
2602 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
/* set_free() returns NULL, resetting the field in one statement. */
2604 u
->pids
= set_free(u
->pids
);
2607 static void unit_tidy_watch_pids(Unit
*u
) {
2608 pid_t except1
, except2
;
2614 /* Cleans dead PIDs from our list */
2616 except1
= unit_main_pid(u
);
2617 except2
= unit_control_pid(u
);
2619 SET_FOREACH(e
, u
->pids
, i
) {
2620 pid_t pid
= PTR_TO_PID(e
);
2622 if (pid
== except1
|| pid
== except2
)
2625 if (!pid_is_unwaited(pid
))
2626 unit_unwatch_pid(u
, pid
);
/* Deferred-event callback: prune dead PIDs from the watch list, re-watch
 * all current PIDs, and synthesize a cgroup-empty event in case none are
 * left. NOTE(review): mangled extraction — the userdata-to-Unit cast and
 * the emptiness check around the synthesize call are not visible here. */
2630 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2636 unit_tidy_watch_pids(u
);
2637 unit_watch_all_pids(u
);
2639 /* If the PID set is empty now, then let's finish this off. */
2640 unit_synthesize_cgroup_empty_event(u
);
2645 int unit_enqueue_rewatch_pids(Unit
*u
) {
2650 if (!u
->cgroup_path
)
2653 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2656 if (r
> 0) /* On unified we can use proper notifications */
2659 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2660 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2661 * involves issuing kill(pid, 0) on all processes we watch. */
2663 if (!u
->rewatch_pids_event_source
) {
2664 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2666 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2668 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2670 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
2672 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: m");
2674 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2676 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2679 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2681 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
/* Cancel a pending PID-rewatch job: disable the deferred event source (a
 * failure here is logged and otherwise ignored) and drop our reference. */
2686 void unit_dequeue_rewatch_pids(Unit
*u
) {
2690 if (!u
->rewatch_pids_event_source
)
2693 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2695 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
/* sd_event_source_unref() returns NULL, clearing the field in one step. */
2697 u
->rewatch_pids_event_source
= sd_event_source_unref(u
->rewatch_pids_event_source
);
2700 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2702 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2706 case JOB_VERIFY_ACTIVE
:
2709 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2710 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2715 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2716 * external events), hence it makes no sense to permit enqueing such a request either. */
2717 return !u
->perpetual
;
2720 case JOB_TRY_RESTART
:
2721 return unit_can_stop(u
) && unit_can_start(u
);
2724 case JOB_TRY_RELOAD
:
2725 return unit_can_reload(u
);
2727 case JOB_RELOAD_OR_START
:
2728 return unit_can_reload(u
) && unit_can_start(u
);
2731 assert_not_reached("Invalid job type");
/* Warn the user when a dependency edge is dropped while merging units;
 * only edge types whose silent loss would surprise users are reported. */
2735 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2738 /* Only warn about some unit types */
2739 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
/* Self-reference: the edge collapsed onto the merged unit itself. */
2742 if (streq_ptr(u
->id
, other
))
2743 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2745 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2748 static int unit_add_dependency_hashmap(
2751 UnitDependencyMask origin_mask
,
2752 UnitDependencyMask destination_mask
) {
2754 UnitDependencyInfo info
;
2759 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2760 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2761 assert(origin_mask
> 0 || destination_mask
> 0);
2763 r
= hashmap_ensure_allocated(h
, NULL
);
2767 assert_cc(sizeof(void*) == sizeof(info
));
2769 info
.data
= hashmap_get(*h
, other
);
2771 /* Entry already exists. Add in our mask. */
2773 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
2774 FLAGS_SET(destination_mask
, info
.destination_mask
))
2777 info
.origin_mask
|= origin_mask
;
2778 info
.destination_mask
|= destination_mask
;
2780 r
= hashmap_update(*h
, other
, info
.data
);
2782 info
= (UnitDependencyInfo
) {
2783 .origin_mask
= origin_mask
,
2784 .destination_mask
= destination_mask
,
2787 r
= hashmap_put(*h
, other
, info
.data
);
2795 int unit_add_dependency(
2800 UnitDependencyMask mask
) {
2802 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2803 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2804 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2805 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2806 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2807 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2808 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2809 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2810 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2811 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2812 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2813 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2814 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2815 [UNIT_BEFORE
] = UNIT_AFTER
,
2816 [UNIT_AFTER
] = UNIT_BEFORE
,
2817 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2818 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2819 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2820 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2821 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2822 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2823 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2824 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2826 Unit
*original_u
= u
, *original_other
= other
;
2830 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2833 u
= unit_follow_merge(u
);
2834 other
= unit_follow_merge(other
);
2836 /* We won't allow dependencies on ourselves. We will not
2837 * consider them an error however. */
2839 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2843 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2844 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2845 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2849 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2853 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2854 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2859 if (add_reference
) {
2860 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2864 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2869 unit_add_to_dbus_queue(u
);
/* Convenience wrapper: add both dependency kinds @d and @e from @u to
 * @other with the same reference flag and origin mask. The (not visible)
 * error check between the two calls propagates the first failure. */
2873 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2878 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2882 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
2885 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
2893 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2900 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2902 _cleanup_free_
char *i
= NULL
;
2904 r
= unit_name_to_prefix(u
->id
, &i
);
2908 r
= unit_name_replace_instance(name
, i
, buf
);
2917 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2918 _cleanup_free_
char *buf
= NULL
;
2925 r
= resolve_template(u
, name
, &buf
, &name
);
2929 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2933 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2936 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
2937 _cleanup_free_
char *buf
= NULL
;
2944 r
= resolve_template(u
, name
, &buf
, &name
);
2948 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
2952 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
int set_unit_path(const char *p) {
        /* Debug helper: points the manager at an alternative unit file
         * directory by exporting $SYSTEMD_UNIT_PATH into our environment. */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) >= 0)
                return 0;

        return -errno;
}
2963 char *unit_dbus_path(Unit
*u
) {
2969 return unit_dbus_path_from_name(u
->id
);
2972 char *unit_dbus_path_invocation_id(Unit
*u
) {
2975 if (sd_id128_is_null(u
->invocation_id
))
2978 return unit_dbus_path_from_name(u
->invocation_id_string
);
2981 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2985 /* Sets the unit slice if it has not been set before. Is extra
2986 * careful, to only allow this for units that actually have a
2987 * cgroup context. Also, we don't allow to set this for slices
2988 * (since the parent slice is derived from the name). Make
2989 * sure the unit we set is actually a slice. */
2991 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2994 if (u
->type
== UNIT_SLICE
)
2997 if (unit_active_state(u
) != UNIT_INACTIVE
)
3000 if (slice
->type
!= UNIT_SLICE
)
3003 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3004 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3007 if (UNIT_DEREF(u
->slice
) == slice
)
3010 /* Disallow slice changes if @u is already bound to cgroups */
3011 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
3014 unit_ref_set(&u
->slice
, u
, slice
);
3018 int unit_set_default_slice(Unit
*u
) {
3019 const char *slice_name
;
3025 if (UNIT_ISSET(u
->slice
))
3029 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3031 /* Implicitly place all instantiated units in their
3032 * own per-template slice */
3034 r
= unit_name_to_prefix(u
->id
, &prefix
);
3038 /* The prefix is already escaped, but it might include
3039 * "-" which has a special meaning for slice units,
3040 * hence escape it here extra. */
3041 escaped
= unit_name_escape(prefix
);
3045 if (MANAGER_IS_SYSTEM(u
->manager
))
3046 slice_name
= strjoina("system-", escaped
, ".slice");
3048 slice_name
= strjoina(escaped
, ".slice");
3051 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
3052 ? SPECIAL_SYSTEM_SLICE
3053 : SPECIAL_ROOT_SLICE
;
3055 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3059 return unit_set_slice(u
, slice
);
3062 const char *unit_slice_name(Unit
*u
) {
3065 if (!UNIT_ISSET(u
->slice
))
3068 return UNIT_DEREF(u
->slice
)->id
;
3071 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3072 _cleanup_free_
char *t
= NULL
;
3079 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3082 if (unit_has_name(u
, t
))
3085 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3086 assert(r
< 0 || *_found
!= u
);
3090 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3091 const char *name
, *old_owner
, *new_owner
;
3098 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3100 bus_log_parse_error(r
);
3104 old_owner
= empty_to_null(old_owner
);
3105 new_owner
= empty_to_null(new_owner
);
3107 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3108 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
3113 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3120 if (u
->match_bus_slot
)
3123 match
= strjoina("type='signal',"
3124 "sender='org.freedesktop.DBus',"
3125 "path='/org/freedesktop/DBus',"
3126 "interface='org.freedesktop.DBus',"
3127 "member='NameOwnerChanged',"
3128 "arg0='", name
, "'");
3130 return sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
3133 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3139 /* Watch a specific name on the bus. We only support one unit
3140 * watching each name for now. */
3142 if (u
->manager
->api_bus
) {
3143 /* If the bus is already available, install the match directly.
3144 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3145 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3147 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3150 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3152 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3153 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3159 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3163 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3164 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3167 bool unit_can_serialize(Unit
*u
) {
3170 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
3173 static int serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3174 _cleanup_free_
char *s
= NULL
;
3183 r
= cg_mask_to_string(mask
, &s
);
3185 return log_error_errno(r
, "Failed to format cgroup mask: %m");
3187 return serialize_item(f
, key
, s
);
/* Serialization key names for the per-unit IP accounting counters, indexed by
 * CGroupIPAccountingMetric; used by unit_serialize()/unit_deserialize(). */
3190 static const char *ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3191 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3192 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3193 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3194 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
3197 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3198 CGroupIPAccountingMetric m
;
3205 if (unit_can_serialize(u
)) {
3206 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3211 (void) serialize_dual_timestamp(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3213 (void) serialize_dual_timestamp(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3214 (void) serialize_dual_timestamp(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3215 (void) serialize_dual_timestamp(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3216 (void) serialize_dual_timestamp(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3218 (void) serialize_dual_timestamp(f
, "condition-timestamp", &u
->condition_timestamp
);
3219 (void) serialize_dual_timestamp(f
, "assert-timestamp", &u
->assert_timestamp
);
3221 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3222 (void) serialize_bool(f
, "condition-result", u
->condition_result
);
3224 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3225 (void) serialize_bool(f
, "assert-result", u
->assert_result
);
3227 (void) serialize_bool(f
, "transient", u
->transient
);
3228 (void) serialize_bool(f
, "in-audit", u
->in_audit
);
3230 (void) serialize_bool(f
, "exported-invocation-id", u
->exported_invocation_id
);
3231 (void) serialize_bool(f
, "exported-log-level-max", u
->exported_log_level_max
);
3232 (void) serialize_bool(f
, "exported-log-extra-fields", u
->exported_log_extra_fields
);
3233 (void) serialize_bool(f
, "exported-log-rate-limit-interval", u
->exported_log_rate_limit_interval
);
3234 (void) serialize_bool(f
, "exported-log-rate-limit-burst", u
->exported_log_rate_limit_burst
);
3236 (void) serialize_item_format(f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3237 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3238 (void) serialize_item_format(f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3241 (void) serialize_item(f
, "cgroup", u
->cgroup_path
);
3243 (void) serialize_bool(f
, "cgroup-realized", u
->cgroup_realized
);
3244 (void) serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3245 (void) serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3246 (void) serialize_cgroup_mask(f
, "cgroup-invalidated-mask", u
->cgroup_invalidated_mask
);
3248 if (uid_is_valid(u
->ref_uid
))
3249 (void) serialize_item_format(f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3250 if (gid_is_valid(u
->ref_gid
))
3251 (void) serialize_item_format(f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3253 if (!sd_id128_is_null(u
->invocation_id
))
3254 (void) serialize_item_format(f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3256 bus_track_serialize(u
->bus_track
, f
, "ref");
3258 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3261 r
= unit_get_ip_accounting(u
, m
, &v
);
3263 (void) serialize_item_format(f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3266 if (serialize_jobs
) {
3269 job_serialize(u
->job
, f
);
3274 job_serialize(u
->nop_job
, f
);
3283 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3291 _cleanup_free_
char *line
= NULL
;
3292 CGroupIPAccountingMetric m
;
3296 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3298 return log_error_errno(r
, "Failed to read serialization line: %m");
3299 if (r
== 0) /* eof */
3303 if (isempty(l
)) /* End marker */
3306 k
= strcspn(l
, "=");
3314 if (streq(l
, "job")) {
3316 /* new-style serialized job */
3323 r
= job_deserialize(j
, f
);
3329 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
3335 r
= job_install_deserialized(j
);
3337 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
3341 } else /* legacy for pre-44 */
3342 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3344 } else if (streq(l
, "state-change-timestamp")) {
3345 (void) deserialize_dual_timestamp(v
, &u
->state_change_timestamp
);
3347 } else if (streq(l
, "inactive-exit-timestamp")) {
3348 (void) deserialize_dual_timestamp(v
, &u
->inactive_exit_timestamp
);
3350 } else if (streq(l
, "active-enter-timestamp")) {
3351 (void) deserialize_dual_timestamp(v
, &u
->active_enter_timestamp
);
3353 } else if (streq(l
, "active-exit-timestamp")) {
3354 (void) deserialize_dual_timestamp(v
, &u
->active_exit_timestamp
);
3356 } else if (streq(l
, "inactive-enter-timestamp")) {
3357 (void) deserialize_dual_timestamp(v
, &u
->inactive_enter_timestamp
);
3359 } else if (streq(l
, "condition-timestamp")) {
3360 (void) deserialize_dual_timestamp(v
, &u
->condition_timestamp
);
3362 } else if (streq(l
, "assert-timestamp")) {
3363 (void) deserialize_dual_timestamp(v
, &u
->assert_timestamp
);
3365 } else if (streq(l
, "condition-result")) {
3367 r
= parse_boolean(v
);
3369 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3371 u
->condition_result
= r
;
3375 } else if (streq(l
, "assert-result")) {
3377 r
= parse_boolean(v
);
3379 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3381 u
->assert_result
= r
;
3385 } else if (streq(l
, "transient")) {
3387 r
= parse_boolean(v
);
3389 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3395 } else if (streq(l
, "in-audit")) {
3397 r
= parse_boolean(v
);
3399 log_unit_debug(u
, "Failed to parse in-audit bool %s, ignoring.", v
);
3405 } else if (streq(l
, "exported-invocation-id")) {
3407 r
= parse_boolean(v
);
3409 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3411 u
->exported_invocation_id
= r
;
3415 } else if (streq(l
, "exported-log-level-max")) {
3417 r
= parse_boolean(v
);
3419 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3421 u
->exported_log_level_max
= r
;
3425 } else if (streq(l
, "exported-log-extra-fields")) {
3427 r
= parse_boolean(v
);
3429 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3431 u
->exported_log_extra_fields
= r
;
3435 } else if (streq(l
, "exported-log-rate-limit-interval")) {
3437 r
= parse_boolean(v
);
3439 log_unit_debug(u
, "Failed to parse exported log rate limit interval %s, ignoring.", v
);
3441 u
->exported_log_rate_limit_interval
= r
;
3445 } else if (streq(l
, "exported-log-rate-limit-burst")) {
3447 r
= parse_boolean(v
);
3449 log_unit_debug(u
, "Failed to parse exported log rate limit burst %s, ignoring.", v
);
3451 u
->exported_log_rate_limit_burst
= r
;
3455 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3457 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3459 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3463 } else if (streq(l
, "cpu-usage-last")) {
3465 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3467 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3471 } else if (streq(l
, "cgroup")) {
3473 r
= unit_set_cgroup_path(u
, v
);
3475 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3477 (void) unit_watch_cgroup(u
);
3480 } else if (streq(l
, "cgroup-realized")) {
3483 b
= parse_boolean(v
);
3485 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3487 u
->cgroup_realized
= b
;
3491 } else if (streq(l
, "cgroup-realized-mask")) {
3493 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3495 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3498 } else if (streq(l
, "cgroup-enabled-mask")) {
3500 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3502 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3505 } else if (streq(l
, "cgroup-invalidated-mask")) {
3507 r
= cg_mask_from_string(v
, &u
->cgroup_invalidated_mask
);
3509 log_unit_debug(u
, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v
);
3512 } else if (streq(l
, "ref-uid")) {
3515 r
= parse_uid(v
, &uid
);
3517 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3519 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3523 } else if (streq(l
, "ref-gid")) {
3526 r
= parse_gid(v
, &gid
);
3528 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3530 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3534 } else if (streq(l
, "ref")) {
3536 r
= strv_extend(&u
->deserialized_refs
, v
);
3541 } else if (streq(l
, "invocation-id")) {
3544 r
= sd_id128_from_string(v
, &id
);
3546 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3548 r
= unit_set_invocation_id(u
, id
);
3550 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3556 /* Check if this is an IP accounting metric serialization field */
3557 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++)
3558 if (streq(l
, ip_accounting_metric_field
[m
]))
3560 if (m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
) {
3563 r
= safe_atou64(v
, &c
);
3565 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3567 u
->ip_accounting_extra
[m
] = c
;
3571 if (unit_can_serialize(u
)) {
3572 r
= exec_runtime_deserialize_compat(u
, l
, v
, fds
);
3574 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3578 /* Returns positive if key was handled by the call */
3582 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3584 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3588 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3589 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3590 * before 228 where the base for timeouts was not persistent across reboots. */
3592 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3593 dual_timestamp_get(&u
->state_change_timestamp
);
3595 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3596 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3597 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3598 unit_invalidate_cgroup_bpf(u
);
3603 int unit_deserialize_skip(FILE *f
) {
3607 /* Skip serialized data for this unit. We don't know what it is. */
3610 _cleanup_free_
char *line
= NULL
;
3613 r
= read_line(f
, LONG_LINE_MAX
, &line
);
3615 return log_error_errno(r
, "Failed to read serialization line: %m");
3627 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3629 _cleanup_free_
char *e
= NULL
;
3634 /* Adds in links to the device node that this unit is based on */
3638 if (!is_device_path(what
))
3641 /* When device units aren't supported (such as in a
3642 * container), don't create dependencies on them. */
3643 if (!unit_type_supported(UNIT_DEVICE
))
3646 r
= unit_name_from_path(what
, ".device", &e
);
3650 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3654 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3655 dep
= UNIT_BINDS_TO
;
3657 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3658 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3659 device
, true, mask
);
3664 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
3672 int unit_coldplug(Unit
*u
) {
3678 /* Make sure we don't enter a loop, when coldplugging recursively. */
3682 u
->coldplugged
= true;
3684 STRV_FOREACH(i
, u
->deserialized_refs
) {
3685 q
= bus_unit_track_add_name(u
, *i
);
3686 if (q
< 0 && r
>= 0)
3689 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3691 if (UNIT_VTABLE(u
)->coldplug
) {
3692 q
= UNIT_VTABLE(u
)->coldplug(u
);
3693 if (q
< 0 && r
>= 0)
3698 q
= job_coldplug(u
->job
);
3699 if (q
< 0 && r
>= 0)
3706 void unit_catchup(Unit
*u
) {
3709 if (UNIT_VTABLE(u
)->catchup
)
3710 UNIT_VTABLE(u
)->catchup(u
);
3713 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3719 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3720 * are never out-of-date. */
3721 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3724 if (stat(path
, &st
) < 0)
3725 /* What, cannot access this anymore? */
3729 /* For masked files check if they are still so */
3730 return !null_or_empty(&st
);
3732 /* For non-empty files check the mtime */
3733 return timespec_load(&st
.st_mtim
) > mtime
;
3738 bool unit_need_daemon_reload(Unit
*u
) {
3739 _cleanup_strv_free_
char **t
= NULL
;
3744 /* For unit files, we allow masking… */
3745 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3746 u
->load_state
== UNIT_MASKED
))
3749 /* Source paths should not be masked… */
3750 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3753 if (u
->load_state
== UNIT_LOADED
)
3754 (void) unit_find_dropin_paths(u
, &t
);
3755 if (!strv_equal(u
->dropin_paths
, t
))
3758 /* … any drop-ins that are masked are simply omitted from the list. */
3759 STRV_FOREACH(path
, u
->dropin_paths
)
3760 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3766 void unit_reset_failed(Unit
*u
) {
3769 if (UNIT_VTABLE(u
)->reset_failed
)
3770 UNIT_VTABLE(u
)->reset_failed(u
);
3772 RATELIMIT_RESET(u
->start_limit
);
3773 u
->start_limit_hit
= false;
3776 Unit
*unit_following(Unit
*u
) {
3779 if (UNIT_VTABLE(u
)->following
)
3780 return UNIT_VTABLE(u
)->following(u
);
3785 bool unit_stop_pending(Unit
*u
) {
3788 /* This call does check the current state of the unit. It's
3789 * hence useful to be called from state change calls of the
3790 * unit itself, where the state isn't updated yet. This is
3791 * different from unit_inactive_or_pending() which checks both
3792 * the current state and for a queued job. */
3794 return u
->job
&& u
->job
->type
== JOB_STOP
;
3797 bool unit_inactive_or_pending(Unit
*u
) {
3800 /* Returns true if the unit is inactive or going down */
3802 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3805 if (unit_stop_pending(u
))
3811 bool unit_active_or_pending(Unit
*u
) {
3814 /* Returns true if the unit is active or going up */
3816 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3820 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3826 bool unit_will_restart(Unit
*u
) {
3829 if (!UNIT_VTABLE(u
)->will_restart
)
3832 return UNIT_VTABLE(u
)->will_restart(u
);
3835 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3837 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3838 assert(SIGNAL_VALID(signo
));
3840 if (!UNIT_VTABLE(u
)->kill
)
3843 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3846 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3847 _cleanup_set_free_ Set
*pid_set
= NULL
;
3850 pid_set
= set_new(NULL
);
3854 /* Exclude the main/control pids from being killed via the cgroup */
3856 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3861 if (control_pid
> 0) {
3862 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3867 return TAKE_PTR(pid_set
);
3870 int unit_kill_common(
3876 sd_bus_error
*error
) {
3879 bool killed
= false;
3881 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3883 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3884 else if (main_pid
== 0)
3885 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3888 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3889 if (control_pid
< 0)
3890 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3891 else if (control_pid
== 0)
3892 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3895 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3896 if (control_pid
> 0) {
3897 if (kill(control_pid
, signo
) < 0)
3903 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3905 if (kill(main_pid
, signo
) < 0)
3911 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3912 _cleanup_set_free_ Set
*pid_set
= NULL
;
3915 /* Exclude the main/control pids from being killed via the cgroup */
3916 pid_set
= unit_pid_set(main_pid
, control_pid
);
3920 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3921 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
3927 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
3933 int unit_following_set(Unit
*u
, Set
**s
) {
3937 if (UNIT_VTABLE(u
)->following_set
)
3938 return UNIT_VTABLE(u
)->following_set(u
, s
);
3944 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3949 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3950 r
= unit_file_get_state(
3951 u
->manager
->unit_file_scope
,
3954 &u
->unit_file_state
);
3956 u
->unit_file_state
= UNIT_FILE_BAD
;
3959 return u
->unit_file_state
;
3962 int unit_get_unit_file_preset(Unit
*u
) {
3965 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3966 u
->unit_file_preset
= unit_file_query_preset(
3967 u
->manager
->unit_file_scope
,
3969 basename(u
->fragment_path
));
3971 return u
->unit_file_preset
;
3974 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
3980 unit_ref_unset(ref
);
3982 ref
->source
= source
;
3983 ref
->target
= target
;
3984 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
3988 void unit_ref_unset(UnitRef
*ref
) {
3994 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3995 * be unreferenced now. */
3996 unit_add_to_gc_queue(ref
->target
);
3998 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
3999 ref
->source
= ref
->target
= NULL
;
4002 static int user_from_unit_name(Unit
*u
, char **ret
) {
4004 static const uint8_t hash_key
[] = {
4005 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4006 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4009 _cleanup_free_
char *n
= NULL
;
4012 r
= unit_name_to_prefix(u
->id
, &n
);
4016 if (valid_user_group_name(n
)) {
4021 /* If we can't use the unit name as a user name, then let's hash it and use that */
4022 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4028 int unit_patch_contexts(Unit
*u
) {
4036 /* Patch in the manager defaults into the exec and cgroup
4037 * contexts, _after_ the rest of the settings have been
4040 ec
= unit_get_exec_context(u
);
4042 /* This only copies in the ones that need memory */
4043 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4044 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4045 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4050 if (MANAGER_IS_USER(u
->manager
) &&
4051 !ec
->working_directory
) {
4053 r
= get_home_dir(&ec
->working_directory
);
4057 /* Allow user services to run, even if the
4058 * home directory is missing */
4059 ec
->working_directory_missing_ok
= true;
4062 if (ec
->private_devices
)
4063 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4065 if (ec
->protect_kernel_modules
)
4066 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4068 if (ec
->dynamic_user
) {
4070 r
= user_from_unit_name(u
, &ec
->user
);
4076 ec
->group
= strdup(ec
->user
);
4081 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4082 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4084 ec
->private_tmp
= true;
4085 ec
->remove_ipc
= true;
4086 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4087 if (ec
->protect_home
== PROTECT_HOME_NO
)
4088 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4092 cc
= unit_get_cgroup_context(u
);
4095 if (ec
->private_devices
&&
4096 cc
->device_policy
== CGROUP_AUTO
)
4097 cc
->device_policy
= CGROUP_CLOSED
;
4099 if (ec
->root_image
&&
4100 (cc
->device_policy
!= CGROUP_AUTO
|| cc
->device_allow
)) {
4102 /* When RootImage= is specified, the following devices are touched. */
4103 r
= cgroup_add_device_allow(cc
, "/dev/loop-control", "rw");
4107 r
= cgroup_add_device_allow(cc
, "block-loop", "rwm");
4111 r
= cgroup_add_device_allow(cc
, "block-blkext", "rwm");
4120 ExecContext
*unit_get_exec_context(Unit
*u
) {
4127 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4131 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4134 KillContext
*unit_get_kill_context(Unit
*u
) {
4141 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4145 return (KillContext
*) ((uint8_t*) u
+ offset
);
4148 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4154 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4158 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4161 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4167 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4171 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4174 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4177 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4180 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4181 return u
->manager
->lookup_paths
.transient
;
4183 if (flags
& UNIT_PERSISTENT
)
4184 return u
->manager
->lookup_paths
.persistent_control
;
4186 if (flags
& UNIT_RUNTIME
)
4187 return u
->manager
->lookup_paths
.runtime_control
;
4192 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4198 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4199 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4200 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4201 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4202 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4205 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4206 ret
= specifier_escape(s
);
4213 if (flags
& UNIT_ESCAPE_C
) {
4226 return ret
?: (char*) s
;
4229 return ret
?: strdup(s
);
4232 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4233 _cleanup_free_
char *result
= NULL
;
4234 size_t n
= 0, allocated
= 0;
4237 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4238 * way suitable for ExecStart= stanzas */
4240 STRV_FOREACH(i
, l
) {
4241 _cleanup_free_
char *buf
= NULL
;
4246 p
= unit_escape_setting(*i
, flags
, &buf
);
4250 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4251 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4265 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
4270 return TAKE_PTR(result
);
4273 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4274 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4275 const char *dir
, *wrapped
;
4282 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4285 data
= unit_escape_setting(data
, flags
, &escaped
);
4289 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4290 * previous section header is the same */
4292 if (flags
& UNIT_PRIVATE
) {
4293 if (!UNIT_VTABLE(u
)->private_section
)
4296 if (!u
->transient_file
|| u
->last_section_private
< 0)
4297 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4298 else if (u
->last_section_private
== 0)
4299 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4301 if (!u
->transient_file
|| u
->last_section_private
< 0)
4302 data
= strjoina("[Unit]\n", data
);
4303 else if (u
->last_section_private
> 0)
4304 data
= strjoina("\n[Unit]\n", data
);
4307 if (u
->transient_file
) {
4308 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4309 * write to the transient unit file. */
4310 fputs(data
, u
->transient_file
);
4312 if (!endswith(data
, "\n"))
4313 fputc('\n', u
->transient_file
);
4315 /* Remember which section we wrote this entry to */
4316 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4320 dir
= unit_drop_in_dir(u
, flags
);
4324 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4325 "# or an equivalent operation. Do not edit.\n",
4329 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4333 (void) mkdir_p_label(p
, 0755);
4334 r
= write_string_file_atomic_label(q
, wrapped
);
4338 r
= strv_push(&u
->dropin_paths
, q
);
4343 strv_uniq(u
->dropin_paths
);
4345 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4350 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4351 _cleanup_free_
char *p
= NULL
;
4359 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4362 va_start(ap
, format
);
4363 r
= vasprintf(&p
, format
, ap
);
4369 return unit_write_setting(u
, flags
, name
, p
);
4372 int unit_make_transient(Unit
*u
) {
4373 _cleanup_free_
char *path
= NULL
;
4378 if (!UNIT_VTABLE(u
)->can_transient
)
4381 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4383 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4387 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4388 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4390 RUN_WITH_UMASK(0022) {
4391 f
= fopen(path
, "we");
4396 safe_fclose(u
->transient_file
);
4397 u
->transient_file
= f
;
4399 free_and_replace(u
->fragment_path
, path
);
4401 u
->source_path
= mfree(u
->source_path
);
4402 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4403 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4405 u
->load_state
= UNIT_STUB
;
4407 u
->transient
= true;
4409 unit_add_to_dbus_queue(u
);
4410 unit_add_to_gc_queue(u
);
4412 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4418 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
4419 _cleanup_free_
char *comm
= NULL
;
4421 (void) get_process_comm(pid
, &comm
);
4423 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4424 only, like for example systemd's own PAM stub process. */
4425 if (comm
&& comm
[0] == '(')
4428 log_unit_notice(userdata
,
4429 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4432 signal_to_string(sig
));
4435 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
4440 case KILL_TERMINATE
:
4441 case KILL_TERMINATE_AND_LOG
:
4442 return c
->kill_signal
;
4445 return c
->final_kill_signal
;
4448 return c
->watchdog_signal
;
4451 assert_not_reached("KillOperation unknown");
4455 int unit_kill_context(
4461 bool main_pid_alien
) {
4463 bool wait_for_exit
= false, send_sighup
;
4464 cg_kill_log_func_t log_func
= NULL
;
4470 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4471 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4473 if (c
->kill_mode
== KILL_NONE
)
4476 sig
= operation_to_signal(c
, k
);
4480 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4483 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
4484 log_func
= log_kill
;
4488 log_func(main_pid
, sig
, u
);
4490 r
= kill_and_sigcont(main_pid
, sig
);
4491 if (r
< 0 && r
!= -ESRCH
) {
4492 _cleanup_free_
char *comm
= NULL
;
4493 (void) get_process_comm(main_pid
, &comm
);
4495 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
4497 if (!main_pid_alien
)
4498 wait_for_exit
= true;
4500 if (r
!= -ESRCH
&& send_sighup
)
4501 (void) kill(main_pid
, SIGHUP
);
4505 if (control_pid
> 0) {
4507 log_func(control_pid
, sig
, u
);
4509 r
= kill_and_sigcont(control_pid
, sig
);
4510 if (r
< 0 && r
!= -ESRCH
) {
4511 _cleanup_free_
char *comm
= NULL
;
4512 (void) get_process_comm(control_pid
, &comm
);
4514 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4516 wait_for_exit
= true;
4518 if (r
!= -ESRCH
&& send_sighup
)
4519 (void) kill(control_pid
, SIGHUP
);
4523 if (u
->cgroup_path
&&
4524 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4525 _cleanup_set_free_ Set
*pid_set
= NULL
;
4527 /* Exclude the main/control pids from being killed via the cgroup */
4528 pid_set
= unit_pid_set(main_pid
, control_pid
);
4532 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4534 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4538 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4539 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
4543 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4544 * we are running in a container or if this is a delegation unit, simply because cgroup
4545 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4546 * of containers it can be confused easily by left-over directories in the cgroup — which
4547 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4548 * there we get proper events. Hence rely on them. */
4550 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4551 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4552 wait_for_exit
= true;
4557 pid_set
= unit_pid_set(main_pid
, control_pid
);
4561 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4570 return wait_for_exit
;
4573 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4574 _cleanup_free_
char *p
= NULL
;
4576 UnitDependencyInfo di
;
4582 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4583 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4584 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4585 * determine which units to make themselves a dependency of. */
4587 if (!path_is_absolute(path
))
4590 r
= hashmap_ensure_allocated(&u
->requires_mounts_for
, &path_hash_ops
);
4598 path
= path_simplify(p
, false);
4600 if (!path_is_normalized(path
))
4603 if (hashmap_contains(u
->requires_mounts_for
, path
))
4606 di
= (UnitDependencyInfo
) {
4610 r
= hashmap_put(u
->requires_mounts_for
, path
, di
.data
);
4615 prefix
= alloca(strlen(path
) + 1);
4616 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4619 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4621 _cleanup_free_
char *q
= NULL
;
4623 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4635 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
4651 int unit_setup_exec_runtime(Unit
*u
) {
4659 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4662 /* Check if there already is an ExecRuntime for this unit? */
4663 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4667 /* Try to get it from somebody else */
4668 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4669 r
= exec_runtime_acquire(u
->manager
, NULL
, other
->id
, false, rt
);
4674 return exec_runtime_acquire(u
->manager
, unit_get_exec_context(u
), u
->id
, true, rt
);
4677 int unit_setup_dynamic_creds(Unit
*u
) {
4679 DynamicCreds
*dcreds
;
4684 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4686 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4688 ec
= unit_get_exec_context(u
);
4691 if (!ec
->dynamic_user
)
4694 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
4697 bool unit_type_supported(UnitType t
) {
4698 if (_unlikely_(t
< 0))
4700 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4703 if (!unit_vtable
[t
]->supported
)
4706 return unit_vtable
[t
]->supported();
4709 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4715 r
= dir_is_empty(where
);
4716 if (r
> 0 || r
== -ENOTDIR
)
4719 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4723 log_struct(LOG_NOTICE
,
4724 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4726 LOG_UNIT_INVOCATION_ID(u
),
4727 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
4731 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4732 _cleanup_free_
char *canonical_where
;
4738 r
= chase_symlinks(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
);
4740 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4744 /* We will happily ignore a trailing slash (or any redundant slashes) */
4745 if (path_equal(where
, canonical_where
))
4748 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4750 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4752 LOG_UNIT_INVOCATION_ID(u
),
4753 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
4759 bool unit_is_pristine(Unit
*u
) {
4762 /* Check if the unit already exists or is already around,
4763 * in a number of different ways. Note that to cater for unit
4764 * types such as slice, we are generally fine with units that
4765 * are marked UNIT_LOADED even though nothing was actually
4766 * loaded, as those unit types don't require a file on disk. */
4768 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4771 !strv_isempty(u
->dropin_paths
) ||
4776 pid_t
unit_control_pid(Unit
*u
) {
4779 if (UNIT_VTABLE(u
)->control_pid
)
4780 return UNIT_VTABLE(u
)->control_pid(u
);
4785 pid_t
unit_main_pid(Unit
*u
) {
4788 if (UNIT_VTABLE(u
)->main_pid
)
4789 return UNIT_VTABLE(u
)->main_pid(u
);
4794 static void unit_unref_uid_internal(
4798 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4802 assert(_manager_unref_uid
);
4804 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4805 * gid_t are actually the same time, with the same validity rules.
4807 * Drops a reference to UID/GID from a unit. */
4809 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4810 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4812 if (!uid_is_valid(*ref_uid
))
4815 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4816 *ref_uid
= UID_INVALID
;
4819 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4820 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4823 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4824 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4827 static int unit_ref_uid_internal(
4832 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4838 assert(uid_is_valid(uid
));
4839 assert(_manager_ref_uid
);
4841 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4842 * are actually the same type, and have the same validity rules.
4844 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4845 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4848 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4849 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4851 if (*ref_uid
== uid
)
4854 if (uid_is_valid(*ref_uid
)) /* Already set? */
4857 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
4865 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4866 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
4869 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4870 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
4873 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4878 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4880 if (uid_is_valid(uid
)) {
4881 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4886 if (gid_is_valid(gid
)) {
4887 q
= unit_ref_gid(u
, gid
, clean_ipc
);
4890 unit_unref_uid(u
, false);
4896 return r
> 0 || q
> 0;
4899 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4905 c
= unit_get_exec_context(u
);
4907 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4909 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4914 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4917 unit_unref_uid(u
, destroy_now
);
4918 unit_unref_gid(u
, destroy_now
);
4921 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4926 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4927 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4928 * objects when no service references the UID/GID anymore. */
4930 r
= unit_ref_uid_gid(u
, uid
, gid
);
4932 bus_unit_send_change_signal(u
);
4935 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4940 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4942 if (sd_id128_equal(u
->invocation_id
, id
))
4945 if (!sd_id128_is_null(u
->invocation_id
))
4946 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4948 if (sd_id128_is_null(id
)) {
4953 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
4957 u
->invocation_id
= id
;
4958 sd_id128_to_string(id
, u
->invocation_id_string
);
4960 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4967 u
->invocation_id
= SD_ID128_NULL
;
4968 u
->invocation_id_string
[0] = 0;
4972 int unit_acquire_invocation_id(Unit
*u
) {
4978 r
= sd_id128_randomize(&id
);
4980 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
4982 r
= unit_set_invocation_id(u
, id
);
4984 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
4989 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
4995 /* Copy parameters from manager */
4996 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5000 p
->confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5001 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5002 p
->prefix
= u
->manager
->prefix
;
5003 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5005 /* Copy paramaters from unit */
5006 p
->cgroup_path
= u
->cgroup_path
;
5007 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5012 int unit_fork_helper_process(Unit
*u
, const char *name
, pid_t
*ret
) {
5018 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5019 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5021 (void) unit_realize_cgroup(u
);
5023 r
= safe_fork(name
, FORK_REOPEN_LOG
, ret
);
5027 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
, -1);
5028 (void) ignore_signals(SIGPIPE
, -1);
5030 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
5032 if (u
->cgroup_path
) {
5033 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5035 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", u
->cgroup_path
);
5043 static void unit_update_dependency_mask(Unit
*u
, UnitDependency d
, Unit
*other
, UnitDependencyInfo di
) {
5046 assert(d
< _UNIT_DEPENDENCY_MAX
);
5049 if (di
.origin_mask
== 0 && di
.destination_mask
== 0) {
5050 /* No bit set anymore, let's drop the whole entry */
5051 assert_se(hashmap_remove(u
->dependencies
[d
], other
));
5052 log_unit_debug(u
, "%s lost dependency %s=%s", u
->id
, unit_dependency_to_string(d
), other
->id
);
5054 /* Mask was reduced, let's update the entry */
5055 assert_se(hashmap_update(u
->dependencies
[d
], other
, di
.data
) == 0);
5058 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5063 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5068 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
5072 UnitDependencyInfo di
;
5078 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
5081 if ((di
.origin_mask
& ~mask
) == di
.origin_mask
)
5083 di
.origin_mask
&= ~mask
;
5084 unit_update_dependency_mask(u
, d
, other
, di
);
5086 /* We updated the dependency from our unit to the other unit now. But most dependencies
5087 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5088 * all dependency types on the other unit and delete all those which point to us and
5089 * have the right mask set. */
5091 for (q
= 0; q
< _UNIT_DEPENDENCY_MAX
; q
++) {
5092 UnitDependencyInfo dj
;
5094 dj
.data
= hashmap_get(other
->dependencies
[q
], u
);
5095 if ((dj
.destination_mask
& ~mask
) == dj
.destination_mask
)
5097 dj
.destination_mask
&= ~mask
;
5099 unit_update_dependency_mask(other
, q
, u
, dj
);
5102 unit_add_to_gc_queue(other
);
5112 static int unit_export_invocation_id(Unit
*u
) {
5118 if (u
->exported_invocation_id
)
5121 if (sd_id128_is_null(u
->invocation_id
))
5124 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5125 r
= symlink_atomic(u
->invocation_id_string
, p
);
5127 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5129 u
->exported_invocation_id
= true;
5133 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5141 if (u
->exported_log_level_max
)
5144 if (c
->log_level_max
< 0)
5147 assert(c
->log_level_max
<= 7);
5149 buf
[0] = '0' + c
->log_level_max
;
5152 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5153 r
= symlink_atomic(buf
, p
);
5155 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5157 u
->exported_log_level_max
= true;
5161 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5162 _cleanup_close_
int fd
= -1;
5163 struct iovec
*iovec
;
5171 if (u
->exported_log_extra_fields
)
5174 if (c
->n_log_extra_fields
<= 0)
5177 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5178 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5180 for (i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5181 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5183 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5184 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5187 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5188 pattern
= strjoina(p
, ".XXXXXX");
5190 fd
= mkostemp_safe(pattern
);
5192 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5194 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5196 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5200 (void) fchmod(fd
, 0644);
5202 if (rename(pattern
, p
) < 0) {
5203 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5207 u
->exported_log_extra_fields
= true;
5211 (void) unlink(pattern
);
5215 static int unit_export_log_rate_limit_interval(Unit
*u
, const ExecContext
*c
) {
5216 _cleanup_free_
char *buf
= NULL
;
5223 if (u
->exported_log_rate_limit_interval
)
5226 if (c
->log_rate_limit_interval_usec
== 0)
5229 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5231 if (asprintf(&buf
, "%" PRIu64
, c
->log_rate_limit_interval_usec
) < 0)
5234 r
= symlink_atomic(buf
, p
);
5236 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5238 u
->exported_log_rate_limit_interval
= true;
5242 static int unit_export_log_rate_limit_burst(Unit
*u
, const ExecContext
*c
) {
5243 _cleanup_free_
char *buf
= NULL
;
5250 if (u
->exported_log_rate_limit_burst
)
5253 if (c
->log_rate_limit_burst
== 0)
5256 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5258 if (asprintf(&buf
, "%u", c
->log_rate_limit_burst
) < 0)
5261 r
= symlink_atomic(buf
, p
);
5263 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5265 u
->exported_log_rate_limit_burst
= true;
5269 void unit_export_state_files(Unit
*u
) {
5270 const ExecContext
*c
;
5277 if (!MANAGER_IS_SYSTEM(u
->manager
))
5280 if (MANAGER_IS_TEST_RUN(u
->manager
))
5283 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5284 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5285 * the IPC system itself and PID 1 also log to the journal.
5287 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5288 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5289 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5290 * namespace at least.
5292 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5293 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5296 (void) unit_export_invocation_id(u
);
5298 c
= unit_get_exec_context(u
);
5300 (void) unit_export_log_level_max(u
, c
);
5301 (void) unit_export_log_extra_fields(u
, c
);
5302 (void) unit_export_log_rate_limit_interval(u
, c
);
5303 (void) unit_export_log_rate_limit_burst(u
, c
);
5307 void unit_unlink_state_files(Unit
*u
) {
5315 if (!MANAGER_IS_SYSTEM(u
->manager
))
5318 /* Undoes the effect of unit_export_state() */
5320 if (u
->exported_invocation_id
) {
5321 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5324 u
->exported_invocation_id
= false;
5327 if (u
->exported_log_level_max
) {
5328 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5331 u
->exported_log_level_max
= false;
5334 if (u
->exported_log_extra_fields
) {
5335 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5338 u
->exported_log_extra_fields
= false;
5341 if (u
->exported_log_rate_limit_interval
) {
5342 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5345 u
->exported_log_rate_limit_interval
= false;
5348 if (u
->exported_log_rate_limit_burst
) {
5349 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5352 u
->exported_log_rate_limit_burst
= false;
5356 int unit_prepare_exec(Unit
*u
) {
5361 /* Prepares everything so that we can fork of a process for this unit */
5363 (void) unit_realize_cgroup(u
);
5365 if (u
->reset_accounting
) {
5366 (void) unit_reset_cpu_accounting(u
);
5367 (void) unit_reset_ip_accounting(u
);
5368 u
->reset_accounting
= false;
5371 unit_export_state_files(u
);
5373 r
= unit_setup_exec_runtime(u
);
5377 r
= unit_setup_dynamic_creds(u
);
5384 static void log_leftover(pid_t pid
, int sig
, void *userdata
) {
5385 _cleanup_free_
char *comm
= NULL
;
5387 (void) get_process_comm(pid
, &comm
);
5389 if (comm
&& comm
[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5392 log_unit_warning(userdata
,
5393 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5394 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5398 void unit_warn_leftover_processes(Unit
*u
) {
5401 (void) unit_pick_cgroup_path(u
);
5403 if (!u
->cgroup_path
)
5406 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_leftover
, u
);
5409 bool unit_needs_console(Unit
*u
) {
5411 UnitActiveState state
;
5415 state
= unit_active_state(u
);
5417 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5420 if (UNIT_VTABLE(u
)->needs_console
)
5421 return UNIT_VTABLE(u
)->needs_console(u
);
5423 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5424 ec
= unit_get_exec_context(u
);
5428 return exec_context_may_touch_console(ec
);
5431 const char *unit_label_path(Unit
*u
) {
5434 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5435 * when validating access checks. */
5437 p
= u
->source_path
?: u
->fragment_path
;
5441 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5442 if (path_equal(p
, "/dev/null"))
5448 int unit_pid_attachable(Unit
*u
, pid_t pid
, sd_bus_error
*error
) {
5453 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5454 * and not a kernel thread either */
5456 /* First, a simple range check */
5457 if (!pid_is_valid(pid
))
5458 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier " PID_FMT
" is not valid.", pid
);
5460 /* Some extra safety check */
5461 if (pid
== 1 || pid
== getpid_cached())
5462 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
);
5464 /* Don't even begin to bother with kernel threads */
5465 r
= is_kernel_thread(pid
);
5467 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
);
5469 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
);
5471 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
);
5476 void unit_log_success(Unit
*u
) {
5479 log_struct(LOG_INFO
,
5480 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5482 LOG_UNIT_INVOCATION_ID(u
),
5483 LOG_UNIT_MESSAGE(u
, "Succeeded."));
5486 void unit_log_failure(Unit
*u
, const char *result
) {
5490 log_struct(LOG_WARNING
,
5491 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5493 LOG_UNIT_INVOCATION_ID(u
),
5494 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5495 "UNIT_RESULT=%s", result
);
5498 void unit_log_process_exit(
5502 const char *command
,
5509 if (code
!= CLD_EXITED
)
5510 level
= LOG_WARNING
;
5513 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
5514 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s",
5516 sigchld_code_to_string(code
), status
,
5517 strna(code
== CLD_EXITED
5518 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
5519 : signal_to_string(status
))),
5520 "EXIT_CODE=%s", sigchld_code_to_string(code
),
5521 "EXIT_STATUS=%i", status
,
5522 "COMMAND=%s", strna(command
),
5524 LOG_UNIT_INVOCATION_ID(u
));
5527 int unit_exit_status(Unit
*u
) {
5530 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5531 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5532 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5533 * service process has exited abnormally (signal/coredump). */
5535 if (!UNIT_VTABLE(u
)->exit_status
)
5538 return UNIT_VTABLE(u
)->exit_status(u
);
5541 int unit_failure_action_exit_status(Unit
*u
) {
5546 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5548 if (u
->failure_action_exit_status
>= 0)
5549 return u
->failure_action_exit_status
;
5551 r
= unit_exit_status(u
);
5552 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5558 int unit_success_action_exit_status(Unit
*u
) {
5563 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5565 if (u
->success_action_exit_status
>= 0)
5566 return u
->success_action_exit_status
;
5568 r
= unit_exit_status(u
);
5569 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5575 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5576 [COLLECT_INACTIVE
] = "inactive",
5577 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5580 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);