1 /* SPDX-License-Identifier: LGPL-2.1+ */
3 This file is part of systemd.
5 Copyright 2010 Lennart Poettering
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
28 #include "sd-messages.h"
30 #include "alloc-util.h"
31 #include "bus-common-errors.h"
33 #include "cgroup-util.h"
34 #include "dbus-unit.h"
40 #include "fileio-label.h"
41 #include "format-util.h"
43 #include "id128-util.h"
45 #include "load-dropin.h"
46 #include "load-fragment.h"
51 #include "parse-util.h"
52 #include "path-util.h"
53 #include "process-util.h"
55 #include "signal-util.h"
56 #include "sparse-endian.h"
58 #include "specifier.h"
59 #include "stat-util.h"
60 #include "stdio-util.h"
61 #include "string-table.h"
62 #include "string-util.h"
64 #include "umask-util.h"
65 #include "unit-name.h"
67 #include "user-util.h"
70 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
71 [UNIT_SERVICE
] = &service_vtable
,
72 [UNIT_SOCKET
] = &socket_vtable
,
73 [UNIT_TARGET
] = &target_vtable
,
74 [UNIT_DEVICE
] = &device_vtable
,
75 [UNIT_MOUNT
] = &mount_vtable
,
76 [UNIT_AUTOMOUNT
] = &automount_vtable
,
77 [UNIT_SWAP
] = &swap_vtable
,
78 [UNIT_TIMER
] = &timer_vtable
,
79 [UNIT_PATH
] = &path_vtable
,
80 [UNIT_SLICE
] = &slice_vtable
,
81 [UNIT_SCOPE
] = &scope_vtable
,
84 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
86 Unit
*unit_new(Manager
*m
, size_t size
) {
90 assert(size
>= sizeof(Unit
));
96 u
->names
= set_new(&string_hash_ops
);
101 u
->type
= _UNIT_TYPE_INVALID
;
102 u
->default_dependencies
= true;
103 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
104 u
->unit_file_preset
= -1;
105 u
->on_failure_job_mode
= JOB_REPLACE
;
106 u
->cgroup_inotify_wd
= -1;
107 u
->job_timeout
= USEC_INFINITY
;
108 u
->job_running_timeout
= USEC_INFINITY
;
109 u
->ref_uid
= UID_INVALID
;
110 u
->ref_gid
= GID_INVALID
;
111 u
->cpu_usage_last
= NSEC_INFINITY
;
112 u
->cgroup_bpf_state
= UNIT_CGROUP_BPF_INVALIDATED
;
114 u
->ip_accounting_ingress_map_fd
= -1;
115 u
->ip_accounting_egress_map_fd
= -1;
116 u
->ipv4_allow_map_fd
= -1;
117 u
->ipv6_allow_map_fd
= -1;
118 u
->ipv4_deny_map_fd
= -1;
119 u
->ipv6_deny_map_fd
= -1;
121 u
->last_section_private
= -1;
123 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
124 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
129 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
133 u
= unit_new(m
, size
);
137 r
= unit_add_name(u
, name
);
147 bool unit_has_name(Unit
*u
, const char *name
) {
151 return set_contains(u
->names
, (char*) name
);
154 static void unit_init(Unit
*u
) {
161 assert(u
->type
>= 0);
163 cc
= unit_get_cgroup_context(u
);
165 cgroup_context_init(cc
);
167 /* Copy in the manager defaults into the cgroup
168 * context, _before_ the rest of the settings have
169 * been initialized */
171 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
172 cc
->io_accounting
= u
->manager
->default_io_accounting
;
173 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
174 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
175 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
176 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
177 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
179 if (u
->type
!= UNIT_SLICE
)
180 cc
->tasks_max
= u
->manager
->default_tasks_max
;
183 ec
= unit_get_exec_context(u
);
185 exec_context_init(ec
);
187 ec
->keyring_mode
= MANAGER_IS_SYSTEM(u
->manager
) ?
188 EXEC_KEYRING_PRIVATE
: EXEC_KEYRING_INHERIT
;
191 kc
= unit_get_kill_context(u
);
193 kill_context_init(kc
);
195 if (UNIT_VTABLE(u
)->init
)
196 UNIT_VTABLE(u
)->init(u
);
199 int unit_add_name(Unit
*u
, const char *text
) {
200 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
207 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
212 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
221 if (set_contains(u
->names
, s
))
223 if (hashmap_contains(u
->manager
->units
, s
))
226 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
229 t
= unit_name_to_type(s
);
233 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
236 r
= unit_name_to_instance(s
, &i
);
240 if (i
&& !unit_type_may_template(t
))
243 /* Ensure that this unit is either instanced or not instanced,
244 * but not both. Note that we do allow names with different
245 * instance names however! */
246 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
249 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
252 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
255 r
= set_put(u
->names
, s
);
260 r
= hashmap_put(u
->manager
->units
, s
, u
);
262 (void) set_remove(u
->names
, s
);
266 if (u
->type
== _UNIT_TYPE_INVALID
) {
271 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
280 unit_add_to_dbus_queue(u
);
284 int unit_choose_id(Unit
*u
, const char *name
) {
285 _cleanup_free_
char *t
= NULL
;
292 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
297 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
304 /* Selects one of the names of this unit as the id */
305 s
= set_get(u
->names
, (char*) name
);
309 /* Determine the new instance from the new id */
310 r
= unit_name_to_instance(s
, &i
);
319 unit_add_to_dbus_queue(u
);
324 int unit_set_description(Unit
*u
, const char *description
) {
329 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
333 unit_add_to_dbus_queue(u
);
338 bool unit_check_gc(Unit
*u
) {
339 UnitActiveState state
;
344 /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true, when the unit shall
345 * stay around, false if there's no reason to keep it loaded. */
353 state
= unit_active_state(u
);
355 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
356 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
357 UNIT_VTABLE(u
)->release_resources
)
358 UNIT_VTABLE(u
)->release_resources(u
);
366 if (sd_bus_track_count(u
->bus_track
) > 0)
369 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
370 switch (u
->collect_mode
) {
372 case COLLECT_INACTIVE
:
373 if (state
!= UNIT_INACTIVE
)
378 case COLLECT_INACTIVE_OR_FAILED
:
379 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
385 assert_not_reached("Unknown garbage collection mode");
388 if (u
->cgroup_path
) {
389 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
390 * around. Units with active processes should never be collected. */
392 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
394 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", u
->cgroup_path
);
399 if (UNIT_VTABLE(u
)->check_gc
)
400 if (UNIT_VTABLE(u
)->check_gc(u
))
406 void unit_add_to_load_queue(Unit
*u
) {
408 assert(u
->type
!= _UNIT_TYPE_INVALID
);
410 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
413 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
414 u
->in_load_queue
= true;
417 void unit_add_to_cleanup_queue(Unit
*u
) {
420 if (u
->in_cleanup_queue
)
423 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
424 u
->in_cleanup_queue
= true;
427 void unit_add_to_gc_queue(Unit
*u
) {
430 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
433 if (unit_check_gc(u
))
436 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
437 u
->in_gc_queue
= true;
440 void unit_add_to_dbus_queue(Unit
*u
) {
442 assert(u
->type
!= _UNIT_TYPE_INVALID
);
444 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
447 /* Shortcut things if nobody cares */
448 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
449 sd_bus_track_count(u
->bus_track
) <= 0 &&
450 set_isempty(u
->manager
->private_buses
)) {
451 u
->sent_dbus_new_signal
= true;
455 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
456 u
->in_dbus_queue
= true;
459 static void bidi_set_free(Unit
*u
, Hashmap
*h
) {
466 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
468 HASHMAP_FOREACH_KEY(v
, other
, h
, i
) {
471 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
472 hashmap_remove(other
->dependencies
[d
], u
);
474 unit_add_to_gc_queue(other
);
480 static void unit_remove_transient(Unit
*u
) {
488 if (u
->fragment_path
)
489 (void) unlink(u
->fragment_path
);
491 STRV_FOREACH(i
, u
->dropin_paths
) {
492 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
494 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
498 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
502 /* Only drop transient drop-ins */
503 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
511 static void unit_free_requires_mounts_for(Unit
*u
) {
515 _cleanup_free_
char *path
;
517 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
521 char s
[strlen(path
) + 1];
523 PATH_FOREACH_PREFIX_MORE(s
, path
) {
527 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
531 (void) set_remove(x
, u
);
533 if (set_isempty(x
)) {
534 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
542 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
545 static void unit_done(Unit
*u
) {
554 if (UNIT_VTABLE(u
)->done
)
555 UNIT_VTABLE(u
)->done(u
);
557 ec
= unit_get_exec_context(u
);
559 exec_context_done(ec
);
561 cc
= unit_get_cgroup_context(u
);
563 cgroup_context_done(cc
);
566 void unit_free(Unit
*u
) {
574 u
->transient_file
= safe_fclose(u
->transient_file
);
576 if (!MANAGER_IS_RELOADING(u
->manager
))
577 unit_remove_transient(u
);
579 bus_unit_send_removed_signal(u
);
583 sd_bus_slot_unref(u
->match_bus_slot
);
585 sd_bus_track_unref(u
->bus_track
);
586 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
588 unit_free_requires_mounts_for(u
);
590 SET_FOREACH(t
, u
->names
, i
)
591 hashmap_remove_value(u
->manager
->units
, t
, u
);
593 if (!sd_id128_is_null(u
->invocation_id
))
594 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
608 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
609 bidi_set_free(u
, u
->dependencies
[d
]);
611 if (u
->type
!= _UNIT_TYPE_INVALID
)
612 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
614 if (u
->in_load_queue
)
615 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
617 if (u
->in_dbus_queue
)
618 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
620 if (u
->in_cleanup_queue
)
621 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
624 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
626 if (u
->in_cgroup_realize_queue
)
627 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
629 if (u
->in_cgroup_empty_queue
)
630 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
632 unit_release_cgroup(u
);
634 if (!MANAGER_IS_RELOADING(u
->manager
))
635 unit_unlink_state_files(u
);
637 unit_unref_uid_gid(u
, false);
639 (void) manager_update_failed_units(u
->manager
, u
, false);
640 set_remove(u
->manager
->startup_units
, u
);
642 free(u
->description
);
643 strv_free(u
->documentation
);
644 free(u
->fragment_path
);
645 free(u
->source_path
);
646 strv_free(u
->dropin_paths
);
649 free(u
->job_timeout_reboot_arg
);
651 set_free_free(u
->names
);
653 unit_unwatch_all_pids(u
);
655 condition_free_list(u
->conditions
);
656 condition_free_list(u
->asserts
);
660 unit_ref_unset(&u
->slice
);
663 unit_ref_unset(u
->refs
);
665 safe_close(u
->ip_accounting_ingress_map_fd
);
666 safe_close(u
->ip_accounting_egress_map_fd
);
668 safe_close(u
->ipv4_allow_map_fd
);
669 safe_close(u
->ipv6_allow_map_fd
);
670 safe_close(u
->ipv4_deny_map_fd
);
671 safe_close(u
->ipv6_deny_map_fd
);
673 bpf_program_unref(u
->ip_bpf_ingress
);
674 bpf_program_unref(u
->ip_bpf_egress
);
679 UnitActiveState
unit_active_state(Unit
*u
) {
682 if (u
->load_state
== UNIT_MERGED
)
683 return unit_active_state(unit_follow_merge(u
));
685 /* After a reload it might happen that a unit is not correctly
686 * loaded but still has a process around. That's why we won't
687 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
689 return UNIT_VTABLE(u
)->active_state(u
);
692 const char* unit_sub_state_to_string(Unit
*u
) {
695 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
698 static int set_complete_move(Set
**s
, Set
**other
) {
706 return set_move(*s
, *other
);
715 static int hashmap_complete_move(Hashmap
**s
, Hashmap
**other
) {
723 return hashmap_move(*s
, *other
);
732 static int merge_names(Unit
*u
, Unit
*other
) {
740 r
= set_complete_move(&u
->names
, &other
->names
);
744 set_free_free(other
->names
);
748 SET_FOREACH(t
, u
->names
, i
)
749 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
754 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
759 assert(d
< _UNIT_DEPENDENCY_MAX
);
762 * If u does not have this dependency set allocated, there is no need
763 * to reserve anything. In that case other's set will be transferred
764 * as a whole to u by complete_move().
766 if (!u
->dependencies
[d
])
769 /* merge_dependencies() will skip a u-on-u dependency */
770 n_reserve
= hashmap_size(other
->dependencies
[d
]) - !!hashmap_get(other
->dependencies
[d
], u
);
772 return hashmap_reserve(u
->dependencies
[d
], n_reserve
);
775 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
781 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
785 assert(d
< _UNIT_DEPENDENCY_MAX
);
787 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
788 HASHMAP_FOREACH_KEY(v
, back
, other
->dependencies
[d
], i
) {
791 /* Let's now iterate through the dependencies of those dependent units, looking for
792 * pointers back, and let's fix them up, to instead point to 'u'. */
794 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
796 /* Do not add dependencies between u and itself. */
797 if (hashmap_remove(back
->dependencies
[k
], other
))
798 maybe_warn_about_dependency(u
, other_id
, k
);
800 UnitDependencyInfo di_u
, di_other
, di_merged
;
802 /* Let's drop this dependency between "back" and "other", and let's create it between
803 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
804 * and any such dependency which might already exist */
806 di_other
.data
= hashmap_get(back
->dependencies
[k
], other
);
808 continue; /* dependency isn't set, let's try the next one */
810 di_u
.data
= hashmap_get(back
->dependencies
[k
], u
);
812 di_merged
= (UnitDependencyInfo
) {
813 .origin_mask
= di_u
.origin_mask
| di_other
.origin_mask
,
814 .destination_mask
= di_u
.destination_mask
| di_other
.destination_mask
,
817 r
= hashmap_remove_and_replace(back
->dependencies
[k
], other
, u
, di_merged
.data
);
819 log_warning_errno(r
, "Failed to remove/replace: back=%s other=%s u=%s: %m", back
->id
, other_id
, u
->id
);
822 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
828 /* Also do not move dependencies on u to itself */
829 back
= hashmap_remove(other
->dependencies
[d
], u
);
831 maybe_warn_about_dependency(u
, other_id
, d
);
833 /* The move cannot fail. The caller must have performed a reservation. */
834 assert_se(hashmap_complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
836 other
->dependencies
[d
] = hashmap_free(other
->dependencies
[d
]);
839 int unit_merge(Unit
*u
, Unit
*other
) {
841 const char *other_id
= NULL
;
846 assert(u
->manager
== other
->manager
);
847 assert(u
->type
!= _UNIT_TYPE_INVALID
);
849 other
= unit_follow_merge(other
);
854 if (u
->type
!= other
->type
)
857 if (!u
->instance
!= !other
->instance
)
860 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
863 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
872 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
876 other_id
= strdupa(other
->id
);
878 /* Make reservations to ensure merge_dependencies() won't fail */
879 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
880 r
= reserve_dependencies(u
, other
, d
);
882 * We don't rollback reservations if we fail. We don't have
883 * a way to undo reservations. A reservation is not a leak.
890 r
= merge_names(u
, other
);
894 /* Redirect all references */
896 unit_ref_set(other
->refs
, u
);
898 /* Merge dependencies */
899 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
900 merge_dependencies(u
, other
, other_id
, d
);
902 other
->load_state
= UNIT_MERGED
;
903 other
->merged_into
= u
;
905 /* If there is still some data attached to the other node, we
906 * don't need it anymore, and can free it. */
907 if (other
->load_state
!= UNIT_STUB
)
908 if (UNIT_VTABLE(other
)->done
)
909 UNIT_VTABLE(other
)->done(other
);
911 unit_add_to_dbus_queue(u
);
912 unit_add_to_cleanup_queue(other
);
917 int unit_merge_by_name(Unit
*u
, const char *name
) {
918 _cleanup_free_
char *s
= NULL
;
925 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
929 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
936 other
= manager_get_unit(u
->manager
, name
);
938 return unit_merge(u
, other
);
940 return unit_add_name(u
, name
);
943 Unit
* unit_follow_merge(Unit
*u
) {
946 while (u
->load_state
== UNIT_MERGED
)
947 assert_se(u
= u
->merged_into
);
952 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
953 ExecDirectoryType dt
;
960 if (c
->working_directory
) {
961 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
966 if (c
->root_directory
) {
967 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
973 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
978 for (dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
979 if (!u
->manager
->prefix
[dt
])
982 STRV_FOREACH(dp
, c
->directories
[dt
].paths
) {
983 _cleanup_free_
char *p
;
985 p
= strjoin(u
->manager
->prefix
[dt
], "/", *dp
);
989 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
995 if (!MANAGER_IS_SYSTEM(u
->manager
))
998 if (c
->private_tmp
) {
1001 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
1002 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1007 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, NULL
, true, UNIT_DEPENDENCY_FILE
);
1012 if (!IN_SET(c
->std_output
,
1013 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1014 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1015 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
1016 !IN_SET(c
->std_error
,
1017 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1018 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
1019 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
1022 /* If syslog or kernel logging is requested, make sure our own
1023 * logging daemon is run first. */
1025 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, NULL
, true, UNIT_DEPENDENCY_FILE
);
1032 const char *unit_description(Unit
*u
) {
1036 return u
->description
;
1038 return strna(u
->id
);
1041 static void print_unit_dependency_mask(FILE *f
, const char *kind
, UnitDependencyMask mask
, bool *space
) {
1043 UnitDependencyMask mask
;
1046 { UNIT_DEPENDENCY_FILE
, "file" },
1047 { UNIT_DEPENDENCY_IMPLICIT
, "implicit" },
1048 { UNIT_DEPENDENCY_DEFAULT
, "default" },
1049 { UNIT_DEPENDENCY_UDEV
, "udev" },
1050 { UNIT_DEPENDENCY_PATH
, "path" },
1051 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT
, "mountinfo-implicit" },
1052 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT
, "mountinfo-default" },
1053 { UNIT_DEPENDENCY_PROC_SWAP
, "proc-swap" },
1061 for (i
= 0; i
< ELEMENTSOF(table
); i
++) {
1066 if ((mask
& table
[i
].mask
) == table
[i
].mask
) {
1074 fputs(table
[i
].name
, f
);
1076 mask
&= ~table
[i
].mask
;
1083 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
1087 const char *prefix2
;
1089 timestamp0
[FORMAT_TIMESTAMP_MAX
],
1090 timestamp1
[FORMAT_TIMESTAMP_MAX
],
1091 timestamp2
[FORMAT_TIMESTAMP_MAX
],
1092 timestamp3
[FORMAT_TIMESTAMP_MAX
],
1093 timestamp4
[FORMAT_TIMESTAMP_MAX
],
1094 timespan
[FORMAT_TIMESPAN_MAX
];
1096 _cleanup_set_free_ Set
*following_set
= NULL
;
1102 assert(u
->type
>= 0);
1104 prefix
= strempty(prefix
);
1105 prefix2
= strjoina(prefix
, "\t");
1109 "%s\tDescription: %s\n"
1110 "%s\tInstance: %s\n"
1111 "%s\tUnit Load State: %s\n"
1112 "%s\tUnit Active State: %s\n"
1113 "%s\tState Change Timestamp: %s\n"
1114 "%s\tInactive Exit Timestamp: %s\n"
1115 "%s\tActive Enter Timestamp: %s\n"
1116 "%s\tActive Exit Timestamp: %s\n"
1117 "%s\tInactive Enter Timestamp: %s\n"
1118 "%s\tGC Check Good: %s\n"
1119 "%s\tNeed Daemon Reload: %s\n"
1120 "%s\tTransient: %s\n"
1121 "%s\tPerpetual: %s\n"
1122 "%s\tGarbage Collection Mode: %s\n"
1125 "%s\tCGroup realized: %s\n",
1127 prefix
, unit_description(u
),
1128 prefix
, strna(u
->instance
),
1129 prefix
, unit_load_state_to_string(u
->load_state
),
1130 prefix
, unit_active_state_to_string(unit_active_state(u
)),
1131 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
1132 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
1133 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
1134 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
1135 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
1136 prefix
, yes_no(unit_check_gc(u
)),
1137 prefix
, yes_no(unit_need_daemon_reload(u
)),
1138 prefix
, yes_no(u
->transient
),
1139 prefix
, yes_no(u
->perpetual
),
1140 prefix
, collect_mode_to_string(u
->collect_mode
),
1141 prefix
, strna(unit_slice_name(u
)),
1142 prefix
, strna(u
->cgroup_path
),
1143 prefix
, yes_no(u
->cgroup_realized
));
1145 if (u
->cgroup_realized_mask
!= 0) {
1146 _cleanup_free_
char *s
= NULL
;
1147 (void) cg_mask_to_string(u
->cgroup_realized_mask
, &s
);
1148 fprintf(f
, "%s\tCGroup realized mask: %s\n", prefix
, strnull(s
));
1150 if (u
->cgroup_enabled_mask
!= 0) {
1151 _cleanup_free_
char *s
= NULL
;
1152 (void) cg_mask_to_string(u
->cgroup_enabled_mask
, &s
);
1153 fprintf(f
, "%s\tCGroup enabled mask: %s\n", prefix
, strnull(s
));
1155 m
= unit_get_own_mask(u
);
1157 _cleanup_free_
char *s
= NULL
;
1158 (void) cg_mask_to_string(m
, &s
);
1159 fprintf(f
, "%s\tCGroup own mask: %s\n", prefix
, strnull(s
));
1161 m
= unit_get_members_mask(u
);
1163 _cleanup_free_
char *s
= NULL
;
1164 (void) cg_mask_to_string(m
, &s
);
1165 fprintf(f
, "%s\tCGroup members mask: %s\n", prefix
, strnull(s
));
1168 SET_FOREACH(t
, u
->names
, i
)
1169 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
1171 if (!sd_id128_is_null(u
->invocation_id
))
1172 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
1173 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
1175 STRV_FOREACH(j
, u
->documentation
)
1176 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
1178 following
= unit_following(u
);
1180 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
1182 r
= unit_following_set(u
, &following_set
);
1186 SET_FOREACH(other
, following_set
, i
)
1187 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1190 if (u
->fragment_path
)
1191 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1194 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1196 STRV_FOREACH(j
, u
->dropin_paths
)
1197 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1199 if (u
->failure_action
!= EMERGENCY_ACTION_NONE
)
1200 fprintf(f
, "%s\tFailure Action: %s\n", prefix
, emergency_action_to_string(u
->failure_action
));
1201 if (u
->success_action
!= EMERGENCY_ACTION_NONE
)
1202 fprintf(f
, "%s\tSuccess Action: %s\n", prefix
, emergency_action_to_string(u
->success_action
));
1204 if (u
->job_timeout
!= USEC_INFINITY
)
1205 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1207 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1208 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1210 if (u
->job_timeout_reboot_arg
)
1211 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1213 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1214 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1216 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1218 "%s\tCondition Timestamp: %s\n"
1219 "%s\tCondition Result: %s\n",
1220 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1221 prefix
, yes_no(u
->condition_result
));
1223 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1225 "%s\tAssert Timestamp: %s\n"
1226 "%s\tAssert Result: %s\n",
1227 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1228 prefix
, yes_no(u
->assert_result
));
1230 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1231 UnitDependencyInfo di
;
1234 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
1237 fprintf(f
, "%s\t%s: %s (", prefix
, unit_dependency_to_string(d
), other
->id
);
1239 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1240 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1246 if (!hashmap_isempty(u
->requires_mounts_for
)) {
1247 UnitDependencyInfo di
;
1250 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1253 fprintf(f
, "%s\tRequiresMountsFor: %s (", prefix
, path
);
1255 print_unit_dependency_mask(f
, "origin", di
.origin_mask
, &space
);
1256 print_unit_dependency_mask(f
, "destination", di
.destination_mask
, &space
);
1262 if (u
->load_state
== UNIT_LOADED
) {
1265 "%s\tStopWhenUnneeded: %s\n"
1266 "%s\tRefuseManualStart: %s\n"
1267 "%s\tRefuseManualStop: %s\n"
1268 "%s\tDefaultDependencies: %s\n"
1269 "%s\tOnFailureJobMode: %s\n"
1270 "%s\tIgnoreOnIsolate: %s\n",
1271 prefix
, yes_no(u
->stop_when_unneeded
),
1272 prefix
, yes_no(u
->refuse_manual_start
),
1273 prefix
, yes_no(u
->refuse_manual_stop
),
1274 prefix
, yes_no(u
->default_dependencies
),
1275 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1276 prefix
, yes_no(u
->ignore_on_isolate
));
1278 if (UNIT_VTABLE(u
)->dump
)
1279 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1281 } else if (u
->load_state
== UNIT_MERGED
)
1283 "%s\tMerged into: %s\n",
1284 prefix
, u
->merged_into
->id
);
1285 else if (u
->load_state
== UNIT_ERROR
)
1286 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1288 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1289 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1292 job_dump(u
->job
, f
, prefix2
);
1295 job_dump(u
->nop_job
, f
, prefix2
);
1298 /* Common implementation for multiple backends */
1299 int unit_load_fragment_and_dropin(Unit
*u
) {
1304 /* Load a .{service,socket,...} file */
1305 r
= unit_load_fragment(u
);
1309 if (u
->load_state
== UNIT_STUB
)
1312 /* Load drop-in directory data. If u is an alias, we might be reloading the
1313 * target unit needlessly. But we cannot be sure which drops-ins have already
1314 * been loaded and which not, at least without doing complicated book-keeping,
1315 * so let's always reread all drop-ins. */
1316 return unit_load_dropin(unit_follow_merge(u
));
1319 /* Common implementation for multiple backends */
1320 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1325 /* Same as unit_load_fragment_and_dropin(), but whether
1326 * something can be loaded or not doesn't matter. */
1328 /* Load a .service file */
1329 r
= unit_load_fragment(u
);
1333 if (u
->load_state
== UNIT_STUB
)
1334 u
->load_state
= UNIT_LOADED
;
1336 /* Load drop-in directory data */
1337 return unit_load_dropin(unit_follow_merge(u
));
1340 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1344 if (target
->type
!= UNIT_TARGET
)
1347 /* Only add the dependency if both units are loaded, so that
1348 * that loop check below is reliable */
1349 if (u
->load_state
!= UNIT_LOADED
||
1350 target
->load_state
!= UNIT_LOADED
)
1353 /* If either side wants no automatic dependencies, then let's
1355 if (!u
->default_dependencies
||
1356 !target
->default_dependencies
)
1359 /* Don't create loops */
1360 if (hashmap_get(target
->dependencies
[UNIT_BEFORE
], u
))
1363 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1366 static int unit_add_target_dependencies(Unit
*u
) {
1368 static const UnitDependency deps
[] = {
1380 for (k
= 0; k
< ELEMENTSOF(deps
); k
++) {
1385 HASHMAP_FOREACH_KEY(v
, target
, u
->dependencies
[deps
[k
]], i
) {
1386 r
= unit_add_default_target_dependency(u
, target
);
1395 static int unit_add_slice_dependencies(Unit
*u
) {
1396 UnitDependencyMask mask
;
1399 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1402 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1403 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1405 mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1407 if (UNIT_ISSET(u
->slice
))
1408 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true, mask
);
1410 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1413 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, NULL
, true, mask
);
1416 static int unit_add_mount_dependencies(Unit
*u
) {
1417 UnitDependencyInfo di
;
1424 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
, i
) {
1425 char prefix
[strlen(path
) + 1];
1427 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1428 _cleanup_free_
char *p
= NULL
;
1431 r
= unit_name_from_path(prefix
, ".mount", &p
);
1435 m
= manager_get_unit(u
->manager
, p
);
1437 /* Make sure to load the mount unit if
1438 * it exists. If so the dependencies
1439 * on this unit will be added later
1440 * during the loading of the mount
1442 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1448 if (m
->load_state
!= UNIT_LOADED
)
1451 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1455 if (m
->fragment_path
) {
1456 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
1466 static int unit_add_startup_units(Unit
*u
) {
1470 c
= unit_get_cgroup_context(u
);
1474 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1475 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1476 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1479 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1483 return set_put(u
->manager
->startup_units
, u
);
1486 int unit_load(Unit
*u
) {
1491 if (u
->in_load_queue
) {
1492 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1493 u
->in_load_queue
= false;
1496 if (u
->type
== _UNIT_TYPE_INVALID
)
1499 if (u
->load_state
!= UNIT_STUB
)
1502 if (u
->transient_file
) {
1503 r
= fflush_and_check(u
->transient_file
);
1507 u
->transient_file
= safe_fclose(u
->transient_file
);
1508 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1511 if (UNIT_VTABLE(u
)->load
) {
1512 r
= UNIT_VTABLE(u
)->load(u
);
1517 if (u
->load_state
== UNIT_STUB
) {
1522 if (u
->load_state
== UNIT_LOADED
) {
1524 r
= unit_add_target_dependencies(u
);
1528 r
= unit_add_slice_dependencies(u
);
1532 r
= unit_add_mount_dependencies(u
);
1536 r
= unit_add_startup_units(u
);
1540 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1541 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1546 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1547 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1549 unit_update_cgroup_members_masks(u
);
1552 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1554 unit_add_to_dbus_queue(unit_follow_merge(u
));
1555 unit_add_to_gc_queue(u
);
1560 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
: UNIT_ERROR
;
1562 unit_add_to_dbus_queue(u
);
1563 unit_add_to_gc_queue(u
);
1565 log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1570 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1577 /* If the condition list is empty, then it is true */
1581 /* Otherwise, if all of the non-trigger conditions apply and
1582 * if any of the trigger conditions apply (unless there are
1583 * none) we return true */
1584 LIST_FOREACH(conditions
, c
, first
) {
1587 r
= condition_test(c
);
1590 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1592 c
->trigger
? "|" : "",
1593 c
->negate
? "!" : "",
1599 c
->trigger
? "|" : "",
1600 c
->negate
? "!" : "",
1602 condition_result_to_string(c
->result
));
1604 if (!c
->trigger
&& r
<= 0)
1607 if (c
->trigger
&& triggered
<= 0)
1611 return triggered
!= 0;
1614 static bool unit_condition_test(Unit
*u
) {
1617 dual_timestamp_get(&u
->condition_timestamp
);
1618 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1620 return u
->condition_result
;
1623 static bool unit_assert_test(Unit
*u
) {
1626 dual_timestamp_get(&u
->assert_timestamp
);
1627 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1629 return u
->assert_result
;
1632 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1633 DISABLE_WARNING_FORMAT_NONLITERAL
;
1634 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1638 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1640 const UnitStatusMessageFormats
*format_table
;
1643 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1645 if (t
!= JOB_RELOAD
) {
1646 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1648 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1654 /* Return generic strings */
1656 return "Starting %s.";
1657 else if (t
== JOB_STOP
)
1658 return "Stopping %s.";
1660 return "Reloading %s.";
1663 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1668 /* Reload status messages have traditionally not been printed to console. */
1669 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1672 format
= unit_get_status_message_format(u
, t
);
1674 DISABLE_WARNING_FORMAT_NONLITERAL
;
1675 unit_status_printf(u
, "", format
);
1679 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1680 const char *format
, *mid
;
1685 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1688 if (log_on_console())
1691 /* We log status messages for all units and all operations. */
1693 format
= unit_get_status_message_format(u
, t
);
1695 DISABLE_WARNING_FORMAT_NONLITERAL
;
1696 xsprintf(buf
, format
, unit_description(u
));
1699 mid
= t
== JOB_START
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR
:
1700 t
== JOB_STOP
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR
:
1701 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR
;
1703 /* Note that we deliberately use LOG_MESSAGE() instead of
1704 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1705 * closely what is written to screen using the status output,
1706 * which is supposed the highest level, friendliest output
1707 * possible, which means we should avoid the low-level unit
1709 log_struct(LOG_INFO
,
1710 LOG_MESSAGE("%s", buf
),
1712 LOG_UNIT_INVOCATION_ID(u
),
1717 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1720 assert(t
< _JOB_TYPE_MAX
);
1722 unit_status_log_starting_stopping_reloading(u
, t
);
1723 unit_status_print_starting_stopping(u
, t
);
1726 int unit_start_limit_test(Unit
*u
) {
1729 if (ratelimit_test(&u
->start_limit
)) {
1730 u
->start_limit_hit
= false;
1734 log_unit_warning(u
, "Start request repeated too quickly.");
1735 u
->start_limit_hit
= true;
1737 return emergency_action(u
->manager
, u
->start_limit_action
, u
->reboot_arg
, "unit failed");
1740 bool unit_shall_confirm_spawn(Unit
*u
) {
1743 if (manager_is_confirm_spawn_disabled(u
->manager
))
1746 /* For some reasons units remaining in the same process group
1747 * as PID 1 fail to acquire the console even if it's not used
1748 * by any process. So skip the confirmation question for them. */
1749 return !unit_get_exec_context(u
)->same_pgrp
;
1752 static bool unit_verify_deps(Unit
*u
) {
1759 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1760 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1761 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1762 * conjunction with After= as for them any such check would make things entirely racy. */
1764 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1766 if (!hashmap_contains(u
->dependencies
[UNIT_AFTER
], other
))
1769 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1770 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1779 * -EBADR: This unit type does not support starting.
1780 * -EALREADY: Unit is already started.
1781 * -EAGAIN: An operation is already in progress. Retry later.
1782 * -ECANCELED: Too many requests for now.
1783 * -EPROTO: Assert failed
1784 * -EINVAL: Unit not loaded
1785 * -EOPNOTSUPP: Unit type not supported
1786 * -ENOLINK: The necessary dependencies are not fulfilled.
1788 int unit_start(Unit
*u
) {
1789 UnitActiveState state
;
1794 /* If this is already started, then this will succeed. Note
1795 * that this will even succeed if this unit is not startable
1796 * by the user. This is relied on to detect when we need to
1797 * wait for units and when waiting is finished. */
1798 state
= unit_active_state(u
);
1799 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1802 /* Units that aren't loaded cannot be started */
1803 if (u
->load_state
!= UNIT_LOADED
)
1806 /* If the conditions failed, don't do anything at all. If we
1807 * already are activating this call might still be useful to
1808 * speed up activation in case there is some hold-off time,
1809 * but we don't want to recheck the condition in that case. */
1810 if (state
!= UNIT_ACTIVATING
&&
1811 !unit_condition_test(u
)) {
1812 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1816 /* If the asserts failed, fail the entire job */
1817 if (state
!= UNIT_ACTIVATING
&&
1818 !unit_assert_test(u
)) {
1819 log_unit_notice(u
, "Starting requested but asserts failed.");
1823 /* Units of types that aren't supported cannot be
1824 * started. Note that we do this test only after the condition
1825 * checks, so that we rather return condition check errors
1826 * (which are usually not considered a true failure) than "not
1827 * supported" errors (which are considered a failure).
1829 if (!unit_supported(u
))
1832 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1833 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1834 * effect anymore, due to a reload or due to a failed condition. */
1835 if (!unit_verify_deps(u
))
1838 /* Forward to the main object, if we aren't it. */
1839 following
= unit_following(u
);
1841 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1842 return unit_start(following
);
1845 /* If it is stopped, but we cannot start it, then fail */
1846 if (!UNIT_VTABLE(u
)->start
)
1849 /* We don't suppress calls to ->start() here when we are
1850 * already starting, to allow this request to be used as a
1851 * "hurry up" call, for example when the unit is in some "auto
1852 * restart" state where it waits for a holdoff timer to elapse
1853 * before it will start again. */
1855 unit_add_to_dbus_queue(u
);
1857 return UNIT_VTABLE(u
)->start(u
);
1860 bool unit_can_start(Unit
*u
) {
1863 if (u
->load_state
!= UNIT_LOADED
)
1866 if (!unit_supported(u
))
1869 return !!UNIT_VTABLE(u
)->start
;
1872 bool unit_can_isolate(Unit
*u
) {
1875 return unit_can_start(u
) &&
1880 * -EBADR: This unit type does not support stopping.
1881 * -EALREADY: Unit is already stopped.
1882 * -EAGAIN: An operation is already in progress. Retry later.
1884 int unit_stop(Unit
*u
) {
1885 UnitActiveState state
;
1890 state
= unit_active_state(u
);
1891 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1894 following
= unit_following(u
);
1896 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1897 return unit_stop(following
);
1900 if (!UNIT_VTABLE(u
)->stop
)
1903 unit_add_to_dbus_queue(u
);
1905 return UNIT_VTABLE(u
)->stop(u
);
1908 bool unit_can_stop(Unit
*u
) {
1911 if (!unit_supported(u
))
1917 return !!UNIT_VTABLE(u
)->stop
;
1921 * -EBADR: This unit type does not support reloading.
1922 * -ENOEXEC: Unit is not started.
1923 * -EAGAIN: An operation is already in progress. Retry later.
1925 int unit_reload(Unit
*u
) {
1926 UnitActiveState state
;
1931 if (u
->load_state
!= UNIT_LOADED
)
1934 if (!unit_can_reload(u
))
1937 state
= unit_active_state(u
);
1938 if (state
== UNIT_RELOADING
)
1941 if (state
!= UNIT_ACTIVE
) {
1942 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1946 following
= unit_following(u
);
1948 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1949 return unit_reload(following
);
1952 unit_add_to_dbus_queue(u
);
1954 if (!UNIT_VTABLE(u
)->reload
) {
1955 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1956 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), true);
1960 return UNIT_VTABLE(u
)->reload(u
);
1963 bool unit_can_reload(Unit
*u
) {
1966 if (UNIT_VTABLE(u
)->can_reload
)
1967 return UNIT_VTABLE(u
)->can_reload(u
);
1969 if (!hashmap_isempty(u
->dependencies
[UNIT_PROPAGATES_RELOAD_TO
]))
1972 return UNIT_VTABLE(u
)->reload
;
1975 static void unit_check_unneeded(Unit
*u
) {
1977 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1979 static const UnitDependency needed_dependencies
[] = {
1991 /* If this service shall be shut down when unneeded then do
1994 if (!u
->stop_when_unneeded
)
1997 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
2000 for (j
= 0; j
< ELEMENTSOF(needed_dependencies
); j
++) {
2005 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[needed_dependencies
[j
]], i
)
2006 if (unit_active_or_pending(other
) || unit_will_restart(other
))
2010 /* If stopping a unit fails continuously we might enter a stop
2011 * loop here, hence stop acting on the service being
2012 * unnecessary after a while. */
2013 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
2014 log_unit_warning(u
, "Unit not needed anymore, but not stopping since we tried this too often recently.");
2018 log_unit_info(u
, "Unit not needed anymore. Stopping.");
2020 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
2021 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2023 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2026 static void unit_check_binds_to(Unit
*u
) {
2027 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2039 if (unit_active_state(u
) != UNIT_ACTIVE
)
2042 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
2046 if (!other
->coldplugged
)
2047 /* We might yet create a job for the other unit… */
2050 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2060 /* If stopping a unit fails continuously we might enter a stop
2061 * loop here, hence stop acting on the service being
2062 * unnecessary after a while. */
2063 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
2064 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
2069 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
2071 /* A unit we need to run is gone. Sniff. Let's stop this. */
2072 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
2074 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
2077 static void retroactively_start_dependencies(Unit
*u
) {
2083 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2085 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2086 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2087 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2088 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2090 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2091 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2092 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2093 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
2095 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2096 if (!hashmap_get(u
->dependencies
[UNIT_AFTER
], other
) &&
2097 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2098 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
2100 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
2101 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2102 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2104 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
2105 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2106 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2109 static void retroactively_stop_dependencies(Unit
*u
) {
2115 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2117 /* Pull down units which are bound to us recursively if enabled */
2118 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
2119 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2120 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
2123 static void check_unneeded_dependencies(Unit
*u
) {
2129 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2131 /* Garbage collect services that might not be needed anymore, if enabled */
2132 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUIRES
], i
)
2133 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2134 unit_check_unneeded(other
);
2135 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_WANTS
], i
)
2136 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2137 unit_check_unneeded(other
);
2138 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_REQUISITE
], i
)
2139 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2140 unit_check_unneeded(other
);
2141 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
2142 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2143 unit_check_unneeded(other
);
2146 void unit_start_on_failure(Unit
*u
) {
2153 if (hashmap_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
2156 log_unit_info(u
, "Triggering OnFailure= dependencies.");
2158 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
2161 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, NULL
, NULL
);
2163 log_unit_error_errno(u
, r
, "Failed to enqueue OnFailure= job: %m");
2167 void unit_trigger_notify(Unit
*u
) {
2174 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
2175 if (UNIT_VTABLE(other
)->trigger_notify
)
2176 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2179 static int unit_log_resources(Unit
*u
) {
2181 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ 4];
2182 size_t n_message_parts
= 0, n_iovec
= 0;
2183 char* message_parts
[3 + 1], *t
;
2184 nsec_t nsec
= NSEC_INFINITY
;
2185 CGroupIPAccountingMetric m
;
2188 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2189 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2190 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2191 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2192 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2197 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2198 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2199 * information and the complete data in structured fields. */
2201 (void) unit_get_cpu_usage(u
, &nsec
);
2202 if (nsec
!= NSEC_INFINITY
) {
2203 char buf
[FORMAT_TIMESPAN_MAX
] = "";
2205 /* Format the CPU time for inclusion in the structured log message */
2206 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2210 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2212 /* Format the CPU time for inclusion in the human language message string */
2213 format_timespan(buf
, sizeof(buf
), nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
);
2214 t
= strjoin(n_message_parts
> 0 ? "consumed " : "Consumed ", buf
, " CPU time");
2220 message_parts
[n_message_parts
++] = t
;
2223 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2224 char buf
[FORMAT_BYTES_MAX
] = "";
2225 uint64_t value
= UINT64_MAX
;
2227 assert(ip_fields
[m
]);
2229 (void) unit_get_ip_accounting(u
, m
, &value
);
2230 if (value
== UINT64_MAX
)
2233 /* Format IP accounting data for inclusion in the structured log message */
2234 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2238 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2240 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2241 * bytes counters (and not for the packets counters) */
2242 if (m
== CGROUP_IP_INGRESS_BYTES
)
2243 t
= strjoin(n_message_parts
> 0 ? "received " : "Received ",
2244 format_bytes(buf
, sizeof(buf
), value
),
2246 else if (m
== CGROUP_IP_EGRESS_BYTES
)
2247 t
= strjoin(n_message_parts
> 0 ? "sent " : "Sent ",
2248 format_bytes(buf
, sizeof(buf
), value
),
2257 message_parts
[n_message_parts
++] = t
;
2260 /* Is there any accounting data available at all? */
2266 if (n_message_parts
== 0)
2267 t
= strjoina("MESSAGE=", u
->id
, ": Completed");
2269 _cleanup_free_
char *joined
;
2271 message_parts
[n_message_parts
] = NULL
;
2273 joined
= strv_join(message_parts
, ", ");
2279 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
);
2282 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2283 * and hence don't increase n_iovec for them */
2284 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2285 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2287 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2288 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2290 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2291 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2293 log_struct_iovec(LOG_INFO
, iovec
, n_iovec
+ 4);
2297 for (i
= 0; i
< n_message_parts
; i
++)
2298 free(message_parts
[i
]);
2300 for (i
= 0; i
< n_iovec
; i
++)
2301 free(iovec
[i
].iov_base
);
2307 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
2312 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2313 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2315 /* Note that this is called for all low-level state changes,
2316 * even if they might map to the same high-level
2317 * UnitActiveState! That means that ns == os is an expected
2318 * behavior here. For example: if a mount point is remounted
2319 * this function will be called too! */
2323 /* Update timestamps for state changes */
2324 if (!MANAGER_IS_RELOADING(m
)) {
2325 dual_timestamp_get(&u
->state_change_timestamp
);
2327 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2328 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2329 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2330 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2332 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2333 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2334 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2335 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2338 /* Keep track of failed units */
2339 (void) manager_update_failed_units(u
->manager
, u
, ns
== UNIT_FAILED
);
2341 /* Make sure the cgroup and state files are always removed when we become inactive */
2342 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2343 unit_prune_cgroup(u
);
2344 unit_unlink_state_files(u
);
2347 /* Note that this doesn't apply to RemainAfterExit services exiting
2348 * successfully, since there's no change of state in that case. Which is
2349 * why it is handled in service_set_state() */
2350 if (UNIT_IS_INACTIVE_OR_FAILED(os
) != UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2353 ec
= unit_get_exec_context(u
);
2354 if (ec
&& exec_context_may_touch_console(ec
)) {
2355 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2358 if (m
->n_on_console
== 0)
2359 /* unset no_console_output flag, since the console is free */
2360 m
->no_console_output
= false;
2369 if (u
->job
->state
== JOB_WAITING
)
2371 /* So we reached a different state for this
2372 * job. Let's see if we can run it now if it
2373 * failed previously due to EAGAIN. */
2374 job_add_to_run_queue(u
->job
);
2376 /* Let's check whether this state change constitutes a
2377 * finished job, or maybe contradicts a running job and
2378 * hence needs to invalidate jobs. */
2380 switch (u
->job
->type
) {
2383 case JOB_VERIFY_ACTIVE
:
2385 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2386 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2387 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2390 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2391 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2397 case JOB_RELOAD_OR_START
:
2398 case JOB_TRY_RELOAD
:
2400 if (u
->job
->state
== JOB_RUNNING
) {
2401 if (ns
== UNIT_ACTIVE
)
2402 job_finish_and_invalidate(u
->job
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2403 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2406 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2407 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2415 case JOB_TRY_RESTART
:
2417 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2418 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2419 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2421 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2427 assert_not_reached("Job type unknown");
2433 if (!MANAGER_IS_RELOADING(m
)) {
2435 /* If this state change happened without being
2436 * requested by a job, then let's retroactively start
2437 * or stop dependencies. We skip that step when
2438 * deserializing, since we don't want to create any
2439 * additional jobs just because something is already
2443 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2444 retroactively_start_dependencies(u
);
2445 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2446 retroactively_stop_dependencies(u
);
2449 /* stop unneeded units regardless if going down was expected or not */
2450 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2451 check_unneeded_dependencies(u
);
2453 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2454 log_unit_debug(u
, "Unit entered failed state.");
2455 unit_start_on_failure(u
);
2459 /* Some names are special */
2460 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2462 if (unit_has_name(u
, SPECIAL_DBUS_SERVICE
))
2463 /* The bus might have just become available,
2464 * hence try to connect to it, if we aren't
2468 if (u
->type
== UNIT_SERVICE
&&
2469 !UNIT_IS_ACTIVE_OR_RELOADING(os
) &&
2470 !MANAGER_IS_RELOADING(m
)) {
2471 /* Write audit record if we have just finished starting up */
2472 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, true);
2476 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
))
2477 manager_send_unit_plymouth(m
, u
);
2480 /* We don't care about D-Bus going down here, since we'll get an asynchronous notification for it
2483 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) &&
2484 !UNIT_IS_INACTIVE_OR_FAILED(os
)
2485 && !MANAGER_IS_RELOADING(m
)) {
2487 /* This unit just stopped/failed. */
2488 if (u
->type
== UNIT_SERVICE
) {
2490 /* Hmm, if there was no start record written
2491 * write it now, so that we always have a nice
2494 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, ns
== UNIT_INACTIVE
);
2496 if (ns
== UNIT_INACTIVE
)
2497 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, true);
2499 /* Write audit record if we have just finished shutting down */
2500 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, ns
== UNIT_INACTIVE
);
2502 u
->in_audit
= false;
2505 /* Write a log message about consumed resources */
2506 unit_log_resources(u
);
2510 manager_recheck_journal(m
);
2511 unit_trigger_notify(u
);
2513 if (!MANAGER_IS_RELOADING(u
->manager
)) {
2514 /* Maybe we finished startup and are now ready for
2515 * being stopped because unneeded? */
2516 unit_check_unneeded(u
);
2518 /* Maybe we finished startup, but something we needed
2519 * has vanished? Let's die then. (This happens when
2520 * something BindsTo= to a Type=oneshot unit, as these
2521 * units go directly from starting to inactive,
2522 * without ever entering started.) */
2523 unit_check_binds_to(u
);
2525 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
)
2526 (void) emergency_action(u
->manager
, u
->failure_action
, u
->reboot_arg
, "unit failed");
2527 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
)
2528 (void) emergency_action(u
->manager
, u
->success_action
, u
->reboot_arg
, "unit succeeded");
2531 unit_add_to_dbus_queue(u
);
2532 unit_add_to_gc_queue(u
);
2535 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2541 /* Watch a specific PID. We only support one or two units
2542 * watching each PID for now, not more. */
2544 r
= set_ensure_allocated(&u
->pids
, NULL
);
2548 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids1
, NULL
);
2552 r
= hashmap_put(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2554 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids2
, NULL
);
2558 r
= hashmap_put(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2561 q
= set_put(u
->pids
, PID_TO_PTR(pid
));
2568 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2572 (void) hashmap_remove_value(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2573 (void) hashmap_remove_value(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2574 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2577 void unit_unwatch_all_pids(Unit
*u
) {
2580 while (!set_isempty(u
->pids
))
2581 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2583 u
->pids
= set_free(u
->pids
);
2586 void unit_tidy_watch_pids(Unit
*u
, pid_t except1
, pid_t except2
) {
2592 /* Cleans dead PIDs from our list */
2594 SET_FOREACH(e
, u
->pids
, i
) {
2595 pid_t pid
= PTR_TO_PID(e
);
2597 if (pid
== except1
|| pid
== except2
)
2600 if (!pid_is_unwaited(pid
))
2601 unit_unwatch_pid(u
, pid
);
2605 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2607 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2611 case JOB_VERIFY_ACTIVE
:
2614 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2615 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2620 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2621 * external events), hence it makes no sense to permit enqueing such a request either. */
2622 return !u
->perpetual
;
2625 case JOB_TRY_RESTART
:
2626 return unit_can_stop(u
) && unit_can_start(u
);
2629 case JOB_TRY_RELOAD
:
2630 return unit_can_reload(u
);
2632 case JOB_RELOAD_OR_START
:
2633 return unit_can_reload(u
) && unit_can_start(u
);
2636 assert_not_reached("Invalid job type");
2640 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2643 /* Only warn about some unit types */
2644 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2647 if (streq_ptr(u
->id
, other
))
2648 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2650 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2653 static int unit_add_dependency_hashmap(
2656 UnitDependencyMask origin_mask
,
2657 UnitDependencyMask destination_mask
) {
2659 UnitDependencyInfo info
;
2664 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2665 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
2666 assert(origin_mask
> 0 || destination_mask
> 0);
2668 r
= hashmap_ensure_allocated(h
, NULL
);
2672 assert_cc(sizeof(void*) == sizeof(info
));
2674 info
.data
= hashmap_get(*h
, other
);
2676 /* Entry already exists. Add in our mask. */
2678 if ((info
.origin_mask
& origin_mask
) == info
.origin_mask
&&
2679 (info
.destination_mask
& destination_mask
) == info
.destination_mask
)
2682 info
.origin_mask
|= origin_mask
;
2683 info
.destination_mask
|= destination_mask
;
2685 r
= hashmap_update(*h
, other
, info
.data
);
2687 info
= (UnitDependencyInfo
) {
2688 .origin_mask
= origin_mask
,
2689 .destination_mask
= destination_mask
,
2692 r
= hashmap_put(*h
, other
, info
.data
);
2700 int unit_add_dependency(
2705 UnitDependencyMask mask
) {
2707 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2708 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2709 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2710 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2711 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2712 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2713 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2714 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2715 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2716 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2717 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2718 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2719 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2720 [UNIT_BEFORE
] = UNIT_AFTER
,
2721 [UNIT_AFTER
] = UNIT_BEFORE
,
2722 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2723 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2724 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2725 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2726 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2727 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2728 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2729 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2731 Unit
*original_u
= u
, *original_other
= other
;
2735 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2738 u
= unit_follow_merge(u
);
2739 other
= unit_follow_merge(other
);
2741 /* We won't allow dependencies on ourselves. We will not
2742 * consider them an error however. */
2744 maybe_warn_about_dependency(original_u
, original_other
->id
, d
);
2748 if ((d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) ||
2749 (d
== UNIT_AFTER
&& u
->type
== UNIT_DEVICE
)) {
2750 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2754 r
= unit_add_dependency_hashmap(u
->dependencies
+ d
, other
, mask
, 0);
2758 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2759 r
= unit_add_dependency_hashmap(other
->dependencies
+ inverse_table
[d
], u
, 0, mask
);
2764 if (add_reference
) {
2765 r
= unit_add_dependency_hashmap(u
->dependencies
+ UNIT_REFERENCES
, other
, mask
, 0);
2769 r
= unit_add_dependency_hashmap(other
->dependencies
+ UNIT_REFERENCED_BY
, u
, 0, mask
);
2774 unit_add_to_dbus_queue(u
);
2778 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
2783 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2787 return unit_add_dependency(u
, e
, other
, add_reference
, mask
);
2790 static int resolve_template(Unit
*u
, const char *name
, const char*path
, char **buf
, const char **ret
) {
2794 assert(name
|| path
);
2799 name
= basename(path
);
2801 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2808 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2810 _cleanup_free_
char *i
= NULL
;
2812 r
= unit_name_to_prefix(u
->id
, &i
);
2816 r
= unit_name_replace_instance(name
, i
, buf
);
2825 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, const char *path
, bool add_reference
, UnitDependencyMask mask
) {
2826 _cleanup_free_
char *buf
= NULL
;
2831 assert(name
|| path
);
2833 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2837 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2841 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
2844 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, const char *path
, bool add_reference
, UnitDependencyMask mask
) {
2845 _cleanup_free_
char *buf
= NULL
;
2850 assert(name
|| path
);
2852 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2856 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2860 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* Override the unit search path via the environment.
 * This is mostly for debug purposes.
 * Restored the missing return paths: the visible setenv() failure branch
 * had no consequent. Returns 0 on success, -errno on failure. */
int set_unit_path(const char *p) {
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
2871 char *unit_dbus_path(Unit
*u
) {
2877 return unit_dbus_path_from_name(u
->id
);
2880 char *unit_dbus_path_invocation_id(Unit
*u
) {
2883 if (sd_id128_is_null(u
->invocation_id
))
2886 return unit_dbus_path_from_name(u
->invocation_id_string
);
2889 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2893 /* Sets the unit slice if it has not been set before. Is extra
2894 * careful, to only allow this for units that actually have a
2895 * cgroup context. Also, we don't allow to set this for slices
2896 * (since the parent slice is derived from the name). Make
2897 * sure the unit we set is actually a slice. */
2899 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2902 if (u
->type
== UNIT_SLICE
)
2905 if (unit_active_state(u
) != UNIT_INACTIVE
)
2908 if (slice
->type
!= UNIT_SLICE
)
2911 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
2912 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
2915 if (UNIT_DEREF(u
->slice
) == slice
)
2918 /* Disallow slice changes if @u is already bound to cgroups */
2919 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
2922 unit_ref_unset(&u
->slice
);
2923 unit_ref_set(&u
->slice
, slice
);
2927 int unit_set_default_slice(Unit
*u
) {
2928 _cleanup_free_
char *b
= NULL
;
2929 const char *slice_name
;
2935 if (UNIT_ISSET(u
->slice
))
2939 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
2941 /* Implicitly place all instantiated units in their
2942 * own per-template slice */
2944 r
= unit_name_to_prefix(u
->id
, &prefix
);
2948 /* The prefix is already escaped, but it might include
2949 * "-" which has a special meaning for slice units,
2950 * hence escape it here extra. */
2951 escaped
= unit_name_escape(prefix
);
2955 if (MANAGER_IS_SYSTEM(u
->manager
))
2956 b
= strjoin("system-", escaped
, ".slice");
2958 b
= strappend(escaped
, ".slice");
2965 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
2966 ? SPECIAL_SYSTEM_SLICE
2967 : SPECIAL_ROOT_SLICE
;
2969 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
2973 return unit_set_slice(u
, slice
);
2976 const char *unit_slice_name(Unit
*u
) {
2979 if (!UNIT_ISSET(u
->slice
))
2982 return UNIT_DEREF(u
->slice
)->id
;
2985 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
2986 _cleanup_free_
char *t
= NULL
;
2993 r
= unit_name_change_suffix(u
->id
, type
, &t
);
2996 if (unit_has_name(u
, t
))
2999 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3000 assert(r
< 0 || *_found
!= u
);
3004 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3005 const char *name
, *old_owner
, *new_owner
;
3012 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
3014 bus_log_parse_error(r
);
3018 old_owner
= isempty(old_owner
) ? NULL
: old_owner
;
3019 new_owner
= isempty(new_owner
) ? NULL
: new_owner
;
3021 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3022 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
3027 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3034 if (u
->match_bus_slot
)
3037 match
= strjoina("type='signal',"
3038 "sender='org.freedesktop.DBus',"
3039 "path='/org/freedesktop/DBus',"
3040 "interface='org.freedesktop.DBus',"
3041 "member='NameOwnerChanged',"
3042 "arg0='", name
, "'");
3044 return sd_bus_add_match(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, u
);
3047 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3053 /* Watch a specific name on the bus. We only support one unit
3054 * watching each name for now. */
3056 if (u
->manager
->api_bus
) {
3057 /* If the bus is already available, install the match directly.
3058 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3059 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3061 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3064 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3066 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3067 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3073 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3077 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3078 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3081 bool unit_can_serialize(Unit
*u
) {
3084 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
3087 static int unit_serialize_cgroup_mask(FILE *f
, const char *key
, CGroupMask mask
) {
3088 _cleanup_free_
char *s
= NULL
;
3095 r
= cg_mask_to_string(mask
, &s
);
3106 static const char *ip_accounting_metric_field
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
3107 [CGROUP_IP_INGRESS_BYTES
] = "ip-accounting-ingress-bytes",
3108 [CGROUP_IP_INGRESS_PACKETS
] = "ip-accounting-ingress-packets",
3109 [CGROUP_IP_EGRESS_BYTES
] = "ip-accounting-egress-bytes",
3110 [CGROUP_IP_EGRESS_PACKETS
] = "ip-accounting-egress-packets",
3113 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
3114 CGroupIPAccountingMetric m
;
3121 if (unit_can_serialize(u
)) {
3124 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
3128 rt
= unit_get_exec_runtime(u
);
3130 r
= exec_runtime_serialize(u
, rt
, f
, fds
);
3136 dual_timestamp_serialize(f
, "state-change-timestamp", &u
->state_change_timestamp
);
3138 dual_timestamp_serialize(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
3139 dual_timestamp_serialize(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
3140 dual_timestamp_serialize(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
3141 dual_timestamp_serialize(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
3143 dual_timestamp_serialize(f
, "condition-timestamp", &u
->condition_timestamp
);
3144 dual_timestamp_serialize(f
, "assert-timestamp", &u
->assert_timestamp
);
3146 if (dual_timestamp_is_set(&u
->condition_timestamp
))
3147 unit_serialize_item(u
, f
, "condition-result", yes_no(u
->condition_result
));
3149 if (dual_timestamp_is_set(&u
->assert_timestamp
))
3150 unit_serialize_item(u
, f
, "assert-result", yes_no(u
->assert_result
));
3152 unit_serialize_item(u
, f
, "transient", yes_no(u
->transient
));
3154 unit_serialize_item(u
, f
, "exported-invocation-id", yes_no(u
->exported_invocation_id
));
3155 unit_serialize_item(u
, f
, "exported-log-level-max", yes_no(u
->exported_log_level_max
));
3156 unit_serialize_item(u
, f
, "exported-log-extra-fields", yes_no(u
->exported_log_extra_fields
));
3158 unit_serialize_item_format(u
, f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
3159 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
3160 unit_serialize_item_format(u
, f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
3163 unit_serialize_item(u
, f
, "cgroup", u
->cgroup_path
);
3164 unit_serialize_item(u
, f
, "cgroup-realized", yes_no(u
->cgroup_realized
));
3165 (void) unit_serialize_cgroup_mask(f
, "cgroup-realized-mask", u
->cgroup_realized_mask
);
3166 (void) unit_serialize_cgroup_mask(f
, "cgroup-enabled-mask", u
->cgroup_enabled_mask
);
3167 unit_serialize_item_format(u
, f
, "cgroup-bpf-realized", "%i", u
->cgroup_bpf_state
);
3169 if (uid_is_valid(u
->ref_uid
))
3170 unit_serialize_item_format(u
, f
, "ref-uid", UID_FMT
, u
->ref_uid
);
3171 if (gid_is_valid(u
->ref_gid
))
3172 unit_serialize_item_format(u
, f
, "ref-gid", GID_FMT
, u
->ref_gid
);
3174 if (!sd_id128_is_null(u
->invocation_id
))
3175 unit_serialize_item_format(u
, f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
3177 bus_track_serialize(u
->bus_track
, f
, "ref");
3179 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
3182 r
= unit_get_ip_accounting(u
, m
, &v
);
3184 unit_serialize_item_format(u
, f
, ip_accounting_metric_field
[m
], "%" PRIu64
, v
);
3187 if (serialize_jobs
) {
3189 fprintf(f
, "job\n");
3190 job_serialize(u
->job
, f
);
3194 fprintf(f
, "job\n");
3195 job_serialize(u
->nop_job
, f
);
3204 int unit_serialize_item(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
3220 int unit_serialize_item_escaped(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
3221 _cleanup_free_
char *c
= NULL
;
3242 int unit_serialize_item_fd(Unit
*u
, FILE *f
, FDSet
*fds
, const char *key
, int fd
) {
3252 copy
= fdset_put_dup(fds
, fd
);
3256 fprintf(f
, "%s=%i\n", key
, copy
);
3260 void unit_serialize_item_format(Unit
*u
, FILE *f
, const char *key
, const char *format
, ...) {
3271 va_start(ap
, format
);
3272 vfprintf(f
, format
, ap
);
3278 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
3279 ExecRuntime
**rt
= NULL
;
3287 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3289 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
3292 char line
[LINE_MAX
], *l
, *v
;
3293 CGroupIPAccountingMetric m
;
3296 if (!fgets(line
, sizeof(line
), f
)) {
3309 k
= strcspn(l
, "=");
3317 if (streq(l
, "job")) {
3319 /* new-style serialized job */
3326 r
= job_deserialize(j
, f
);
3332 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
3338 r
= job_install_deserialized(j
);
3340 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
3344 } else /* legacy for pre-44 */
3345 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
3347 } else if (streq(l
, "state-change-timestamp")) {
3348 dual_timestamp_deserialize(v
, &u
->state_change_timestamp
);
3350 } else if (streq(l
, "inactive-exit-timestamp")) {
3351 dual_timestamp_deserialize(v
, &u
->inactive_exit_timestamp
);
3353 } else if (streq(l
, "active-enter-timestamp")) {
3354 dual_timestamp_deserialize(v
, &u
->active_enter_timestamp
);
3356 } else if (streq(l
, "active-exit-timestamp")) {
3357 dual_timestamp_deserialize(v
, &u
->active_exit_timestamp
);
3359 } else if (streq(l
, "inactive-enter-timestamp")) {
3360 dual_timestamp_deserialize(v
, &u
->inactive_enter_timestamp
);
3362 } else if (streq(l
, "condition-timestamp")) {
3363 dual_timestamp_deserialize(v
, &u
->condition_timestamp
);
3365 } else if (streq(l
, "assert-timestamp")) {
3366 dual_timestamp_deserialize(v
, &u
->assert_timestamp
);
3368 } else if (streq(l
, "condition-result")) {
3370 r
= parse_boolean(v
);
3372 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
3374 u
->condition_result
= r
;
3378 } else if (streq(l
, "assert-result")) {
3380 r
= parse_boolean(v
);
3382 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
3384 u
->assert_result
= r
;
3388 } else if (streq(l
, "transient")) {
3390 r
= parse_boolean(v
);
3392 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
3398 } else if (streq(l
, "exported-invocation-id")) {
3400 r
= parse_boolean(v
);
3402 log_unit_debug(u
, "Failed to parse exported invocation ID bool %s, ignoring.", v
);
3404 u
->exported_invocation_id
= r
;
3408 } else if (streq(l
, "exported-log-level-max")) {
3410 r
= parse_boolean(v
);
3412 log_unit_debug(u
, "Failed to parse exported log level max bool %s, ignoring.", v
);
3414 u
->exported_log_level_max
= r
;
3418 } else if (streq(l
, "exported-log-extra-fields")) {
3420 r
= parse_boolean(v
);
3422 log_unit_debug(u
, "Failed to parse exported log extra fields bool %s, ignoring.", v
);
3424 u
->exported_log_extra_fields
= r
;
3428 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
3430 r
= safe_atou64(v
, &u
->cpu_usage_base
);
3432 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
3436 } else if (streq(l
, "cpu-usage-last")) {
3438 r
= safe_atou64(v
, &u
->cpu_usage_last
);
3440 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
3444 } else if (streq(l
, "cgroup")) {
3446 r
= unit_set_cgroup_path(u
, v
);
3448 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
3450 (void) unit_watch_cgroup(u
);
3453 } else if (streq(l
, "cgroup-realized")) {
3456 b
= parse_boolean(v
);
3458 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
3460 u
->cgroup_realized
= b
;
3464 } else if (streq(l
, "cgroup-realized-mask")) {
3466 r
= cg_mask_from_string(v
, &u
->cgroup_realized_mask
);
3468 log_unit_debug(u
, "Failed to parse cgroup-realized-mask %s, ignoring.", v
);
3471 } else if (streq(l
, "cgroup-enabled-mask")) {
3473 r
= cg_mask_from_string(v
, &u
->cgroup_enabled_mask
);
3475 log_unit_debug(u
, "Failed to parse cgroup-enabled-mask %s, ignoring.", v
);
3478 } else if (streq(l
, "cgroup-bpf-realized")) {
3481 r
= safe_atoi(v
, &i
);
3483 log_unit_debug(u
, "Failed to parse cgroup BPF state %s, ignoring.", v
);
3485 u
->cgroup_bpf_state
=
3486 i
< 0 ? UNIT_CGROUP_BPF_INVALIDATED
:
3487 i
> 0 ? UNIT_CGROUP_BPF_ON
:
3488 UNIT_CGROUP_BPF_OFF
;
3492 } else if (streq(l
, "ref-uid")) {
3495 r
= parse_uid(v
, &uid
);
3497 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3499 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3503 } else if (streq(l
, "ref-gid")) {
3506 r
= parse_gid(v
, &gid
);
3508 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3510 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3512 } else if (streq(l
, "ref")) {
3514 r
= strv_extend(&u
->deserialized_refs
, v
);
3519 } else if (streq(l
, "invocation-id")) {
3522 r
= sd_id128_from_string(v
, &id
);
3524 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3526 r
= unit_set_invocation_id(u
, id
);
3528 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3534 /* Check if this is an IP accounting metric serialization field */
3535 for (m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++)
3536 if (streq(l
, ip_accounting_metric_field
[m
]))
3538 if (m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
) {
3541 r
= safe_atou64(v
, &c
);
3543 log_unit_debug(u
, "Failed to parse IP accounting value %s, ignoring.", v
);
3545 u
->ip_accounting_extra
[m
] = c
;
3549 if (unit_can_serialize(u
)) {
3551 r
= exec_runtime_deserialize_item(u
, rt
, l
, v
, fds
);
3553 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3557 /* Returns positive if key was handled by the call */
3562 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3564 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3568 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3569 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3570 * before 228 where the base for timeouts was not persistent across reboots. */
3572 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3573 dual_timestamp_get(&u
->state_change_timestamp
);
3575 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3576 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3577 unit_invalidate_cgroup(u
, _CGROUP_MASK_ALL
);
3578 unit_invalidate_cgroup_bpf(u
);
3583 void unit_deserialize_skip(FILE *f
) {
3586 /* Skip serialized data for this unit. We don't know what it is. */
3589 char line
[LINE_MAX
], *l
;
3591 if (!fgets(line
, sizeof line
, f
))
3604 int unit_add_node_dependency(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
, UnitDependencyMask mask
) {
3606 _cleanup_free_
char *e
= NULL
;
3611 /* Adds in links to the device node that this unit is based on */
3615 if (!is_device_path(what
))
3618 /* When device units aren't supported (such as in a
3619 * container), don't create dependencies on them. */
3620 if (!unit_type_supported(UNIT_DEVICE
))
3623 r
= unit_name_from_path(what
, ".device", &e
);
3627 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3631 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3632 dep
= UNIT_BINDS_TO
;
3634 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3635 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3636 device
, true, mask
);
3641 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false, mask
);
3649 int unit_coldplug(Unit
*u
) {
3655 /* Make sure we don't enter a loop, when coldplugging
3660 u
->coldplugged
= true;
3662 STRV_FOREACH(i
, u
->deserialized_refs
) {
3663 q
= bus_unit_track_add_name(u
, *i
);
3664 if (q
< 0 && r
>= 0)
3667 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3669 if (UNIT_VTABLE(u
)->coldplug
) {
3670 q
= UNIT_VTABLE(u
)->coldplug(u
);
3671 if (q
< 0 && r
>= 0)
3676 q
= job_coldplug(u
->job
);
3677 if (q
< 0 && r
>= 0)
3684 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3690 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3691 * are never out-of-date. */
3692 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3695 if (stat(path
, &st
) < 0)
3696 /* What, cannot access this anymore? */
3700 /* For masked files check if they are still so */
3701 return !null_or_empty(&st
);
3703 /* For non-empty files check the mtime */
3704 return timespec_load(&st
.st_mtim
) > mtime
;
3709 bool unit_need_daemon_reload(Unit
*u
) {
3710 _cleanup_strv_free_
char **t
= NULL
;
3715 /* For unit files, we allow masking… */
3716 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3717 u
->load_state
== UNIT_MASKED
))
3720 /* Source paths should not be masked… */
3721 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3724 if (u
->load_state
== UNIT_LOADED
)
3725 (void) unit_find_dropin_paths(u
, &t
);
3726 if (!strv_equal(u
->dropin_paths
, t
))
3729 /* … any drop-ins that are masked are simply omitted from the list. */
3730 STRV_FOREACH(path
, u
->dropin_paths
)
3731 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3737 void unit_reset_failed(Unit
*u
) {
3740 if (UNIT_VTABLE(u
)->reset_failed
)
3741 UNIT_VTABLE(u
)->reset_failed(u
);
3743 RATELIMIT_RESET(u
->start_limit
);
3744 u
->start_limit_hit
= false;
3747 Unit
*unit_following(Unit
*u
) {
3750 if (UNIT_VTABLE(u
)->following
)
3751 return UNIT_VTABLE(u
)->following(u
);
3756 bool unit_stop_pending(Unit
*u
) {
3759 /* This call does check the current state of the unit. It's
3760 * hence useful to be called from state change calls of the
3761 * unit itself, where the state isn't updated yet. This is
3762 * different from unit_inactive_or_pending() which checks both
3763 * the current state and for a queued job. */
3765 return u
->job
&& u
->job
->type
== JOB_STOP
;
3768 bool unit_inactive_or_pending(Unit
*u
) {
3771 /* Returns true if the unit is inactive or going down */
3773 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3776 if (unit_stop_pending(u
))
3782 bool unit_active_or_pending(Unit
*u
) {
3785 /* Returns true if the unit is active or going up */
3787 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3791 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3797 bool unit_will_restart(Unit
*u
) {
3800 if (!UNIT_VTABLE(u
)->will_restart
)
3803 return UNIT_VTABLE(u
)->will_restart(u
);
3806 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3808 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3809 assert(SIGNAL_VALID(signo
));
3811 if (!UNIT_VTABLE(u
)->kill
)
3814 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3817 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3821 pid_set
= set_new(NULL
);
3825 /* Exclude the main/control pids from being killed via the cgroup */
3827 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3832 if (control_pid
> 0) {
3833 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3845 int unit_kill_common(
3851 sd_bus_error
*error
) {
3854 bool killed
= false;
3856 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3858 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3859 else if (main_pid
== 0)
3860 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3863 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3864 if (control_pid
< 0)
3865 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3866 else if (control_pid
== 0)
3867 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3870 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3871 if (control_pid
> 0) {
3872 if (kill(control_pid
, signo
) < 0)
3878 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3880 if (kill(main_pid
, signo
) < 0)
3886 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3887 _cleanup_set_free_ Set
*pid_set
= NULL
;
3890 /* Exclude the main/control pids from being killed via the cgroup */
3891 pid_set
= unit_pid_set(main_pid
, control_pid
);
3895 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3896 if (q
< 0 && !IN_SET(q
, -EAGAIN
, -ESRCH
, -ENOENT
))
3902 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
3908 int unit_following_set(Unit
*u
, Set
**s
) {
3912 if (UNIT_VTABLE(u
)->following_set
)
3913 return UNIT_VTABLE(u
)->following_set(u
, s
);
3919 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3924 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3925 r
= unit_file_get_state(
3926 u
->manager
->unit_file_scope
,
3928 basename(u
->fragment_path
),
3929 &u
->unit_file_state
);
3931 u
->unit_file_state
= UNIT_FILE_BAD
;
3934 return u
->unit_file_state
;
3937 int unit_get_unit_file_preset(Unit
*u
) {
3940 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3941 u
->unit_file_preset
= unit_file_query_preset(
3942 u
->manager
->unit_file_scope
,
3944 basename(u
->fragment_path
));
3946 return u
->unit_file_preset
;
3949 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*u
) {
3954 unit_ref_unset(ref
);
3957 LIST_PREPEND(refs
, u
->refs
, ref
);
3961 void unit_ref_unset(UnitRef
*ref
) {
3967 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3968 * be unreferenced now. */
3969 unit_add_to_gc_queue(ref
->unit
);
3971 LIST_REMOVE(refs
, ref
->unit
->refs
, ref
);
3975 static int user_from_unit_name(Unit
*u
, char **ret
) {
3977 static const uint8_t hash_key
[] = {
3978 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3979 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3982 _cleanup_free_
char *n
= NULL
;
3985 r
= unit_name_to_prefix(u
->id
, &n
);
3989 if (valid_user_group_name(n
)) {
3995 /* If we can't use the unit name as a user name, then let's hash it and use that */
3996 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4002 int unit_patch_contexts(Unit
*u
) {
4010 /* Patch in the manager defaults into the exec and cgroup
4011 * contexts, _after_ the rest of the settings have been
4014 ec
= unit_get_exec_context(u
);
4016 /* This only copies in the ones that need memory */
4017 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
4018 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4019 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4024 if (MANAGER_IS_USER(u
->manager
) &&
4025 !ec
->working_directory
) {
4027 r
= get_home_dir(&ec
->working_directory
);
4031 /* Allow user services to run, even if the
4032 * home directory is missing */
4033 ec
->working_directory_missing_ok
= true;
4036 if (ec
->private_devices
)
4037 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4039 if (ec
->protect_kernel_modules
)
4040 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4042 if (ec
->dynamic_user
) {
4044 r
= user_from_unit_name(u
, &ec
->user
);
4050 ec
->group
= strdup(ec
->user
);
4055 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
4056 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
4058 ec
->private_tmp
= true;
4059 ec
->remove_ipc
= true;
4060 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4061 if (ec
->protect_home
== PROTECT_HOME_NO
)
4062 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4066 cc
= unit_get_cgroup_context(u
);
4070 ec
->private_devices
&&
4071 cc
->device_policy
== CGROUP_AUTO
)
4072 cc
->device_policy
= CGROUP_CLOSED
;
4078 ExecContext
*unit_get_exec_context(Unit
*u
) {
4085 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4089 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4092 KillContext
*unit_get_kill_context(Unit
*u
) {
4099 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4103 return (KillContext
*) ((uint8_t*) u
+ offset
);
4106 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4112 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4116 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4119 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4125 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4129 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4132 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4135 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4138 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4139 return u
->manager
->lookup_paths
.transient
;
4141 if (flags
& UNIT_PERSISTENT
)
4142 return u
->manager
->lookup_paths
.persistent_control
;
4144 if (flags
& UNIT_RUNTIME
)
4145 return u
->manager
->lookup_paths
.runtime_control
;
4150 char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4156 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4157 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4158 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4159 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4160 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4163 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4164 ret
= specifier_escape(s
);
4171 if (flags
& UNIT_ESCAPE_C
) {
4184 return ret
?: (char*) s
;
4187 return ret
?: strdup(s
);
4190 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4191 _cleanup_free_
char *result
= NULL
;
4192 size_t n
= 0, allocated
= 0;
4195 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4196 * way suitable for ExecStart= stanzas */
4198 STRV_FOREACH(i
, l
) {
4199 _cleanup_free_
char *buf
= NULL
;
4204 p
= unit_escape_setting(*i
, flags
, &buf
);
4208 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4209 if (!GREEDY_REALLOC(result
, allocated
, n
+ a
+ 1))
4223 if (!GREEDY_REALLOC(result
, allocated
, n
+ 1))
4234 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4235 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4236 const char *dir
, *wrapped
;
4243 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4246 data
= unit_escape_setting(data
, flags
, &escaped
);
4250 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4251 * previous section header is the same */
4253 if (flags
& UNIT_PRIVATE
) {
4254 if (!UNIT_VTABLE(u
)->private_section
)
4257 if (!u
->transient_file
|| u
->last_section_private
< 0)
4258 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4259 else if (u
->last_section_private
== 0)
4260 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4262 if (!u
->transient_file
|| u
->last_section_private
< 0)
4263 data
= strjoina("[Unit]\n", data
);
4264 else if (u
->last_section_private
> 0)
4265 data
= strjoina("\n[Unit]\n", data
);
4268 if (u
->transient_file
) {
4269 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4270 * write to the transient unit file. */
4271 fputs(data
, u
->transient_file
);
4273 if (!endswith(data
, "\n"))
4274 fputc('\n', u
->transient_file
);
4276 /* Remember which section we wrote this entry to */
4277 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4281 dir
= unit_drop_in_dir(u
, flags
);
4285 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4286 "# or an equivalent operation. Do not edit.\n",
4290 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4294 (void) mkdir_p_label(p
, 0755);
4295 r
= write_string_file_atomic_label(q
, wrapped
);
4299 r
= strv_push(&u
->dropin_paths
, q
);
4304 strv_uniq(u
->dropin_paths
);
4306 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4311 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4312 _cleanup_free_
char *p
= NULL
;
4320 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4323 va_start(ap
, format
);
4324 r
= vasprintf(&p
, format
, ap
);
4330 return unit_write_setting(u
, flags
, name
, p
);
4333 int unit_make_transient(Unit
*u
) {
4334 _cleanup_free_
char *path
= NULL
;
4339 if (!UNIT_VTABLE(u
)->can_transient
)
4342 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4344 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
4348 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4349 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4351 RUN_WITH_UMASK(0022) {
4352 f
= fopen(path
, "we");
4357 safe_fclose(u
->transient_file
);
4358 u
->transient_file
= f
;
4360 free_and_replace(u
->fragment_path
, path
);
4362 u
->source_path
= mfree(u
->source_path
);
4363 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4364 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4366 u
->load_state
= UNIT_STUB
;
4368 u
->transient
= true;
4370 unit_add_to_dbus_queue(u
);
4371 unit_add_to_gc_queue(u
);
4373 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
/* cg_kill_log_func_t callback: logs (at notice level, against the unit passed
 * via userdata) that a process is about to be killed with the given signal. */
4379 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
4380 _cleanup_free_
char *comm
= NULL
;
4382 (void) get_process_comm(pid
, &comm
);
4384 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4385 only, like for example systemd's own PAM stub process. */
4386 if (comm
&& comm
[0] == '(')
4389 log_unit_notice(userdata
,
4390 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4393 signal_to_string(sig
));
/* Maps a KillOperation to the concrete signal configured in the KillContext.
 * NOTE(review): extraction dropped the other switch cases (original lines
 * 4404-4411, presumably KILL_KILL/KILL_WATCHDOG-style operations — confirm
 * against the full file); only the terminate cases are visible here. */
4396 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
4401 case KILL_TERMINATE
:
4402 case KILL_TERMINATE_AND_LOG
:
4403 return c
->kill_signal
;
/* All KillOperation values must be handled above. */
4412 assert_not_reached("KillOperation unknown");
/* Kills the unit's processes (main pid, control pid, and optionally the whole
 * cgroup) according to the KillContext and KillOperation. Returns > 0 if
 * something worth waiting for was killed, 0 otherwise.
 * NOTE(review): extraction dropped several original lines (parameter list,
 * asserts, some branches); remaining tokens are kept byte-identical. */
4416 int unit_kill_context(
4422 bool main_pid_alien
) {
4424 bool wait_for_exit
= false, send_sighup
;
4425 cg_kill_log_func_t log_func
= NULL
;
4431 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4432 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
/* KillMode=none: nothing to do at all. */
4434 if (c
->kill_mode
== KILL_NONE
)
4437 sig
= operation_to_signal(c
, k
);
/* SIGHUP is only sent alongside termination operations (see SendSIGHUP=). */
4441 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
/* Log each kill unless this is a plain, expected SIGTERM. */
4444 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
4445 log_func
= log_kill
;
4449 log_func(main_pid
, sig
, u
);
/* SIGCONT is sent too so a stopped process can handle the signal. */
4451 r
= kill_and_sigcont(main_pid
, sig
);
4452 if (r
< 0 && r
!= -ESRCH
) {
4453 _cleanup_free_
char *comm
= NULL
;
4454 (void) get_process_comm(main_pid
, &comm
);
4456 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
/* Only wait for processes we forked off ourselves. */
4458 if (!main_pid_alien
)
4459 wait_for_exit
= true;
4461 if (r
!= -ESRCH
&& send_sighup
)
4462 (void) kill(main_pid
, SIGHUP
);
/* Same dance for the control process, if any. */
4466 if (control_pid
> 0) {
4468 log_func(control_pid
, sig
, u
);
4470 r
= kill_and_sigcont(control_pid
, sig
);
4471 if (r
< 0 && r
!= -ESRCH
) {
4472 _cleanup_free_
char *comm
= NULL
;
4473 (void) get_process_comm(control_pid
, &comm
);
4475 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4477 wait_for_exit
= true;
4479 if (r
!= -ESRCH
&& send_sighup
)
4480 (void) kill(control_pid
, SIGHUP
);
/* Cgroup-wide kill: KillMode=control-group always, KillMode=mixed only on
 * the final SIGKILL pass. */
4484 if (u
->cgroup_path
&&
4485 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4486 _cleanup_set_free_ Set
*pid_set
= NULL
;
4488 /* Exclude the main/control pids from being killed via the cgroup */
4489 pid_set
= unit_pid_set(main_pid
, control_pid
);
4493 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4495 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
/* These errors just mean "nothing left to kill" — not worth a warning. */
4499 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4500 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
4504 /* FIXME: For now, on the legacy hierarchy, we
4505 * will not wait for the cgroup members to die
4506 * if we are running in a container or if this
4507 * is a delegation unit, simply because cgroup
4508 * notification is unreliable in these
4509 * cases. It doesn't work at all in
4510 * containers, and outside of containers it
4511 * can be confused easily by left-over
4512 * directories in the cgroup — which however
4513 * should not exist in non-delegated units. On
4514 * the unified hierarchy that's different,
4515 * there we get proper events. Hence rely on
4518 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4519 (detect_container() == 0 && !UNIT_CGROUP_BOOL(u
, delegate
)))
4520 wait_for_exit
= true;
/* Second pass (presumably the SIGHUP follow-up — extraction dropped the
 * surrounding condition; confirm against the full file). */
4525 pid_set
= unit_pid_set(main_pid
, control_pid
);
4529 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4538 return wait_for_exit
;
/* Registers RequiresMountsFor= style path dependencies, both in the unit's own
 * hashtable and in the manager-wide prefix table (see comment below).
 * NOTE(review): extraction dropped several original lines (error returns,
 * struct initializer fields); remaining tokens are kept byte-identical. */
4541 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
/* VLA sized to hold any prefix of 'path', plus the working copy pointer. */
4542 char prefix
[strlen(path
) + 1], *p
;
4543 UnitDependencyInfo di
;
4549 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4550 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4551 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4552 * determine which units to make themselves a dependency of. */
4554 if (!path_is_absolute(path
))
4557 r
= hashmap_ensure_allocated(&u
->requires_mounts_for
, &string_hash_ops
);
4565 path_kill_slashes(p
);
/* Reject paths with "." or ".." components after slash normalization. */
4567 if (!path_is_normalized(p
)) {
/* Already registered — nothing further to do for this path. */
4572 if (hashmap_contains(u
->requires_mounts_for
, p
)) {
4577 di
= (UnitDependencyInfo
) {
4581 r
= hashmap_put(u
->requires_mounts_for
, p
, di
.data
);
/* Now enter the path and every prefix of it into the manager's table. */
4587 PATH_FOREACH_PREFIX_MORE(prefix
, p
) {
4590 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4594 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &string_hash_ops
);
4608 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
/* Ensures the unit has an ExecRuntime: tries to share the runtime of a unit we
 * join the namespace of, otherwise creates a fresh one. The ExecRuntime slot
 * lives at a per-unit-type offset inside the unit struct. */
4624 int unit_setup_exec_runtime(Unit
*u
) {
4631 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4634 /* Check if there already is an ExecRuntime for this unit? */
/* Compute the address of the type-specific ExecRuntime* field. */
4635 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4639 /* Try to get it from somebody else */
4640 HASHMAP_FOREACH_KEY(v
, other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4642 *rt
= unit_get_exec_runtime(other
);
/* Share the other unit's runtime by taking a reference on it. */
4644 exec_runtime_ref(*rt
);
/* Nobody to share with — create a new runtime for this unit. */
4649 return exec_runtime_make(rt
, unit_get_exec_context(u
), u
->id
);
/* Acquires DynamicUser= credentials for the unit, if its ExecContext requests
 * them. The DynamicCreds slot lives at a per-unit-type offset in the struct. */
4652 int unit_setup_dynamic_creds(Unit
*u
) {
4654 DynamicCreds
*dcreds
;
4659 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4661 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4663 ec
= unit_get_exec_context(u
);
/* DynamicUser= not enabled — nothing to acquire. */
4666 if (!ec
->dynamic_user
)
4669 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
/* Returns whether the given unit type is supported on this system. Types with
 * no supported() hook in their vtable are unconditionally supported. */
4672 bool unit_type_supported(UnitType t
) {
/* Defensive range checks on the enum value. */
4673 if (_unlikely_(t
< 0))
4675 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4678 if (!unit_vtable
[t
]->supported
)
4681 return unit_vtable
[t
]->supported();
/* Logs a structured notice if the directory about to be mounted over is not
 * empty (its contents would be hidden by the mount). Best-effort only. */
4684 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4690 r
= dir_is_empty(where
);
4694 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4698 log_struct(LOG_NOTICE
,
4699 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4701 LOG_UNIT_INVOCATION_ID(u
),
4702 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
/* Refuses (with a structured log message) to mount on top of a symlink.
 * A failed symlink check is logged at debug level and ignored. */
4707 int unit_fail_if_symlink(Unit
*u
, const char* where
) {
4713 r
= is_symlink(where
);
4715 log_unit_debug_errno(u
, r
, "Failed to check symlink %s, ignoring: %m", where
);
4722 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4724 LOG_UNIT_INVOCATION_ID(u
),
4725 LOG_UNIT_MESSAGE(u
, "Mount on symlink %s not allowed.", where
),
/* Returns true if the unit carries no configuration yet, i.e. it is safe to
 * (re)create it as e.g. a transient unit. */
4732 bool unit_is_pristine(Unit
*u
) {
4735 /* Check if the unit already exists or is already around,
4736 * in a number of different ways. Note that to cater for unit
4737 * types such as slice, we are generally fine with units that
4738 * are marked UNIT_LOADED even though nothing was
4739 * actually loaded, as those unit types don't require a file
4740 * on disk to validly load. */
4742 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4745 !strv_isempty(u
->dropin_paths
) ||
/* Returns the unit's current control PID via the type-specific vtable hook,
 * if the unit type provides one. */
pid_t
unit_control_pid(Unit
*u
) {
4753 if (UNIT_VTABLE(u
)->control_pid
)
4754 return UNIT_VTABLE(u
)->control_pid(u
);
/* Returns the unit's current main PID via the type-specific vtable hook,
 * if the unit type provides one. */
pid_t
unit_main_pid(Unit
*u
) {
4762 if (UNIT_VTABLE(u
)->main_pid
)
4763 return UNIT_VTABLE(u
)->main_pid(u
);
/* Shared implementation of unit_unref_uid()/unit_unref_gid(): drops the unit's
 * reference on a UID (or GID, smuggled in as uid_t) via the supplied manager
 * callback, then invalidates the stored reference. */
4768 static void unit_unref_uid_internal(
4772 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4776 assert(_manager_unref_uid
);
4778 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4779 * gid_t are actually the same time, with the same validity rules.
4781 * Drops a reference to UID/GID from a unit. */
/* Compile-time proof that treating gid_t as uid_t is safe. */
4783 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4784 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
/* No reference held — nothing to drop. */
4786 if (!uid_is_valid(*ref_uid
))
4789 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4790 *ref_uid
= UID_INVALID
;
/* Drops the unit's UID reference; destroy_now requests immediate IPC cleanup. */
4793 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4794 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
/* Drops the unit's GID reference; the gid_t field is passed as uid_t, which
 * unit_unref_uid_internal() verifies is layout-compatible. */
4797 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4798 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
/* Shared implementation of unit_ref_uid()/unit_ref_gid(): records a reference
 * on a UID (or GID as uid_t) through the supplied manager callback, rejecting
 * a second, different reference. */
4801 static int unit_ref_uid_internal(
4806 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4812 assert(uid_is_valid(uid
));
4813 assert(_manager_ref_uid
);
4815 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4816 * are actually the same type, and have the same validity rules.
4818 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4819 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4822 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4823 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
/* Same UID already referenced — idempotent. */
4825 if (*ref_uid
== uid
)
/* A different UID is already referenced — refuse to replace it. */
4828 if (uid_is_valid(*ref_uid
)) /* Already set? */
4831 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
/* Records a reference on the given UID for this unit. */
4839 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4840 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
/* Records a reference on the given GID, reusing the UID machinery (the casts
 * are validated by assert_cc in unit_ref_uid_internal()). */
4843 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4844 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
/* References a UID and a GID atomically: on GID failure the already-taken UID
 * reference is rolled back, so either both are held or neither. Returns > 0
 * if at least one reference was newly taken. */
4847 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4852 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4854 if (uid_is_valid(uid
)) {
4855 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4860 if (gid_is_valid(gid
)) {
4861 q
= unit_ref_gid(u
, gid
, clean_ipc
);
/* GID reference failed — undo the UID reference to keep the pair atomic. */
4864 unit_unref_uid(u
, false);
4870 return r
> 0 || q
> 0;
/* Public wrapper: references UID+GID for the unit, deriving the clean_ipc flag
 * from the ExecContext's RemoveIPC= setting. Failure is logged but tolerated. */
4873 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4879 c
= unit_get_exec_context(u
);
4881 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4883 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
/* Drops both the UID and the GID reference of the unit in one call. */
4888 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4891 unit_unref_uid(u
, destroy_now
);
4892 unit_unref_gid(u
, destroy_now
);
/* Called when a forked-off process reports the UID/GID its user/group names
 * resolved to; records the reference and notifies D-Bus listeners. */
4895 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4900 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4901 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4902 * objects when no service references the UID/GID anymore. */
4904 r
= unit_ref_uid_gid(u
, uid
, gid
);
4906 bus_unit_send_change_signal(u
);
/* Sets (or clears, for SD_ID128_NULL) the unit's invocation ID and keeps the
 * manager's by-invocation-id hashmap in sync. On failure the ID is fully
 * reset rather than rolled back.
 * NOTE(review): extraction dropped some original lines (early returns, error
 * label); remaining tokens are kept byte-identical. */
4909 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4914 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
/* Unchanged — nothing to do. */
4916 if (sd_id128_equal(u
->invocation_id
, id
))
/* Drop the old hashmap entry before installing the new ID. */
4919 if (!sd_id128_is_null(u
->invocation_id
))
4920 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
/* Null ID means: clear the invocation ID. */
4922 if (sd_id128_is_null(id
)) {
4927 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
4931 u
->invocation_id
= id
;
4932 sd_id128_to_string(id
, u
->invocation_id_string
);
4934 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
/* Failure path: reset instead of rolling back (see comment above). */
4941 u
->invocation_id
= SD_ID128_NULL
;
4942 u
->invocation_id_string
[0] = 0;
/* Generates a fresh random invocation ID and installs it on the unit. */
4946 int unit_acquire_invocation_id(Unit
*u
) {
4952 r
= sd_id128_randomize(&id
);
4954 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
4956 r
= unit_set_invocation_id(u
, id
);
4958 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
/* Fills in the ExecParameters fields derived from the unit: its cgroup path
 * and whether cgroup delegation is enabled. */
4963 void unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
4967 p
->cgroup_path
= u
->cgroup_path
;
4968 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, UNIT_CGROUP_BOOL(u
, delegate
));
/* Forks off a helper process, resets signal handling in the child and moves it
 * into the unit's cgroup. Returns 0 in the child, > 0 in the parent; *ret is
 * always filled in with the child's PID. */
4971 int unit_fork_helper_process(Unit
*u
, pid_t
*ret
) {
4978 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
4979 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
/* Make sure the unit's cgroup exists before the child tries to join it. */
4981 (void) unit_realize_cgroup(u
);
/* Child: restore default signal dispositions inherited from PID 1. */
4989 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
, -1);
4990 (void) ignore_signals(SIGPIPE
, -1);
4995 if (u
->cgroup_path
) {
4996 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
4998 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", u
->cgroup_path
);
5003 *ret
= getpid_cached();
/* After some mask bits were cleared on a dependency, either drops the whole
 * hashmap entry (no bits left) or writes back the reduced masks. */
5011 static void unit_update_dependency_mask(Unit
*u
, UnitDependency d
, Unit
*other
, UnitDependencyInfo di
) {
5014 assert(d
< _UNIT_DEPENDENCY_MAX
);
5017 if (di
.origin_mask
== 0 && di
.destination_mask
== 0) {
5018 /* No bit set anymore, let's drop the whole entry */
5019 assert_se(hashmap_remove(u
->dependencies
[d
], other
));
5020 log_unit_debug(u
, "%s lost dependency %s=%s", u
->id
, unit_dependency_to_string(d
), other
->id
);
5022 /* Mask was reduced, let's update the entry */
/* hashmap_update() cannot fail here: the key was just looked up. */
5023 assert_se(hashmap_update(u
->dependencies
[d
], other
, di
.data
) == 0);
/* Removes all dependencies of 'u' that are owned by 'mask', including the
 * matching reverse entries stored on the other units, and queues the other
 * units for GC since they may have become unreferenced. */
5026 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5031 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5036 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
5040 UnitDependencyInfo di
;
5046 HASHMAP_FOREACH_KEY(di
.data
, other
, u
->dependencies
[d
], i
) {
/* Skip entries whose origin mask has no bits in 'mask'. */
5049 if ((di
.origin_mask
& ~mask
) == di
.origin_mask
)
5051 di
.origin_mask
&= ~mask
;
5052 unit_update_dependency_mask(u
, d
, other
, di
);
5054 /* We updated the dependency from our unit to the other unit now. But most dependencies
5055 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5056 * all dependency types on the other unit and delete all those which point to us and
5057 * have the right mask set. */
5059 for (q
= 0; q
< _UNIT_DEPENDENCY_MAX
; q
++) {
5060 UnitDependencyInfo dj
;
5062 dj
.data
= hashmap_get(other
->dependencies
[q
], u
);
/* Skip reverse entries whose destination mask has no bits in 'mask'. */
5063 if ((dj
.destination_mask
& ~mask
) == dj
.destination_mask
)
5065 dj
.destination_mask
&= ~mask
;
5067 unit_update_dependency_mask(other
, q
, u
, dj
);
/* The other unit may now be unreferenced — let the GC decide. */
5070 unit_add_to_gc_queue(other
);
/* Exports the unit's invocation ID as a symlink under /run/systemd/units/ so
 * journald can pick it up cheaply. Idempotent via exported_invocation_id. */
5080 static int unit_export_invocation_id(Unit
*u
) {
5086 if (u
->exported_invocation_id
)
5089 if (sd_id128_is_null(u
->invocation_id
))
5092 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
/* The ID string is stored as the symlink target. */
5093 r
= symlink_atomic(u
->invocation_id_string
, p
);
5095 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5097 u
->exported_invocation_id
= true;
/* Exports LogLevelMax= as a one-character symlink target under
 * /run/systemd/units/. Idempotent via exported_log_level_max. */
5101 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5109 if (u
->exported_log_level_max
)
/* Negative means "not configured". */
5112 if (c
->log_level_max
< 0)
5115 assert(c
->log_level_max
<= 7);
/* Encode the 0..7 syslog level as a single ASCII digit. */
5117 buf
[0] = '0' + c
->log_level_max
;
5120 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5121 r
= symlink_atomic(buf
, p
);
5123 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5125 u
->exported_log_level_max
= true;
/* Exports LogExtraFields= as a regular file under
 * /run/systemd/units/log-extra-fields:<id>, serialized as a sequence of
 * (little-endian 64-bit length, raw field data) pairs, written atomically via
 * a mkostemp temp file + rename. Idempotent via exported_log_extra_fields. */
5129 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5130 _cleanup_close_
int fd
= -1;
5131 struct iovec
*iovec
;
5139 if (u
->exported_log_extra_fields
)
5142 if (c
->n_log_extra_fields
<= 0)
/* One le64 size prefix per field, and two iovecs (size + data) per field. */
5145 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5146 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5148 for (i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5149 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5151 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5152 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5155 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5156 pattern
= strjoina(p
, ".XXXXXX");
5158 fd
= mkostemp_safe(pattern
);
5160 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5162 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5164 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
/* Make the file world-readable for journald. */
5168 (void) fchmod(fd
, 0644);
/* Atomically publish the temp file under the final name. */
5170 if (rename(pattern
, p
) < 0) {
5171 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5175 u
->exported_log_extra_fields
= true;
/* Error path: clean up the leftover temp file. */
5179 (void) unlink(pattern
);
/* Exports per-unit properties (invocation ID, max log level, extra log
 * fields) to /run/systemd/units/ for journald; system instance only, all
 * steps best-effort. */
5183 void unit_export_state_files(Unit
*u
) {
5184 const ExecContext
*c
;
5191 if (!MANAGER_IS_SYSTEM(u
->manager
))
5194 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5195 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5196 * the IPC system itself and PID 1 also log to the journal.
5198 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5199 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5200 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5201 * namespace at least.
5203 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5204 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5207 (void) unit_export_invocation_id(u
);
5209 c
= unit_get_exec_context(u
);
5211 (void) unit_export_log_level_max(u
, c
);
5212 (void) unit_export_log_extra_fields(u
, c
);
5216 void unit_unlink_state_files(Unit
*u
) {
5224 if (!MANAGER_IS_SYSTEM(u
->manager
))
5227 /* Undoes the effect of unit_export_state() */
5229 if (u
->exported_invocation_id
) {
5230 p
= strjoina("/run/systemd/units/invocation:", u
->id
);
5233 u
->exported_invocation_id
= false;
5236 if (u
->exported_log_level_max
) {
5237 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5240 u
->exported_log_level_max
= false;
5243 if (u
->exported_log_extra_fields
) {
5244 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5247 u
->exported_log_extra_fields
= false;
/* Prepares everything needed before forking a process for this unit: realizes
 * the cgroup, resets accounting if requested, exports the /run state files,
 * and sets up the ExecRuntime and dynamic credentials. */
5251 int unit_prepare_exec(Unit
*u
) {
5256 /* Prepares everything so that we can fork of a process for this unit */
5258 (void) unit_realize_cgroup(u
);
/* One-shot accounting reset requested (e.g. on restart). */
5260 if (u
->reset_accounting
) {
5261 (void) unit_reset_cpu_accounting(u
);
5262 (void) unit_reset_ip_accounting(u
);
5263 u
->reset_accounting
= false;
5266 unit_export_state_files(u
);
5268 r
= unit_setup_exec_runtime(u
);
5272 r
= unit_setup_dynamic_creds(u
);
/* cg_kill_log_func_t callback used by unit_warn_leftover_processes(): warns
 * about a process still present in the unit's cgroup at startup. */
5279 static void log_leftover(pid_t pid
, int sig
, void *userdata
) {
5280 _cleanup_free_
char *comm
= NULL
;
5282 (void) get_process_comm(pid
, &comm
);
5284 if (comm
&& comm
[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5287 log_unit_warning(userdata
,
5288 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5289 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
/* Walks the unit's cgroup (without sending any signal — sig 0, no flags) and
 * logs a warning for every left-over process found via log_leftover(). */
5293 void unit_warn_leftover_processes(Unit
*u
) {
5296 (void) unit_pick_cgroup_path(u
);
5298 if (!u
->cgroup_path
)
5301 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_leftover
, u
);
/* String table for the CollectMode= setting, plus the generated
 * collect_mode_from_string()/collect_mode_to_string() lookup helpers. */
5304 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5305 [COLLECT_INACTIVE
] = "inactive",
5306 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5309 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);