/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
27 #include "sd-messages.h"
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
38 #include "fileio-label.h"
39 #include "format-util.h"
40 #include "id128-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
51 #include "signal-util.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-util.h"
57 #include "umask-util.h"
58 #include "unit-name.h"
60 #include "user-util.h"
63 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
64 [UNIT_SERVICE
] = &service_vtable
,
65 [UNIT_SOCKET
] = &socket_vtable
,
66 [UNIT_BUSNAME
] = &busname_vtable
,
67 [UNIT_TARGET
] = &target_vtable
,
68 [UNIT_DEVICE
] = &device_vtable
,
69 [UNIT_MOUNT
] = &mount_vtable
,
70 [UNIT_AUTOMOUNT
] = &automount_vtable
,
71 [UNIT_SWAP
] = &swap_vtable
,
72 [UNIT_TIMER
] = &timer_vtable
,
73 [UNIT_PATH
] = &path_vtable
,
74 [UNIT_SLICE
] = &slice_vtable
,
75 [UNIT_SCOPE
] = &scope_vtable
78 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
80 Unit
*unit_new(Manager
*m
, size_t size
) {
84 assert(size
>= sizeof(Unit
));
90 u
->names
= set_new(&string_hash_ops
);
95 u
->type
= _UNIT_TYPE_INVALID
;
96 u
->default_dependencies
= true;
97 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
98 u
->unit_file_preset
= -1;
99 u
->on_failure_job_mode
= JOB_REPLACE
;
100 u
->cgroup_inotify_wd
= -1;
101 u
->job_timeout
= USEC_INFINITY
;
102 u
->ref_uid
= UID_INVALID
;
103 u
->ref_gid
= GID_INVALID
;
104 u
->cpu_usage_last
= NSEC_INFINITY
;
106 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
107 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
112 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
116 u
= unit_new(m
, size
);
120 r
= unit_add_name(u
, name
);
130 bool unit_has_name(Unit
*u
, const char *name
) {
134 return set_contains(u
->names
, (char*) name
);
137 static void unit_init(Unit
*u
) {
144 assert(u
->type
>= 0);
146 cc
= unit_get_cgroup_context(u
);
148 cgroup_context_init(cc
);
150 /* Copy in the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
154 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
155 cc
->io_accounting
= u
->manager
->default_io_accounting
;
156 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
157 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
158 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
160 if (u
->type
!= UNIT_SLICE
)
161 cc
->tasks_max
= u
->manager
->default_tasks_max
;
164 ec
= unit_get_exec_context(u
);
166 exec_context_init(ec
);
168 kc
= unit_get_kill_context(u
);
170 kill_context_init(kc
);
172 if (UNIT_VTABLE(u
)->init
)
173 UNIT_VTABLE(u
)->init(u
);
176 int unit_add_name(Unit
*u
, const char *text
) {
177 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
184 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
189 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
198 if (set_contains(u
->names
, s
))
200 if (hashmap_contains(u
->manager
->units
, s
))
203 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
206 t
= unit_name_to_type(s
);
210 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
213 r
= unit_name_to_instance(s
, &i
);
217 if (i
&& !unit_type_may_template(t
))
220 /* Ensure that this unit is either instanced or not instanced,
221 * but not both. Note that we do allow names with different
222 * instance names however! */
223 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
226 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
229 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
232 r
= set_put(u
->names
, s
);
237 r
= hashmap_put(u
->manager
->units
, s
, u
);
239 (void) set_remove(u
->names
, s
);
243 if (u
->type
== _UNIT_TYPE_INVALID
) {
248 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
257 unit_add_to_dbus_queue(u
);
261 int unit_choose_id(Unit
*u
, const char *name
) {
262 _cleanup_free_
char *t
= NULL
;
269 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
274 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
281 /* Selects one of the names of this unit as the id */
282 s
= set_get(u
->names
, (char*) name
);
286 /* Determine the new instance from the new id */
287 r
= unit_name_to_instance(s
, &i
);
296 unit_add_to_dbus_queue(u
);
301 int unit_set_description(Unit
*u
, const char *description
) {
306 if (isempty(description
))
309 s
= strdup(description
);
314 free(u
->description
);
317 unit_add_to_dbus_queue(u
);
321 bool unit_check_gc(Unit
*u
) {
322 UnitActiveState state
;
332 state
= unit_active_state(u
);
333 inactive
= state
== UNIT_INACTIVE
;
335 /* If the unit is inactive and failed and no job is queued for
336 * it, then release its runtime resources */
337 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
338 UNIT_VTABLE(u
)->release_resources
)
339 UNIT_VTABLE(u
)->release_resources(u
, inactive
);
341 /* But we keep the unit object around for longer when it is
342 * referenced or configured to not be gc'ed */
352 if (sd_bus_track_count(u
->bus_track
) > 0)
355 if (UNIT_VTABLE(u
)->check_gc
)
356 if (UNIT_VTABLE(u
)->check_gc(u
))
362 void unit_add_to_load_queue(Unit
*u
) {
364 assert(u
->type
!= _UNIT_TYPE_INVALID
);
366 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
369 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
370 u
->in_load_queue
= true;
373 void unit_add_to_cleanup_queue(Unit
*u
) {
376 if (u
->in_cleanup_queue
)
379 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
380 u
->in_cleanup_queue
= true;
383 void unit_add_to_gc_queue(Unit
*u
) {
386 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
389 if (unit_check_gc(u
))
392 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
393 u
->in_gc_queue
= true;
396 void unit_add_to_dbus_queue(Unit
*u
) {
398 assert(u
->type
!= _UNIT_TYPE_INVALID
);
400 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
403 /* Shortcut things if nobody cares */
404 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
405 sd_bus_track_count(u
->bus_track
) <= 0 &&
406 set_isempty(u
->manager
->private_buses
)) {
407 u
->sent_dbus_new_signal
= true;
411 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
412 u
->in_dbus_queue
= true;
415 static void bidi_set_free(Unit
*u
, Set
*s
) {
421 /* Frees the set and makes sure we are dropped from the
422 * inverse pointers */
424 SET_FOREACH(other
, s
, i
) {
427 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
428 set_remove(other
->dependencies
[d
], u
);
430 unit_add_to_gc_queue(other
);
436 static void unit_remove_transient(Unit
*u
) {
444 if (u
->fragment_path
)
445 (void) unlink(u
->fragment_path
);
447 STRV_FOREACH(i
, u
->dropin_paths
) {
448 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
450 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
454 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
458 /* Only drop transient drop-ins */
459 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
467 static void unit_free_requires_mounts_for(Unit
*u
) {
470 STRV_FOREACH(j
, u
->requires_mounts_for
) {
471 char s
[strlen(*j
) + 1];
473 PATH_FOREACH_PREFIX_MORE(s
, *j
) {
477 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
483 if (set_isempty(x
)) {
484 hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
491 u
->requires_mounts_for
= strv_free(u
->requires_mounts_for
);
494 static void unit_done(Unit
*u
) {
503 if (UNIT_VTABLE(u
)->done
)
504 UNIT_VTABLE(u
)->done(u
);
506 ec
= unit_get_exec_context(u
);
508 exec_context_done(ec
);
510 cc
= unit_get_cgroup_context(u
);
512 cgroup_context_done(cc
);
515 void unit_free(Unit
*u
) {
523 if (u
->transient_file
)
524 fclose(u
->transient_file
);
526 if (!MANAGER_IS_RELOADING(u
->manager
))
527 unit_remove_transient(u
);
529 bus_unit_send_removed_signal(u
);
533 sd_bus_slot_unref(u
->match_bus_slot
);
535 sd_bus_track_unref(u
->bus_track
);
536 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
538 unit_free_requires_mounts_for(u
);
540 SET_FOREACH(t
, u
->names
, i
)
541 hashmap_remove_value(u
->manager
->units
, t
, u
);
543 if (!sd_id128_is_null(u
->invocation_id
))
544 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
558 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
559 bidi_set_free(u
, u
->dependencies
[d
]);
561 if (u
->type
!= _UNIT_TYPE_INVALID
)
562 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
564 if (u
->in_load_queue
)
565 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
567 if (u
->in_dbus_queue
)
568 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
570 if (u
->in_cleanup_queue
)
571 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
574 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
576 if (u
->in_cgroup_queue
)
577 LIST_REMOVE(cgroup_queue
, u
->manager
->cgroup_queue
, u
);
579 unit_release_cgroup(u
);
581 unit_unref_uid_gid(u
, false);
583 (void) manager_update_failed_units(u
->manager
, u
, false);
584 set_remove(u
->manager
->startup_units
, u
);
586 free(u
->description
);
587 strv_free(u
->documentation
);
588 free(u
->fragment_path
);
589 free(u
->source_path
);
590 strv_free(u
->dropin_paths
);
593 free(u
->job_timeout_reboot_arg
);
595 set_free_free(u
->names
);
597 unit_unwatch_all_pids(u
);
599 condition_free_list(u
->conditions
);
600 condition_free_list(u
->asserts
);
604 unit_ref_unset(&u
->slice
);
607 unit_ref_unset(u
->refs
);
612 UnitActiveState
unit_active_state(Unit
*u
) {
615 if (u
->load_state
== UNIT_MERGED
)
616 return unit_active_state(unit_follow_merge(u
));
618 /* After a reload it might happen that a unit is not correctly
619 * loaded but still has a process around. That's why we won't
620 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
622 return UNIT_VTABLE(u
)->active_state(u
);
625 const char* unit_sub_state_to_string(Unit
*u
) {
628 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
631 static int complete_move(Set
**s
, Set
**other
) {
641 r
= set_move(*s
, *other
);
652 static int merge_names(Unit
*u
, Unit
*other
) {
660 r
= complete_move(&u
->names
, &other
->names
);
664 set_free_free(other
->names
);
668 SET_FOREACH(t
, u
->names
, i
)
669 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
674 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
679 assert(d
< _UNIT_DEPENDENCY_MAX
);
682 * If u does not have this dependency set allocated, there is no need
683 * to reserve anything. In that case other's set will be transferred
684 * as a whole to u by complete_move().
686 if (!u
->dependencies
[d
])
689 /* merge_dependencies() will skip a u-on-u dependency */
690 n_reserve
= set_size(other
->dependencies
[d
]) - !!set_get(other
->dependencies
[d
], u
);
692 return set_reserve(u
->dependencies
[d
], n_reserve
);
695 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
702 assert(d
< _UNIT_DEPENDENCY_MAX
);
704 /* Fix backwards pointers */
705 SET_FOREACH(back
, other
->dependencies
[d
], i
) {
708 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
709 /* Do not add dependencies between u and itself */
711 if (set_remove(back
->dependencies
[k
], other
))
712 maybe_warn_about_dependency(u
, other_id
, k
);
714 r
= set_remove_and_put(back
->dependencies
[k
], other
, u
);
716 set_remove(back
->dependencies
[k
], other
);
718 assert(r
>= 0 || r
== -ENOENT
);
723 /* Also do not move dependencies on u to itself */
724 back
= set_remove(other
->dependencies
[d
], u
);
726 maybe_warn_about_dependency(u
, other_id
, d
);
728 /* The move cannot fail. The caller must have performed a reservation. */
729 assert_se(complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
731 other
->dependencies
[d
] = set_free(other
->dependencies
[d
]);
734 int unit_merge(Unit
*u
, Unit
*other
) {
736 const char *other_id
= NULL
;
741 assert(u
->manager
== other
->manager
);
742 assert(u
->type
!= _UNIT_TYPE_INVALID
);
744 other
= unit_follow_merge(other
);
749 if (u
->type
!= other
->type
)
752 if (!u
->instance
!= !other
->instance
)
755 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
758 if (other
->load_state
!= UNIT_STUB
&&
759 other
->load_state
!= UNIT_NOT_FOUND
)
768 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
772 other_id
= strdupa(other
->id
);
774 /* Make reservations to ensure merge_dependencies() won't fail */
775 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
776 r
= reserve_dependencies(u
, other
, d
);
778 * We don't rollback reservations if we fail. We don't have
779 * a way to undo reservations. A reservation is not a leak.
786 r
= merge_names(u
, other
);
790 /* Redirect all references */
792 unit_ref_set(other
->refs
, u
);
794 /* Merge dependencies */
795 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
796 merge_dependencies(u
, other
, other_id
, d
);
798 other
->load_state
= UNIT_MERGED
;
799 other
->merged_into
= u
;
801 /* If there is still some data attached to the other node, we
802 * don't need it anymore, and can free it. */
803 if (other
->load_state
!= UNIT_STUB
)
804 if (UNIT_VTABLE(other
)->done
)
805 UNIT_VTABLE(other
)->done(other
);
807 unit_add_to_dbus_queue(u
);
808 unit_add_to_cleanup_queue(other
);
813 int unit_merge_by_name(Unit
*u
, const char *name
) {
814 _cleanup_free_
char *s
= NULL
;
821 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
825 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
832 other
= manager_get_unit(u
->manager
, name
);
834 return unit_merge(u
, other
);
836 return unit_add_name(u
, name
);
839 Unit
* unit_follow_merge(Unit
*u
) {
842 while (u
->load_state
== UNIT_MERGED
)
843 assert_se(u
= u
->merged_into
);
848 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
854 if (c
->working_directory
) {
855 r
= unit_require_mounts_for(u
, c
->working_directory
);
860 if (c
->root_directory
) {
861 r
= unit_require_mounts_for(u
, c
->root_directory
);
867 r
= unit_require_mounts_for(u
, c
->root_image
);
872 if (!MANAGER_IS_SYSTEM(u
->manager
))
875 if (c
->private_tmp
) {
878 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
879 r
= unit_require_mounts_for(u
, p
);
884 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, NULL
, true);
889 if (!IN_SET(c
->std_output
,
890 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
891 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
892 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
893 !IN_SET(c
->std_error
,
894 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
895 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
896 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
899 /* If syslog or kernel logging is requested, make sure our own
900 * logging daemon is run first. */
902 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, NULL
, true);
909 const char *unit_description(Unit
*u
) {
913 return u
->description
;
918 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
924 timestamp0
[FORMAT_TIMESTAMP_MAX
],
925 timestamp1
[FORMAT_TIMESTAMP_MAX
],
926 timestamp2
[FORMAT_TIMESTAMP_MAX
],
927 timestamp3
[FORMAT_TIMESTAMP_MAX
],
928 timestamp4
[FORMAT_TIMESTAMP_MAX
],
929 timespan
[FORMAT_TIMESPAN_MAX
];
931 _cleanup_set_free_ Set
*following_set
= NULL
;
936 assert(u
->type
>= 0);
938 prefix
= strempty(prefix
);
939 prefix2
= strjoina(prefix
, "\t");
943 "%s\tDescription: %s\n"
945 "%s\tUnit Load State: %s\n"
946 "%s\tUnit Active State: %s\n"
947 "%s\tState Change Timestamp: %s\n"
948 "%s\tInactive Exit Timestamp: %s\n"
949 "%s\tActive Enter Timestamp: %s\n"
950 "%s\tActive Exit Timestamp: %s\n"
951 "%s\tInactive Enter Timestamp: %s\n"
952 "%s\tGC Check Good: %s\n"
953 "%s\tNeed Daemon Reload: %s\n"
954 "%s\tTransient: %s\n"
955 "%s\tPerpetual: %s\n"
958 "%s\tCGroup realized: %s\n"
959 "%s\tCGroup mask: 0x%x\n"
960 "%s\tCGroup members mask: 0x%x\n",
962 prefix
, unit_description(u
),
963 prefix
, strna(u
->instance
),
964 prefix
, unit_load_state_to_string(u
->load_state
),
965 prefix
, unit_active_state_to_string(unit_active_state(u
)),
966 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
967 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
968 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
969 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
970 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
971 prefix
, yes_no(unit_check_gc(u
)),
972 prefix
, yes_no(unit_need_daemon_reload(u
)),
973 prefix
, yes_no(u
->transient
),
974 prefix
, yes_no(u
->perpetual
),
975 prefix
, strna(unit_slice_name(u
)),
976 prefix
, strna(u
->cgroup_path
),
977 prefix
, yes_no(u
->cgroup_realized
),
978 prefix
, u
->cgroup_realized_mask
,
979 prefix
, u
->cgroup_members_mask
);
981 SET_FOREACH(t
, u
->names
, i
)
982 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
984 if (!sd_id128_is_null(u
->invocation_id
))
985 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
986 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
988 STRV_FOREACH(j
, u
->documentation
)
989 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
991 following
= unit_following(u
);
993 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
995 r
= unit_following_set(u
, &following_set
);
999 SET_FOREACH(other
, following_set
, i
)
1000 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1003 if (u
->fragment_path
)
1004 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1007 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1009 STRV_FOREACH(j
, u
->dropin_paths
)
1010 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1012 if (u
->job_timeout
!= USEC_INFINITY
)
1013 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1015 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1016 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1018 if (u
->job_timeout_reboot_arg
)
1019 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1021 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1022 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1024 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1026 "%s\tCondition Timestamp: %s\n"
1027 "%s\tCondition Result: %s\n",
1028 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1029 prefix
, yes_no(u
->condition_result
));
1031 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1033 "%s\tAssert Timestamp: %s\n"
1034 "%s\tAssert Result: %s\n",
1035 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1036 prefix
, yes_no(u
->assert_result
));
1038 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1041 SET_FOREACH(other
, u
->dependencies
[d
], i
)
1042 fprintf(f
, "%s\t%s: %s\n", prefix
, unit_dependency_to_string(d
), other
->id
);
1045 if (!strv_isempty(u
->requires_mounts_for
)) {
1047 "%s\tRequiresMountsFor:", prefix
);
1049 STRV_FOREACH(j
, u
->requires_mounts_for
)
1050 fprintf(f
, " %s", *j
);
1055 if (u
->load_state
== UNIT_LOADED
) {
1058 "%s\tStopWhenUnneeded: %s\n"
1059 "%s\tRefuseManualStart: %s\n"
1060 "%s\tRefuseManualStop: %s\n"
1061 "%s\tDefaultDependencies: %s\n"
1062 "%s\tOnFailureJobMode: %s\n"
1063 "%s\tIgnoreOnIsolate: %s\n",
1064 prefix
, yes_no(u
->stop_when_unneeded
),
1065 prefix
, yes_no(u
->refuse_manual_start
),
1066 prefix
, yes_no(u
->refuse_manual_stop
),
1067 prefix
, yes_no(u
->default_dependencies
),
1068 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1069 prefix
, yes_no(u
->ignore_on_isolate
));
1071 if (UNIT_VTABLE(u
)->dump
)
1072 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1074 } else if (u
->load_state
== UNIT_MERGED
)
1076 "%s\tMerged into: %s\n",
1077 prefix
, u
->merged_into
->id
);
1078 else if (u
->load_state
== UNIT_ERROR
)
1079 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1081 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1082 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1085 job_dump(u
->job
, f
, prefix2
);
1088 job_dump(u
->nop_job
, f
, prefix2
);
1091 /* Common implementation for multiple backends */
1092 int unit_load_fragment_and_dropin(Unit
*u
) {
1098 /* Load a .{service,socket,...} file */
1099 r
= unit_load_fragment(u
);
1103 if (u
->load_state
== UNIT_STUB
)
1106 /* If the unit is an alias and the final unit has already been
1107 * loaded, there's no point in reloading the dropins one more time. */
1108 t
= unit_follow_merge(u
);
1109 if (t
!= u
&& t
->load_state
!= UNIT_STUB
)
1112 return unit_load_dropin(t
);
1115 /* Common implementation for multiple backends */
1116 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1122 /* Same as unit_load_fragment_and_dropin(), but whether
1123 * something can be loaded or not doesn't matter. */
1125 /* Load a .service file */
1126 r
= unit_load_fragment(u
);
1130 if (u
->load_state
== UNIT_STUB
)
1131 u
->load_state
= UNIT_LOADED
;
1133 /* If the unit is an alias and the final unit has already been
1134 * loaded, there's no point in reloading the dropins one more time. */
1135 t
= unit_follow_merge(u
);
1136 if (t
!= u
&& t
->load_state
!= UNIT_STUB
)
1139 return unit_load_dropin(t
);
1142 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1146 if (target
->type
!= UNIT_TARGET
)
1149 /* Only add the dependency if both units are loaded, so that
1150 * that loop check below is reliable */
1151 if (u
->load_state
!= UNIT_LOADED
||
1152 target
->load_state
!= UNIT_LOADED
)
1155 /* If either side wants no automatic dependencies, then let's
1157 if (!u
->default_dependencies
||
1158 !target
->default_dependencies
)
1161 /* Don't create loops */
1162 if (set_get(target
->dependencies
[UNIT_BEFORE
], u
))
1165 return unit_add_dependency(target
, UNIT_AFTER
, u
, true);
1168 static int unit_add_target_dependencies(Unit
*u
) {
1170 static const UnitDependency deps
[] = {
1184 for (k
= 0; k
< ELEMENTSOF(deps
); k
++)
1185 SET_FOREACH(target
, u
->dependencies
[deps
[k
]], i
) {
1186 r
= unit_add_default_target_dependency(u
, target
);
1194 static int unit_add_slice_dependencies(Unit
*u
) {
1197 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1200 if (UNIT_ISSET(u
->slice
))
1201 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true);
1203 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1206 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, NULL
, true);
1209 static int unit_add_mount_dependencies(Unit
*u
) {
1215 STRV_FOREACH(i
, u
->requires_mounts_for
) {
1216 char prefix
[strlen(*i
) + 1];
1218 PATH_FOREACH_PREFIX_MORE(prefix
, *i
) {
1219 _cleanup_free_
char *p
= NULL
;
1222 r
= unit_name_from_path(prefix
, ".mount", &p
);
1226 m
= manager_get_unit(u
->manager
, p
);
1228 /* Make sure to load the mount unit if
1229 * it exists. If so the dependencies
1230 * on this unit will be added later
1231 * during the loading of the mount
1233 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1239 if (m
->load_state
!= UNIT_LOADED
)
1242 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true);
1246 if (m
->fragment_path
) {
1247 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true);
1257 static int unit_add_startup_units(Unit
*u
) {
1261 c
= unit_get_cgroup_context(u
);
1265 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1266 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1267 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1270 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1274 return set_put(u
->manager
->startup_units
, u
);
1277 int unit_load(Unit
*u
) {
1282 if (u
->in_load_queue
) {
1283 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1284 u
->in_load_queue
= false;
1287 if (u
->type
== _UNIT_TYPE_INVALID
)
1290 if (u
->load_state
!= UNIT_STUB
)
1293 if (u
->transient_file
) {
1294 r
= fflush_and_check(u
->transient_file
);
1298 fclose(u
->transient_file
);
1299 u
->transient_file
= NULL
;
1301 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1304 if (UNIT_VTABLE(u
)->load
) {
1305 r
= UNIT_VTABLE(u
)->load(u
);
1310 if (u
->load_state
== UNIT_STUB
) {
1315 if (u
->load_state
== UNIT_LOADED
) {
1317 r
= unit_add_target_dependencies(u
);
1321 r
= unit_add_slice_dependencies(u
);
1325 r
= unit_add_mount_dependencies(u
);
1329 r
= unit_add_startup_units(u
);
1333 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& set_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1334 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1339 unit_update_cgroup_members_masks(u
);
1342 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1344 unit_add_to_dbus_queue(unit_follow_merge(u
));
1345 unit_add_to_gc_queue(u
);
1350 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
: UNIT_ERROR
;
1352 unit_add_to_dbus_queue(u
);
1353 unit_add_to_gc_queue(u
);
1355 log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1360 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1367 /* If the condition list is empty, then it is true */
1371 /* Otherwise, if all of the non-trigger conditions apply and
1372 * if any of the trigger conditions apply (unless there are
1373 * none) we return true */
1374 LIST_FOREACH(conditions
, c
, first
) {
1377 r
= condition_test(c
);
1380 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1382 c
->trigger
? "|" : "",
1383 c
->negate
? "!" : "",
1389 c
->trigger
? "|" : "",
1390 c
->negate
? "!" : "",
1392 condition_result_to_string(c
->result
));
1394 if (!c
->trigger
&& r
<= 0)
1397 if (c
->trigger
&& triggered
<= 0)
1401 return triggered
!= 0;
1404 static bool unit_condition_test(Unit
*u
) {
1407 dual_timestamp_get(&u
->condition_timestamp
);
1408 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1410 return u
->condition_result
;
1413 static bool unit_assert_test(Unit
*u
) {
1416 dual_timestamp_get(&u
->assert_timestamp
);
1417 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1419 return u
->assert_result
;
1422 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1423 DISABLE_WARNING_FORMAT_NONLITERAL
;
1424 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1428 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1430 const UnitStatusMessageFormats
*format_table
;
1433 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1435 if (t
!= JOB_RELOAD
) {
1436 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1438 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1444 /* Return generic strings */
1446 return "Starting %s.";
1447 else if (t
== JOB_STOP
)
1448 return "Stopping %s.";
1450 return "Reloading %s.";
1453 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1458 /* Reload status messages have traditionally not been printed to console. */
1459 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1462 format
= unit_get_status_message_format(u
, t
);
1464 DISABLE_WARNING_FORMAT_NONLITERAL
;
1465 unit_status_printf(u
, "", format
);
1469 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1470 const char *format
, *mid
;
1475 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1478 if (log_on_console())
1481 /* We log status messages for all units and all operations. */
1483 format
= unit_get_status_message_format(u
, t
);
1485 DISABLE_WARNING_FORMAT_NONLITERAL
;
1486 snprintf(buf
, sizeof buf
, format
, unit_description(u
));
1489 mid
= t
== JOB_START
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR
:
1490 t
== JOB_STOP
? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR
:
1491 "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR
;
1493 /* Note that we deliberately use LOG_MESSAGE() instead of
1494 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1495 * closely what is written to screen using the status output,
1496 * which is supposed the highest level, friendliest output
1497 * possible, which means we should avoid the low-level unit
1499 log_struct(LOG_INFO
,
1502 LOG_MESSAGE("%s", buf
),
1506 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1509 assert(t
< _JOB_TYPE_MAX
);
1511 unit_status_log_starting_stopping_reloading(u
, t
);
1512 unit_status_print_starting_stopping(u
, t
);
1515 int unit_start_limit_test(Unit
*u
) {
1518 if (ratelimit_test(&u
->start_limit
)) {
1519 u
->start_limit_hit
= false;
1523 log_unit_warning(u
, "Start request repeated too quickly.");
1524 u
->start_limit_hit
= true;
1526 return emergency_action(u
->manager
, u
->start_limit_action
, u
->reboot_arg
, "unit failed");
1529 bool unit_shall_confirm_spawn(Unit
*u
) {
1532 if (manager_is_confirm_spawn_disabled(u
->manager
))
1535 /* For some reasons units remaining in the same process group
1536 * as PID 1 fail to acquire the console even if it's not used
1537 * by any process. So skip the confirmation question for them. */
1538 return !unit_get_exec_context(u
)->same_pgrp
;
1541 static bool unit_verify_deps(Unit
*u
) {
1547 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1548 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1549 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1550 * conjunction with After= as for them any such check would make things entirely racy. */
1552 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], j
) {
1554 if (!set_contains(u
->dependencies
[UNIT_AFTER
], other
))
1557 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1558 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1567 * -EBADR: This unit type does not support starting.
1568 * -EALREADY: Unit is already started.
1569 * -EAGAIN: An operation is already in progress. Retry later.
1570 * -ECANCELED: Too many requests for now.
1571 * -EPROTO: Assert failed
1572 * -EINVAL: Unit not loaded
1573 * -EOPNOTSUPP: Unit type not supported
1574 * -ENOLINK: The necessary dependencies are not fulfilled.
1576 int unit_start(Unit
*u
) {
1577 UnitActiveState state
;
1582 /* If this is already started, then this will succeed. Note
1583 * that this will even succeed if this unit is not startable
1584 * by the user. This is relied on to detect when we need to
1585 * wait for units and when waiting is finished. */
1586 state
= unit_active_state(u
);
1587 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1590 /* Units that aren't loaded cannot be started */
1591 if (u
->load_state
!= UNIT_LOADED
)
1594 /* If the conditions failed, don't do anything at all. If we
1595 * already are activating this call might still be useful to
1596 * speed up activation in case there is some hold-off time,
1597 * but we don't want to recheck the condition in that case. */
1598 if (state
!= UNIT_ACTIVATING
&&
1599 !unit_condition_test(u
)) {
1600 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1604 /* If the asserts failed, fail the entire job */
1605 if (state
!= UNIT_ACTIVATING
&&
1606 !unit_assert_test(u
)) {
1607 log_unit_notice(u
, "Starting requested but asserts failed.");
1611 /* Units of types that aren't supported cannot be
1612 * started. Note that we do this test only after the condition
1613 * checks, so that we rather return condition check errors
1614 * (which are usually not considered a true failure) than "not
1615 * supported" errors (which are considered a failure).
1617 if (!unit_supported(u
))
1620 /* Let's make sure that the deps really are in order before we start this. Normally the job engine should have
1621 * taken care of this already, but let's check this here again. After all, our dependencies might not be in
1622 * effect anymore, due to a reload or due to a failed condition. */
1623 if (!unit_verify_deps(u
))
1626 /* Forward to the main object, if we aren't it. */
1627 following
= unit_following(u
);
1629 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1630 return unit_start(following
);
1633 /* If it is stopped, but we cannot start it, then fail */
1634 if (!UNIT_VTABLE(u
)->start
)
1637 /* We don't suppress calls to ->start() here when we are
1638 * already starting, to allow this request to be used as a
1639 * "hurry up" call, for example when the unit is in some "auto
1640 * restart" state where it waits for a holdoff timer to elapse
1641 * before it will start again. */
1643 unit_add_to_dbus_queue(u
);
1645 return UNIT_VTABLE(u
)->start(u
);
1648 bool unit_can_start(Unit
*u
) {
1651 if (u
->load_state
!= UNIT_LOADED
)
1654 if (!unit_supported(u
))
1657 return !!UNIT_VTABLE(u
)->start
;
1660 bool unit_can_isolate(Unit
*u
) {
1663 return unit_can_start(u
) &&
1668 * -EBADR: This unit type does not support stopping.
1669 * -EALREADY: Unit is already stopped.
1670 * -EAGAIN: An operation is already in progress. Retry later.
1672 int unit_stop(Unit
*u
) {
1673 UnitActiveState state
;
1678 state
= unit_active_state(u
);
1679 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1682 following
= unit_following(u
);
1684 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1685 return unit_stop(following
);
1688 if (!UNIT_VTABLE(u
)->stop
)
1691 unit_add_to_dbus_queue(u
);
1693 return UNIT_VTABLE(u
)->stop(u
);
1696 bool unit_can_stop(Unit
*u
) {
1699 if (!unit_supported(u
))
1705 return !!UNIT_VTABLE(u
)->stop
;
1709 * -EBADR: This unit type does not support reloading.
1710 * -ENOEXEC: Unit is not started.
1711 * -EAGAIN: An operation is already in progress. Retry later.
1713 int unit_reload(Unit
*u
) {
1714 UnitActiveState state
;
1719 if (u
->load_state
!= UNIT_LOADED
)
1722 if (!unit_can_reload(u
))
1725 state
= unit_active_state(u
);
1726 if (state
== UNIT_RELOADING
)
1729 if (state
!= UNIT_ACTIVE
) {
1730 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1734 following
= unit_following(u
);
1736 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1737 return unit_reload(following
);
1740 unit_add_to_dbus_queue(u
);
1742 return UNIT_VTABLE(u
)->reload(u
);
1745 bool unit_can_reload(Unit
*u
) {
1748 if (!UNIT_VTABLE(u
)->reload
)
1751 if (!UNIT_VTABLE(u
)->can_reload
)
1754 return UNIT_VTABLE(u
)->can_reload(u
);
1757 static void unit_check_unneeded(Unit
*u
) {
1759 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1761 static const UnitDependency needed_dependencies
[] = {
1775 /* If this service shall be shut down when unneeded then do
1778 if (!u
->stop_when_unneeded
)
1781 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
1784 for (j
= 0; j
< ELEMENTSOF(needed_dependencies
); j
++)
1785 SET_FOREACH(other
, u
->dependencies
[needed_dependencies
[j
]], i
)
1786 if (unit_active_or_pending(other
))
1789 /* If stopping a unit fails continuously we might enter a stop
1790 * loop here, hence stop acting on the service being
1791 * unnecessary after a while. */
1792 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1793 log_unit_warning(u
, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1797 log_unit_info(u
, "Unit not needed anymore. Stopping.");
1799 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1800 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1802 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1805 static void unit_check_binds_to(Unit
*u
) {
1806 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1817 if (unit_active_state(u
) != UNIT_ACTIVE
)
1820 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
1824 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1834 /* If stopping a unit fails continuously we might enter a stop
1835 * loop here, hence stop acting on the service being
1836 * unnecessary after a while. */
1837 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1838 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
1843 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
1845 /* A unit we need to run is gone. Sniff. Let's stop this. */
1846 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1848 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1851 static void retroactively_start_dependencies(Unit
*u
) {
1856 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
1858 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1859 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1860 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1861 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1863 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1864 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1865 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1866 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1868 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1869 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1870 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1871 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
1873 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
1874 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1875 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1877 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
1878 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1879 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1882 static void retroactively_stop_dependencies(Unit
*u
) {
1887 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1889 /* Pull down units which are bound to us recursively if enabled */
1890 SET_FOREACH(other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
1891 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1892 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1895 static void check_unneeded_dependencies(Unit
*u
) {
1900 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1902 /* Garbage collect services that might not be needed anymore, if enabled */
1903 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1904 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1905 unit_check_unneeded(other
);
1906 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1907 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1908 unit_check_unneeded(other
);
1909 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUISITE
], i
)
1910 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1911 unit_check_unneeded(other
);
1912 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1913 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1914 unit_check_unneeded(other
);
1917 void unit_start_on_failure(Unit
*u
) {
1923 if (set_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
1926 log_unit_info(u
, "Triggering OnFailure= dependencies.");
1928 SET_FOREACH(other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
1931 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, NULL
, NULL
);
1933 log_unit_error_errno(u
, r
, "Failed to enqueue OnFailure= job: %m");
1937 void unit_trigger_notify(Unit
*u
) {
1943 SET_FOREACH(other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
1944 if (UNIT_VTABLE(other
)->trigger_notify
)
1945 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
1948 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
1953 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
1954 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
1956 /* Note that this is called for all low-level state changes,
1957 * even if they might map to the same high-level
1958 * UnitActiveState! That means that ns == os is an expected
1959 * behavior here. For example: if a mount point is remounted
1960 * this function will be called too! */
1964 /* Update timestamps for state changes */
1965 if (!MANAGER_IS_RELOADING(m
)) {
1966 dual_timestamp_get(&u
->state_change_timestamp
);
1968 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
1969 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
1970 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
1971 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
1973 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1974 u
->active_enter_timestamp
= u
->state_change_timestamp
;
1975 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1976 u
->active_exit_timestamp
= u
->state_change_timestamp
;
1979 /* Keep track of failed units */
1980 (void) manager_update_failed_units(u
->manager
, u
, ns
== UNIT_FAILED
);
1982 /* Make sure the cgroup is always removed when we become inactive */
1983 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1984 unit_prune_cgroup(u
);
1986 /* Note that this doesn't apply to RemainAfterExit services exiting
1987 * successfully, since there's no change of state in that case. Which is
1988 * why it is handled in service_set_state() */
1989 if (UNIT_IS_INACTIVE_OR_FAILED(os
) != UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1992 ec
= unit_get_exec_context(u
);
1993 if (ec
&& exec_context_may_touch_console(ec
)) {
1994 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1997 if (m
->n_on_console
== 0)
1998 /* unset no_console_output flag, since the console is free */
1999 m
->no_console_output
= false;
2008 if (u
->job
->state
== JOB_WAITING
)
2010 /* So we reached a different state for this
2011 * job. Let's see if we can run it now if it
2012 * failed previously due to EAGAIN. */
2013 job_add_to_run_queue(u
->job
);
2015 /* Let's check whether this state change constitutes a
2016 * finished job, or maybe contradicts a running job and
2017 * hence needs to invalidate jobs. */
2019 switch (u
->job
->type
) {
2022 case JOB_VERIFY_ACTIVE
:
2024 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2025 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2026 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2029 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2030 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2036 case JOB_RELOAD_OR_START
:
2037 case JOB_TRY_RELOAD
:
2039 if (u
->job
->state
== JOB_RUNNING
) {
2040 if (ns
== UNIT_ACTIVE
)
2041 job_finish_and_invalidate(u
->job
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2042 else if (ns
!= UNIT_ACTIVATING
&& ns
!= UNIT_RELOADING
) {
2045 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2046 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2054 case JOB_TRY_RESTART
:
2056 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2057 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2058 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2060 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2066 assert_not_reached("Job type unknown");
2072 if (!MANAGER_IS_RELOADING(m
)) {
2074 /* If this state change happened without being
2075 * requested by a job, then let's retroactively start
2076 * or stop dependencies. We skip that step when
2077 * deserializing, since we don't want to create any
2078 * additional jobs just because something is already
2082 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2083 retroactively_start_dependencies(u
);
2084 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2085 retroactively_stop_dependencies(u
);
2088 /* stop unneeded units regardless if going down was expected or not */
2089 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2090 check_unneeded_dependencies(u
);
2092 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2093 log_unit_notice(u
, "Unit entered failed state.");
2094 unit_start_on_failure(u
);
2098 /* Some names are special */
2099 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2101 if (unit_has_name(u
, SPECIAL_DBUS_SERVICE
))
2102 /* The bus might have just become available,
2103 * hence try to connect to it, if we aren't
2107 if (u
->type
== UNIT_SERVICE
&&
2108 !UNIT_IS_ACTIVE_OR_RELOADING(os
) &&
2109 !MANAGER_IS_RELOADING(m
)) {
2110 /* Write audit record if we have just finished starting up */
2111 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, true);
2115 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
))
2116 manager_send_unit_plymouth(m
, u
);
2120 /* We don't care about D-Bus here, since we'll get an
2121 * asynchronous notification for it anyway. */
2123 if (u
->type
== UNIT_SERVICE
&&
2124 UNIT_IS_INACTIVE_OR_FAILED(ns
) &&
2125 !UNIT_IS_INACTIVE_OR_FAILED(os
) &&
2126 !MANAGER_IS_RELOADING(m
)) {
2128 /* Hmm, if there was no start record written
2129 * write it now, so that we always have a nice
2132 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, ns
== UNIT_INACTIVE
);
2134 if (ns
== UNIT_INACTIVE
)
2135 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, true);
2137 /* Write audit record if we have just finished shutting down */
2138 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, ns
== UNIT_INACTIVE
);
2140 u
->in_audit
= false;
2144 manager_recheck_journal(m
);
2145 unit_trigger_notify(u
);
2147 if (!MANAGER_IS_RELOADING(u
->manager
)) {
2148 /* Maybe we finished startup and are now ready for
2149 * being stopped because unneeded? */
2150 unit_check_unneeded(u
);
2152 /* Maybe we finished startup, but something we needed
2153 * has vanished? Let's die then. (This happens when
2154 * something BindsTo= to a Type=oneshot unit, as these
2155 * units go directly from starting to inactive,
2156 * without ever entering started.) */
2157 unit_check_binds_to(u
);
2160 unit_add_to_dbus_queue(u
);
2161 unit_add_to_gc_queue(u
);
2164 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2170 /* Watch a specific PID. We only support one or two units
2171 * watching each PID for now, not more. */
2173 r
= set_ensure_allocated(&u
->pids
, NULL
);
2177 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids1
, NULL
);
2181 r
= hashmap_put(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2183 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids2
, NULL
);
2187 r
= hashmap_put(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2190 q
= set_put(u
->pids
, PID_TO_PTR(pid
));
2197 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2201 (void) hashmap_remove_value(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2202 (void) hashmap_remove_value(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2203 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2206 void unit_unwatch_all_pids(Unit
*u
) {
2209 while (!set_isempty(u
->pids
))
2210 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2212 u
->pids
= set_free(u
->pids
);
2215 void unit_tidy_watch_pids(Unit
*u
, pid_t except1
, pid_t except2
) {
2221 /* Cleans dead PIDs from our list */
2223 SET_FOREACH(e
, u
->pids
, i
) {
2224 pid_t pid
= PTR_TO_PID(e
);
2226 if (pid
== except1
|| pid
== except2
)
2229 if (!pid_is_unwaited(pid
))
2230 unit_unwatch_pid(u
, pid
);
2234 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2236 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2240 case JOB_VERIFY_ACTIVE
:
2243 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2244 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2249 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2250 * external events), hence it makes no sense to permit enqueing such a request either. */
2251 return !u
->perpetual
;
2254 case JOB_TRY_RESTART
:
2255 return unit_can_stop(u
) && unit_can_start(u
);
2258 case JOB_TRY_RELOAD
:
2259 return unit_can_reload(u
);
2261 case JOB_RELOAD_OR_START
:
2262 return unit_can_reload(u
) && unit_can_start(u
);
2265 assert_not_reached("Invalid job type");
2269 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2272 /* Only warn about some unit types */
2273 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2276 if (streq_ptr(u
->id
, other
))
2277 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2279 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2282 int unit_add_dependency(Unit
*u
, UnitDependency d
, Unit
*other
, bool add_reference
) {
2284 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2285 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2286 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2287 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2288 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2289 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2290 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2291 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2292 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2293 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2294 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2295 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2296 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2297 [UNIT_BEFORE
] = UNIT_AFTER
,
2298 [UNIT_AFTER
] = UNIT_BEFORE
,
2299 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2300 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2301 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2302 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2303 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2304 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2305 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2306 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2308 int r
, q
= 0, v
= 0, w
= 0;
2309 Unit
*orig_u
= u
, *orig_other
= other
;
2312 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2315 u
= unit_follow_merge(u
);
2316 other
= unit_follow_merge(other
);
2318 /* We won't allow dependencies on ourselves. We will not
2319 * consider them an error however. */
2321 maybe_warn_about_dependency(orig_u
, orig_other
->id
, d
);
2325 if (d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) {
2326 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2330 r
= set_ensure_allocated(&u
->dependencies
[d
], NULL
);
2334 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
) {
2335 r
= set_ensure_allocated(&other
->dependencies
[inverse_table
[d
]], NULL
);
2340 if (add_reference
) {
2341 r
= set_ensure_allocated(&u
->dependencies
[UNIT_REFERENCES
], NULL
);
2345 r
= set_ensure_allocated(&other
->dependencies
[UNIT_REFERENCED_BY
], NULL
);
2350 q
= set_put(u
->dependencies
[d
], other
);
2354 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2355 v
= set_put(other
->dependencies
[inverse_table
[d
]], u
);
2362 if (add_reference
) {
2363 w
= set_put(u
->dependencies
[UNIT_REFERENCES
], other
);
2369 r
= set_put(other
->dependencies
[UNIT_REFERENCED_BY
], u
);
2374 unit_add_to_dbus_queue(u
);
2379 set_remove(u
->dependencies
[d
], other
);
2382 set_remove(other
->dependencies
[inverse_table
[d
]], u
);
2385 set_remove(u
->dependencies
[UNIT_REFERENCES
], other
);
2390 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
) {
2395 r
= unit_add_dependency(u
, d
, other
, add_reference
);
2399 return unit_add_dependency(u
, e
, other
, add_reference
);
2402 static int resolve_template(Unit
*u
, const char *name
, const char*path
, char **buf
, const char **ret
) {
2406 assert(name
|| path
);
2411 name
= basename(path
);
2413 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2420 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2422 _cleanup_free_
char *i
= NULL
;
2424 r
= unit_name_to_prefix(u
->id
, &i
);
2428 r
= unit_name_replace_instance(name
, i
, buf
);
2437 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, const char *path
, bool add_reference
) {
2438 _cleanup_free_
char *buf
= NULL
;
2443 assert(name
|| path
);
2445 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2449 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2453 return unit_add_dependency(u
, d
, other
, add_reference
);
2456 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, const char *path
, bool add_reference
) {
2457 _cleanup_free_
char *buf
= NULL
;
2462 assert(name
|| path
);
2464 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2468 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2472 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
);
/* Override the unit search path via the environment.
 * Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
2483 char *unit_dbus_path(Unit
*u
) {
2489 return unit_dbus_path_from_name(u
->id
);
2492 char *unit_dbus_path_invocation_id(Unit
*u
) {
2495 if (sd_id128_is_null(u
->invocation_id
))
2498 return unit_dbus_path_from_name(u
->invocation_id_string
);
2501 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2505 /* Sets the unit slice if it has not been set before. Is extra
2506 * careful, to only allow this for units that actually have a
2507 * cgroup context. Also, we don't allow to set this for slices
2508 * (since the parent slice is derived from the name). Make
2509 * sure the unit we set is actually a slice. */
2511 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2514 if (u
->type
== UNIT_SLICE
)
2517 if (unit_active_state(u
) != UNIT_INACTIVE
)
2520 if (slice
->type
!= UNIT_SLICE
)
2523 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
2524 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
2527 if (UNIT_DEREF(u
->slice
) == slice
)
2530 /* Disallow slice changes if @u is already bound to cgroups */
2531 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
2534 unit_ref_unset(&u
->slice
);
2535 unit_ref_set(&u
->slice
, slice
);
2539 int unit_set_default_slice(Unit
*u
) {
2540 _cleanup_free_
char *b
= NULL
;
2541 const char *slice_name
;
2547 if (UNIT_ISSET(u
->slice
))
2551 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
2553 /* Implicitly place all instantiated units in their
2554 * own per-template slice */
2556 r
= unit_name_to_prefix(u
->id
, &prefix
);
2560 /* The prefix is already escaped, but it might include
2561 * "-" which has a special meaning for slice units,
2562 * hence escape it here extra. */
2563 escaped
= unit_name_escape(prefix
);
2567 if (MANAGER_IS_SYSTEM(u
->manager
))
2568 b
= strjoin("system-", escaped
, ".slice");
2570 b
= strappend(escaped
, ".slice");
2577 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
2578 ? SPECIAL_SYSTEM_SLICE
2579 : SPECIAL_ROOT_SLICE
;
2581 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
2585 return unit_set_slice(u
, slice
);
2588 const char *unit_slice_name(Unit
*u
) {
2591 if (!UNIT_ISSET(u
->slice
))
2594 return UNIT_DEREF(u
->slice
)->id
;
2597 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
2598 _cleanup_free_
char *t
= NULL
;
2605 r
= unit_name_change_suffix(u
->id
, type
, &t
);
2608 if (unit_has_name(u
, t
))
2611 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
2612 assert(r
< 0 || *_found
!= u
);
2616 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
2617 const char *name
, *old_owner
, *new_owner
;
2624 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
2626 bus_log_parse_error(r
);
2630 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
2631 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
2636 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
2643 if (u
->match_bus_slot
)
2646 match
= strjoina("type='signal',"
2647 "sender='org.freedesktop.DBus',"
2648 "path='/org/freedesktop/DBus',"
2649 "interface='org.freedesktop.DBus',"
2650 "member='NameOwnerChanged',"
2651 "arg0='", name
, "'");
2653 return sd_bus_add_match(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, u
);
2656 int unit_watch_bus_name(Unit
*u
, const char *name
) {
2662 /* Watch a specific name on the bus. We only support one unit
2663 * watching each name for now. */
2665 if (u
->manager
->api_bus
) {
2666 /* If the bus is already available, install the match directly.
2667 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
2668 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
2670 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
2673 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
2675 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2676 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
2682 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
2686 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
2687 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2690 bool unit_can_serialize(Unit
*u
) {
2693 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
2696 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
2703 if (unit_can_serialize(u
)) {
2706 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
2710 rt
= unit_get_exec_runtime(u
);
2712 r
= exec_runtime_serialize(u
, rt
, f
, fds
);
2718 dual_timestamp_serialize(f
, "state-change-timestamp", &u
->state_change_timestamp
);
2720 dual_timestamp_serialize(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
2721 dual_timestamp_serialize(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
2722 dual_timestamp_serialize(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
2723 dual_timestamp_serialize(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
2725 dual_timestamp_serialize(f
, "condition-timestamp", &u
->condition_timestamp
);
2726 dual_timestamp_serialize(f
, "assert-timestamp", &u
->assert_timestamp
);
2728 if (dual_timestamp_is_set(&u
->condition_timestamp
))
2729 unit_serialize_item(u
, f
, "condition-result", yes_no(u
->condition_result
));
2731 if (dual_timestamp_is_set(&u
->assert_timestamp
))
2732 unit_serialize_item(u
, f
, "assert-result", yes_no(u
->assert_result
));
2734 unit_serialize_item(u
, f
, "transient", yes_no(u
->transient
));
2736 unit_serialize_item_format(u
, f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
2737 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
2738 unit_serialize_item_format(u
, f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
2741 unit_serialize_item(u
, f
, "cgroup", u
->cgroup_path
);
2742 unit_serialize_item(u
, f
, "cgroup-realized", yes_no(u
->cgroup_realized
));
2744 if (uid_is_valid(u
->ref_uid
))
2745 unit_serialize_item_format(u
, f
, "ref-uid", UID_FMT
, u
->ref_uid
);
2746 if (gid_is_valid(u
->ref_gid
))
2747 unit_serialize_item_format(u
, f
, "ref-gid", GID_FMT
, u
->ref_gid
);
2749 if (!sd_id128_is_null(u
->invocation_id
))
2750 unit_serialize_item_format(u
, f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
2752 bus_track_serialize(u
->bus_track
, f
, "ref");
2754 if (serialize_jobs
) {
2756 fprintf(f
, "job\n");
2757 job_serialize(u
->job
, f
);
2761 fprintf(f
, "job\n");
2762 job_serialize(u
->nop_job
, f
);
2771 int unit_serialize_item(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
2787 int unit_serialize_item_escaped(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
2788 _cleanup_free_
char *c
= NULL
;
2809 int unit_serialize_item_fd(Unit
*u
, FILE *f
, FDSet
*fds
, const char *key
, int fd
) {
2819 copy
= fdset_put_dup(fds
, fd
);
2823 fprintf(f
, "%s=%i\n", key
, copy
);
2827 void unit_serialize_item_format(Unit
*u
, FILE *f
, const char *key
, const char *format
, ...) {
2838 va_start(ap
, format
);
2839 vfprintf(f
, format
, ap
);
2845 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
2846 ExecRuntime
**rt
= NULL
;
2854 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
2856 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
2859 char line
[LINE_MAX
], *l
, *v
;
2862 if (!fgets(line
, sizeof(line
), f
)) {
2875 k
= strcspn(l
, "=");
2883 if (streq(l
, "job")) {
2885 /* new-style serialized job */
2892 r
= job_deserialize(j
, f
);
2898 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
2904 r
= job_install_deserialized(j
);
2906 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
2910 } else /* legacy for pre-44 */
2911 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
2913 } else if (streq(l
, "state-change-timestamp")) {
2914 dual_timestamp_deserialize(v
, &u
->state_change_timestamp
);
2916 } else if (streq(l
, "inactive-exit-timestamp")) {
2917 dual_timestamp_deserialize(v
, &u
->inactive_exit_timestamp
);
2919 } else if (streq(l
, "active-enter-timestamp")) {
2920 dual_timestamp_deserialize(v
, &u
->active_enter_timestamp
);
2922 } else if (streq(l
, "active-exit-timestamp")) {
2923 dual_timestamp_deserialize(v
, &u
->active_exit_timestamp
);
2925 } else if (streq(l
, "inactive-enter-timestamp")) {
2926 dual_timestamp_deserialize(v
, &u
->inactive_enter_timestamp
);
2928 } else if (streq(l
, "condition-timestamp")) {
2929 dual_timestamp_deserialize(v
, &u
->condition_timestamp
);
2931 } else if (streq(l
, "assert-timestamp")) {
2932 dual_timestamp_deserialize(v
, &u
->assert_timestamp
);
2934 } else if (streq(l
, "condition-result")) {
2936 r
= parse_boolean(v
);
2938 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
2940 u
->condition_result
= r
;
2944 } else if (streq(l
, "assert-result")) {
2946 r
= parse_boolean(v
);
2948 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
2950 u
->assert_result
= r
;
2954 } else if (streq(l
, "transient")) {
2956 r
= parse_boolean(v
);
2958 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
2964 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
2966 r
= safe_atou64(v
, &u
->cpu_usage_base
);
2968 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
2972 } else if (streq(l
, "cpu-usage-last")) {
2974 r
= safe_atou64(v
, &u
->cpu_usage_last
);
2976 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
2980 } else if (streq(l
, "cgroup")) {
2982 r
= unit_set_cgroup_path(u
, v
);
2984 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
2986 (void) unit_watch_cgroup(u
);
2989 } else if (streq(l
, "cgroup-realized")) {
2992 b
= parse_boolean(v
);
2994 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
2996 u
->cgroup_realized
= b
;
3000 } else if (streq(l
, "ref-uid")) {
3003 r
= parse_uid(v
, &uid
);
3005 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
3007 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
3011 } else if (streq(l
, "ref-gid")) {
3014 r
= parse_gid(v
, &gid
);
3016 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
3018 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
3020 } else if (streq(l
, "ref")) {
3022 r
= strv_extend(&u
->deserialized_refs
, v
);
3027 } else if (streq(l
, "invocation-id")) {
3030 r
= sd_id128_from_string(v
, &id
);
3032 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3034 r
= unit_set_invocation_id(u
, id
);
3036 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3042 if (unit_can_serialize(u
)) {
3044 r
= exec_runtime_deserialize_item(u
, rt
, l
, v
, fds
);
3046 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3050 /* Returns positive if key was handled by the call */
3055 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3057 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3061 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3062 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3063 * before 228 where the base for timeouts was not persistent across reboots. */
3065 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3066 dual_timestamp_get(&u
->state_change_timestamp
);
3071 int unit_add_node_link(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
) {
3073 _cleanup_free_
char *e
= NULL
;
3078 /* Adds in links to the device node that this unit is based on */
3082 if (!is_device_path(what
))
3085 /* When device units aren't supported (such as in a
3086 * container), don't create dependencies on them. */
3087 if (!unit_type_supported(UNIT_DEVICE
))
3090 r
= unit_name_from_path(what
, ".device", &e
);
3094 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3098 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3099 dep
= UNIT_BINDS_TO
;
3101 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3102 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3108 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false);
3116 int unit_coldplug(Unit
*u
) {
3122 /* Make sure we don't enter a loop, when coldplugging
3127 u
->coldplugged
= true;
3129 STRV_FOREACH(i
, u
->deserialized_refs
) {
3130 q
= bus_unit_track_add_name(u
, *i
);
3131 if (q
< 0 && r
>= 0)
3134 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3136 if (UNIT_VTABLE(u
)->coldplug
) {
3137 q
= UNIT_VTABLE(u
)->coldplug(u
);
3138 if (q
< 0 && r
>= 0)
3143 q
= job_coldplug(u
->job
);
3144 if (q
< 0 && r
>= 0)
3151 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3157 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3158 * are never out-of-date. */
3159 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3162 if (stat(path
, &st
) < 0)
3163 /* What, cannot access this anymore? */
3167 /* For masked files check if they are still so */
3168 return !null_or_empty(&st
);
3170 /* For non-empty files check the mtime */
3171 return timespec_load(&st
.st_mtim
) > mtime
;
3176 bool unit_need_daemon_reload(Unit
*u
) {
3177 _cleanup_strv_free_
char **t
= NULL
;
3182 /* For unit files, we allow masking… */
3183 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3184 u
->load_state
== UNIT_MASKED
))
3187 /* Source paths should not be masked… */
3188 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3191 (void) unit_find_dropin_paths(u
, &t
);
3192 if (!strv_equal(u
->dropin_paths
, t
))
3195 /* … any drop-ins that are masked are simply omitted from the list. */
3196 STRV_FOREACH(path
, u
->dropin_paths
)
3197 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3203 void unit_reset_failed(Unit
*u
) {
3206 if (UNIT_VTABLE(u
)->reset_failed
)
3207 UNIT_VTABLE(u
)->reset_failed(u
);
3209 RATELIMIT_RESET(u
->start_limit
);
3210 u
->start_limit_hit
= false;
3213 Unit
*unit_following(Unit
*u
) {
3216 if (UNIT_VTABLE(u
)->following
)
3217 return UNIT_VTABLE(u
)->following(u
);
3222 bool unit_stop_pending(Unit
*u
) {
3225 /* This call does check the current state of the unit. It's
3226 * hence useful to be called from state change calls of the
3227 * unit itself, where the state isn't updated yet. This is
3228 * different from unit_inactive_or_pending() which checks both
3229 * the current state and for a queued job. */
3231 return u
->job
&& u
->job
->type
== JOB_STOP
;
3234 bool unit_inactive_or_pending(Unit
*u
) {
3237 /* Returns true if the unit is inactive or going down */
3239 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3242 if (unit_stop_pending(u
))
3248 bool unit_active_or_pending(Unit
*u
) {
3251 /* Returns true if the unit is active or going up */
3253 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3257 (u
->job
->type
== JOB_START
||
3258 u
->job
->type
== JOB_RELOAD_OR_START
||
3259 u
->job
->type
== JOB_RESTART
))
3265 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3267 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3268 assert(SIGNAL_VALID(signo
));
3270 if (!UNIT_VTABLE(u
)->kill
)
3273 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3276 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3280 pid_set
= set_new(NULL
);
3284 /* Exclude the main/control pids from being killed via the cgroup */
3286 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3291 if (control_pid
> 0) {
3292 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3304 int unit_kill_common(
3310 sd_bus_error
*error
) {
3313 bool killed
= false;
3315 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3317 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3318 else if (main_pid
== 0)
3319 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3322 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3323 if (control_pid
< 0)
3324 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3325 else if (control_pid
== 0)
3326 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3329 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3330 if (control_pid
> 0) {
3331 if (kill(control_pid
, signo
) < 0)
3337 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3339 if (kill(main_pid
, signo
) < 0)
3345 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3346 _cleanup_set_free_ Set
*pid_set
= NULL
;
3349 /* Exclude the main/control pids from being killed via the cgroup */
3350 pid_set
= unit_pid_set(main_pid
, control_pid
);
3354 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3355 if (q
< 0 && q
!= -EAGAIN
&& q
!= -ESRCH
&& q
!= -ENOENT
)
3361 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
3367 int unit_following_set(Unit
*u
, Set
**s
) {
3371 if (UNIT_VTABLE(u
)->following_set
)
3372 return UNIT_VTABLE(u
)->following_set(u
, s
);
3378 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3383 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3384 r
= unit_file_get_state(
3385 u
->manager
->unit_file_scope
,
3387 basename(u
->fragment_path
),
3388 &u
->unit_file_state
);
3390 u
->unit_file_state
= UNIT_FILE_BAD
;
3393 return u
->unit_file_state
;
3396 int unit_get_unit_file_preset(Unit
*u
) {
3399 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3400 u
->unit_file_preset
= unit_file_query_preset(
3401 u
->manager
->unit_file_scope
,
3403 basename(u
->fragment_path
));
3405 return u
->unit_file_preset
;
3408 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*u
) {
3413 unit_ref_unset(ref
);
3416 LIST_PREPEND(refs
, u
->refs
, ref
);
3420 void unit_ref_unset(UnitRef
*ref
) {
3426 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3427 * be unreferenced now. */
3428 unit_add_to_gc_queue(ref
->unit
);
3430 LIST_REMOVE(refs
, ref
->unit
->refs
, ref
);
3434 static int user_from_unit_name(Unit
*u
, char **ret
) {
3436 static const uint8_t hash_key
[] = {
3437 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3438 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3441 _cleanup_free_
char *n
= NULL
;
3444 r
= unit_name_to_prefix(u
->id
, &n
);
3448 if (valid_user_group_name(n
)) {
3454 /* If we can't use the unit name as a user name, then let's hash it and use that */
3455 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
3461 int unit_patch_contexts(Unit
*u
) {
3469 /* Patch in the manager defaults into the exec and cgroup
3470 * contexts, _after_ the rest of the settings have been
3473 ec
= unit_get_exec_context(u
);
3475 /* This only copies in the ones that need memory */
3476 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
3477 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
3478 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
3483 if (MANAGER_IS_USER(u
->manager
) &&
3484 !ec
->working_directory
) {
3486 r
= get_home_dir(&ec
->working_directory
);
3490 /* Allow user services to run, even if the
3491 * home directory is missing */
3492 ec
->working_directory_missing_ok
= true;
3495 if (ec
->private_devices
)
3496 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
3498 if (ec
->protect_kernel_modules
)
3499 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
3501 if (ec
->dynamic_user
) {
3503 r
= user_from_unit_name(u
, &ec
->user
);
3509 ec
->group
= strdup(ec
->user
);
3514 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3515 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3517 ec
->private_tmp
= true;
3518 ec
->remove_ipc
= true;
3519 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
3520 if (ec
->protect_home
== PROTECT_HOME_NO
)
3521 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
3525 cc
= unit_get_cgroup_context(u
);
3529 ec
->private_devices
&&
3530 cc
->device_policy
== CGROUP_AUTO
)
3531 cc
->device_policy
= CGROUP_CLOSED
;
3537 ExecContext
*unit_get_exec_context(Unit
*u
) {
3544 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
3548 return (ExecContext
*) ((uint8_t*) u
+ offset
);
3551 KillContext
*unit_get_kill_context(Unit
*u
) {
3558 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
3562 return (KillContext
*) ((uint8_t*) u
+ offset
);
3565 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
3571 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
3575 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
3578 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
3584 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3588 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
3591 static const char* unit_drop_in_dir(Unit
*u
, UnitSetPropertiesMode mode
) {
3594 if (!IN_SET(mode
, UNIT_RUNTIME
, UNIT_PERSISTENT
))
3597 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
3598 return u
->manager
->lookup_paths
.transient
;
3600 if (mode
== UNIT_RUNTIME
)
3601 return u
->manager
->lookup_paths
.runtime_control
;
3603 if (mode
== UNIT_PERSISTENT
)
3604 return u
->manager
->lookup_paths
.persistent_control
;
3609 int unit_write_drop_in(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3610 _cleanup_free_
char *p
= NULL
, *q
= NULL
;
3611 const char *dir
, *wrapped
;
3616 if (u
->transient_file
) {
3617 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3618 * write to the transient unit file. */
3619 fputs(data
, u
->transient_file
);
3620 fputc('\n', u
->transient_file
);
3624 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3627 dir
= unit_drop_in_dir(u
, mode
);
3631 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3632 "# or an equivalent operation. Do not edit.\n",
3636 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
3640 (void) mkdir_p(p
, 0755);
3641 r
= write_string_file_atomic_label(q
, wrapped
);
3645 r
= strv_push(&u
->dropin_paths
, q
);
3650 strv_uniq(u
->dropin_paths
);
3652 u
->dropin_mtime
= now(CLOCK_REALTIME
);
3657 int unit_write_drop_in_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3658 _cleanup_free_
char *p
= NULL
;
3666 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3669 va_start(ap
, format
);
3670 r
= vasprintf(&p
, format
, ap
);
3676 return unit_write_drop_in(u
, mode
, name
, p
);
3679 int unit_write_drop_in_private(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3686 if (!UNIT_VTABLE(u
)->private_section
)
3689 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3692 ndata
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
3694 return unit_write_drop_in(u
, mode
, name
, ndata
);
3697 int unit_write_drop_in_private_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3698 _cleanup_free_
char *p
= NULL
;
3706 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3709 va_start(ap
, format
);
3710 r
= vasprintf(&p
, format
, ap
);
3716 return unit_write_drop_in_private(u
, mode
, name
, p
);
3719 int unit_make_transient(Unit
*u
) {
3725 if (!UNIT_VTABLE(u
)->can_transient
)
3728 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
3732 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3733 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3735 RUN_WITH_UMASK(0022) {
3736 f
= fopen(path
, "we");
3743 if (u
->transient_file
)
3744 fclose(u
->transient_file
);
3745 u
->transient_file
= f
;
3747 free(u
->fragment_path
);
3748 u
->fragment_path
= path
;
3750 u
->source_path
= mfree(u
->source_path
);
3751 u
->dropin_paths
= strv_free(u
->dropin_paths
);
3752 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
3754 u
->load_state
= UNIT_STUB
;
3756 u
->transient
= true;
3758 unit_add_to_dbus_queue(u
);
3759 unit_add_to_gc_queue(u
);
3761 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3767 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
3768 _cleanup_free_
char *comm
= NULL
;
3770 (void) get_process_comm(pid
, &comm
);
3772 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3773 only, like for example systemd's own PAM stub process. */
3774 if (comm
&& comm
[0] == '(')
3777 log_unit_notice(userdata
,
3778 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
3781 signal_to_string(sig
));
3784 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
3789 case KILL_TERMINATE
:
3790 case KILL_TERMINATE_AND_LOG
:
3791 return c
->kill_signal
;
3800 assert_not_reached("KillOperation unknown");
3804 int unit_kill_context(
3810 bool main_pid_alien
) {
3812 bool wait_for_exit
= false, send_sighup
;
3813 cg_kill_log_func_t log_func
= NULL
;
3819 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
3820 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
3822 if (c
->kill_mode
== KILL_NONE
)
3825 sig
= operation_to_signal(c
, k
);
3829 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
3832 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
3833 log_func
= log_kill
;
3837 log_func(main_pid
, sig
, u
);
3839 r
= kill_and_sigcont(main_pid
, sig
);
3840 if (r
< 0 && r
!= -ESRCH
) {
3841 _cleanup_free_
char *comm
= NULL
;
3842 (void) get_process_comm(main_pid
, &comm
);
3844 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
3846 if (!main_pid_alien
)
3847 wait_for_exit
= true;
3849 if (r
!= -ESRCH
&& send_sighup
)
3850 (void) kill(main_pid
, SIGHUP
);
3854 if (control_pid
> 0) {
3856 log_func(control_pid
, sig
, u
);
3858 r
= kill_and_sigcont(control_pid
, sig
);
3859 if (r
< 0 && r
!= -ESRCH
) {
3860 _cleanup_free_
char *comm
= NULL
;
3861 (void) get_process_comm(control_pid
, &comm
);
3863 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
3865 wait_for_exit
= true;
3867 if (r
!= -ESRCH
&& send_sighup
)
3868 (void) kill(control_pid
, SIGHUP
);
3872 if (u
->cgroup_path
&&
3873 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
3874 _cleanup_set_free_ Set
*pid_set
= NULL
;
3876 /* Exclude the main/control pids from being killed via the cgroup */
3877 pid_set
= unit_pid_set(main_pid
, control_pid
);
3881 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3883 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
3887 if (r
!= -EAGAIN
&& r
!= -ESRCH
&& r
!= -ENOENT
)
3888 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
3892 /* FIXME: For now, on the legacy hierarchy, we
3893 * will not wait for the cgroup members to die
3894 * if we are running in a container or if this
3895 * is a delegation unit, simply because cgroup
3896 * notification is unreliable in these
3897 * cases. It doesn't work at all in
3898 * containers, and outside of containers it
3899 * can be confused easily by left-over
3900 * directories in the cgroup — which however
3901 * should not exist in non-delegated units. On
3902 * the unified hierarchy that's different,
3903 * there we get proper events. Hence rely on
3906 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
3907 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
3908 wait_for_exit
= true;
3913 pid_set
= unit_pid_set(main_pid
, control_pid
);
3917 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3926 return wait_for_exit
;
3929 int unit_require_mounts_for(Unit
*u
, const char *path
) {
3930 char prefix
[strlen(path
) + 1], *p
;
3936 /* Registers a unit for requiring a certain path and all its
3937 * prefixes. We keep a simple array of these paths in the
3938 * unit, since it's usually short. However, we build a prefix
3939 * table for all possible prefixes so that new appearing mount
3940 * units can easily determine which units to make themselves a
3943 if (!path_is_absolute(path
))
3950 path_kill_slashes(p
);
3952 if (!path_is_safe(p
)) {
3957 if (strv_contains(u
->requires_mounts_for
, p
)) {
3962 r
= strv_consume(&u
->requires_mounts_for
, p
);
3966 PATH_FOREACH_PREFIX_MORE(prefix
, p
) {
3969 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
3973 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &string_hash_ops
);
3987 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
4003 int unit_setup_exec_runtime(Unit
*u
) {
4009 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4012 /* Check if there already is an ExecRuntime for this unit? */
4013 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4017 /* Try to get it from somebody else */
4018 SET_FOREACH(other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
4020 *rt
= unit_get_exec_runtime(other
);
4022 exec_runtime_ref(*rt
);
4027 return exec_runtime_make(rt
, unit_get_exec_context(u
), u
->id
);
4030 int unit_setup_dynamic_creds(Unit
*u
) {
4032 DynamicCreds
*dcreds
;
4037 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
4039 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4041 ec
= unit_get_exec_context(u
);
4044 if (!ec
->dynamic_user
)
4047 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
4050 bool unit_type_supported(UnitType t
) {
4051 if (_unlikely_(t
< 0))
4053 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4056 if (!unit_vtable
[t
]->supported
)
4059 return unit_vtable
[t
]->supported();
4062 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4068 r
= dir_is_empty(where
);
4072 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4076 log_struct(LOG_NOTICE
,
4077 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4079 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
4084 int unit_fail_if_symlink(Unit
*u
, const char* where
) {
4090 r
= is_symlink(where
);
4092 log_unit_debug_errno(u
, r
, "Failed to check symlink %s, ignoring: %m", where
);
4099 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4101 LOG_UNIT_MESSAGE(u
, "Mount on symlink %s not allowed.", where
),
4108 bool unit_is_pristine(Unit
*u
) {
4111 /* Check if the unit already exists or is already around,
4112 * in a number of different ways. Note that to cater for unit
4113 * types such as slice, we are generally fine with units that
4114 * are marked UNIT_LOADED even though nothing was
4115 * actually loaded, as those unit types don't require a file
4116 * on disk to validly load. */
4118 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4121 !strv_isempty(u
->dropin_paths
) ||
4126 pid_t
unit_control_pid(Unit
*u
) {
4129 if (UNIT_VTABLE(u
)->control_pid
)
4130 return UNIT_VTABLE(u
)->control_pid(u
);
4135 pid_t
unit_main_pid(Unit
*u
) {
4138 if (UNIT_VTABLE(u
)->main_pid
)
4139 return UNIT_VTABLE(u
)->main_pid(u
);
4144 static void unit_unref_uid_internal(
4148 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4152 assert(_manager_unref_uid
);
4154 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4155 * gid_t are actually the same type, with the same validity rules.
4157 * Drops a reference to UID/GID from a unit. */
4159 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4160 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4162 if (!uid_is_valid(*ref_uid
))
4165 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4166 *ref_uid
= UID_INVALID
;
4169 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4170 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4173 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4174 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4177 static int unit_ref_uid_internal(
4182 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4188 assert(uid_is_valid(uid
));
4189 assert(_manager_ref_uid
);
4191 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4192 * are actually the same type, and have the same validity rules.
4194 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4195 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4198 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4199 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4201 if (*ref_uid
== uid
)
4204 if (uid_is_valid(*ref_uid
)) /* Already set? */
4207 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
4215 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4216 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
4219 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4220 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
4223 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4228 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4230 if (uid_is_valid(uid
)) {
4231 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4236 if (gid_is_valid(gid
)) {
4237 q
= unit_ref_gid(u
, gid
, clean_ipc
);
4240 unit_unref_uid(u
, false);
4246 return r
> 0 || q
> 0;
4249 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4255 c
= unit_get_exec_context(u
);
4257 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4259 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4264 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4267 unit_unref_uid(u
, destroy_now
);
4268 unit_unref_gid(u
, destroy_now
);
4271 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4276 /* This is invoked whenever one of the forked off processes lets us know the UID/GID its user name/group names
4277 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4278 * objects when no service references the UID/GID anymore. */
4280 r
= unit_ref_uid_gid(u
, uid
, gid
);
4282 bus_unit_send_change_signal(u
);
4285 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4290 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4292 if (sd_id128_equal(u
->invocation_id
, id
))
4295 if (!sd_id128_is_null(u
->invocation_id
))
4296 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4298 if (sd_id128_is_null(id
)) {
4303 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
4307 u
->invocation_id
= id
;
4308 sd_id128_to_string(id
, u
->invocation_id_string
);
4310 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4317 u
->invocation_id
= SD_ID128_NULL
;
4318 u
->invocation_id_string
[0] = 0;
4322 int unit_acquire_invocation_id(Unit
*u
) {
4328 r
= sd_id128_randomize(&id
);
4330 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
4332 r
= unit_set_invocation_id(u
, id
);
4334 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");