/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
27 #include "sd-messages.h"
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
38 #include "fileio-label.h"
39 #include "formats-util.h"
40 #include "load-dropin.h"
41 #include "load-fragment.h"
46 #include "parse-util.h"
47 #include "path-util.h"
48 #include "process-util.h"
50 #include "signal-util.h"
52 #include "stat-util.h"
53 #include "stdio-util.h"
54 #include "string-util.h"
56 #include "umask-util.h"
57 #include "unit-name.h"
59 #include "user-util.h"
62 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
63 [UNIT_SERVICE
] = &service_vtable
,
64 [UNIT_SOCKET
] = &socket_vtable
,
65 [UNIT_BUSNAME
] = &busname_vtable
,
66 [UNIT_TARGET
] = &target_vtable
,
67 [UNIT_DEVICE
] = &device_vtable
,
68 [UNIT_MOUNT
] = &mount_vtable
,
69 [UNIT_AUTOMOUNT
] = &automount_vtable
,
70 [UNIT_SWAP
] = &swap_vtable
,
71 [UNIT_TIMER
] = &timer_vtable
,
72 [UNIT_PATH
] = &path_vtable
,
73 [UNIT_SLICE
] = &slice_vtable
,
74 [UNIT_SCOPE
] = &scope_vtable
77 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
79 Unit
*unit_new(Manager
*m
, size_t size
) {
83 assert(size
>= sizeof(Unit
));
89 u
->names
= set_new(&string_hash_ops
);
96 u
->type
= _UNIT_TYPE_INVALID
;
97 u
->default_dependencies
= true;
98 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
99 u
->unit_file_preset
= -1;
100 u
->on_failure_job_mode
= JOB_REPLACE
;
101 u
->cgroup_inotify_wd
= -1;
102 u
->job_timeout
= USEC_INFINITY
;
103 u
->ref_uid
= UID_INVALID
;
104 u
->ref_gid
= GID_INVALID
;
105 u
->cpu_usage_last
= NSEC_INFINITY
;
107 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
108 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
113 bool unit_has_name(Unit
*u
, const char *name
) {
117 return set_contains(u
->names
, (char*) name
);
120 static void unit_init(Unit
*u
) {
127 assert(u
->type
>= 0);
129 cc
= unit_get_cgroup_context(u
);
131 cgroup_context_init(cc
);
133 /* Copy in the manager defaults into the cgroup
134 * context, _before_ the rest of the settings have
135 * been initialized */
137 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
138 cc
->io_accounting
= u
->manager
->default_io_accounting
;
139 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
140 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
141 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
143 if (u
->type
!= UNIT_SLICE
)
144 cc
->tasks_max
= u
->manager
->default_tasks_max
;
147 ec
= unit_get_exec_context(u
);
149 exec_context_init(ec
);
151 kc
= unit_get_kill_context(u
);
153 kill_context_init(kc
);
155 if (UNIT_VTABLE(u
)->init
)
156 UNIT_VTABLE(u
)->init(u
);
159 int unit_add_name(Unit
*u
, const char *text
) {
160 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
167 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
172 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
181 if (set_contains(u
->names
, s
))
183 if (hashmap_contains(u
->manager
->units
, s
))
186 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
189 t
= unit_name_to_type(s
);
193 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
196 r
= unit_name_to_instance(s
, &i
);
200 if (i
&& !unit_type_may_template(t
))
203 /* Ensure that this unit is either instanced or not instanced,
204 * but not both. Note that we do allow names with different
205 * instance names however! */
206 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
209 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
212 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
215 r
= set_put(u
->names
, s
);
220 r
= hashmap_put(u
->manager
->units
, s
, u
);
222 (void) set_remove(u
->names
, s
);
226 if (u
->type
== _UNIT_TYPE_INVALID
) {
231 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
240 unit_add_to_dbus_queue(u
);
244 int unit_choose_id(Unit
*u
, const char *name
) {
245 _cleanup_free_
char *t
= NULL
;
252 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
257 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
264 /* Selects one of the names of this unit as the id */
265 s
= set_get(u
->names
, (char*) name
);
269 /* Determine the new instance from the new id */
270 r
= unit_name_to_instance(s
, &i
);
279 unit_add_to_dbus_queue(u
);
284 int unit_set_description(Unit
*u
, const char *description
) {
289 if (isempty(description
))
292 s
= strdup(description
);
297 free(u
->description
);
300 unit_add_to_dbus_queue(u
);
304 bool unit_check_gc(Unit
*u
) {
305 UnitActiveState state
;
314 state
= unit_active_state(u
);
316 /* If the unit is inactive and failed and no job is queued for
317 * it, then release its runtime resources */
318 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
319 UNIT_VTABLE(u
)->release_resources
)
320 UNIT_VTABLE(u
)->release_resources(u
);
322 /* But we keep the unit object around for longer when it is
323 * referenced or configured to not be gc'ed */
324 if (state
!= UNIT_INACTIVE
)
333 if (sd_bus_track_count(u
->bus_track
) > 0)
336 if (UNIT_VTABLE(u
)->check_gc
)
337 if (UNIT_VTABLE(u
)->check_gc(u
))
343 void unit_add_to_load_queue(Unit
*u
) {
345 assert(u
->type
!= _UNIT_TYPE_INVALID
);
347 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
350 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
351 u
->in_load_queue
= true;
354 void unit_add_to_cleanup_queue(Unit
*u
) {
357 if (u
->in_cleanup_queue
)
360 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
361 u
->in_cleanup_queue
= true;
364 void unit_add_to_gc_queue(Unit
*u
) {
367 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
370 if (unit_check_gc(u
))
373 LIST_PREPEND(gc_queue
, u
->manager
->gc_queue
, u
);
374 u
->in_gc_queue
= true;
376 u
->manager
->n_in_gc_queue
++;
379 void unit_add_to_dbus_queue(Unit
*u
) {
381 assert(u
->type
!= _UNIT_TYPE_INVALID
);
383 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
386 /* Shortcut things if nobody cares */
387 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
388 set_isempty(u
->manager
->private_buses
)) {
389 u
->sent_dbus_new_signal
= true;
393 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
394 u
->in_dbus_queue
= true;
397 static void bidi_set_free(Unit
*u
, Set
*s
) {
403 /* Frees the set and makes sure we are dropped from the
404 * inverse pointers */
406 SET_FOREACH(other
, s
, i
) {
409 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
410 set_remove(other
->dependencies
[d
], u
);
412 unit_add_to_gc_queue(other
);
418 static void unit_remove_transient(Unit
*u
) {
426 if (u
->fragment_path
)
427 (void) unlink(u
->fragment_path
);
429 STRV_FOREACH(i
, u
->dropin_paths
) {
430 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
432 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
436 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
440 /* Only drop transient drop-ins */
441 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
449 static void unit_free_requires_mounts_for(Unit
*u
) {
452 STRV_FOREACH(j
, u
->requires_mounts_for
) {
453 char s
[strlen(*j
) + 1];
455 PATH_FOREACH_PREFIX_MORE(s
, *j
) {
459 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
465 if (set_isempty(x
)) {
466 hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
473 u
->requires_mounts_for
= strv_free(u
->requires_mounts_for
);
476 static void unit_done(Unit
*u
) {
485 if (UNIT_VTABLE(u
)->done
)
486 UNIT_VTABLE(u
)->done(u
);
488 ec
= unit_get_exec_context(u
);
490 exec_context_done(ec
);
492 cc
= unit_get_cgroup_context(u
);
494 cgroup_context_done(cc
);
497 void unit_free(Unit
*u
) {
504 if (u
->transient_file
)
505 fclose(u
->transient_file
);
507 if (!MANAGER_IS_RELOADING(u
->manager
))
508 unit_remove_transient(u
);
510 bus_unit_send_removed_signal(u
);
514 sd_bus_slot_unref(u
->match_bus_slot
);
516 sd_bus_track_unref(u
->bus_track
);
517 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
519 unit_free_requires_mounts_for(u
);
521 SET_FOREACH(t
, u
->names
, i
)
522 hashmap_remove_value(u
->manager
->units
, t
, u
);
536 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
537 bidi_set_free(u
, u
->dependencies
[d
]);
539 if (u
->type
!= _UNIT_TYPE_INVALID
)
540 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
542 if (u
->in_load_queue
)
543 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
545 if (u
->in_dbus_queue
)
546 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
548 if (u
->in_cleanup_queue
)
549 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
551 if (u
->in_gc_queue
) {
552 LIST_REMOVE(gc_queue
, u
->manager
->gc_queue
, u
);
553 u
->manager
->n_in_gc_queue
--;
556 if (u
->in_cgroup_queue
)
557 LIST_REMOVE(cgroup_queue
, u
->manager
->cgroup_queue
, u
);
559 unit_release_cgroup(u
);
561 unit_unref_uid_gid(u
, false);
563 (void) manager_update_failed_units(u
->manager
, u
, false);
564 set_remove(u
->manager
->startup_units
, u
);
566 free(u
->description
);
567 strv_free(u
->documentation
);
568 free(u
->fragment_path
);
569 free(u
->source_path
);
570 strv_free(u
->dropin_paths
);
573 free(u
->job_timeout_reboot_arg
);
575 set_free_free(u
->names
);
577 unit_unwatch_all_pids(u
);
579 condition_free_list(u
->conditions
);
580 condition_free_list(u
->asserts
);
584 unit_ref_unset(&u
->slice
);
587 unit_ref_unset(u
->refs
);
592 UnitActiveState
unit_active_state(Unit
*u
) {
595 if (u
->load_state
== UNIT_MERGED
)
596 return unit_active_state(unit_follow_merge(u
));
598 /* After a reload it might happen that a unit is not correctly
599 * loaded but still has a process around. That's why we won't
600 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
602 return UNIT_VTABLE(u
)->active_state(u
);
605 const char* unit_sub_state_to_string(Unit
*u
) {
608 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
611 static int complete_move(Set
**s
, Set
**other
) {
621 r
= set_move(*s
, *other
);
632 static int merge_names(Unit
*u
, Unit
*other
) {
640 r
= complete_move(&u
->names
, &other
->names
);
644 set_free_free(other
->names
);
648 SET_FOREACH(t
, u
->names
, i
)
649 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
654 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
659 assert(d
< _UNIT_DEPENDENCY_MAX
);
662 * If u does not have this dependency set allocated, there is no need
663 * to reserve anything. In that case other's set will be transferred
664 * as a whole to u by complete_move().
666 if (!u
->dependencies
[d
])
669 /* merge_dependencies() will skip a u-on-u dependency */
670 n_reserve
= set_size(other
->dependencies
[d
]) - !!set_get(other
->dependencies
[d
], u
);
672 return set_reserve(u
->dependencies
[d
], n_reserve
);
675 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
682 assert(d
< _UNIT_DEPENDENCY_MAX
);
684 /* Fix backwards pointers */
685 SET_FOREACH(back
, other
->dependencies
[d
], i
) {
688 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
689 /* Do not add dependencies between u and itself */
691 if (set_remove(back
->dependencies
[k
], other
))
692 maybe_warn_about_dependency(u
, other_id
, k
);
694 r
= set_remove_and_put(back
->dependencies
[k
], other
, u
);
696 set_remove(back
->dependencies
[k
], other
);
698 assert(r
>= 0 || r
== -ENOENT
);
703 /* Also do not move dependencies on u to itself */
704 back
= set_remove(other
->dependencies
[d
], u
);
706 maybe_warn_about_dependency(u
, other_id
, d
);
708 /* The move cannot fail. The caller must have performed a reservation. */
709 assert_se(complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
711 other
->dependencies
[d
] = set_free(other
->dependencies
[d
]);
714 int unit_merge(Unit
*u
, Unit
*other
) {
716 const char *other_id
= NULL
;
721 assert(u
->manager
== other
->manager
);
722 assert(u
->type
!= _UNIT_TYPE_INVALID
);
724 other
= unit_follow_merge(other
);
729 if (u
->type
!= other
->type
)
732 if (!u
->instance
!= !other
->instance
)
735 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
738 if (other
->load_state
!= UNIT_STUB
&&
739 other
->load_state
!= UNIT_NOT_FOUND
)
748 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
752 other_id
= strdupa(other
->id
);
754 /* Make reservations to ensure merge_dependencies() won't fail */
755 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
756 r
= reserve_dependencies(u
, other
, d
);
758 * We don't rollback reservations if we fail. We don't have
759 * a way to undo reservations. A reservation is not a leak.
766 r
= merge_names(u
, other
);
770 /* Redirect all references */
772 unit_ref_set(other
->refs
, u
);
774 /* Merge dependencies */
775 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
776 merge_dependencies(u
, other
, other_id
, d
);
778 other
->load_state
= UNIT_MERGED
;
779 other
->merged_into
= u
;
781 /* If there is still some data attached to the other node, we
782 * don't need it anymore, and can free it. */
783 if (other
->load_state
!= UNIT_STUB
)
784 if (UNIT_VTABLE(other
)->done
)
785 UNIT_VTABLE(other
)->done(other
);
787 unit_add_to_dbus_queue(u
);
788 unit_add_to_cleanup_queue(other
);
793 int unit_merge_by_name(Unit
*u
, const char *name
) {
794 _cleanup_free_
char *s
= NULL
;
801 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
805 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
812 other
= manager_get_unit(u
->manager
, name
);
814 return unit_merge(u
, other
);
816 return unit_add_name(u
, name
);
819 Unit
* unit_follow_merge(Unit
*u
) {
822 while (u
->load_state
== UNIT_MERGED
)
823 assert_se(u
= u
->merged_into
);
828 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
834 if (c
->working_directory
) {
835 r
= unit_require_mounts_for(u
, c
->working_directory
);
840 if (c
->root_directory
) {
841 r
= unit_require_mounts_for(u
, c
->root_directory
);
846 if (!MANAGER_IS_SYSTEM(u
->manager
))
849 if (c
->private_tmp
) {
850 r
= unit_require_mounts_for(u
, "/tmp");
854 r
= unit_require_mounts_for(u
, "/var/tmp");
859 if (c
->std_output
!= EXEC_OUTPUT_KMSG
&&
860 c
->std_output
!= EXEC_OUTPUT_SYSLOG
&&
861 c
->std_output
!= EXEC_OUTPUT_JOURNAL
&&
862 c
->std_output
!= EXEC_OUTPUT_KMSG_AND_CONSOLE
&&
863 c
->std_output
!= EXEC_OUTPUT_SYSLOG_AND_CONSOLE
&&
864 c
->std_output
!= EXEC_OUTPUT_JOURNAL_AND_CONSOLE
&&
865 c
->std_error
!= EXEC_OUTPUT_KMSG
&&
866 c
->std_error
!= EXEC_OUTPUT_SYSLOG
&&
867 c
->std_error
!= EXEC_OUTPUT_JOURNAL
&&
868 c
->std_error
!= EXEC_OUTPUT_KMSG_AND_CONSOLE
&&
869 c
->std_error
!= EXEC_OUTPUT_JOURNAL_AND_CONSOLE
&&
870 c
->std_error
!= EXEC_OUTPUT_SYSLOG_AND_CONSOLE
)
873 /* If syslog or kernel logging is requested, make sure our own
874 * logging daemon is run first. */
876 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, NULL
, true);
883 const char *unit_description(Unit
*u
) {
887 return u
->description
;
892 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
898 timestamp0
[FORMAT_TIMESTAMP_MAX
],
899 timestamp1
[FORMAT_TIMESTAMP_MAX
],
900 timestamp2
[FORMAT_TIMESTAMP_MAX
],
901 timestamp3
[FORMAT_TIMESTAMP_MAX
],
902 timestamp4
[FORMAT_TIMESTAMP_MAX
],
903 timespan
[FORMAT_TIMESPAN_MAX
];
905 _cleanup_set_free_ Set
*following_set
= NULL
;
910 assert(u
->type
>= 0);
912 prefix
= strempty(prefix
);
913 prefix2
= strjoina(prefix
, "\t");
917 "%s\tDescription: %s\n"
919 "%s\tUnit Load State: %s\n"
920 "%s\tUnit Active State: %s\n"
921 "%s\tState Change Timestamp: %s\n"
922 "%s\tInactive Exit Timestamp: %s\n"
923 "%s\tActive Enter Timestamp: %s\n"
924 "%s\tActive Exit Timestamp: %s\n"
925 "%s\tInactive Enter Timestamp: %s\n"
926 "%s\tGC Check Good: %s\n"
927 "%s\tNeed Daemon Reload: %s\n"
928 "%s\tTransient: %s\n"
931 "%s\tCGroup realized: %s\n"
932 "%s\tCGroup mask: 0x%x\n"
933 "%s\tCGroup members mask: 0x%x\n",
935 prefix
, unit_description(u
),
936 prefix
, strna(u
->instance
),
937 prefix
, unit_load_state_to_string(u
->load_state
),
938 prefix
, unit_active_state_to_string(unit_active_state(u
)),
939 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
940 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
941 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
942 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
943 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
944 prefix
, yes_no(unit_check_gc(u
)),
945 prefix
, yes_no(unit_need_daemon_reload(u
)),
946 prefix
, yes_no(u
->transient
),
947 prefix
, strna(unit_slice_name(u
)),
948 prefix
, strna(u
->cgroup_path
),
949 prefix
, yes_no(u
->cgroup_realized
),
950 prefix
, u
->cgroup_realized_mask
,
951 prefix
, u
->cgroup_members_mask
);
953 SET_FOREACH(t
, u
->names
, i
)
954 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
956 STRV_FOREACH(j
, u
->documentation
)
957 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
959 following
= unit_following(u
);
961 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
963 r
= unit_following_set(u
, &following_set
);
967 SET_FOREACH(other
, following_set
, i
)
968 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
971 if (u
->fragment_path
)
972 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
975 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
977 STRV_FOREACH(j
, u
->dropin_paths
)
978 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
980 if (u
->job_timeout
!= USEC_INFINITY
)
981 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
983 if (u
->job_timeout_action
!= FAILURE_ACTION_NONE
)
984 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, failure_action_to_string(u
->job_timeout_action
));
986 if (u
->job_timeout_reboot_arg
)
987 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
989 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
990 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
992 if (dual_timestamp_is_set(&u
->condition_timestamp
))
994 "%s\tCondition Timestamp: %s\n"
995 "%s\tCondition Result: %s\n",
996 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
997 prefix
, yes_no(u
->condition_result
));
999 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1001 "%s\tAssert Timestamp: %s\n"
1002 "%s\tAssert Result: %s\n",
1003 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1004 prefix
, yes_no(u
->assert_result
));
1006 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1009 SET_FOREACH(other
, u
->dependencies
[d
], i
)
1010 fprintf(f
, "%s\t%s: %s\n", prefix
, unit_dependency_to_string(d
), other
->id
);
1013 if (!strv_isempty(u
->requires_mounts_for
)) {
1015 "%s\tRequiresMountsFor:", prefix
);
1017 STRV_FOREACH(j
, u
->requires_mounts_for
)
1018 fprintf(f
, " %s", *j
);
1023 if (u
->load_state
== UNIT_LOADED
) {
1026 "%s\tStopWhenUnneeded: %s\n"
1027 "%s\tRefuseManualStart: %s\n"
1028 "%s\tRefuseManualStop: %s\n"
1029 "%s\tDefaultDependencies: %s\n"
1030 "%s\tOnFailureJobMode: %s\n"
1031 "%s\tIgnoreOnIsolate: %s\n",
1032 prefix
, yes_no(u
->stop_when_unneeded
),
1033 prefix
, yes_no(u
->refuse_manual_start
),
1034 prefix
, yes_no(u
->refuse_manual_stop
),
1035 prefix
, yes_no(u
->default_dependencies
),
1036 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1037 prefix
, yes_no(u
->ignore_on_isolate
));
1039 if (UNIT_VTABLE(u
)->dump
)
1040 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1042 } else if (u
->load_state
== UNIT_MERGED
)
1044 "%s\tMerged into: %s\n",
1045 prefix
, u
->merged_into
->id
);
1046 else if (u
->load_state
== UNIT_ERROR
)
1047 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1049 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1050 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1053 job_dump(u
->job
, f
, prefix2
);
1056 job_dump(u
->nop_job
, f
, prefix2
);
1060 /* Common implementation for multiple backends */
1061 int unit_load_fragment_and_dropin(Unit
*u
) {
1066 /* Load a .{service,socket,...} file */
1067 r
= unit_load_fragment(u
);
1071 if (u
->load_state
== UNIT_STUB
)
1074 /* Load drop-in directory data */
1075 r
= unit_load_dropin(unit_follow_merge(u
));
1082 /* Common implementation for multiple backends */
1083 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1088 /* Same as unit_load_fragment_and_dropin(), but whether
1089 * something can be loaded or not doesn't matter. */
1091 /* Load a .service file */
1092 r
= unit_load_fragment(u
);
1096 if (u
->load_state
== UNIT_STUB
)
1097 u
->load_state
= UNIT_LOADED
;
1099 /* Load drop-in directory data */
1100 r
= unit_load_dropin(unit_follow_merge(u
));
1107 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1111 if (target
->type
!= UNIT_TARGET
)
1114 /* Only add the dependency if both units are loaded, so that
1115 * that loop check below is reliable */
1116 if (u
->load_state
!= UNIT_LOADED
||
1117 target
->load_state
!= UNIT_LOADED
)
1120 /* If either side wants no automatic dependencies, then let's
1122 if (!u
->default_dependencies
||
1123 !target
->default_dependencies
)
1126 /* Don't create loops */
1127 if (set_get(target
->dependencies
[UNIT_BEFORE
], u
))
1130 return unit_add_dependency(target
, UNIT_AFTER
, u
, true);
1133 static int unit_add_target_dependencies(Unit
*u
) {
1135 static const UnitDependency deps
[] = {
1149 for (k
= 0; k
< ELEMENTSOF(deps
); k
++)
1150 SET_FOREACH(target
, u
->dependencies
[deps
[k
]], i
) {
1151 r
= unit_add_default_target_dependency(u
, target
);
1159 static int unit_add_slice_dependencies(Unit
*u
) {
1162 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1165 if (UNIT_ISSET(u
->slice
))
1166 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true);
1168 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1171 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, NULL
, true);
1174 static int unit_add_mount_dependencies(Unit
*u
) {
1180 STRV_FOREACH(i
, u
->requires_mounts_for
) {
1181 char prefix
[strlen(*i
) + 1];
1183 PATH_FOREACH_PREFIX_MORE(prefix
, *i
) {
1184 _cleanup_free_
char *p
= NULL
;
1187 r
= unit_name_from_path(prefix
, ".mount", &p
);
1191 m
= manager_get_unit(u
->manager
, p
);
1193 /* Make sure to load the mount unit if
1194 * it exists. If so the dependencies
1195 * on this unit will be added later
1196 * during the loading of the mount
1198 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1204 if (m
->load_state
!= UNIT_LOADED
)
1207 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true);
1211 if (m
->fragment_path
) {
1212 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true);
1222 static int unit_add_startup_units(Unit
*u
) {
1226 c
= unit_get_cgroup_context(u
);
1230 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1231 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1232 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1235 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1239 return set_put(u
->manager
->startup_units
, u
);
1242 int unit_load(Unit
*u
) {
1247 if (u
->in_load_queue
) {
1248 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1249 u
->in_load_queue
= false;
1252 if (u
->type
== _UNIT_TYPE_INVALID
)
1255 if (u
->load_state
!= UNIT_STUB
)
1258 if (u
->transient_file
) {
1259 r
= fflush_and_check(u
->transient_file
);
1263 fclose(u
->transient_file
);
1264 u
->transient_file
= NULL
;
1266 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1269 if (UNIT_VTABLE(u
)->load
) {
1270 r
= UNIT_VTABLE(u
)->load(u
);
1275 if (u
->load_state
== UNIT_STUB
) {
1280 if (u
->load_state
== UNIT_LOADED
) {
1282 r
= unit_add_target_dependencies(u
);
1286 r
= unit_add_slice_dependencies(u
);
1290 r
= unit_add_mount_dependencies(u
);
1294 r
= unit_add_startup_units(u
);
1298 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& set_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1299 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1304 unit_update_cgroup_members_masks(u
);
1307 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1309 unit_add_to_dbus_queue(unit_follow_merge(u
));
1310 unit_add_to_gc_queue(u
);
1315 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
: UNIT_ERROR
;
1317 unit_add_to_dbus_queue(u
);
1318 unit_add_to_gc_queue(u
);
1320 log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1325 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1332 /* If the condition list is empty, then it is true */
1336 /* Otherwise, if all of the non-trigger conditions apply and
1337 * if any of the trigger conditions apply (unless there are
1338 * none) we return true */
1339 LIST_FOREACH(conditions
, c
, first
) {
1342 r
= condition_test(c
);
1345 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1347 c
->trigger
? "|" : "",
1348 c
->negate
? "!" : "",
1354 c
->trigger
? "|" : "",
1355 c
->negate
? "!" : "",
1357 condition_result_to_string(c
->result
));
1359 if (!c
->trigger
&& r
<= 0)
1362 if (c
->trigger
&& triggered
<= 0)
1366 return triggered
!= 0;
1369 static bool unit_condition_test(Unit
*u
) {
1372 dual_timestamp_get(&u
->condition_timestamp
);
1373 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1375 return u
->condition_result
;
1378 static bool unit_assert_test(Unit
*u
) {
1381 dual_timestamp_get(&u
->assert_timestamp
);
1382 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1384 return u
->assert_result
;
1387 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1388 DISABLE_WARNING_FORMAT_NONLITERAL
;
1389 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1393 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1395 const UnitStatusMessageFormats
*format_table
;
1398 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1400 if (t
!= JOB_RELOAD
) {
1401 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1403 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1409 /* Return generic strings */
1411 return "Starting %s.";
1412 else if (t
== JOB_STOP
)
1413 return "Stopping %s.";
1415 return "Reloading %s.";
1418 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1423 /* Reload status messages have traditionally not been printed to console. */
1424 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1427 format
= unit_get_status_message_format(u
, t
);
1429 DISABLE_WARNING_FORMAT_NONLITERAL
;
1430 unit_status_printf(u
, "", format
);
1434 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1441 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1444 if (log_on_console())
1447 /* We log status messages for all units and all operations. */
1449 format
= unit_get_status_message_format(u
, t
);
1451 DISABLE_WARNING_FORMAT_NONLITERAL
;
1452 xsprintf(buf
, format
, unit_description(u
));
1455 mid
= t
== JOB_START
? SD_MESSAGE_UNIT_STARTING
:
1456 t
== JOB_STOP
? SD_MESSAGE_UNIT_STOPPING
:
1457 SD_MESSAGE_UNIT_RELOADING
;
1459 /* Note that we deliberately use LOG_MESSAGE() instead of
1460 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1461 * closely what is written to screen using the status output,
1462 * which is supposed the highest level, friendliest output
1463 * possible, which means we should avoid the low-level unit
1465 log_struct(LOG_INFO
,
1466 LOG_MESSAGE_ID(mid
),
1468 LOG_MESSAGE("%s", buf
),
1472 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1475 assert(t
< _JOB_TYPE_MAX
);
1477 unit_status_log_starting_stopping_reloading(u
, t
);
1478 unit_status_print_starting_stopping(u
, t
);
1481 int unit_start_limit_test(Unit
*u
) {
1484 if (ratelimit_test(&u
->start_limit
)) {
1485 u
->start_limit_hit
= false;
1489 log_unit_warning(u
, "Start request repeated too quickly.");
1490 u
->start_limit_hit
= true;
1492 return failure_action(u
->manager
, u
->start_limit_action
, u
->reboot_arg
);
1496 * -EBADR: This unit type does not support starting.
1497 * -EALREADY: Unit is already started.
1498 * -EAGAIN: An operation is already in progress. Retry later.
1499 * -ECANCELED: Too many requests for now.
1500 * -EPROTO: Assert failed
1501 * -EINVAL: Unit not loaded
1502 * -EOPNOTSUPP: Unit type not supported
1504 int unit_start(Unit
*u
) {
1505 UnitActiveState state
;
1510 /* If this is already started, then this will succeed. Note
1511 * that this will even succeed if this unit is not startable
1512 * by the user. This is relied on to detect when we need to
1513 * wait for units and when waiting is finished. */
1514 state
= unit_active_state(u
);
1515 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1518 /* Units that aren't loaded cannot be started */
1519 if (u
->load_state
!= UNIT_LOADED
)
1522 /* If the conditions failed, don't do anything at all. If we
1523 * already are activating this call might still be useful to
1524 * speed up activation in case there is some hold-off time,
1525 * but we don't want to recheck the condition in that case. */
1526 if (state
!= UNIT_ACTIVATING
&&
1527 !unit_condition_test(u
)) {
1528 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1532 /* If the asserts failed, fail the entire job */
1533 if (state
!= UNIT_ACTIVATING
&&
1534 !unit_assert_test(u
)) {
1535 log_unit_notice(u
, "Starting requested but asserts failed.");
1539 /* Units of types that aren't supported cannot be
1540 * started. Note that we do this test only after the condition
1541 * checks, so that we rather return condition check errors
1542 * (which are usually not considered a true failure) than "not
1543 * supported" errors (which are considered a failure).
1545 if (!unit_supported(u
))
1548 /* Forward to the main object, if we aren't it. */
1549 following
= unit_following(u
);
1551 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1552 return unit_start(following
);
1555 /* If it is stopped, but we cannot start it, then fail */
1556 if (!UNIT_VTABLE(u
)->start
)
1559 /* We don't suppress calls to ->start() here when we are
1560 * already starting, to allow this request to be used as a
1561 * "hurry up" call, for example when the unit is in some "auto
1562 * restart" state where it waits for a holdoff timer to elapse
1563 * before it will start again. */
1565 unit_add_to_dbus_queue(u
);
1567 return UNIT_VTABLE(u
)->start(u
);
1570 bool unit_can_start(Unit
*u
) {
1573 if (u
->load_state
!= UNIT_LOADED
)
1576 if (!unit_supported(u
))
1579 return !!UNIT_VTABLE(u
)->start
;
1582 bool unit_can_isolate(Unit
*u
) {
1585 return unit_can_start(u
) &&
1590 * -EBADR: This unit type does not support stopping.
1591 * -EALREADY: Unit is already stopped.
1592 * -EAGAIN: An operation is already in progress. Retry later.
1594 int unit_stop(Unit
*u
) {
1595 UnitActiveState state
;
1600 state
= unit_active_state(u
);
1601 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1604 following
= unit_following(u
);
1606 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1607 return unit_stop(following
);
1610 if (!UNIT_VTABLE(u
)->stop
)
1613 unit_add_to_dbus_queue(u
);
1615 return UNIT_VTABLE(u
)->stop(u
);
1619 * -EBADR: This unit type does not support reloading.
1620 * -ENOEXEC: Unit is not started.
1621 * -EAGAIN: An operation is already in progress. Retry later.
1623 int unit_reload(Unit
*u
) {
1624 UnitActiveState state
;
1629 if (u
->load_state
!= UNIT_LOADED
)
1632 if (!unit_can_reload(u
))
1635 state
= unit_active_state(u
);
1636 if (state
== UNIT_RELOADING
)
1639 if (state
!= UNIT_ACTIVE
) {
1640 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1644 following
= unit_following(u
);
1646 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1647 return unit_reload(following
);
1650 unit_add_to_dbus_queue(u
);
1652 return UNIT_VTABLE(u
)->reload(u
);
1655 bool unit_can_reload(Unit
*u
) {
1658 if (!UNIT_VTABLE(u
)->reload
)
1661 if (!UNIT_VTABLE(u
)->can_reload
)
1664 return UNIT_VTABLE(u
)->can_reload(u
);
1667 static void unit_check_unneeded(Unit
*u
) {
1669 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1671 static const UnitDependency needed_dependencies
[] = {
1685 /* If this service shall be shut down when unneeded then do
1688 if (!u
->stop_when_unneeded
)
1691 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
1694 for (j
= 0; j
< ELEMENTSOF(needed_dependencies
); j
++)
1695 SET_FOREACH(other
, u
->dependencies
[needed_dependencies
[j
]], i
)
1696 if (unit_active_or_pending(other
))
1699 /* If stopping a unit fails continuously we might enter a stop
1700 * loop here, hence stop acting on the service being
1701 * unnecessary after a while. */
1702 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1703 log_unit_warning(u
, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1707 log_unit_info(u
, "Unit not needed anymore. Stopping.");
1709 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1710 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1712 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1715 static void unit_check_binds_to(Unit
*u
) {
1716 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1727 if (unit_active_state(u
) != UNIT_ACTIVE
)
1730 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
1734 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1744 /* If stopping a unit fails continuously we might enter a stop
1745 * loop here, hence stop acting on the service being
1746 * unnecessary after a while. */
1747 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1748 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
1753 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
1755 /* A unit we need to run is gone. Sniff. Let's stop this. */
1756 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1758 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1761 static void retroactively_start_dependencies(Unit
*u
) {
1766 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
1768 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1769 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1770 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1771 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1773 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1774 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1775 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1776 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1778 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1779 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1780 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1781 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
1783 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
1784 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1785 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1787 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
1788 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1789 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1792 static void retroactively_stop_dependencies(Unit
*u
) {
1797 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1799 /* Pull down units which are bound to us recursively if enabled */
1800 SET_FOREACH(other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
1801 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1802 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1805 static void check_unneeded_dependencies(Unit
*u
) {
1810 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1812 /* Garbage collect services that might not be needed anymore, if enabled */
1813 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1814 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1815 unit_check_unneeded(other
);
1816 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1817 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1818 unit_check_unneeded(other
);
1819 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUISITE
], i
)
1820 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1821 unit_check_unneeded(other
);
1822 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1823 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1824 unit_check_unneeded(other
);
1827 void unit_start_on_failure(Unit
*u
) {
1833 if (set_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
1836 log_unit_info(u
, "Triggering OnFailure= dependencies.");
1838 SET_FOREACH(other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
1841 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, NULL
, NULL
);
1843 log_unit_error_errno(u
, r
, "Failed to enqueue OnFailure= job: %m");
1847 void unit_trigger_notify(Unit
*u
) {
1853 SET_FOREACH(other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
1854 if (UNIT_VTABLE(other
)->trigger_notify
)
1855 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
1858 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
1863 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
1864 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
1866 /* Note that this is called for all low-level state changes,
1867 * even if they might map to the same high-level
1868 * UnitActiveState! That means that ns == os is an expected
1869 * behavior here. For example: if a mount point is remounted
1870 * this function will be called too! */
1874 /* Update timestamps for state changes */
1875 if (!MANAGER_IS_RELOADING(m
)) {
1876 dual_timestamp_get(&u
->state_change_timestamp
);
1878 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
1879 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
1880 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
1881 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
1883 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1884 u
->active_enter_timestamp
= u
->state_change_timestamp
;
1885 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1886 u
->active_exit_timestamp
= u
->state_change_timestamp
;
1889 /* Keep track of failed units */
1890 (void) manager_update_failed_units(u
->manager
, u
, ns
== UNIT_FAILED
);
1892 /* Make sure the cgroup is always removed when we become inactive */
1893 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1894 unit_prune_cgroup(u
);
1896 /* Note that this doesn't apply to RemainAfterExit services exiting
1897 * successfully, since there's no change of state in that case. Which is
1898 * why it is handled in service_set_state() */
1899 if (UNIT_IS_INACTIVE_OR_FAILED(os
) != UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1902 ec
= unit_get_exec_context(u
);
1903 if (ec
&& exec_context_may_touch_console(ec
)) {
1904 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1907 if (m
->n_on_console
== 0)
1908 /* unset no_console_output flag, since the console is free */
1909 m
->no_console_output
= false;
1918 if (u
->job
->state
== JOB_WAITING
)
1920 /* So we reached a different state for this
1921 * job. Let's see if we can run it now if it
1922 * failed previously due to EAGAIN. */
1923 job_add_to_run_queue(u
->job
);
1925 /* Let's check whether this state change constitutes a
1926 * finished job, or maybe contradicts a running job and
1927 * hence needs to invalidate jobs. */
1929 switch (u
->job
->type
) {
1932 case JOB_VERIFY_ACTIVE
:
1934 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1935 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
1936 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
1939 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1940 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
1946 case JOB_RELOAD_OR_START
:
1947 case JOB_TRY_RELOAD
:
1949 if (u
->job
->state
== JOB_RUNNING
) {
1950 if (ns
== UNIT_ACTIVE
)
1951 job_finish_and_invalidate(u
->job
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
1952 else if (ns
!= UNIT_ACTIVATING
&& ns
!= UNIT_RELOADING
) {
1955 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1956 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
1964 case JOB_TRY_RESTART
:
1966 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1967 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
1968 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
1970 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
1976 assert_not_reached("Job type unknown");
1982 if (!MANAGER_IS_RELOADING(m
)) {
1984 /* If this state change happened without being
1985 * requested by a job, then let's retroactively start
1986 * or stop dependencies. We skip that step when
1987 * deserializing, since we don't want to create any
1988 * additional jobs just because something is already
1992 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
1993 retroactively_start_dependencies(u
);
1994 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
1995 retroactively_stop_dependencies(u
);
1998 /* stop unneeded units regardless if going down was expected or not */
1999 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2000 check_unneeded_dependencies(u
);
2002 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2003 log_unit_notice(u
, "Unit entered failed state.");
2004 unit_start_on_failure(u
);
2008 /* Some names are special */
2009 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2011 if (unit_has_name(u
, SPECIAL_DBUS_SERVICE
))
2012 /* The bus might have just become available,
2013 * hence try to connect to it, if we aren't
2017 if (u
->type
== UNIT_SERVICE
&&
2018 !UNIT_IS_ACTIVE_OR_RELOADING(os
) &&
2019 !MANAGER_IS_RELOADING(m
)) {
2020 /* Write audit record if we have just finished starting up */
2021 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, true);
2025 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
))
2026 manager_send_unit_plymouth(m
, u
);
2030 /* We don't care about D-Bus here, since we'll get an
2031 * asynchronous notification for it anyway. */
2033 if (u
->type
== UNIT_SERVICE
&&
2034 UNIT_IS_INACTIVE_OR_FAILED(ns
) &&
2035 !UNIT_IS_INACTIVE_OR_FAILED(os
) &&
2036 !MANAGER_IS_RELOADING(m
)) {
2038 /* Hmm, if there was no start record written
2039 * write it now, so that we always have a nice
2042 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, ns
== UNIT_INACTIVE
);
2044 if (ns
== UNIT_INACTIVE
)
2045 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, true);
2047 /* Write audit record if we have just finished shutting down */
2048 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, ns
== UNIT_INACTIVE
);
2050 u
->in_audit
= false;
2054 manager_recheck_journal(m
);
2055 unit_trigger_notify(u
);
2057 if (!MANAGER_IS_RELOADING(u
->manager
)) {
2058 /* Maybe we finished startup and are now ready for
2059 * being stopped because unneeded? */
2060 unit_check_unneeded(u
);
2062 /* Maybe we finished startup, but something we needed
2063 * has vanished? Let's die then. (This happens when
2064 * something BindsTo= to a Type=oneshot unit, as these
2065 * units go directly from starting to inactive,
2066 * without ever entering started.) */
2067 unit_check_binds_to(u
);
2070 unit_add_to_dbus_queue(u
);
2071 unit_add_to_gc_queue(u
);
2074 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2080 /* Watch a specific PID. We only support one or two units
2081 * watching each PID for now, not more. */
2083 r
= set_ensure_allocated(&u
->pids
, NULL
);
2087 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids1
, NULL
);
2091 r
= hashmap_put(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2093 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids2
, NULL
);
2097 r
= hashmap_put(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2100 q
= set_put(u
->pids
, PID_TO_PTR(pid
));
2107 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2111 (void) hashmap_remove_value(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2112 (void) hashmap_remove_value(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2113 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2116 void unit_unwatch_all_pids(Unit
*u
) {
2119 while (!set_isempty(u
->pids
))
2120 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2122 u
->pids
= set_free(u
->pids
);
2125 void unit_tidy_watch_pids(Unit
*u
, pid_t except1
, pid_t except2
) {
2131 /* Cleans dead PIDs from our list */
2133 SET_FOREACH(e
, u
->pids
, i
) {
2134 pid_t pid
= PTR_TO_PID(e
);
2136 if (pid
== except1
|| pid
== except2
)
2139 if (!pid_is_unwaited(pid
))
2140 unit_unwatch_pid(u
, pid
);
2144 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2146 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2150 case JOB_VERIFY_ACTIVE
:
2157 case JOB_TRY_RESTART
:
2158 return unit_can_start(u
);
2161 case JOB_TRY_RELOAD
:
2162 return unit_can_reload(u
);
2164 case JOB_RELOAD_OR_START
:
2165 return unit_can_reload(u
) && unit_can_start(u
);
2168 assert_not_reached("Invalid job type");
2172 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2175 /* Only warn about some unit types */
2176 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2179 if (streq_ptr(u
->id
, other
))
2180 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2182 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2185 int unit_add_dependency(Unit
*u
, UnitDependency d
, Unit
*other
, bool add_reference
) {
2187 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2188 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2189 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2190 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2191 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2192 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2193 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2194 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2195 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2196 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2197 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2198 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2199 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2200 [UNIT_BEFORE
] = UNIT_AFTER
,
2201 [UNIT_AFTER
] = UNIT_BEFORE
,
2202 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2203 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2204 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2205 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2206 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2207 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2208 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2209 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2211 int r
, q
= 0, v
= 0, w
= 0;
2212 Unit
*orig_u
= u
, *orig_other
= other
;
2215 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2218 u
= unit_follow_merge(u
);
2219 other
= unit_follow_merge(other
);
2221 /* We won't allow dependencies on ourselves. We will not
2222 * consider them an error however. */
2224 maybe_warn_about_dependency(orig_u
, orig_other
->id
, d
);
2228 r
= set_ensure_allocated(&u
->dependencies
[d
], NULL
);
2232 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
) {
2233 r
= set_ensure_allocated(&other
->dependencies
[inverse_table
[d
]], NULL
);
2238 if (add_reference
) {
2239 r
= set_ensure_allocated(&u
->dependencies
[UNIT_REFERENCES
], NULL
);
2243 r
= set_ensure_allocated(&other
->dependencies
[UNIT_REFERENCED_BY
], NULL
);
2248 q
= set_put(u
->dependencies
[d
], other
);
2252 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2253 v
= set_put(other
->dependencies
[inverse_table
[d
]], u
);
2260 if (add_reference
) {
2261 w
= set_put(u
->dependencies
[UNIT_REFERENCES
], other
);
2267 r
= set_put(other
->dependencies
[UNIT_REFERENCED_BY
], u
);
2272 unit_add_to_dbus_queue(u
);
2277 set_remove(u
->dependencies
[d
], other
);
2280 set_remove(other
->dependencies
[inverse_table
[d
]], u
);
2283 set_remove(u
->dependencies
[UNIT_REFERENCES
], other
);
2288 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
) {
2293 r
= unit_add_dependency(u
, d
, other
, add_reference
);
2297 return unit_add_dependency(u
, e
, other
, add_reference
);
2300 static int resolve_template(Unit
*u
, const char *name
, const char*path
, char **buf
, const char **ret
) {
2304 assert(name
|| path
);
2309 name
= basename(path
);
2311 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2318 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2320 _cleanup_free_
char *i
= NULL
;
2322 r
= unit_name_to_prefix(u
->id
, &i
);
2326 r
= unit_name_replace_instance(name
, i
, buf
);
2335 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, const char *path
, bool add_reference
) {
2336 _cleanup_free_
char *buf
= NULL
;
2341 assert(name
|| path
);
2343 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2347 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2351 return unit_add_dependency(u
, d
, other
, add_reference
);
2354 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, const char *path
, bool add_reference
) {
2355 _cleanup_free_
char *buf
= NULL
;
2360 assert(name
|| path
);
2362 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2366 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2370 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
);
/* Override the unit search path via the environment (debug helper). Returns
 * 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
2381 char *unit_dbus_path(Unit
*u
) {
2387 return unit_dbus_path_from_name(u
->id
);
2390 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2394 /* Sets the unit slice if it has not been set before. Is extra
2395 * careful, to only allow this for units that actually have a
2396 * cgroup context. Also, we don't allow to set this for slices
2397 * (since the parent slice is derived from the name). Make
2398 * sure the unit we set is actually a slice. */
2400 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2403 if (u
->type
== UNIT_SLICE
)
2406 if (unit_active_state(u
) != UNIT_INACTIVE
)
2409 if (slice
->type
!= UNIT_SLICE
)
2412 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
2413 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
2416 if (UNIT_DEREF(u
->slice
) == slice
)
2419 /* Disallow slice changes if @u is already bound to cgroups */
2420 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
2423 unit_ref_unset(&u
->slice
);
2424 unit_ref_set(&u
->slice
, slice
);
2428 int unit_set_default_slice(Unit
*u
) {
2429 _cleanup_free_
char *b
= NULL
;
2430 const char *slice_name
;
2436 if (UNIT_ISSET(u
->slice
))
2440 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
2442 /* Implicitly place all instantiated units in their
2443 * own per-template slice */
2445 r
= unit_name_to_prefix(u
->id
, &prefix
);
2449 /* The prefix is already escaped, but it might include
2450 * "-" which has a special meaning for slice units,
2451 * hence escape it here extra. */
2452 escaped
= unit_name_escape(prefix
);
2456 if (MANAGER_IS_SYSTEM(u
->manager
))
2457 b
= strjoin("system-", escaped
, ".slice", NULL
);
2459 b
= strappend(escaped
, ".slice");
2466 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
2467 ? SPECIAL_SYSTEM_SLICE
2468 : SPECIAL_ROOT_SLICE
;
2470 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
2474 return unit_set_slice(u
, slice
);
2477 const char *unit_slice_name(Unit
*u
) {
2480 if (!UNIT_ISSET(u
->slice
))
2483 return UNIT_DEREF(u
->slice
)->id
;
2486 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
2487 _cleanup_free_
char *t
= NULL
;
2494 r
= unit_name_change_suffix(u
->id
, type
, &t
);
2497 if (unit_has_name(u
, t
))
2500 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
2501 assert(r
< 0 || *_found
!= u
);
2505 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
2506 const char *name
, *old_owner
, *new_owner
;
2513 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
2515 bus_log_parse_error(r
);
2519 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
2520 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
2525 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
2532 if (u
->match_bus_slot
)
2535 match
= strjoina("type='signal',"
2536 "sender='org.freedesktop.DBus',"
2537 "path='/org/freedesktop/DBus',"
2538 "interface='org.freedesktop.DBus',"
2539 "member='NameOwnerChanged',"
2540 "arg0='", name
, "'");
2542 return sd_bus_add_match(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, u
);
2545 int unit_watch_bus_name(Unit
*u
, const char *name
) {
2551 /* Watch a specific name on the bus. We only support one unit
2552 * watching each name for now. */
2554 if (u
->manager
->api_bus
) {
2555 /* If the bus is already available, install the match directly.
2556 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
2557 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
2559 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
2562 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
2564 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2565 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
2571 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
2575 hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
2576 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2579 bool unit_can_serialize(Unit
*u
) {
2582 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
2585 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
2592 if (unit_can_serialize(u
)) {
2595 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
2599 rt
= unit_get_exec_runtime(u
);
2601 r
= exec_runtime_serialize(u
, rt
, f
, fds
);
2607 dual_timestamp_serialize(f
, "state-change-timestamp", &u
->state_change_timestamp
);
2609 dual_timestamp_serialize(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
2610 dual_timestamp_serialize(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
2611 dual_timestamp_serialize(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
2612 dual_timestamp_serialize(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
2614 dual_timestamp_serialize(f
, "condition-timestamp", &u
->condition_timestamp
);
2615 dual_timestamp_serialize(f
, "assert-timestamp", &u
->assert_timestamp
);
2617 if (dual_timestamp_is_set(&u
->condition_timestamp
))
2618 unit_serialize_item(u
, f
, "condition-result", yes_no(u
->condition_result
));
2620 if (dual_timestamp_is_set(&u
->assert_timestamp
))
2621 unit_serialize_item(u
, f
, "assert-result", yes_no(u
->assert_result
));
2623 unit_serialize_item(u
, f
, "transient", yes_no(u
->transient
));
2625 unit_serialize_item_format(u
, f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
2626 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
2627 unit_serialize_item_format(u
, f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
2630 unit_serialize_item(u
, f
, "cgroup", u
->cgroup_path
);
2631 unit_serialize_item(u
, f
, "cgroup-realized", yes_no(u
->cgroup_realized
));
2633 if (uid_is_valid(u
->ref_uid
))
2634 unit_serialize_item_format(u
, f
, "ref-uid", UID_FMT
, u
->ref_uid
);
2635 if (gid_is_valid(u
->ref_gid
))
2636 unit_serialize_item_format(u
, f
, "ref-gid", GID_FMT
, u
->ref_gid
);
2638 bus_track_serialize(u
->bus_track
, f
, "ref");
2640 if (serialize_jobs
) {
2642 fprintf(f
, "job\n");
2643 job_serialize(u
->job
, f
);
2647 fprintf(f
, "job\n");
2648 job_serialize(u
->nop_job
, f
);
2657 int unit_serialize_item(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
2673 int unit_serialize_item_escaped(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
2674 _cleanup_free_
char *c
= NULL
;
2695 int unit_serialize_item_fd(Unit
*u
, FILE *f
, FDSet
*fds
, const char *key
, int fd
) {
2705 copy
= fdset_put_dup(fds
, fd
);
2709 fprintf(f
, "%s=%i\n", key
, copy
);
2713 void unit_serialize_item_format(Unit
*u
, FILE *f
, const char *key
, const char *format
, ...) {
2724 va_start(ap
, format
);
2725 vfprintf(f
, format
, ap
);
2731 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
2732 ExecRuntime
**rt
= NULL
;
2740 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
2742 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
2745 char line
[LINE_MAX
], *l
, *v
;
2748 if (!fgets(line
, sizeof(line
), f
)) {
2761 k
= strcspn(l
, "=");
2769 if (streq(l
, "job")) {
2771 /* new-style serialized job */
2778 r
= job_deserialize(j
, f
);
2784 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
2790 r
= job_install_deserialized(j
);
2792 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
2796 } else /* legacy for pre-44 */
2797 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
2799 } else if (streq(l
, "state-change-timestamp")) {
2800 dual_timestamp_deserialize(v
, &u
->state_change_timestamp
);
2802 } else if (streq(l
, "inactive-exit-timestamp")) {
2803 dual_timestamp_deserialize(v
, &u
->inactive_exit_timestamp
);
2805 } else if (streq(l
, "active-enter-timestamp")) {
2806 dual_timestamp_deserialize(v
, &u
->active_enter_timestamp
);
2808 } else if (streq(l
, "active-exit-timestamp")) {
2809 dual_timestamp_deserialize(v
, &u
->active_exit_timestamp
);
2811 } else if (streq(l
, "inactive-enter-timestamp")) {
2812 dual_timestamp_deserialize(v
, &u
->inactive_enter_timestamp
);
2814 } else if (streq(l
, "condition-timestamp")) {
2815 dual_timestamp_deserialize(v
, &u
->condition_timestamp
);
2817 } else if (streq(l
, "assert-timestamp")) {
2818 dual_timestamp_deserialize(v
, &u
->assert_timestamp
);
2820 } else if (streq(l
, "condition-result")) {
2822 r
= parse_boolean(v
);
2824 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
2826 u
->condition_result
= r
;
2830 } else if (streq(l
, "assert-result")) {
2832 r
= parse_boolean(v
);
2834 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
2836 u
->assert_result
= r
;
2840 } else if (streq(l
, "transient")) {
2842 r
= parse_boolean(v
);
2844 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
2850 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
2852 r
= safe_atou64(v
, &u
->cpu_usage_base
);
2854 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
2858 } else if (streq(l
, "cpu-usage-last")) {
2860 r
= safe_atou64(v
, &u
->cpu_usage_last
);
2862 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
2866 } else if (streq(l
, "cgroup")) {
2868 r
= unit_set_cgroup_path(u
, v
);
2870 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
2872 (void) unit_watch_cgroup(u
);
2875 } else if (streq(l
, "cgroup-realized")) {
2878 b
= parse_boolean(v
);
2880 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
2882 u
->cgroup_realized
= b
;
2886 } else if (streq(l
, "ref-uid")) {
2889 r
= parse_uid(v
, &uid
);
2891 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
2893 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
2897 } else if (streq(l
, "ref-gid")) {
2900 r
= parse_gid(v
, &gid
);
2902 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
2904 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
2906 } else if (streq(l
, "ref")) {
2908 r
= strv_extend(&u
->deserialized_refs
, v
);
2915 if (unit_can_serialize(u
)) {
2917 r
= exec_runtime_deserialize_item(u
, rt
, l
, v
, fds
);
2919 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
2923 /* Returns positive if key was handled by the call */
2928 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
2930 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
2934 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
2935 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
2936 * before 228 where the base for timeouts was not persistent across reboots. */
2938 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
2939 dual_timestamp_get(&u
->state_change_timestamp
);
2944 int unit_add_node_link(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
) {
2946 _cleanup_free_
char *e
= NULL
;
2951 /* Adds in links to the device node that this unit is based on */
2955 if (!is_device_path(what
))
2958 /* When device units aren't supported (such as in a
2959 * container), don't create dependencies on them. */
2960 if (!unit_type_supported(UNIT_DEVICE
))
2963 r
= unit_name_from_path(what
, ".device", &e
);
2967 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
2971 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
2972 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
2978 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false);
2986 int unit_coldplug(Unit
*u
) {
2992 /* Make sure we don't enter a loop, when coldplugging
2997 u
->coldplugged
= true;
2999 STRV_FOREACH(i
, u
->deserialized_refs
) {
3000 q
= bus_unit_track_add_name(u
, *i
);
3001 if (q
< 0 && r
>= 0)
3004 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3006 if (UNIT_VTABLE(u
)->coldplug
) {
3007 q
= UNIT_VTABLE(u
)->coldplug(u
);
3008 if (q
< 0 && r
>= 0)
3013 q
= job_coldplug(u
->job
);
3014 if (q
< 0 && r
>= 0)
3021 static bool fragment_mtime_newer(const char *path
, usec_t mtime
) {
3027 if (stat(path
, &st
) < 0)
3028 /* What, cannot access this anymore? */
3032 /* For non-empty files check the mtime */
3033 return timespec_load(&st
.st_mtim
) > mtime
;
3034 else if (!null_or_empty(&st
))
3035 /* For masked files check if they are still so */
3041 bool unit_need_daemon_reload(Unit
*u
) {
3042 _cleanup_strv_free_
char **t
= NULL
;
3047 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
))
3050 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
))
3053 (void) unit_find_dropin_paths(u
, &t
);
3054 if (!strv_equal(u
->dropin_paths
, t
))
3057 STRV_FOREACH(path
, u
->dropin_paths
)
3058 if (fragment_mtime_newer(*path
, u
->dropin_mtime
))
3064 void unit_reset_failed(Unit
*u
) {
3067 if (UNIT_VTABLE(u
)->reset_failed
)
3068 UNIT_VTABLE(u
)->reset_failed(u
);
3070 RATELIMIT_RESET(u
->start_limit
);
3071 u
->start_limit_hit
= false;
3074 Unit
*unit_following(Unit
*u
) {
3077 if (UNIT_VTABLE(u
)->following
)
3078 return UNIT_VTABLE(u
)->following(u
);
3083 bool unit_stop_pending(Unit
*u
) {
3086 /* This call does check the current state of the unit. It's
3087 * hence useful to be called from state change calls of the
3088 * unit itself, where the state isn't updated yet. This is
3089 * different from unit_inactive_or_pending() which checks both
3090 * the current state and for a queued job. */
3092 return u
->job
&& u
->job
->type
== JOB_STOP
;
3095 bool unit_inactive_or_pending(Unit
*u
) {
3098 /* Returns true if the unit is inactive or going down */
3100 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3103 if (unit_stop_pending(u
))
3109 bool unit_active_or_pending(Unit
*u
) {
3112 /* Returns true if the unit is active or going up */
3114 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3118 (u
->job
->type
== JOB_START
||
3119 u
->job
->type
== JOB_RELOAD_OR_START
||
3120 u
->job
->type
== JOB_RESTART
))
3126 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
3128 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3129 assert(SIGNAL_VALID(signo
));
3131 if (!UNIT_VTABLE(u
)->kill
)
3134 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
3137 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3141 pid_set
= set_new(NULL
);
3145 /* Exclude the main/control pids from being killed via the cgroup */
3147 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3152 if (control_pid
> 0) {
3153 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3165 int unit_kill_common(
3171 sd_bus_error
*error
) {
3174 bool killed
= false;
3176 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3178 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3179 else if (main_pid
== 0)
3180 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3183 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3184 if (control_pid
< 0)
3185 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3186 else if (control_pid
== 0)
3187 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3190 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3191 if (control_pid
> 0) {
3192 if (kill(control_pid
, signo
) < 0)
3198 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3200 if (kill(main_pid
, signo
) < 0)
3206 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3207 _cleanup_set_free_ Set
*pid_set
= NULL
;
3210 /* Exclude the main/control pids from being killed via the cgroup */
3211 pid_set
= unit_pid_set(main_pid
, control_pid
);
3215 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3216 if (q
< 0 && q
!= -EAGAIN
&& q
!= -ESRCH
&& q
!= -ENOENT
)
3222 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
3228 int unit_following_set(Unit
*u
, Set
**s
) {
3232 if (UNIT_VTABLE(u
)->following_set
)
3233 return UNIT_VTABLE(u
)->following_set(u
, s
);
3239 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3244 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3245 r
= unit_file_get_state(
3246 u
->manager
->unit_file_scope
,
3248 basename(u
->fragment_path
),
3249 &u
->unit_file_state
);
3251 u
->unit_file_state
= UNIT_FILE_BAD
;
3254 return u
->unit_file_state
;
3257 int unit_get_unit_file_preset(Unit
*u
) {
3260 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3261 u
->unit_file_preset
= unit_file_query_preset(
3262 u
->manager
->unit_file_scope
,
3264 basename(u
->fragment_path
));
3266 return u
->unit_file_preset
;
3269 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*u
) {
3274 unit_ref_unset(ref
);
3277 LIST_PREPEND(refs
, u
->refs
, ref
);
3281 void unit_ref_unset(UnitRef
*ref
) {
3287 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3288 * be unreferenced now. */
3289 unit_add_to_gc_queue(ref
->unit
);
3291 LIST_REMOVE(refs
, ref
->unit
->refs
, ref
);
3295 static int user_from_unit_name(Unit
*u
, char **ret
) {
3297 static const uint8_t hash_key
[] = {
3298 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3299 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3302 _cleanup_free_
char *n
= NULL
;
3305 r
= unit_name_to_prefix(u
->id
, &n
);
3309 if (valid_user_group_name(n
)) {
3315 /* If we can't use the unit name as a user name, then let's hash it and use that */
3316 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
3322 int unit_patch_contexts(Unit
*u
) {
3330 /* Patch in the manager defaults into the exec and cgroup
3331 * contexts, _after_ the rest of the settings have been
3334 ec
= unit_get_exec_context(u
);
3336 /* This only copies in the ones that need memory */
3337 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
3338 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
3339 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
3344 if (MANAGER_IS_USER(u
->manager
) &&
3345 !ec
->working_directory
) {
3347 r
= get_home_dir(&ec
->working_directory
);
3351 /* Allow user services to run, even if the
3352 * home directory is missing */
3353 ec
->working_directory_missing_ok
= true;
3356 if (MANAGER_IS_USER(u
->manager
) &&
3357 (ec
->syscall_whitelist
||
3358 !set_isempty(ec
->syscall_filter
) ||
3359 !set_isempty(ec
->syscall_archs
) ||
3360 ec
->address_families_whitelist
||
3361 !set_isempty(ec
->address_families
)))
3362 ec
->no_new_privileges
= true;
3364 if (ec
->private_devices
)
3365 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_MKNOD
);
3367 if (ec
->dynamic_user
) {
3369 r
= user_from_unit_name(u
, &ec
->user
);
3375 ec
->group
= strdup(ec
->user
);
3380 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3381 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3383 ec
->private_tmp
= true;
3384 ec
->remove_ipc
= true;
3385 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
3386 if (ec
->protect_home
== PROTECT_HOME_NO
)
3387 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
3391 cc
= unit_get_cgroup_context(u
);
3395 ec
->private_devices
&&
3396 cc
->device_policy
== CGROUP_AUTO
)
3397 cc
->device_policy
= CGROUP_CLOSED
;
3403 ExecContext
*unit_get_exec_context(Unit
*u
) {
3410 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
3414 return (ExecContext
*) ((uint8_t*) u
+ offset
);
3417 KillContext
*unit_get_kill_context(Unit
*u
) {
3424 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
3428 return (KillContext
*) ((uint8_t*) u
+ offset
);
3431 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
3437 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
3441 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
3444 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
3450 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3454 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
3457 static const char* unit_drop_in_dir(Unit
*u
, UnitSetPropertiesMode mode
) {
3460 if (!IN_SET(mode
, UNIT_RUNTIME
, UNIT_PERSISTENT
))
3463 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
3464 return u
->manager
->lookup_paths
.transient
;
3466 if (mode
== UNIT_RUNTIME
)
3467 return u
->manager
->lookup_paths
.runtime_control
;
3469 if (mode
== UNIT_PERSISTENT
)
3470 return u
->manager
->lookup_paths
.persistent_control
;
3475 int unit_write_drop_in(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3476 _cleanup_free_
char *p
= NULL
, *q
= NULL
;
3477 const char *dir
, *wrapped
;
3482 if (u
->transient_file
) {
3483 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3484 * write to the transient unit file. */
3485 fputs(data
, u
->transient_file
);
3486 fputc('\n', u
->transient_file
);
3490 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3493 dir
= unit_drop_in_dir(u
, mode
);
3497 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3498 "# or an equivalent operation. Do not edit.\n",
3502 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
3506 (void) mkdir_p(p
, 0755);
3507 r
= write_string_file_atomic_label(q
, wrapped
);
3511 r
= strv_push(&u
->dropin_paths
, q
);
3516 strv_uniq(u
->dropin_paths
);
3518 u
->dropin_mtime
= now(CLOCK_REALTIME
);
3523 int unit_write_drop_in_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3524 _cleanup_free_
char *p
= NULL
;
3532 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3535 va_start(ap
, format
);
3536 r
= vasprintf(&p
, format
, ap
);
3542 return unit_write_drop_in(u
, mode
, name
, p
);
3545 int unit_write_drop_in_private(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3552 if (!UNIT_VTABLE(u
)->private_section
)
3555 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3558 ndata
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
3560 return unit_write_drop_in(u
, mode
, name
, ndata
);
3563 int unit_write_drop_in_private_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3564 _cleanup_free_
char *p
= NULL
;
3572 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3575 va_start(ap
, format
);
3576 r
= vasprintf(&p
, format
, ap
);
3582 return unit_write_drop_in_private(u
, mode
, name
, p
);
3585 int unit_make_transient(Unit
*u
) {
3591 if (!UNIT_VTABLE(u
)->can_transient
)
3594 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
, NULL
);
3598 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3599 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3601 RUN_WITH_UMASK(0022) {
3602 f
= fopen(path
, "we");
3609 if (u
->transient_file
)
3610 fclose(u
->transient_file
);
3611 u
->transient_file
= f
;
3613 free(u
->fragment_path
);
3614 u
->fragment_path
= path
;
3616 u
->source_path
= mfree(u
->source_path
);
3617 u
->dropin_paths
= strv_free(u
->dropin_paths
);
3618 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
3620 u
->load_state
= UNIT_STUB
;
3622 u
->transient
= true;
3624 unit_add_to_dbus_queue(u
);
3625 unit_add_to_gc_queue(u
);
3627 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
3633 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
3634 _cleanup_free_
char *comm
= NULL
;
3636 (void) get_process_comm(pid
, &comm
);
3638 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3639 only, like for example systemd's own PAM stub process. */
3640 if (comm
&& comm
[0] == '(')
3643 log_unit_notice(userdata
,
3644 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
3647 signal_to_string(sig
));
3650 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
3655 case KILL_TERMINATE
:
3656 case KILL_TERMINATE_AND_LOG
:
3657 return c
->kill_signal
;
3666 assert_not_reached("KillOperation unknown");
3670 int unit_kill_context(
3676 bool main_pid_alien
) {
3678 bool wait_for_exit
= false, send_sighup
;
3679 cg_kill_log_func_t log_func
;
3685 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0 if we
3686 * killed something worth waiting for, 0 otherwise. */
3688 if (c
->kill_mode
== KILL_NONE
)
3691 sig
= operation_to_signal(c
, k
);
3695 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
3699 k
!= KILL_TERMINATE
||
3700 IN_SET(sig
, SIGKILL
, SIGABRT
) ? log_kill
: NULL
;
3704 log_func(main_pid
, sig
, u
);
3706 r
= kill_and_sigcont(main_pid
, sig
);
3707 if (r
< 0 && r
!= -ESRCH
) {
3708 _cleanup_free_
char *comm
= NULL
;
3709 (void) get_process_comm(main_pid
, &comm
);
3711 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
3713 if (!main_pid_alien
)
3714 wait_for_exit
= true;
3716 if (r
!= -ESRCH
&& send_sighup
)
3717 (void) kill(main_pid
, SIGHUP
);
3721 if (control_pid
> 0) {
3723 log_func(control_pid
, sig
, u
);
3725 r
= kill_and_sigcont(control_pid
, sig
);
3726 if (r
< 0 && r
!= -ESRCH
) {
3727 _cleanup_free_
char *comm
= NULL
;
3728 (void) get_process_comm(control_pid
, &comm
);
3730 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
3732 wait_for_exit
= true;
3734 if (r
!= -ESRCH
&& send_sighup
)
3735 (void) kill(control_pid
, SIGHUP
);
3739 if (u
->cgroup_path
&&
3740 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
3741 _cleanup_set_free_ Set
*pid_set
= NULL
;
3743 /* Exclude the main/control pids from being killed via the cgroup */
3744 pid_set
= unit_pid_set(main_pid
, control_pid
);
3748 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3750 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
3754 if (r
!= -EAGAIN
&& r
!= -ESRCH
&& r
!= -ENOENT
)
3755 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
3759 /* FIXME: For now, on the legacy hierarchy, we
3760 * will not wait for the cgroup members to die
3761 * if we are running in a container or if this
3762 * is a delegation unit, simply because cgroup
3763 * notification is unreliable in these
3764 * cases. It doesn't work at all in
3765 * containers, and outside of containers it
3766 * can be confused easily by left-over
3767 * directories in the cgroup — which however
3768 * should not exist in non-delegated units. On
3769 * the unified hierarchy that's different,
3770 * there we get proper events. Hence rely on
3773 if (cg_unified(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
3774 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
3775 wait_for_exit
= true;
3780 pid_set
= unit_pid_set(main_pid
, control_pid
);
3784 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3793 return wait_for_exit
;
3796 int unit_require_mounts_for(Unit
*u
, const char *path
) {
3797 char prefix
[strlen(path
) + 1], *p
;
3803 /* Registers a unit for requiring a certain path and all its
3804 * prefixes. We keep a simple array of these paths in the
3805 * unit, since its usually short. However, we build a prefix
3806 * table for all possible prefixes so that new appearing mount
3807 * units can easily determine which units to make themselves a
3810 if (!path_is_absolute(path
))
3817 path_kill_slashes(p
);
3819 if (!path_is_safe(p
)) {
3824 if (strv_contains(u
->requires_mounts_for
, p
)) {
3829 r
= strv_consume(&u
->requires_mounts_for
, p
);
3833 PATH_FOREACH_PREFIX_MORE(prefix
, p
) {
3836 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
3840 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &string_hash_ops
);
3854 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
3870 int unit_setup_exec_runtime(Unit
*u
) {
3876 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3879 /* Check if there already is an ExecRuntime for this unit? */
3880 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
3884 /* Try to get it from somebody else */
3885 SET_FOREACH(other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
3887 *rt
= unit_get_exec_runtime(other
);
3889 exec_runtime_ref(*rt
);
3894 return exec_runtime_make(rt
, unit_get_exec_context(u
), u
->id
);
3897 int unit_setup_dynamic_creds(Unit
*u
) {
3899 DynamicCreds
*dcreds
;
3904 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
3906 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
3908 ec
= unit_get_exec_context(u
);
3911 if (!ec
->dynamic_user
)
3914 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
3917 bool unit_type_supported(UnitType t
) {
3918 if (_unlikely_(t
< 0))
3920 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
3923 if (!unit_vtable
[t
]->supported
)
3926 return unit_vtable
[t
]->supported();
3929 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
3935 r
= dir_is_empty(where
);
3939 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
3943 log_struct(LOG_NOTICE
,
3944 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING
),
3946 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
3951 int unit_fail_if_symlink(Unit
*u
, const char* where
) {
3957 r
= is_symlink(where
);
3959 log_unit_debug_errno(u
, r
, "Failed to check symlink %s, ignoring: %m", where
);
3966 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING
),
3968 LOG_UNIT_MESSAGE(u
, "Mount on symlink %s not allowed.", where
),
3975 bool unit_is_pristine(Unit
*u
) {
3978 /* Check if the unit already exists or is already around,
3979 * in a number of different ways. Note that to cater for unit
3980 * types such as slice, we are generally fine with units that
3981 * are marked UNIT_LOADED even though nothing was
3982 * actually loaded, as those unit types don't require a file
3983 * on disk to validly load. */
3985 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
3988 !strv_isempty(u
->dropin_paths
) ||
3993 pid_t
unit_control_pid(Unit
*u
) {
3996 if (UNIT_VTABLE(u
)->control_pid
)
3997 return UNIT_VTABLE(u
)->control_pid(u
);
4002 pid_t
unit_main_pid(Unit
*u
) {
4005 if (UNIT_VTABLE(u
)->main_pid
)
4006 return UNIT_VTABLE(u
)->main_pid(u
);
4011 static void unit_unref_uid_internal(
4015 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4019 assert(_manager_unref_uid
);
4021 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4022 * gid_t are actually the same time, with the same validity rules.
4024 * Drops a reference to UID/GID from a unit. */
4026 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4027 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4029 if (!uid_is_valid(*ref_uid
))
4032 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4033 *ref_uid
= UID_INVALID
;
4036 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4037 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4040 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4041 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4044 static int unit_ref_uid_internal(
4049 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4055 assert(uid_is_valid(uid
));
4056 assert(_manager_ref_uid
);
4058 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4059 * are actually the same type, and have the same validity rules.
4061 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4062 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4065 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4066 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4068 if (*ref_uid
== uid
)
4071 if (uid_is_valid(*ref_uid
)) /* Already set? */
4074 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
4082 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4083 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
4086 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4087 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
4090 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4095 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4097 if (uid_is_valid(uid
)) {
4098 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4103 if (gid_is_valid(gid
)) {
4104 q
= unit_ref_gid(u
, gid
, clean_ipc
);
4107 unit_unref_uid(u
, false);
4113 return r
> 0 || q
> 0;
4116 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
4122 c
= unit_get_exec_context(u
);
4124 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4126 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4131 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4134 unit_unref_uid(u
, destroy_now
);
4135 unit_unref_gid(u
, destroy_now
);
4138 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4143 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4144 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4145 * objects when no service references the UID/GID anymore. */
4147 r
= unit_ref_uid_gid(u
, uid
, gid
);
4149 bus_unit_send_change_signal(u
);