2 This file is part of systemd.
4 Copyright 2010 Lennart Poettering
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
27 #include "sd-messages.h"
29 #include "alloc-util.h"
30 #include "bus-common-errors.h"
32 #include "cgroup-util.h"
33 #include "dbus-unit.h"
38 #include "fileio-label.h"
39 #include "format-util.h"
40 #include "id128-util.h"
41 #include "load-dropin.h"
42 #include "load-fragment.h"
47 #include "parse-util.h"
48 #include "path-util.h"
49 #include "process-util.h"
51 #include "signal-util.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-util.h"
57 #include "umask-util.h"
58 #include "unit-name.h"
60 #include "user-util.h"
63 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
64 [UNIT_SERVICE
] = &service_vtable
,
65 [UNIT_SOCKET
] = &socket_vtable
,
66 [UNIT_BUSNAME
] = &busname_vtable
,
67 [UNIT_TARGET
] = &target_vtable
,
68 [UNIT_DEVICE
] = &device_vtable
,
69 [UNIT_MOUNT
] = &mount_vtable
,
70 [UNIT_AUTOMOUNT
] = &automount_vtable
,
71 [UNIT_SWAP
] = &swap_vtable
,
72 [UNIT_TIMER
] = &timer_vtable
,
73 [UNIT_PATH
] = &path_vtable
,
74 [UNIT_SLICE
] = &slice_vtable
,
75 [UNIT_SCOPE
] = &scope_vtable
78 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
);
80 Unit
*unit_new(Manager
*m
, size_t size
) {
84 assert(size
>= sizeof(Unit
));
90 u
->names
= set_new(&string_hash_ops
);
95 u
->type
= _UNIT_TYPE_INVALID
;
96 u
->default_dependencies
= true;
97 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
98 u
->unit_file_preset
= -1;
99 u
->on_failure_job_mode
= JOB_REPLACE
;
100 u
->cgroup_inotify_wd
= -1;
101 u
->job_timeout
= USEC_INFINITY
;
102 u
->ref_uid
= UID_INVALID
;
103 u
->ref_gid
= GID_INVALID
;
104 u
->cpu_usage_last
= NSEC_INFINITY
;
106 RATELIMIT_INIT(u
->start_limit
, m
->default_start_limit_interval
, m
->default_start_limit_burst
);
107 RATELIMIT_INIT(u
->auto_stop_ratelimit
, 10 * USEC_PER_SEC
, 16);
112 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
116 u
= unit_new(m
, size
);
120 r
= unit_add_name(u
, name
);
130 bool unit_has_name(Unit
*u
, const char *name
) {
134 return set_contains(u
->names
, (char*) name
);
137 static void unit_init(Unit
*u
) {
144 assert(u
->type
>= 0);
146 cc
= unit_get_cgroup_context(u
);
148 cgroup_context_init(cc
);
150 /* Copy in the manager defaults into the cgroup
151 * context, _before_ the rest of the settings have
152 * been initialized */
154 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
155 cc
->io_accounting
= u
->manager
->default_io_accounting
;
156 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
157 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
158 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
160 if (u
->type
!= UNIT_SLICE
)
161 cc
->tasks_max
= u
->manager
->default_tasks_max
;
164 ec
= unit_get_exec_context(u
);
166 exec_context_init(ec
);
168 kc
= unit_get_kill_context(u
);
170 kill_context_init(kc
);
172 if (UNIT_VTABLE(u
)->init
)
173 UNIT_VTABLE(u
)->init(u
);
176 int unit_add_name(Unit
*u
, const char *text
) {
177 _cleanup_free_
char *s
= NULL
, *i
= NULL
;
184 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
189 r
= unit_name_replace_instance(text
, u
->instance
, &s
);
198 if (set_contains(u
->names
, s
))
200 if (hashmap_contains(u
->manager
->units
, s
))
203 if (!unit_name_is_valid(s
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
206 t
= unit_name_to_type(s
);
210 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
213 r
= unit_name_to_instance(s
, &i
);
217 if (i
&& !unit_type_may_template(t
))
220 /* Ensure that this unit is either instanced or not instanced,
221 * but not both. Note that we do allow names with different
222 * instance names however! */
223 if (u
->type
!= _UNIT_TYPE_INVALID
&& !u
->instance
!= !i
)
226 if (!unit_type_may_alias(t
) && !set_isempty(u
->names
))
229 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
232 r
= set_put(u
->names
, s
);
237 r
= hashmap_put(u
->manager
->units
, s
, u
);
239 (void) set_remove(u
->names
, s
);
243 if (u
->type
== _UNIT_TYPE_INVALID
) {
248 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
257 unit_add_to_dbus_queue(u
);
261 int unit_choose_id(Unit
*u
, const char *name
) {
262 _cleanup_free_
char *t
= NULL
;
269 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
274 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
281 /* Selects one of the names of this unit as the id */
282 s
= set_get(u
->names
, (char*) name
);
286 /* Determine the new instance from the new id */
287 r
= unit_name_to_instance(s
, &i
);
296 unit_add_to_dbus_queue(u
);
301 int unit_set_description(Unit
*u
, const char *description
) {
306 if (isempty(description
))
309 s
= strdup(description
);
314 free(u
->description
);
317 unit_add_to_dbus_queue(u
);
321 bool unit_check_gc(Unit
*u
) {
322 UnitActiveState state
;
332 state
= unit_active_state(u
);
333 inactive
= state
== UNIT_INACTIVE
;
335 /* If the unit is inactive and failed and no job is queued for
336 * it, then release its runtime resources */
337 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
338 UNIT_VTABLE(u
)->release_resources
)
339 UNIT_VTABLE(u
)->release_resources(u
, inactive
);
341 /* But we keep the unit object around for longer when it is
342 * referenced or configured to not be gc'ed */
352 if (sd_bus_track_count(u
->bus_track
) > 0)
355 if (UNIT_VTABLE(u
)->check_gc
)
356 if (UNIT_VTABLE(u
)->check_gc(u
))
362 void unit_add_to_load_queue(Unit
*u
) {
364 assert(u
->type
!= _UNIT_TYPE_INVALID
);
366 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
369 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
370 u
->in_load_queue
= true;
373 void unit_add_to_cleanup_queue(Unit
*u
) {
376 if (u
->in_cleanup_queue
)
379 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
380 u
->in_cleanup_queue
= true;
383 void unit_add_to_gc_queue(Unit
*u
) {
386 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
389 if (unit_check_gc(u
))
392 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
393 u
->in_gc_queue
= true;
396 void unit_add_to_dbus_queue(Unit
*u
) {
398 assert(u
->type
!= _UNIT_TYPE_INVALID
);
400 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
403 /* Shortcut things if nobody cares */
404 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
405 set_isempty(u
->manager
->private_buses
)) {
406 u
->sent_dbus_new_signal
= true;
410 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
411 u
->in_dbus_queue
= true;
414 static void bidi_set_free(Unit
*u
, Set
*s
) {
420 /* Frees the set and makes sure we are dropped from the
421 * inverse pointers */
423 SET_FOREACH(other
, s
, i
) {
426 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
427 set_remove(other
->dependencies
[d
], u
);
429 unit_add_to_gc_queue(other
);
435 static void unit_remove_transient(Unit
*u
) {
443 if (u
->fragment_path
)
444 (void) unlink(u
->fragment_path
);
446 STRV_FOREACH(i
, u
->dropin_paths
) {
447 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
449 p
= dirname_malloc(*i
); /* Get the drop-in directory from the drop-in file */
453 pp
= dirname_malloc(p
); /* Get the config directory from the drop-in directory */
457 /* Only drop transient drop-ins */
458 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
466 static void unit_free_requires_mounts_for(Unit
*u
) {
469 STRV_FOREACH(j
, u
->requires_mounts_for
) {
470 char s
[strlen(*j
) + 1];
472 PATH_FOREACH_PREFIX_MORE(s
, *j
) {
476 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
482 if (set_isempty(x
)) {
483 hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
490 u
->requires_mounts_for
= strv_free(u
->requires_mounts_for
);
493 static void unit_done(Unit
*u
) {
502 if (UNIT_VTABLE(u
)->done
)
503 UNIT_VTABLE(u
)->done(u
);
505 ec
= unit_get_exec_context(u
);
507 exec_context_done(ec
);
509 cc
= unit_get_cgroup_context(u
);
511 cgroup_context_done(cc
);
514 void unit_free(Unit
*u
) {
522 if (u
->transient_file
)
523 fclose(u
->transient_file
);
525 if (!MANAGER_IS_RELOADING(u
->manager
))
526 unit_remove_transient(u
);
528 bus_unit_send_removed_signal(u
);
532 sd_bus_slot_unref(u
->match_bus_slot
);
534 sd_bus_track_unref(u
->bus_track
);
535 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
537 unit_free_requires_mounts_for(u
);
539 SET_FOREACH(t
, u
->names
, i
)
540 hashmap_remove_value(u
->manager
->units
, t
, u
);
542 if (!sd_id128_is_null(u
->invocation_id
))
543 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
557 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
558 bidi_set_free(u
, u
->dependencies
[d
]);
560 if (u
->type
!= _UNIT_TYPE_INVALID
)
561 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
563 if (u
->in_load_queue
)
564 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
566 if (u
->in_dbus_queue
)
567 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
569 if (u
->in_cleanup_queue
)
570 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
573 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
575 if (u
->in_cgroup_queue
)
576 LIST_REMOVE(cgroup_queue
, u
->manager
->cgroup_queue
, u
);
578 unit_release_cgroup(u
);
580 unit_unref_uid_gid(u
, false);
582 (void) manager_update_failed_units(u
->manager
, u
, false);
583 set_remove(u
->manager
->startup_units
, u
);
585 free(u
->description
);
586 strv_free(u
->documentation
);
587 free(u
->fragment_path
);
588 free(u
->source_path
);
589 strv_free(u
->dropin_paths
);
592 free(u
->job_timeout_reboot_arg
);
594 set_free_free(u
->names
);
596 unit_unwatch_all_pids(u
);
598 condition_free_list(u
->conditions
);
599 condition_free_list(u
->asserts
);
603 unit_ref_unset(&u
->slice
);
606 unit_ref_unset(u
->refs
);
611 UnitActiveState
unit_active_state(Unit
*u
) {
614 if (u
->load_state
== UNIT_MERGED
)
615 return unit_active_state(unit_follow_merge(u
));
617 /* After a reload it might happen that a unit is not correctly
618 * loaded but still has a process around. That's why we won't
619 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
621 return UNIT_VTABLE(u
)->active_state(u
);
624 const char* unit_sub_state_to_string(Unit
*u
) {
627 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
630 static int complete_move(Set
**s
, Set
**other
) {
640 r
= set_move(*s
, *other
);
651 static int merge_names(Unit
*u
, Unit
*other
) {
659 r
= complete_move(&u
->names
, &other
->names
);
663 set_free_free(other
->names
);
667 SET_FOREACH(t
, u
->names
, i
)
668 assert_se(hashmap_replace(u
->manager
->units
, t
, u
) == 0);
673 static int reserve_dependencies(Unit
*u
, Unit
*other
, UnitDependency d
) {
678 assert(d
< _UNIT_DEPENDENCY_MAX
);
681 * If u does not have this dependency set allocated, there is no need
682 * to reserve anything. In that case other's set will be transferred
683 * as a whole to u by complete_move().
685 if (!u
->dependencies
[d
])
688 /* merge_dependencies() will skip a u-on-u dependency */
689 n_reserve
= set_size(other
->dependencies
[d
]) - !!set_get(other
->dependencies
[d
], u
);
691 return set_reserve(u
->dependencies
[d
], n_reserve
);
694 static void merge_dependencies(Unit
*u
, Unit
*other
, const char *other_id
, UnitDependency d
) {
701 assert(d
< _UNIT_DEPENDENCY_MAX
);
703 /* Fix backwards pointers */
704 SET_FOREACH(back
, other
->dependencies
[d
], i
) {
707 for (k
= 0; k
< _UNIT_DEPENDENCY_MAX
; k
++) {
708 /* Do not add dependencies between u and itself */
710 if (set_remove(back
->dependencies
[k
], other
))
711 maybe_warn_about_dependency(u
, other_id
, k
);
713 r
= set_remove_and_put(back
->dependencies
[k
], other
, u
);
715 set_remove(back
->dependencies
[k
], other
);
717 assert(r
>= 0 || r
== -ENOENT
);
722 /* Also do not move dependencies on u to itself */
723 back
= set_remove(other
->dependencies
[d
], u
);
725 maybe_warn_about_dependency(u
, other_id
, d
);
727 /* The move cannot fail. The caller must have performed a reservation. */
728 assert_se(complete_move(&u
->dependencies
[d
], &other
->dependencies
[d
]) == 0);
730 other
->dependencies
[d
] = set_free(other
->dependencies
[d
]);
733 int unit_merge(Unit
*u
, Unit
*other
) {
735 const char *other_id
= NULL
;
740 assert(u
->manager
== other
->manager
);
741 assert(u
->type
!= _UNIT_TYPE_INVALID
);
743 other
= unit_follow_merge(other
);
748 if (u
->type
!= other
->type
)
751 if (!u
->instance
!= !other
->instance
)
754 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
757 if (other
->load_state
!= UNIT_STUB
&&
758 other
->load_state
!= UNIT_NOT_FOUND
)
767 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
771 other_id
= strdupa(other
->id
);
773 /* Make reservations to ensure merge_dependencies() won't fail */
774 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
775 r
= reserve_dependencies(u
, other
, d
);
777 * We don't rollback reservations if we fail. We don't have
778 * a way to undo reservations. A reservation is not a leak.
785 r
= merge_names(u
, other
);
789 /* Redirect all references */
791 unit_ref_set(other
->refs
, u
);
793 /* Merge dependencies */
794 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++)
795 merge_dependencies(u
, other
, other_id
, d
);
797 other
->load_state
= UNIT_MERGED
;
798 other
->merged_into
= u
;
800 /* If there is still some data attached to the other node, we
801 * don't need it anymore, and can free it. */
802 if (other
->load_state
!= UNIT_STUB
)
803 if (UNIT_VTABLE(other
)->done
)
804 UNIT_VTABLE(other
)->done(other
);
806 unit_add_to_dbus_queue(u
);
807 unit_add_to_cleanup_queue(other
);
812 int unit_merge_by_name(Unit
*u
, const char *name
) {
813 _cleanup_free_
char *s
= NULL
;
820 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
824 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
831 other
= manager_get_unit(u
->manager
, name
);
833 return unit_merge(u
, other
);
835 return unit_add_name(u
, name
);
838 Unit
* unit_follow_merge(Unit
*u
) {
841 while (u
->load_state
== UNIT_MERGED
)
842 assert_se(u
= u
->merged_into
);
847 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
853 if (c
->working_directory
) {
854 r
= unit_require_mounts_for(u
, c
->working_directory
);
859 if (c
->root_directory
) {
860 r
= unit_require_mounts_for(u
, c
->root_directory
);
866 r
= unit_require_mounts_for(u
, c
->root_image
);
871 if (!MANAGER_IS_SYSTEM(u
->manager
))
874 if (c
->private_tmp
) {
877 FOREACH_STRING(p
, "/tmp", "/var/tmp") {
878 r
= unit_require_mounts_for(u
, p
);
883 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, NULL
, true);
888 if (!IN_SET(c
->std_output
,
889 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
890 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
891 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
) &&
892 !IN_SET(c
->std_error
,
893 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
894 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
,
895 EXEC_OUTPUT_SYSLOG
, EXEC_OUTPUT_SYSLOG_AND_CONSOLE
))
898 /* If syslog or kernel logging is requested, make sure our own
899 * logging daemon is run first. */
901 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, NULL
, true);
908 const char *unit_description(Unit
*u
) {
912 return u
->description
;
917 void unit_dump(Unit
*u
, FILE *f
, const char *prefix
) {
923 timestamp0
[FORMAT_TIMESTAMP_MAX
],
924 timestamp1
[FORMAT_TIMESTAMP_MAX
],
925 timestamp2
[FORMAT_TIMESTAMP_MAX
],
926 timestamp3
[FORMAT_TIMESTAMP_MAX
],
927 timestamp4
[FORMAT_TIMESTAMP_MAX
],
928 timespan
[FORMAT_TIMESPAN_MAX
];
930 _cleanup_set_free_ Set
*following_set
= NULL
;
935 assert(u
->type
>= 0);
937 prefix
= strempty(prefix
);
938 prefix2
= strjoina(prefix
, "\t");
942 "%s\tDescription: %s\n"
944 "%s\tUnit Load State: %s\n"
945 "%s\tUnit Active State: %s\n"
946 "%s\tState Change Timestamp: %s\n"
947 "%s\tInactive Exit Timestamp: %s\n"
948 "%s\tActive Enter Timestamp: %s\n"
949 "%s\tActive Exit Timestamp: %s\n"
950 "%s\tInactive Enter Timestamp: %s\n"
951 "%s\tGC Check Good: %s\n"
952 "%s\tNeed Daemon Reload: %s\n"
953 "%s\tTransient: %s\n"
954 "%s\tPerpetual: %s\n"
957 "%s\tCGroup realized: %s\n"
958 "%s\tCGroup mask: 0x%x\n"
959 "%s\tCGroup members mask: 0x%x\n",
961 prefix
, unit_description(u
),
962 prefix
, strna(u
->instance
),
963 prefix
, unit_load_state_to_string(u
->load_state
),
964 prefix
, unit_active_state_to_string(unit_active_state(u
)),
965 prefix
, strna(format_timestamp(timestamp0
, sizeof(timestamp0
), u
->state_change_timestamp
.realtime
)),
966 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->inactive_exit_timestamp
.realtime
)),
967 prefix
, strna(format_timestamp(timestamp2
, sizeof(timestamp2
), u
->active_enter_timestamp
.realtime
)),
968 prefix
, strna(format_timestamp(timestamp3
, sizeof(timestamp3
), u
->active_exit_timestamp
.realtime
)),
969 prefix
, strna(format_timestamp(timestamp4
, sizeof(timestamp4
), u
->inactive_enter_timestamp
.realtime
)),
970 prefix
, yes_no(unit_check_gc(u
)),
971 prefix
, yes_no(unit_need_daemon_reload(u
)),
972 prefix
, yes_no(u
->transient
),
973 prefix
, yes_no(u
->perpetual
),
974 prefix
, strna(unit_slice_name(u
)),
975 prefix
, strna(u
->cgroup_path
),
976 prefix
, yes_no(u
->cgroup_realized
),
977 prefix
, u
->cgroup_realized_mask
,
978 prefix
, u
->cgroup_members_mask
);
980 SET_FOREACH(t
, u
->names
, i
)
981 fprintf(f
, "%s\tName: %s\n", prefix
, t
);
983 if (!sd_id128_is_null(u
->invocation_id
))
984 fprintf(f
, "%s\tInvocation ID: " SD_ID128_FORMAT_STR
"\n",
985 prefix
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
987 STRV_FOREACH(j
, u
->documentation
)
988 fprintf(f
, "%s\tDocumentation: %s\n", prefix
, *j
);
990 following
= unit_following(u
);
992 fprintf(f
, "%s\tFollowing: %s\n", prefix
, following
->id
);
994 r
= unit_following_set(u
, &following_set
);
998 SET_FOREACH(other
, following_set
, i
)
999 fprintf(f
, "%s\tFollowing Set Member: %s\n", prefix
, other
->id
);
1002 if (u
->fragment_path
)
1003 fprintf(f
, "%s\tFragment Path: %s\n", prefix
, u
->fragment_path
);
1006 fprintf(f
, "%s\tSource Path: %s\n", prefix
, u
->source_path
);
1008 STRV_FOREACH(j
, u
->dropin_paths
)
1009 fprintf(f
, "%s\tDropIn Path: %s\n", prefix
, *j
);
1011 if (u
->job_timeout
!= USEC_INFINITY
)
1012 fprintf(f
, "%s\tJob Timeout: %s\n", prefix
, format_timespan(timespan
, sizeof(timespan
), u
->job_timeout
, 0));
1014 if (u
->job_timeout_action
!= EMERGENCY_ACTION_NONE
)
1015 fprintf(f
, "%s\tJob Timeout Action: %s\n", prefix
, emergency_action_to_string(u
->job_timeout_action
));
1017 if (u
->job_timeout_reboot_arg
)
1018 fprintf(f
, "%s\tJob Timeout Reboot Argument: %s\n", prefix
, u
->job_timeout_reboot_arg
);
1020 condition_dump_list(u
->conditions
, f
, prefix
, condition_type_to_string
);
1021 condition_dump_list(u
->asserts
, f
, prefix
, assert_type_to_string
);
1023 if (dual_timestamp_is_set(&u
->condition_timestamp
))
1025 "%s\tCondition Timestamp: %s\n"
1026 "%s\tCondition Result: %s\n",
1027 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->condition_timestamp
.realtime
)),
1028 prefix
, yes_no(u
->condition_result
));
1030 if (dual_timestamp_is_set(&u
->assert_timestamp
))
1032 "%s\tAssert Timestamp: %s\n"
1033 "%s\tAssert Result: %s\n",
1034 prefix
, strna(format_timestamp(timestamp1
, sizeof(timestamp1
), u
->assert_timestamp
.realtime
)),
1035 prefix
, yes_no(u
->assert_result
));
1037 for (d
= 0; d
< _UNIT_DEPENDENCY_MAX
; d
++) {
1040 SET_FOREACH(other
, u
->dependencies
[d
], i
)
1041 fprintf(f
, "%s\t%s: %s\n", prefix
, unit_dependency_to_string(d
), other
->id
);
1044 if (!strv_isempty(u
->requires_mounts_for
)) {
1046 "%s\tRequiresMountsFor:", prefix
);
1048 STRV_FOREACH(j
, u
->requires_mounts_for
)
1049 fprintf(f
, " %s", *j
);
1054 if (u
->load_state
== UNIT_LOADED
) {
1057 "%s\tStopWhenUnneeded: %s\n"
1058 "%s\tRefuseManualStart: %s\n"
1059 "%s\tRefuseManualStop: %s\n"
1060 "%s\tDefaultDependencies: %s\n"
1061 "%s\tOnFailureJobMode: %s\n"
1062 "%s\tIgnoreOnIsolate: %s\n",
1063 prefix
, yes_no(u
->stop_when_unneeded
),
1064 prefix
, yes_no(u
->refuse_manual_start
),
1065 prefix
, yes_no(u
->refuse_manual_stop
),
1066 prefix
, yes_no(u
->default_dependencies
),
1067 prefix
, job_mode_to_string(u
->on_failure_job_mode
),
1068 prefix
, yes_no(u
->ignore_on_isolate
));
1070 if (UNIT_VTABLE(u
)->dump
)
1071 UNIT_VTABLE(u
)->dump(u
, f
, prefix2
);
1073 } else if (u
->load_state
== UNIT_MERGED
)
1075 "%s\tMerged into: %s\n",
1076 prefix
, u
->merged_into
->id
);
1077 else if (u
->load_state
== UNIT_ERROR
)
1078 fprintf(f
, "%s\tLoad Error Code: %s\n", prefix
, strerror(-u
->load_error
));
1080 for (n
= sd_bus_track_first(u
->bus_track
); n
; n
= sd_bus_track_next(u
->bus_track
))
1081 fprintf(f
, "%s\tBus Ref: %s\n", prefix
, n
);
1084 job_dump(u
->job
, f
, prefix2
);
1087 job_dump(u
->nop_job
, f
, prefix2
);
1090 /* Common implementation for multiple backends */
1091 int unit_load_fragment_and_dropin(Unit
*u
) {
1097 /* Load a .{service,socket,...} file */
1098 r
= unit_load_fragment(u
);
1102 if (u
->load_state
== UNIT_STUB
)
1105 /* If the unit is an alias and the final unit has already been
1106 * loaded, there's no point in reloading the dropins one more time. */
1107 t
= unit_follow_merge(u
);
1108 if (t
!= u
&& t
->load_state
!= UNIT_STUB
)
1111 return unit_load_dropin(t
);
1114 /* Common implementation for multiple backends */
1115 int unit_load_fragment_and_dropin_optional(Unit
*u
) {
1121 /* Same as unit_load_fragment_and_dropin(), but whether
1122 * something can be loaded or not doesn't matter. */
1124 /* Load a .service file */
1125 r
= unit_load_fragment(u
);
1129 if (u
->load_state
== UNIT_STUB
)
1130 u
->load_state
= UNIT_LOADED
;
1132 /* If the unit is an alias and the final unit has already been
1133 * loaded, there's no point in reloading the dropins one more time. */
1134 t
= unit_follow_merge(u
);
1135 if (t
!= u
&& t
->load_state
!= UNIT_STUB
)
1138 return unit_load_dropin(t
);
1141 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1145 if (target
->type
!= UNIT_TARGET
)
1148 /* Only add the dependency if both units are loaded, so that
1149 * that loop check below is reliable */
1150 if (u
->load_state
!= UNIT_LOADED
||
1151 target
->load_state
!= UNIT_LOADED
)
1154 /* If either side wants no automatic dependencies, then let's
1156 if (!u
->default_dependencies
||
1157 !target
->default_dependencies
)
1160 /* Don't create loops */
1161 if (set_get(target
->dependencies
[UNIT_BEFORE
], u
))
1164 return unit_add_dependency(target
, UNIT_AFTER
, u
, true);
1167 static int unit_add_target_dependencies(Unit
*u
) {
1169 static const UnitDependency deps
[] = {
1183 for (k
= 0; k
< ELEMENTSOF(deps
); k
++)
1184 SET_FOREACH(target
, u
->dependencies
[deps
[k
]], i
) {
1185 r
= unit_add_default_target_dependency(u
, target
);
1193 static int unit_add_slice_dependencies(Unit
*u
) {
1196 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1199 if (UNIT_ISSET(u
->slice
))
1200 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, UNIT_DEREF(u
->slice
), true);
1202 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1205 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, NULL
, true);
1208 static int unit_add_mount_dependencies(Unit
*u
) {
1214 STRV_FOREACH(i
, u
->requires_mounts_for
) {
1215 char prefix
[strlen(*i
) + 1];
1217 PATH_FOREACH_PREFIX_MORE(prefix
, *i
) {
1218 _cleanup_free_
char *p
= NULL
;
1221 r
= unit_name_from_path(prefix
, ".mount", &p
);
1225 m
= manager_get_unit(u
->manager
, p
);
1227 /* Make sure to load the mount unit if
1228 * it exists. If so the dependencies
1229 * on this unit will be added later
1230 * during the loading of the mount
1232 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1238 if (m
->load_state
!= UNIT_LOADED
)
1241 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true);
1245 if (m
->fragment_path
) {
1246 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true);
1256 static int unit_add_startup_units(Unit
*u
) {
1260 c
= unit_get_cgroup_context(u
);
1264 if (c
->startup_cpu_shares
== CGROUP_CPU_SHARES_INVALID
&&
1265 c
->startup_io_weight
== CGROUP_WEIGHT_INVALID
&&
1266 c
->startup_blockio_weight
== CGROUP_BLKIO_WEIGHT_INVALID
)
1269 r
= set_ensure_allocated(&u
->manager
->startup_units
, NULL
);
1273 return set_put(u
->manager
->startup_units
, u
);
1276 int unit_load(Unit
*u
) {
1281 if (u
->in_load_queue
) {
1282 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1283 u
->in_load_queue
= false;
1286 if (u
->type
== _UNIT_TYPE_INVALID
)
1289 if (u
->load_state
!= UNIT_STUB
)
1292 if (u
->transient_file
) {
1293 r
= fflush_and_check(u
->transient_file
);
1297 fclose(u
->transient_file
);
1298 u
->transient_file
= NULL
;
1300 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1303 if (UNIT_VTABLE(u
)->load
) {
1304 r
= UNIT_VTABLE(u
)->load(u
);
1309 if (u
->load_state
== UNIT_STUB
) {
1314 if (u
->load_state
== UNIT_LOADED
) {
1316 r
= unit_add_target_dependencies(u
);
1320 r
= unit_add_slice_dependencies(u
);
1324 r
= unit_add_mount_dependencies(u
);
1328 r
= unit_add_startup_units(u
);
1332 if (u
->on_failure_job_mode
== JOB_ISOLATE
&& set_size(u
->dependencies
[UNIT_ON_FAILURE
]) > 1) {
1333 log_unit_error(u
, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1338 unit_update_cgroup_members_masks(u
);
1341 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1343 unit_add_to_dbus_queue(unit_follow_merge(u
));
1344 unit_add_to_gc_queue(u
);
1349 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
: UNIT_ERROR
;
1351 unit_add_to_dbus_queue(u
);
1352 unit_add_to_gc_queue(u
);
1354 log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1359 static bool unit_condition_test_list(Unit
*u
, Condition
*first
, const char *(*to_string
)(ConditionType t
)) {
1366 /* If the condition list is empty, then it is true */
1370 /* Otherwise, if all of the non-trigger conditions apply and
1371 * if any of the trigger conditions apply (unless there are
1372 * none) we return true */
1373 LIST_FOREACH(conditions
, c
, first
) {
1376 r
= condition_test(c
);
1379 "Couldn't determine result for %s=%s%s%s, assuming failed: %m",
1381 c
->trigger
? "|" : "",
1382 c
->negate
? "!" : "",
1388 c
->trigger
? "|" : "",
1389 c
->negate
? "!" : "",
1391 condition_result_to_string(c
->result
));
1393 if (!c
->trigger
&& r
<= 0)
1396 if (c
->trigger
&& triggered
<= 0)
1400 return triggered
!= 0;
1403 static bool unit_condition_test(Unit
*u
) {
1406 dual_timestamp_get(&u
->condition_timestamp
);
1407 u
->condition_result
= unit_condition_test_list(u
, u
->conditions
, condition_type_to_string
);
1409 return u
->condition_result
;
1412 static bool unit_assert_test(Unit
*u
) {
1415 dual_timestamp_get(&u
->assert_timestamp
);
1416 u
->assert_result
= unit_condition_test_list(u
, u
->asserts
, assert_type_to_string
);
1418 return u
->assert_result
;
1421 void unit_status_printf(Unit
*u
, const char *status
, const char *unit_status_msg_format
) {
1422 DISABLE_WARNING_FORMAT_NONLITERAL
;
1423 manager_status_printf(u
->manager
, STATUS_TYPE_NORMAL
, status
, unit_status_msg_format
, unit_description(u
));
1427 _pure_
static const char* unit_get_status_message_format(Unit
*u
, JobType t
) {
1429 const UnitStatusMessageFormats
*format_table
;
1432 assert(IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
));
1434 if (t
!= JOB_RELOAD
) {
1435 format_table
= &UNIT_VTABLE(u
)->status_message_formats
;
1437 format
= format_table
->starting_stopping
[t
== JOB_STOP
];
1443 /* Return generic strings */
1445 return "Starting %s.";
1446 else if (t
== JOB_STOP
)
1447 return "Stopping %s.";
1449 return "Reloading %s.";
1452 static void unit_status_print_starting_stopping(Unit
*u
, JobType t
) {
1457 /* Reload status messages have traditionally not been printed to console. */
1458 if (!IN_SET(t
, JOB_START
, JOB_STOP
))
1461 format
= unit_get_status_message_format(u
, t
);
1463 DISABLE_WARNING_FORMAT_NONLITERAL
;
1464 unit_status_printf(u
, "", format
);
1468 static void unit_status_log_starting_stopping_reloading(Unit
*u
, JobType t
) {
1475 if (!IN_SET(t
, JOB_START
, JOB_STOP
, JOB_RELOAD
))
1478 if (log_on_console())
1481 /* We log status messages for all units and all operations. */
1483 format
= unit_get_status_message_format(u
, t
);
1485 DISABLE_WARNING_FORMAT_NONLITERAL
;
1486 snprintf(buf
, sizeof buf
, format
, unit_description(u
));
1489 mid
= t
== JOB_START
? SD_MESSAGE_UNIT_STARTING
:
1490 t
== JOB_STOP
? SD_MESSAGE_UNIT_STOPPING
:
1491 SD_MESSAGE_UNIT_RELOADING
;
1493 /* Note that we deliberately use LOG_MESSAGE() instead of
1494 * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
1495 * closely what is written to screen using the status output,
1496 * which is supposed the highest level, friendliest output
1497 * possible, which means we should avoid the low-level unit
1499 log_struct(LOG_INFO
,
1500 LOG_MESSAGE_ID(mid
),
1502 LOG_MESSAGE("%s", buf
),
1506 void unit_status_emit_starting_stopping_reloading(Unit
*u
, JobType t
) {
1509 assert(t
< _JOB_TYPE_MAX
);
1511 unit_status_log_starting_stopping_reloading(u
, t
);
1512 unit_status_print_starting_stopping(u
, t
);
1515 int unit_start_limit_test(Unit
*u
) {
1518 if (ratelimit_test(&u
->start_limit
)) {
1519 u
->start_limit_hit
= false;
1523 log_unit_warning(u
, "Start request repeated too quickly.");
1524 u
->start_limit_hit
= true;
1526 return emergency_action(u
->manager
, u
->start_limit_action
, u
->reboot_arg
, "unit failed");
1529 bool unit_shall_confirm_spawn(Unit
*u
) {
1531 if (manager_is_confirm_spawn_disabled(u
->manager
))
1534 /* For some reasons units remaining in the same process group
1535 * as PID 1 fail to acquire the console even if it's not used
1536 * by any process. So skip the confirmation question for them. */
1537 return !unit_get_exec_context(u
)->same_pgrp
;
1541 * -EBADR: This unit type does not support starting.
1542 * -EALREADY: Unit is already started.
1543 * -EAGAIN: An operation is already in progress. Retry later.
1544 * -ECANCELED: Too many requests for now.
1545 * -EPROTO: Assert failed
1546 * -EINVAL: Unit not loaded
1547 * -EOPNOTSUPP: Unit type not supported
1549 int unit_start(Unit
*u
) {
1550 UnitActiveState state
;
1555 /* If this is already started, then this will succeed. Note
1556 * that this will even succeed if this unit is not startable
1557 * by the user. This is relied on to detect when we need to
1558 * wait for units and when waiting is finished. */
1559 state
= unit_active_state(u
);
1560 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1563 /* Units that aren't loaded cannot be started */
1564 if (u
->load_state
!= UNIT_LOADED
)
1567 /* If the conditions failed, don't do anything at all. If we
1568 * already are activating this call might still be useful to
1569 * speed up activation in case there is some hold-off time,
1570 * but we don't want to recheck the condition in that case. */
1571 if (state
!= UNIT_ACTIVATING
&&
1572 !unit_condition_test(u
)) {
1573 log_unit_debug(u
, "Starting requested but condition failed. Not starting unit.");
1577 /* If the asserts failed, fail the entire job */
1578 if (state
!= UNIT_ACTIVATING
&&
1579 !unit_assert_test(u
)) {
1580 log_unit_notice(u
, "Starting requested but asserts failed.");
1584 /* Units of types that aren't supported cannot be
1585 * started. Note that we do this test only after the condition
1586 * checks, so that we rather return condition check errors
1587 * (which are usually not considered a true failure) than "not
1588 * supported" errors (which are considered a failure).
1590 if (!unit_supported(u
))
1593 /* Forward to the main object, if we aren't it. */
1594 following
= unit_following(u
);
1596 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1597 return unit_start(following
);
1600 /* If it is stopped, but we cannot start it, then fail */
1601 if (!UNIT_VTABLE(u
)->start
)
1604 /* We don't suppress calls to ->start() here when we are
1605 * already starting, to allow this request to be used as a
1606 * "hurry up" call, for example when the unit is in some "auto
1607 * restart" state where it waits for a holdoff timer to elapse
1608 * before it will start again. */
1610 unit_add_to_dbus_queue(u
);
1612 return UNIT_VTABLE(u
)->start(u
);
1615 bool unit_can_start(Unit
*u
) {
1618 if (u
->load_state
!= UNIT_LOADED
)
1621 if (!unit_supported(u
))
1624 return !!UNIT_VTABLE(u
)->start
;
1627 bool unit_can_isolate(Unit
*u
) {
1630 return unit_can_start(u
) &&
1635 * -EBADR: This unit type does not support stopping.
1636 * -EALREADY: Unit is already stopped.
1637 * -EAGAIN: An operation is already in progress. Retry later.
1639 int unit_stop(Unit
*u
) {
1640 UnitActiveState state
;
1645 state
= unit_active_state(u
);
1646 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1649 following
= unit_following(u
);
1651 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1652 return unit_stop(following
);
1655 if (!UNIT_VTABLE(u
)->stop
)
1658 unit_add_to_dbus_queue(u
);
1660 return UNIT_VTABLE(u
)->stop(u
);
1663 bool unit_can_stop(Unit
*u
) {
1666 if (!unit_supported(u
))
1672 return !!UNIT_VTABLE(u
)->stop
;
1676 * -EBADR: This unit type does not support reloading.
1677 * -ENOEXEC: Unit is not started.
1678 * -EAGAIN: An operation is already in progress. Retry later.
1680 int unit_reload(Unit
*u
) {
1681 UnitActiveState state
;
1686 if (u
->load_state
!= UNIT_LOADED
)
1689 if (!unit_can_reload(u
))
1692 state
= unit_active_state(u
);
1693 if (state
== UNIT_RELOADING
)
1696 if (state
!= UNIT_ACTIVE
) {
1697 log_unit_warning(u
, "Unit cannot be reloaded because it is inactive.");
1701 following
= unit_following(u
);
1703 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
1704 return unit_reload(following
);
1707 unit_add_to_dbus_queue(u
);
1709 return UNIT_VTABLE(u
)->reload(u
);
1712 bool unit_can_reload(Unit
*u
) {
1715 if (!UNIT_VTABLE(u
)->reload
)
1718 if (!UNIT_VTABLE(u
)->can_reload
)
1721 return UNIT_VTABLE(u
)->can_reload(u
);
1724 static void unit_check_unneeded(Unit
*u
) {
1726 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1728 static const UnitDependency needed_dependencies
[] = {
1742 /* If this service shall be shut down when unneeded then do
1745 if (!u
->stop_when_unneeded
)
1748 if (!UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
1751 for (j
= 0; j
< ELEMENTSOF(needed_dependencies
); j
++)
1752 SET_FOREACH(other
, u
->dependencies
[needed_dependencies
[j
]], i
)
1753 if (unit_active_or_pending(other
))
1756 /* If stopping a unit fails continuously we might enter a stop
1757 * loop here, hence stop acting on the service being
1758 * unnecessary after a while. */
1759 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1760 log_unit_warning(u
, "Unit not needed anymore, but not stopping since we tried this too often recently.");
1764 log_unit_info(u
, "Unit not needed anymore. Stopping.");
1766 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1767 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1769 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1772 static void unit_check_binds_to(Unit
*u
) {
1773 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1784 if (unit_active_state(u
) != UNIT_ACTIVE
)
1787 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
) {
1791 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1801 /* If stopping a unit fails continuously we might enter a stop
1802 * loop here, hence stop acting on the service being
1803 * unnecessary after a while. */
1804 if (!ratelimit_test(&u
->auto_stop_ratelimit
)) {
1805 log_unit_warning(u
, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other
->id
);
1810 log_unit_info(u
, "Unit is bound to inactive unit %s. Stopping, too.", other
->id
);
1812 /* A unit we need to run is gone. Sniff. Let's stop this. */
1813 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, NULL
);
1815 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1818 static void retroactively_start_dependencies(Unit
*u
) {
1823 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
1825 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1826 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1827 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1828 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1830 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1831 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1832 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1833 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
);
1835 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1836 if (!set_get(u
->dependencies
[UNIT_AFTER
], other
) &&
1837 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
1838 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
);
1840 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTS
], i
)
1841 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1842 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1844 SET_FOREACH(other
, u
->dependencies
[UNIT_CONFLICTED_BY
], i
)
1845 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1846 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1849 static void retroactively_stop_dependencies(Unit
*u
) {
1854 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1856 /* Pull down units which are bound to us recursively if enabled */
1857 SET_FOREACH(other
, u
->dependencies
[UNIT_BOUND_BY
], i
)
1858 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1859 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
);
1862 static void check_unneeded_dependencies(Unit
*u
) {
1867 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
1869 /* Garbage collect services that might not be needed anymore, if enabled */
1870 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUIRES
], i
)
1871 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1872 unit_check_unneeded(other
);
1873 SET_FOREACH(other
, u
->dependencies
[UNIT_WANTS
], i
)
1874 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1875 unit_check_unneeded(other
);
1876 SET_FOREACH(other
, u
->dependencies
[UNIT_REQUISITE
], i
)
1877 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1878 unit_check_unneeded(other
);
1879 SET_FOREACH(other
, u
->dependencies
[UNIT_BINDS_TO
], i
)
1880 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
1881 unit_check_unneeded(other
);
1884 void unit_start_on_failure(Unit
*u
) {
1890 if (set_size(u
->dependencies
[UNIT_ON_FAILURE
]) <= 0)
1893 log_unit_info(u
, "Triggering OnFailure= dependencies.");
1895 SET_FOREACH(other
, u
->dependencies
[UNIT_ON_FAILURE
], i
) {
1898 r
= manager_add_job(u
->manager
, JOB_START
, other
, u
->on_failure_job_mode
, NULL
, NULL
);
1900 log_unit_error_errno(u
, r
, "Failed to enqueue OnFailure= job: %m");
1904 void unit_trigger_notify(Unit
*u
) {
1910 SET_FOREACH(other
, u
->dependencies
[UNIT_TRIGGERED_BY
], i
)
1911 if (UNIT_VTABLE(other
)->trigger_notify
)
1912 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
1915 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
1920 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
1921 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
1923 /* Note that this is called for all low-level state changes,
1924 * even if they might map to the same high-level
1925 * UnitActiveState! That means that ns == os is an expected
1926 * behavior here. For example: if a mount point is remounted
1927 * this function will be called too! */
1931 /* Update timestamps for state changes */
1932 if (!MANAGER_IS_RELOADING(m
)) {
1933 dual_timestamp_get(&u
->state_change_timestamp
);
1935 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
1936 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
1937 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
1938 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
1940 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1941 u
->active_enter_timestamp
= u
->state_change_timestamp
;
1942 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1943 u
->active_exit_timestamp
= u
->state_change_timestamp
;
1946 /* Keep track of failed units */
1947 (void) manager_update_failed_units(u
->manager
, u
, ns
== UNIT_FAILED
);
1949 /* Make sure the cgroup is always removed when we become inactive */
1950 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1951 unit_prune_cgroup(u
);
1953 /* Note that this doesn't apply to RemainAfterExit services exiting
1954 * successfully, since there's no change of state in that case. Which is
1955 * why it is handled in service_set_state() */
1956 if (UNIT_IS_INACTIVE_OR_FAILED(os
) != UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1959 ec
= unit_get_exec_context(u
);
1960 if (ec
&& exec_context_may_touch_console(ec
)) {
1961 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
1964 if (m
->n_on_console
== 0)
1965 /* unset no_console_output flag, since the console is free */
1966 m
->no_console_output
= false;
1975 if (u
->job
->state
== JOB_WAITING
)
1977 /* So we reached a different state for this
1978 * job. Let's see if we can run it now if it
1979 * failed previously due to EAGAIN. */
1980 job_add_to_run_queue(u
->job
);
1982 /* Let's check whether this state change constitutes a
1983 * finished job, or maybe contradicts a running job and
1984 * hence needs to invalidate jobs. */
1986 switch (u
->job
->type
) {
1989 case JOB_VERIFY_ACTIVE
:
1991 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
1992 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
1993 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
1996 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
1997 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2003 case JOB_RELOAD_OR_START
:
2004 case JOB_TRY_RELOAD
:
2006 if (u
->job
->state
== JOB_RUNNING
) {
2007 if (ns
== UNIT_ACTIVE
)
2008 job_finish_and_invalidate(u
->job
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2009 else if (ns
!= UNIT_ACTIVATING
&& ns
!= UNIT_RELOADING
) {
2012 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2013 job_finish_and_invalidate(u
->job
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2021 case JOB_TRY_RESTART
:
2023 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2024 job_finish_and_invalidate(u
->job
, JOB_DONE
, true, false);
2025 else if (u
->job
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2027 job_finish_and_invalidate(u
->job
, JOB_FAILED
, true, false);
2033 assert_not_reached("Job type unknown");
2039 if (!MANAGER_IS_RELOADING(m
)) {
2041 /* If this state change happened without being
2042 * requested by a job, then let's retroactively start
2043 * or stop dependencies. We skip that step when
2044 * deserializing, since we don't want to create any
2045 * additional jobs just because something is already
2049 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2050 retroactively_start_dependencies(u
);
2051 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2052 retroactively_stop_dependencies(u
);
2055 /* stop unneeded units regardless if going down was expected or not */
2056 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2057 check_unneeded_dependencies(u
);
2059 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2060 log_unit_notice(u
, "Unit entered failed state.");
2061 unit_start_on_failure(u
);
2065 /* Some names are special */
2066 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2068 if (unit_has_name(u
, SPECIAL_DBUS_SERVICE
))
2069 /* The bus might have just become available,
2070 * hence try to connect to it, if we aren't
2074 if (u
->type
== UNIT_SERVICE
&&
2075 !UNIT_IS_ACTIVE_OR_RELOADING(os
) &&
2076 !MANAGER_IS_RELOADING(m
)) {
2077 /* Write audit record if we have just finished starting up */
2078 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, true);
2082 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
))
2083 manager_send_unit_plymouth(m
, u
);
2087 /* We don't care about D-Bus here, since we'll get an
2088 * asynchronous notification for it anyway. */
2090 if (u
->type
== UNIT_SERVICE
&&
2091 UNIT_IS_INACTIVE_OR_FAILED(ns
) &&
2092 !UNIT_IS_INACTIVE_OR_FAILED(os
) &&
2093 !MANAGER_IS_RELOADING(m
)) {
2095 /* Hmm, if there was no start record written
2096 * write it now, so that we always have a nice
2099 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_START
, ns
== UNIT_INACTIVE
);
2101 if (ns
== UNIT_INACTIVE
)
2102 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, true);
2104 /* Write audit record if we have just finished shutting down */
2105 manager_send_unit_audit(m
, u
, AUDIT_SERVICE_STOP
, ns
== UNIT_INACTIVE
);
2107 u
->in_audit
= false;
2111 manager_recheck_journal(m
);
2112 unit_trigger_notify(u
);
2114 if (!MANAGER_IS_RELOADING(u
->manager
)) {
2115 /* Maybe we finished startup and are now ready for
2116 * being stopped because unneeded? */
2117 unit_check_unneeded(u
);
2119 /* Maybe we finished startup, but something we needed
2120 * has vanished? Let's die then. (This happens when
2121 * something BindsTo= to a Type=oneshot unit, as these
2122 * units go directly from starting to inactive,
2123 * without ever entering started.) */
2124 unit_check_binds_to(u
);
2127 unit_add_to_dbus_queue(u
);
2128 unit_add_to_gc_queue(u
);
2131 int unit_watch_pid(Unit
*u
, pid_t pid
) {
2137 /* Watch a specific PID. We only support one or two units
2138 * watching each PID for now, not more. */
2140 r
= set_ensure_allocated(&u
->pids
, NULL
);
2144 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids1
, NULL
);
2148 r
= hashmap_put(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2150 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids2
, NULL
);
2154 r
= hashmap_put(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2157 q
= set_put(u
->pids
, PID_TO_PTR(pid
));
2164 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2168 (void) hashmap_remove_value(u
->manager
->watch_pids1
, PID_TO_PTR(pid
), u
);
2169 (void) hashmap_remove_value(u
->manager
->watch_pids2
, PID_TO_PTR(pid
), u
);
2170 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2173 void unit_unwatch_all_pids(Unit
*u
) {
2176 while (!set_isempty(u
->pids
))
2177 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2179 u
->pids
= set_free(u
->pids
);
2182 void unit_tidy_watch_pids(Unit
*u
, pid_t except1
, pid_t except2
) {
2188 /* Cleans dead PIDs from our list */
2190 SET_FOREACH(e
, u
->pids
, i
) {
2191 pid_t pid
= PTR_TO_PID(e
);
2193 if (pid
== except1
|| pid
== except2
)
2196 if (!pid_is_unwaited(pid
))
2197 unit_unwatch_pid(u
, pid
);
2201 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2203 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2207 case JOB_VERIFY_ACTIVE
:
2210 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2211 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2216 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2217 * external events), hence it makes no sense to permit enqueing such a request either. */
2218 return !u
->perpetual
;
2221 case JOB_TRY_RESTART
:
2222 return unit_can_stop(u
) && unit_can_start(u
);
2225 case JOB_TRY_RELOAD
:
2226 return unit_can_reload(u
);
2228 case JOB_RELOAD_OR_START
:
2229 return unit_can_reload(u
) && unit_can_start(u
);
2232 assert_not_reached("Invalid job type");
2236 static void maybe_warn_about_dependency(Unit
*u
, const char *other
, UnitDependency dependency
) {
2239 /* Only warn about some unit types */
2240 if (!IN_SET(dependency
, UNIT_CONFLICTS
, UNIT_CONFLICTED_BY
, UNIT_BEFORE
, UNIT_AFTER
, UNIT_ON_FAILURE
, UNIT_TRIGGERS
, UNIT_TRIGGERED_BY
))
2243 if (streq_ptr(u
->id
, other
))
2244 log_unit_warning(u
, "Dependency %s=%s dropped", unit_dependency_to_string(dependency
), u
->id
);
2246 log_unit_warning(u
, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency
), strna(other
), u
->id
);
2249 int unit_add_dependency(Unit
*u
, UnitDependency d
, Unit
*other
, bool add_reference
) {
2251 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
2252 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
2253 [UNIT_WANTS
] = UNIT_WANTED_BY
,
2254 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
2255 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
2256 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
2257 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
2258 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
2259 [UNIT_WANTED_BY
] = UNIT_WANTS
,
2260 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
2261 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
2262 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
2263 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
2264 [UNIT_BEFORE
] = UNIT_AFTER
,
2265 [UNIT_AFTER
] = UNIT_BEFORE
,
2266 [UNIT_ON_FAILURE
] = _UNIT_DEPENDENCY_INVALID
,
2267 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
2268 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
2269 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
2270 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
2271 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
2272 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
2273 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
,
2275 int r
, q
= 0, v
= 0, w
= 0;
2276 Unit
*orig_u
= u
, *orig_other
= other
;
2279 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
2282 u
= unit_follow_merge(u
);
2283 other
= unit_follow_merge(other
);
2285 /* We won't allow dependencies on ourselves. We will not
2286 * consider them an error however. */
2288 maybe_warn_about_dependency(orig_u
, orig_other
->id
, d
);
2292 if (d
== UNIT_BEFORE
&& other
->type
== UNIT_DEVICE
) {
2293 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
2297 r
= set_ensure_allocated(&u
->dependencies
[d
], NULL
);
2301 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
) {
2302 r
= set_ensure_allocated(&other
->dependencies
[inverse_table
[d
]], NULL
);
2307 if (add_reference
) {
2308 r
= set_ensure_allocated(&u
->dependencies
[UNIT_REFERENCES
], NULL
);
2312 r
= set_ensure_allocated(&other
->dependencies
[UNIT_REFERENCED_BY
], NULL
);
2317 q
= set_put(u
->dependencies
[d
], other
);
2321 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
2322 v
= set_put(other
->dependencies
[inverse_table
[d
]], u
);
2329 if (add_reference
) {
2330 w
= set_put(u
->dependencies
[UNIT_REFERENCES
], other
);
2336 r
= set_put(other
->dependencies
[UNIT_REFERENCED_BY
], u
);
2341 unit_add_to_dbus_queue(u
);
2346 set_remove(u
->dependencies
[d
], other
);
2349 set_remove(other
->dependencies
[inverse_table
[d
]], u
);
2352 set_remove(u
->dependencies
[UNIT_REFERENCES
], other
);
2357 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
) {
2362 r
= unit_add_dependency(u
, d
, other
, add_reference
);
2366 return unit_add_dependency(u
, e
, other
, add_reference
);
2369 static int resolve_template(Unit
*u
, const char *name
, const char*path
, char **buf
, const char **ret
) {
2373 assert(name
|| path
);
2378 name
= basename(path
);
2380 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
2387 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
2389 _cleanup_free_
char *i
= NULL
;
2391 r
= unit_name_to_prefix(u
->id
, &i
);
2395 r
= unit_name_replace_instance(name
, i
, buf
);
2404 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, const char *path
, bool add_reference
) {
2405 _cleanup_free_
char *buf
= NULL
;
2410 assert(name
|| path
);
2412 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2416 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2420 return unit_add_dependency(u
, d
, other
, add_reference
);
2423 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, const char *path
, bool add_reference
) {
2424 _cleanup_free_
char *buf
= NULL
;
2429 assert(name
|| path
);
2431 r
= resolve_template(u
, name
, path
, &buf
, &name
);
2435 r
= manager_load_unit(u
->manager
, name
, path
, NULL
, &other
);
2439 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
);
/* Overrides the unit search path via the SYSTEMD_UNIT_PATH environment
 * variable. Returns 0 on success, negative errno on failure. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
2450 char *unit_dbus_path(Unit
*u
) {
2456 return unit_dbus_path_from_name(u
->id
);
2459 char *unit_dbus_path_invocation_id(Unit
*u
) {
2462 if (sd_id128_is_null(u
->invocation_id
))
2465 return unit_dbus_path_from_name(u
->invocation_id_string
);
2468 int unit_set_slice(Unit
*u
, Unit
*slice
) {
2472 /* Sets the unit slice if it has not been set before. Is extra
2473 * careful, to only allow this for units that actually have a
2474 * cgroup context. Also, we don't allow to set this for slices
2475 * (since the parent slice is derived from the name). Make
2476 * sure the unit we set is actually a slice. */
2478 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
2481 if (u
->type
== UNIT_SLICE
)
2484 if (unit_active_state(u
) != UNIT_INACTIVE
)
2487 if (slice
->type
!= UNIT_SLICE
)
2490 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
2491 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
2494 if (UNIT_DEREF(u
->slice
) == slice
)
2497 /* Disallow slice changes if @u is already bound to cgroups */
2498 if (UNIT_ISSET(u
->slice
) && u
->cgroup_realized
)
2501 unit_ref_unset(&u
->slice
);
2502 unit_ref_set(&u
->slice
, slice
);
2506 int unit_set_default_slice(Unit
*u
) {
2507 _cleanup_free_
char *b
= NULL
;
2508 const char *slice_name
;
2514 if (UNIT_ISSET(u
->slice
))
2518 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
2520 /* Implicitly place all instantiated units in their
2521 * own per-template slice */
2523 r
= unit_name_to_prefix(u
->id
, &prefix
);
2527 /* The prefix is already escaped, but it might include
2528 * "-" which has a special meaning for slice units,
2529 * hence escape it here extra. */
2530 escaped
= unit_name_escape(prefix
);
2534 if (MANAGER_IS_SYSTEM(u
->manager
))
2535 b
= strjoin("system-", escaped
, ".slice");
2537 b
= strappend(escaped
, ".slice");
2544 MANAGER_IS_SYSTEM(u
->manager
) && !unit_has_name(u
, SPECIAL_INIT_SCOPE
)
2545 ? SPECIAL_SYSTEM_SLICE
2546 : SPECIAL_ROOT_SLICE
;
2548 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
2552 return unit_set_slice(u
, slice
);
2555 const char *unit_slice_name(Unit
*u
) {
2558 if (!UNIT_ISSET(u
->slice
))
2561 return UNIT_DEREF(u
->slice
)->id
;
2564 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
2565 _cleanup_free_
char *t
= NULL
;
2572 r
= unit_name_change_suffix(u
->id
, type
, &t
);
2575 if (unit_has_name(u
, t
))
2578 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
2579 assert(r
< 0 || *_found
!= u
);
2583 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
2584 const char *name
, *old_owner
, *new_owner
;
2591 r
= sd_bus_message_read(message
, "sss", &name
, &old_owner
, &new_owner
);
2593 bus_log_parse_error(r
);
2597 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
2598 UNIT_VTABLE(u
)->bus_name_owner_change(u
, name
, old_owner
, new_owner
);
2603 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
2610 if (u
->match_bus_slot
)
2613 match
= strjoina("type='signal',"
2614 "sender='org.freedesktop.DBus',"
2615 "path='/org/freedesktop/DBus',"
2616 "interface='org.freedesktop.DBus',"
2617 "member='NameOwnerChanged',"
2618 "arg0='", name
, "'");
2620 return sd_bus_add_match(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, u
);
2623 int unit_watch_bus_name(Unit
*u
, const char *name
) {
2629 /* Watch a specific name on the bus. We only support one unit
2630 * watching each name for now. */
2632 if (u
->manager
->api_bus
) {
2633 /* If the bus is already available, install the match directly.
2634 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
2635 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
2637 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
2640 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
2642 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2643 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
2649 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
2653 hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
2654 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
2657 bool unit_can_serialize(Unit
*u
) {
2660 return UNIT_VTABLE(u
)->serialize
&& UNIT_VTABLE(u
)->deserialize_item
;
2663 int unit_serialize(Unit
*u
, FILE *f
, FDSet
*fds
, bool serialize_jobs
) {
2670 if (unit_can_serialize(u
)) {
2673 r
= UNIT_VTABLE(u
)->serialize(u
, f
, fds
);
2677 rt
= unit_get_exec_runtime(u
);
2679 r
= exec_runtime_serialize(u
, rt
, f
, fds
);
2685 dual_timestamp_serialize(f
, "state-change-timestamp", &u
->state_change_timestamp
);
2687 dual_timestamp_serialize(f
, "inactive-exit-timestamp", &u
->inactive_exit_timestamp
);
2688 dual_timestamp_serialize(f
, "active-enter-timestamp", &u
->active_enter_timestamp
);
2689 dual_timestamp_serialize(f
, "active-exit-timestamp", &u
->active_exit_timestamp
);
2690 dual_timestamp_serialize(f
, "inactive-enter-timestamp", &u
->inactive_enter_timestamp
);
2692 dual_timestamp_serialize(f
, "condition-timestamp", &u
->condition_timestamp
);
2693 dual_timestamp_serialize(f
, "assert-timestamp", &u
->assert_timestamp
);
2695 if (dual_timestamp_is_set(&u
->condition_timestamp
))
2696 unit_serialize_item(u
, f
, "condition-result", yes_no(u
->condition_result
));
2698 if (dual_timestamp_is_set(&u
->assert_timestamp
))
2699 unit_serialize_item(u
, f
, "assert-result", yes_no(u
->assert_result
));
2701 unit_serialize_item(u
, f
, "transient", yes_no(u
->transient
));
2703 unit_serialize_item_format(u
, f
, "cpu-usage-base", "%" PRIu64
, u
->cpu_usage_base
);
2704 if (u
->cpu_usage_last
!= NSEC_INFINITY
)
2705 unit_serialize_item_format(u
, f
, "cpu-usage-last", "%" PRIu64
, u
->cpu_usage_last
);
2708 unit_serialize_item(u
, f
, "cgroup", u
->cgroup_path
);
2709 unit_serialize_item(u
, f
, "cgroup-realized", yes_no(u
->cgroup_realized
));
2711 if (uid_is_valid(u
->ref_uid
))
2712 unit_serialize_item_format(u
, f
, "ref-uid", UID_FMT
, u
->ref_uid
);
2713 if (gid_is_valid(u
->ref_gid
))
2714 unit_serialize_item_format(u
, f
, "ref-gid", GID_FMT
, u
->ref_gid
);
2716 if (!sd_id128_is_null(u
->invocation_id
))
2717 unit_serialize_item_format(u
, f
, "invocation-id", SD_ID128_FORMAT_STR
, SD_ID128_FORMAT_VAL(u
->invocation_id
));
2719 bus_track_serialize(u
->bus_track
, f
, "ref");
2721 if (serialize_jobs
) {
2723 fprintf(f
, "job\n");
2724 job_serialize(u
->job
, f
);
2728 fprintf(f
, "job\n");
2729 job_serialize(u
->nop_job
, f
);
2738 int unit_serialize_item(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
/* Like unit_serialize_item(), but escapes the value first. The local buffer
 * c is auto-freed via the _cleanup_free_ attribute.
 * NOTE(review): most of the body was elided by extraction — comments only
 * describe what is visible. */
2754 int unit_serialize_item_escaped(Unit
*u
, FILE *f
, const char *key
, const char *value
) {
/* Holds the escaped copy of the value; freed automatically on scope exit. */
2755 _cleanup_free_
char *c
= NULL
;
2776 int unit_serialize_item_fd(Unit
*u
, FILE *f
, FDSet
*fds
, const char *key
, int fd
) {
2786 copy
= fdset_put_dup(fds
, fd
);
2790 fprintf(f
, "%s=%i\n", key
, copy
);
/* printf-style variant of unit_serialize_item(): formats the value directly
 * into stream f via vfprintf().
 * NOTE(review): interior lines (key prefix output, va_end, etc.) were elided
 * by extraction; only the varargs setup and the vfprintf call are visible. */
2794 void unit_serialize_item_format(Unit
*u
, FILE *f
, const char *key
, const char *format
, ...) {
2805 va_start(ap
, format
);
2806 vfprintf(f
, format
, ap
);
2812 int unit_deserialize(Unit
*u
, FILE *f
, FDSet
*fds
) {
2813 ExecRuntime
**rt
= NULL
;
2821 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
2823 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
2826 char line
[LINE_MAX
], *l
, *v
;
2829 if (!fgets(line
, sizeof(line
), f
)) {
2842 k
= strcspn(l
, "=");
2850 if (streq(l
, "job")) {
2852 /* new-style serialized job */
2859 r
= job_deserialize(j
, f
);
2865 r
= hashmap_put(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
), j
);
2871 r
= job_install_deserialized(j
);
2873 hashmap_remove(u
->manager
->jobs
, UINT32_TO_PTR(j
->id
));
2877 } else /* legacy for pre-44 */
2878 log_unit_warning(u
, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v
);
2880 } else if (streq(l
, "state-change-timestamp")) {
2881 dual_timestamp_deserialize(v
, &u
->state_change_timestamp
);
2883 } else if (streq(l
, "inactive-exit-timestamp")) {
2884 dual_timestamp_deserialize(v
, &u
->inactive_exit_timestamp
);
2886 } else if (streq(l
, "active-enter-timestamp")) {
2887 dual_timestamp_deserialize(v
, &u
->active_enter_timestamp
);
2889 } else if (streq(l
, "active-exit-timestamp")) {
2890 dual_timestamp_deserialize(v
, &u
->active_exit_timestamp
);
2892 } else if (streq(l
, "inactive-enter-timestamp")) {
2893 dual_timestamp_deserialize(v
, &u
->inactive_enter_timestamp
);
2895 } else if (streq(l
, "condition-timestamp")) {
2896 dual_timestamp_deserialize(v
, &u
->condition_timestamp
);
2898 } else if (streq(l
, "assert-timestamp")) {
2899 dual_timestamp_deserialize(v
, &u
->assert_timestamp
);
2901 } else if (streq(l
, "condition-result")) {
2903 r
= parse_boolean(v
);
2905 log_unit_debug(u
, "Failed to parse condition result value %s, ignoring.", v
);
2907 u
->condition_result
= r
;
2911 } else if (streq(l
, "assert-result")) {
2913 r
= parse_boolean(v
);
2915 log_unit_debug(u
, "Failed to parse assert result value %s, ignoring.", v
);
2917 u
->assert_result
= r
;
2921 } else if (streq(l
, "transient")) {
2923 r
= parse_boolean(v
);
2925 log_unit_debug(u
, "Failed to parse transient bool %s, ignoring.", v
);
2931 } else if (STR_IN_SET(l
, "cpu-usage-base", "cpuacct-usage-base")) {
2933 r
= safe_atou64(v
, &u
->cpu_usage_base
);
2935 log_unit_debug(u
, "Failed to parse CPU usage base %s, ignoring.", v
);
2939 } else if (streq(l
, "cpu-usage-last")) {
2941 r
= safe_atou64(v
, &u
->cpu_usage_last
);
2943 log_unit_debug(u
, "Failed to read CPU usage last %s, ignoring.", v
);
2947 } else if (streq(l
, "cgroup")) {
2949 r
= unit_set_cgroup_path(u
, v
);
2951 log_unit_debug_errno(u
, r
, "Failed to set cgroup path %s, ignoring: %m", v
);
2953 (void) unit_watch_cgroup(u
);
2956 } else if (streq(l
, "cgroup-realized")) {
2959 b
= parse_boolean(v
);
2961 log_unit_debug(u
, "Failed to parse cgroup-realized bool %s, ignoring.", v
);
2963 u
->cgroup_realized
= b
;
2967 } else if (streq(l
, "ref-uid")) {
2970 r
= parse_uid(v
, &uid
);
2972 log_unit_debug(u
, "Failed to parse referenced UID %s, ignoring.", v
);
2974 unit_ref_uid_gid(u
, uid
, GID_INVALID
);
2978 } else if (streq(l
, "ref-gid")) {
2981 r
= parse_gid(v
, &gid
);
2983 log_unit_debug(u
, "Failed to parse referenced GID %s, ignoring.", v
);
2985 unit_ref_uid_gid(u
, UID_INVALID
, gid
);
2987 } else if (streq(l
, "ref")) {
2989 r
= strv_extend(&u
->deserialized_refs
, v
);
2994 } else if (streq(l
, "invocation-id")) {
2997 r
= sd_id128_from_string(v
, &id
);
2999 log_unit_debug(u
, "Failed to parse invocation id %s, ignoring.", v
);
3001 r
= unit_set_invocation_id(u
, id
);
3003 log_unit_warning_errno(u
, r
, "Failed to set invocation ID for unit: %m");
3009 if (unit_can_serialize(u
)) {
3011 r
= exec_runtime_deserialize_item(u
, rt
, l
, v
, fds
);
3013 log_unit_warning(u
, "Failed to deserialize runtime parameter '%s', ignoring.", l
);
3017 /* Returns positive if key was handled by the call */
3022 r
= UNIT_VTABLE(u
)->deserialize_item(u
, l
, v
, fds
);
3024 log_unit_warning(u
, "Failed to deserialize unit parameter '%s', ignoring.", l
);
3028 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3029 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3030 * before 228 where the base for timeouts was not persistent across reboots. */
3032 if (!dual_timestamp_is_set(&u
->state_change_timestamp
))
3033 dual_timestamp_get(&u
->state_change_timestamp
);
3038 int unit_add_node_link(Unit
*u
, const char *what
, bool wants
, UnitDependency dep
) {
3040 _cleanup_free_
char *e
= NULL
;
3045 /* Adds in links to the device node that this unit is based on */
3049 if (!is_device_path(what
))
3052 /* When device units aren't supported (such as in a
3053 * container), don't create dependencies on them. */
3054 if (!unit_type_supported(UNIT_DEVICE
))
3057 r
= unit_name_from_path(what
, ".device", &e
);
3061 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3065 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3066 dep
= UNIT_BINDS_TO
;
3068 r
= unit_add_two_dependencies(u
, UNIT_AFTER
,
3069 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3075 r
= unit_add_dependency(device
, UNIT_WANTS
, u
, false);
3083 int unit_coldplug(Unit
*u
) {
3089 /* Make sure we don't enter a loop, when coldplugging
3094 u
->coldplugged
= true;
3096 STRV_FOREACH(i
, u
->deserialized_refs
) {
3097 q
= bus_unit_track_add_name(u
, *i
);
3098 if (q
< 0 && r
>= 0)
3101 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3103 if (UNIT_VTABLE(u
)->coldplug
) {
3104 q
= UNIT_VTABLE(u
)->coldplug(u
);
3105 if (q
< 0 && r
>= 0)
3110 q
= job_coldplug(u
->job
);
3111 if (q
< 0 && r
>= 0)
/* Returns true if the on-disk file at @path is newer than the recorded
 * @mtime, i.e. a daemon-reload is needed to pick up its changes. For files
 * tracked as masked (@path_masked), the check is whether the masking state
 * changed rather than the timestamp.
 * NOTE(review): interior lines (stat buffer declaration, early returns,
 * the masked/non-masked branch conditions) were elided by extraction —
 * the annotations below cover only what is visible. */
3118 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3124 if (stat(path
, &st
) < 0)
3125 /* What, cannot access this anymore? */
3129 /* For masked files check if they are still so */
3130 return !null_or_empty(&st
);
3132 /* For non-empty files check the mtime */
3133 return timespec_load(&st
.st_mtim
) > mtime
;
3138 bool unit_need_daemon_reload(Unit
*u
) {
3139 _cleanup_strv_free_
char **t
= NULL
;
3144 /* For unit files, we allow masking… */
3145 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3146 u
->load_state
== UNIT_MASKED
))
3149 /* Source paths should not be masked… */
3150 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3153 (void) unit_find_dropin_paths(u
, &t
);
3154 if (!strv_equal(u
->dropin_paths
, t
))
3157 /* … any drop-ins that are masked are simply omitted from the list. */
3158 STRV_FOREACH(path
, u
->dropin_paths
)
3159 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
/* Clear the unit's failure state: delegate type-specific reset to the
 * vtable (if implemented), then reset the start rate limiter and the
 * start-limit-hit flag so the unit can be started again. */
3165 void unit_reset_failed(Unit
*u
) {
/* Optional per-type hook — not every unit type implements reset_failed. */
3168 if (UNIT_VTABLE(u
)->reset_failed
)
3169 UNIT_VTABLE(u
)->reset_failed(u
);
/* Forget accumulated start attempts so the rate limit no longer blocks. */
3171 RATELIMIT_RESET(u
->start_limit
);
3172 u
->start_limit_hit
= false;
/* Ask the unit type which other unit this one "follows" (e.g. a device
 * unit following its sysfs twin). Delegates to the vtable hook when the
 * type implements it.
 * NOTE(review): the fallback return for types without a hook was elided
 * by extraction — presumably NULL; confirm against the full source. */
3175 Unit
*unit_following(Unit
*u
) {
3178 if (UNIT_VTABLE(u
)->following
)
3179 return UNIT_VTABLE(u
)->following(u
);
/* Returns true if a stop job is currently queued for this unit.
 * Note this deliberately looks only at the queued job, not the unit's
 * current state — see the original comment below. */
3184 bool unit_stop_pending(Unit
*u
) {
3187 /* This call does check the current state of the unit. It's
3188 * hence useful to be called from state change calls of the
3189 * unit itself, where the state isn't updated yet. This is
3190 * different from unit_inactive_or_pending() which checks both
3191 * the current state and for a queued job. */
/* True iff a job is installed and it is a stop job. */
3193 return u
->job
&& u
->job
->type
== JOB_STOP
;
/* Returns true if the unit is inactive/deactivating, or has a stop job
 * queued — i.e. it is down or on its way down.
 * NOTE(review): the "return true"/"return false" lines of each branch
 * were elided by extraction; only the conditions are visible. */
3196 bool unit_inactive_or_pending(Unit
*u
) {
3199 /* Returns true if the unit is inactive or going down */
3201 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3204 if (unit_stop_pending(u
))
/* Returns true if the unit is active/activating, or has a job queued that
 * will bring it up (start, reload-or-start, restart).
 * NOTE(review): the surrounding "if (u->job && ...)" scaffolding and the
 * return statements were elided by extraction — only the state check and
 * the job-type disjunction are visible. */
3210 bool unit_active_or_pending(Unit
*u
) {
3213 /* Returns true if the unit is active or going up */
3215 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
/* Queued job types that imply the unit is coming up. */
3219 (u
->job
->type
== JOB_START
||
3220 u
->job
->type
== JOB_RELOAD_OR_START
||
3221 u
->job
->type
== JOB_RESTART
))
/* Send signal @signo to the process group selected by @w (main process,
 * control process, all, …) by delegating to the unit type's kill hook.
 * On D-Bus originated calls, @error carries failure details back to the
 * client. NOTE(review): the error-return branch for types without a kill
 * hook was elided by extraction. */
3227 int unit_kill(Unit
*u
, KillWho w
, int signo
, sd_bus_error
*error
) {
/* Validate caller-supplied selector and signal number up front. */
3229 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3230 assert(SIGNAL_VALID(signo
));
/* Not all unit types support killing (e.g. targets). */
3232 if (!UNIT_VTABLE(u
)->kill
)
3235 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, error
);
/* Build a set containing the main and control PIDs (those > 0) of a unit,
 * used as an exclusion list when killing a whole cgroup so these processes
 * are not signalled twice. Returns NULL on allocation failure.
 * NOTE(review): the allocation checks, error-unwind and the return
 * statement were elided by extraction. */
3238 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
/* Plain pointer-keyed set — PIDs are stored via PID_TO_PTR. */
3242 pid_set
= set_new(NULL
);
3246 /* Exclude the main/control pids from being killed via the cgroup */
3248 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3253 if (control_pid
> 0) {
3254 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3266 int unit_kill_common(
3272 sd_bus_error
*error
) {
3275 bool killed
= false;
3277 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3279 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3280 else if (main_pid
== 0)
3281 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3284 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3285 if (control_pid
< 0)
3286 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3287 else if (control_pid
== 0)
3288 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3291 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3292 if (control_pid
> 0) {
3293 if (kill(control_pid
, signo
) < 0)
3299 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
))
3301 if (kill(main_pid
, signo
) < 0)
3307 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
) {
3308 _cleanup_set_free_ Set
*pid_set
= NULL
;
3311 /* Exclude the main/control pids from being killed via the cgroup */
3312 pid_set
= unit_pid_set(main_pid
, control_pid
);
3316 q
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, NULL
, NULL
);
3317 if (q
< 0 && q
!= -EAGAIN
&& q
!= -ESRCH
&& q
!= -ENOENT
)
3323 if (r
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
))
/* Collect into *s the set of units that follow this one, by delegating
 * to the unit type's following_set hook when implemented.
 * NOTE(review): the fallback path for types without a hook was elided by
 * extraction — presumably sets *s to NULL and returns 0; confirm. */
3329 int unit_following_set(Unit
*u
, Set
**s
) {
3333 if (UNIT_VTABLE(u
)->following_set
)
3334 return UNIT_VTABLE(u
)->following_set(u
, s
);
3340 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
3345 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
3346 r
= unit_file_get_state(
3347 u
->manager
->unit_file_scope
,
3349 basename(u
->fragment_path
),
3350 &u
->unit_file_state
);
3352 u
->unit_file_state
= UNIT_FILE_BAD
;
3355 return u
->unit_file_state
;
3358 int unit_get_unit_file_preset(Unit
*u
) {
3361 if (u
->unit_file_preset
< 0 && u
->fragment_path
)
3362 u
->unit_file_preset
= unit_file_query_preset(
3363 u
->manager
->unit_file_scope
,
3365 basename(u
->fragment_path
));
3367 return u
->unit_file_preset
;
3370 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*u
) {
3375 unit_ref_unset(ref
);
3378 LIST_PREPEND(refs
, u
->refs
, ref
);
3382 void unit_ref_unset(UnitRef
*ref
) {
3388 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
3389 * be unreferenced now. */
3390 unit_add_to_gc_queue(ref
->unit
);
3392 LIST_REMOVE(refs
, ref
->unit
->refs
, ref
);
3396 static int user_from_unit_name(Unit
*u
, char **ret
) {
3398 static const uint8_t hash_key
[] = {
3399 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3400 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3403 _cleanup_free_
char *n
= NULL
;
3406 r
= unit_name_to_prefix(u
->id
, &n
);
3410 if (valid_user_group_name(n
)) {
3416 /* If we can't use the unit name as a user name, then let's hash it and use that */
3417 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
3423 int unit_patch_contexts(Unit
*u
) {
3431 /* Patch in the manager defaults into the exec and cgroup
3432 * contexts, _after_ the rest of the settings have been
3435 ec
= unit_get_exec_context(u
);
3437 /* This only copies in the ones that need memory */
3438 for (i
= 0; i
< _RLIMIT_MAX
; i
++)
3439 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
3440 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
3445 if (MANAGER_IS_USER(u
->manager
) &&
3446 !ec
->working_directory
) {
3448 r
= get_home_dir(&ec
->working_directory
);
3452 /* Allow user services to run, even if the
3453 * home directory is missing */
3454 ec
->working_directory_missing_ok
= true;
3457 if (ec
->private_devices
)
3458 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
3460 if (ec
->protect_kernel_modules
)
3461 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
3463 if (ec
->dynamic_user
) {
3465 r
= user_from_unit_name(u
, &ec
->user
);
3471 ec
->group
= strdup(ec
->user
);
3476 /* If the dynamic user option is on, let's make sure that the unit can't leave its UID/GID
3477 * around in the file system or on IPC objects. Hence enforce a strict sandbox. */
3479 ec
->private_tmp
= true;
3480 ec
->remove_ipc
= true;
3481 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
3482 if (ec
->protect_home
== PROTECT_HOME_NO
)
3483 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
3487 cc
= unit_get_cgroup_context(u
);
3491 ec
->private_devices
&&
3492 cc
->device_policy
== CGROUP_AUTO
)
3493 cc
->device_policy
= CGROUP_CLOSED
;
/* Return a pointer to the unit's embedded ExecContext, located via the
 * per-type byte offset recorded in the vtable. Types without an exec
 * context record an offset handled by elided guard code.
 * NOTE(review): the offset==0 early return was elided by extraction. */
3499 ExecContext
*unit_get_exec_context(Unit
*u
) {
3506 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
/* The context lives inside the type-specific struct that embeds Unit,
 * hence the byte-offset arithmetic from the Unit base pointer. */
3510 return (ExecContext
*) ((uint8_t*) u
+ offset
);
/* Return a pointer to the unit's embedded KillContext, located via the
 * per-type byte offset recorded in the vtable (same pattern as
 * unit_get_exec_context()).
 * NOTE(review): the offset==0 early return was elided by extraction. */
3513 KillContext
*unit_get_kill_context(Unit
*u
) {
3520 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
3524 return (KillContext
*) ((uint8_t*) u
+ offset
);
/* Return a pointer to the unit's embedded CGroupContext, located via the
 * per-type byte offset recorded in the vtable (same pattern as
 * unit_get_exec_context()).
 * NOTE(review): the offset==0 early return was elided by extraction. */
3527 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
3533 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
3537 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
/* Return the unit's ExecRuntime pointer. Unlike the context getters, the
 * field at the vtable offset is itself a pointer (ExecRuntime **), hence
 * the extra dereference — the runtime may be shared between units.
 * NOTE(review): the offset==0 early return was elided by extraction. */
3540 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
3546 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3550 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
/* Pick the directory where a drop-in for this unit should be written,
 * depending on whether the change is runtime-only or persistent, with
 * transient units always redirected to the transient directory.
 * NOTE(review): the early NULL returns for invalid modes were elided by
 * extraction; only the conditions are visible. */
3553 static const char* unit_drop_in_dir(Unit
*u
, UnitSetPropertiesMode mode
) {
/* Only runtime and persistent property changes produce drop-ins. */
3556 if (!IN_SET(mode
, UNIT_RUNTIME
, UNIT_PERSISTENT
))
3559 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
3560 return u
->manager
->lookup_paths
.transient
;
/* Runtime changes go to the volatile control directory (lost on reboot). */
3562 if (mode
== UNIT_RUNTIME
)
3563 return u
->manager
->lookup_paths
.runtime_control
;
/* Persistent changes go to the on-disk control directory. */
3565 if (mode
== UNIT_PERSISTENT
)
3566 return u
->manager
->lookup_paths
.persistent_control
;
3571 int unit_write_drop_in(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3572 _cleanup_free_
char *p
= NULL
, *q
= NULL
;
3573 const char *dir
, *wrapped
;
3578 if (u
->transient_file
) {
3579 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
3580 * write to the transient unit file. */
3581 fputs(data
, u
->transient_file
);
3582 fputc('\n', u
->transient_file
);
3586 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3589 dir
= unit_drop_in_dir(u
, mode
);
3593 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
3594 "# or an equivalent operation. Do not edit.\n",
3598 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
3602 (void) mkdir_p(p
, 0755);
3603 r
= write_string_file_atomic_label(q
, wrapped
);
3607 r
= strv_push(&u
->dropin_paths
, q
);
3612 strv_uniq(u
->dropin_paths
);
3614 u
->dropin_mtime
= now(CLOCK_REALTIME
);
3619 int unit_write_drop_in_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3620 _cleanup_free_
char *p
= NULL
;
3628 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3631 va_start(ap
, format
);
3632 r
= vasprintf(&p
, format
, ap
);
3638 return unit_write_drop_in(u
, mode
, name
, p
);
3641 int unit_write_drop_in_private(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *data
) {
3648 if (!UNIT_VTABLE(u
)->private_section
)
3651 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3654 ndata
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
3656 return unit_write_drop_in(u
, mode
, name
, ndata
);
3659 int unit_write_drop_in_private_format(Unit
*u
, UnitSetPropertiesMode mode
, const char *name
, const char *format
, ...) {
3660 _cleanup_free_
char *p
= NULL
;
3668 if (!IN_SET(mode
, UNIT_PERSISTENT
, UNIT_RUNTIME
))
3671 va_start(ap
, format
);
3672 r
= vasprintf(&p
, format
, ap
);
3678 return unit_write_drop_in_private(u
, mode
, name
, p
);
3681 int unit_make_transient(Unit
*u
) {
3687 if (!UNIT_VTABLE(u
)->can_transient
)
3690 path
= strjoin(u
->manager
->lookup_paths
.transient
, "/", u
->id
);
3694 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
3695 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
3697 RUN_WITH_UMASK(0022) {
3698 f
= fopen(path
, "we");
3705 if (u
->transient_file
)
3706 fclose(u
->transient_file
);
3707 u
->transient_file
= f
;
3709 free(u
->fragment_path
);
3710 u
->fragment_path
= path
;
3712 u
->source_path
= mfree(u
->source_path
);
3713 u
->dropin_paths
= strv_free(u
->dropin_paths
);
3714 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
3716 u
->load_state
= UNIT_STUB
;
3718 u
->transient
= true;
3720 unit_add_to_dbus_queue(u
);
3721 unit_add_to_gc_queue(u
);
3723 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
/* cg_kill callback: log a notice naming each process we are about to
 * signal. @userdata is the Unit (passed through by the kill machinery).
 * Processes whose comm name starts with '(' are skipped — see the
 * original comment below. */
3729 static void log_kill(pid_t pid
, int sig
, void *userdata
) {
3730 _cleanup_free_
char *comm
= NULL
;
/* Best effort: comm stays NULL if the process vanished already. */
3732 (void) get_process_comm(pid
, &comm
);
3734 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
3735 only, like for example systemd's own PAM stub process. */
3736 if (comm
&& comm
[0] == '(')
3739 log_unit_notice(userdata
,
3740 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
3743 signal_to_string(sig
));
/* Map a KillOperation to the concrete signal number configured in the
 * KillContext. Terminate operations use the configured kill signal.
 * NOTE(review): the switch header and the remaining cases (restart/kill/
 * watchdog operations) were elided by extraction; the assert_not_reached
 * guards against unhandled enum values. */
3746 static int operation_to_signal(KillContext
*c
, KillOperation k
) {
3751 case KILL_TERMINATE
:
3752 case KILL_TERMINATE_AND_LOG
:
3753 return c
->kill_signal
;
3762 assert_not_reached("KillOperation unknown");
3766 int unit_kill_context(
3772 bool main_pid_alien
) {
3774 bool wait_for_exit
= false, send_sighup
;
3775 cg_kill_log_func_t log_func
= NULL
;
3781 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
3782 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
3784 if (c
->kill_mode
== KILL_NONE
)
3787 sig
= operation_to_signal(c
, k
);
3791 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
3794 if (k
!= KILL_TERMINATE
|| IN_SET(sig
, SIGKILL
, SIGABRT
))
3795 log_func
= log_kill
;
3799 log_func(main_pid
, sig
, u
);
3801 r
= kill_and_sigcont(main_pid
, sig
);
3802 if (r
< 0 && r
!= -ESRCH
) {
3803 _cleanup_free_
char *comm
= NULL
;
3804 (void) get_process_comm(main_pid
, &comm
);
3806 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
3808 if (!main_pid_alien
)
3809 wait_for_exit
= true;
3811 if (r
!= -ESRCH
&& send_sighup
)
3812 (void) kill(main_pid
, SIGHUP
);
3816 if (control_pid
> 0) {
3818 log_func(control_pid
, sig
, u
);
3820 r
= kill_and_sigcont(control_pid
, sig
);
3821 if (r
< 0 && r
!= -ESRCH
) {
3822 _cleanup_free_
char *comm
= NULL
;
3823 (void) get_process_comm(control_pid
, &comm
);
3825 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
3827 wait_for_exit
= true;
3829 if (r
!= -ESRCH
&& send_sighup
)
3830 (void) kill(control_pid
, SIGHUP
);
3834 if (u
->cgroup_path
&&
3835 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
3836 _cleanup_set_free_ Set
*pid_set
= NULL
;
3838 /* Exclude the main/control pids from being killed via the cgroup */
3839 pid_set
= unit_pid_set(main_pid
, control_pid
);
3843 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3845 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
3849 if (r
!= -EAGAIN
&& r
!= -ESRCH
&& r
!= -ENOENT
)
3850 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", u
->cgroup_path
);
3854 /* FIXME: For now, on the legacy hierarchy, we
3855 * will not wait for the cgroup members to die
3856 * if we are running in a container or if this
3857 * is a delegation unit, simply because cgroup
3858 * notification is unreliable in these
3859 * cases. It doesn't work at all in
3860 * containers, and outside of containers it
3861 * can be confused easily by left-over
3862 * directories in the cgroup — which however
3863 * should not exist in non-delegated units. On
3864 * the unified hierarchy that's different,
3865 * there we get proper events. Hence rely on
3868 if (cg_unified(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
3869 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
3870 wait_for_exit
= true;
3875 pid_set
= unit_pid_set(main_pid
, control_pid
);
3879 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
3888 return wait_for_exit
;
3891 int unit_require_mounts_for(Unit
*u
, const char *path
) {
3892 char prefix
[strlen(path
) + 1], *p
;
3898 /* Registers a unit for requiring a certain path and all its
3899 * prefixes. We keep a simple array of these paths in the
3900 * unit, since its usually short. However, we build a prefix
3901 * table for all possible prefixes so that new appearing mount
3902 * units can easily determine which units to make themselves a
3905 if (!path_is_absolute(path
))
3912 path_kill_slashes(p
);
3914 if (!path_is_safe(p
)) {
3919 if (strv_contains(u
->requires_mounts_for
, p
)) {
3924 r
= strv_consume(&u
->requires_mounts_for
, p
);
3928 PATH_FOREACH_PREFIX_MORE(prefix
, p
) {
3931 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
3935 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &string_hash_ops
);
3949 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
3965 int unit_setup_exec_runtime(Unit
*u
) {
3971 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
3974 /* Check if there already is an ExecRuntime for this unit? */
3975 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
3979 /* Try to get it from somebody else */
3980 SET_FOREACH(other
, u
->dependencies
[UNIT_JOINS_NAMESPACE_OF
], i
) {
3982 *rt
= unit_get_exec_runtime(other
);
3984 exec_runtime_ref(*rt
);
3989 return exec_runtime_make(rt
, unit_get_exec_context(u
), u
->id
);
/* Acquire dynamic user/group credentials for the unit if its exec context
 * enables DynamicUser=. The DynamicCreds storage is embedded in the
 * type-specific struct and located via the vtable byte offset.
 * NOTE(review): the offset/context validity guards and the early return
 * for the non-dynamic case were elided by extraction. */
3992 int unit_setup_dynamic_creds(Unit
*u
) {
3994 DynamicCreds
*dcreds
;
3999 offset
= UNIT_VTABLE(u
)->dynamic_creds_offset
;
/* Same embedded-field offset pattern as unit_get_exec_context(). */
4001 dcreds
= (DynamicCreds
*) ((uint8_t*) u
+ offset
);
4003 ec
= unit_get_exec_context(u
);
/* Nothing to do unless DynamicUser= is enabled for this unit. */
4006 if (!ec
->dynamic_user
)
4009 return dynamic_creds_acquire(dcreds
, u
->manager
, ec
->user
, ec
->group
);
/* Returns whether unit type @t is usable in the current environment
 * (e.g. device units are unsupported in containers). Types without a
 * supported() hook are handled by an elided branch.
 * NOTE(review): the return statements of the range checks and the no-hook
 * branch were elided by extraction; presumably false/true respectively. */
4012 bool unit_type_supported(UnitType t
) {
/* Defensive range checks on the enum value. */
4013 if (_unlikely_(t
< 0))
4015 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4018 if (!unit_vtable
[t
]->supported
)
4021 return unit_vtable
[t
]->supported();
/* Log a structured notice (with the OVERMOUNTING message ID) when the
 * directory @where that is about to be mounted over is not empty. Purely
 * informational — mounting proceeds regardless.
 * NOTE(review): the early returns for the empty / check-failed cases and
 * the trailing log_struct arguments were elided by extraction. */
4024 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
/* r < 0: could not check; r > 0: empty; r == 0: non-empty. */
4030 r
= dir_is_empty(where
);
4034 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
/* Structured log entry so journal consumers can match on the message ID. */
4038 log_struct(LOG_NOTICE
,
4039 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING
),
4041 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
/* Refuse to mount on top of a symlink: checks whether @where is a symlink
 * and, if so, emits a structured error (OVERMOUNTING message ID) and
 * fails. A failed check is logged at debug level and tolerated.
 * NOTE(review): the return statements and the log_struct header were
 * elided by extraction; only the checks and messages are visible. */
4046 int unit_fail_if_symlink(Unit
*u
, const char* where
) {
4052 r
= is_symlink(where
);
/* Best effort — if we cannot even check, proceed rather than fail. */
4054 log_unit_debug_errno(u
, r
, "Failed to check symlink %s, ignoring: %m", where
);
4061 LOG_MESSAGE_ID(SD_MESSAGE_OVERMOUNTING
),
4063 LOG_UNIT_MESSAGE(u
, "Mount on symlink %s not allowed.", where
),
/* Returns true if the unit carries no configuration or runtime state of
 * its own — used e.g. to decide whether a transient unit may reuse the
 * name. See the original comment for why UNIT_LOADED is acceptable.
 * NOTE(review): several disjuncts of the return expression (fragment
 * path, job, merged units, …) were elided by extraction. */
4070 bool unit_is_pristine(Unit
*u
) {
4073 /* Check if the unit already exists or is already around,
4074 * in a number of different ways. Note that to cater for unit
4075 * types such as slice, we are generally fine with units that
4076 * are marked UNIT_LOADED even though nothing was
4077 * actually loaded, as those unit types don't require a file
4078 * on disk to validly load. */
/* Pristine = load state is not-found/loaded AND no drop-ins (AND further
 * elided conditions). */
4080 return !(!IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) ||
4083 !strv_isempty(u
->dropin_paths
) ||
/* Return the unit's current control process PID via the type's vtable
 * hook, when implemented.
 * NOTE(review): the fallback return for types without a hook was elided
 * by extraction — presumably 0; confirm against the full source. */
4088 pid_t
unit_control_pid(Unit
*u
) {
4091 if (UNIT_VTABLE(u
)->control_pid
)
4092 return UNIT_VTABLE(u
)->control_pid(u
);
/* Return the unit's current main process PID via the type's vtable hook,
 * when implemented.
 * NOTE(review): the fallback return for types without a hook was elided
 * by extraction — presumably 0; confirm against the full source. */
4097 pid_t
unit_main_pid(Unit
*u
) {
4100 if (UNIT_VTABLE(u
)->main_pid
)
4101 return UNIT_VTABLE(u
)->main_pid(u
);
4106 static void unit_unref_uid_internal(
4110 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4114 assert(_manager_unref_uid
);
4116 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4117 * gid_t are actually the same time, with the same validity rules.
4119 * Drops a reference to UID/GID from a unit. */
4121 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4122 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4124 if (!uid_is_valid(*ref_uid
))
4127 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4128 *ref_uid
= UID_INVALID
;
/* Drop the unit's reference on its tracked UID (u->ref_uid), optionally
 * destroying the UID's IPC objects now. Thin wrapper over the shared
 * unit_unref_uid_internal() with the manager's UID-unref callback. */
4131 void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4132 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
/* Drop the unit's reference on its tracked GID (u->ref_gid). Reuses the
 * UID implementation — the file asserts elsewhere that uid_t and gid_t
 * have identical size and validity rules, which makes the (uid_t*) cast
 * of &u->ref_gid safe. */
4135 void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4136 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4139 static int unit_ref_uid_internal(
4144 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
4150 assert(uid_is_valid(uid
));
4151 assert(_manager_ref_uid
);
4153 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
4154 * are actually the same type, and have the same validity rules.
4156 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
4157 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
4160 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4161 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4163 if (*ref_uid
== uid
)
4166 if (uid_is_valid(*ref_uid
)) /* Already set? */
4169 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
/* Take a reference on @uid for this unit, recording it in u->ref_uid.
 * Thin wrapper over the shared unit_ref_uid_internal() with the manager's
 * UID-ref callback; @clean_ipc requests IPC cleanup on final unref. */
4177 int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
4178 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
/* Take a reference on @gid for this unit, recording it in u->ref_gid.
 * Reuses the UID implementation — uid_t and gid_t are asserted elsewhere
 * to share size and validity rules, making the casts safe. */
4181 int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
4182 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
/* Take references on a UID and a GID atomically from the unit's point of
 * view: if the GID ref fails after the UID ref succeeded, the UID ref is
 * rolled back (see the unit_unref_uid call) so either both are held or
 * neither. Invalid IDs are simply skipped. Returns > 0 if anything new
 * was referenced.
 * NOTE(review): the error-check lines between the visible calls were
 * elided by extraction. */
4185 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
4190 /* Reference both a UID and a GID in one go. Either references both, or neither. */
4192 if (uid_is_valid(uid
)) {
4193 r
= unit_ref_uid(u
, uid
, clean_ipc
);
4198 if (gid_is_valid(gid
)) {
4199 q
= unit_ref_gid(u
, gid
, clean_ipc
);
/* GID ref failed — roll back the UID ref (without destroying IPC now). */
4202 unit_unref_uid(u
, false);
/* Positive iff at least one new reference was actually taken. */
4208 return r
> 0 || q
> 0;
/* Public wrapper: reference @uid/@gid for this unit, deriving the
 * clean-IPC policy from the exec context's RemoveIPC= setting (false when
 * the unit has no exec context). Failure is logged as a warning and
 * tolerated — the unit proceeds without the reference. */
4211 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
/* May be NULL for unit types without an exec context. */
4217 c
= unit_get_exec_context(u
);
4219 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
4221 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
/* Drop both the UID and GID references held by this unit, optionally
 * destroying their IPC objects immediately (@destroy_now). */
4226 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
4229 unit_unref_uid(u
, destroy_now
);
4230 unit_unref_gid(u
, destroy_now
);
/* Called when a forked-off child reports the UID/GID its configured user
 * and group names resolved to; records the reference and notifies D-Bus
 * listeners of the changed unit properties.
 * NOTE(review): the error handling between the two visible calls was
 * elided by extraction. */
4233 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
4238 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4239 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4240 * objects when no service references the UID/GID anymore. */
4242 r
= unit_ref_uid_gid(u
, uid
, gid
);
/* Tracked UID/GID are exposed as bus properties — announce the change. */
4244 bus_unit_send_change_signal(u
);
4247 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
4252 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
4254 if (sd_id128_equal(u
->invocation_id
, id
))
4257 if (!sd_id128_is_null(u
->invocation_id
))
4258 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4260 if (sd_id128_is_null(id
)) {
4265 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
4269 u
->invocation_id
= id
;
4270 sd_id128_to_string(id
, u
->invocation_id_string
);
4272 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
4279 u
->invocation_id
= SD_ID128_NULL
;
4280 u
->invocation_id_string
[0] = 0;
4284 int unit_acquire_invocation_id(Unit
*u
) {
4290 r
= sd_id128_randomize(&id
);
4292 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
4294 r
= unit_set_invocation_id(u
, id
);
4296 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");