1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
9 #include "sd-messages.h"
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
28 #include "exec-credential.h"
31 #include "fileio-label.h"
33 #include "format-util.h"
34 #include "id128-util.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
41 #include "logarithm.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
47 #include "serialize.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
63 #include "user-util.h"
69 /* Thresholds for logging at INFO level about resource consumption */
70 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
71 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
72 #define MENTIONWORTHY_IP_BYTES (0ULL)
74 /* Thresholds for logging at NOTICE level about resource consumption */
75 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
76 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
77 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
79 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
80 [UNIT_SERVICE
] = &service_vtable
,
81 [UNIT_SOCKET
] = &socket_vtable
,
82 [UNIT_TARGET
] = &target_vtable
,
83 [UNIT_DEVICE
] = &device_vtable
,
84 [UNIT_MOUNT
] = &mount_vtable
,
85 [UNIT_AUTOMOUNT
] = &automount_vtable
,
86 [UNIT_SWAP
] = &swap_vtable
,
87 [UNIT_TIMER
] = &timer_vtable
,
88 [UNIT_PATH
] = &path_vtable
,
89 [UNIT_SLICE
] = &slice_vtable
,
90 [UNIT_SCOPE
] = &scope_vtable
,
93 Unit
* unit_new(Manager
*m
, size_t size
) {
97 assert(size
>= sizeof(Unit
));
104 u
->type
= _UNIT_TYPE_INVALID
;
105 u
->default_dependencies
= true;
106 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
107 u
->unit_file_preset
= -1;
108 u
->on_failure_job_mode
= JOB_REPLACE
;
109 u
->on_success_job_mode
= JOB_FAIL
;
110 u
->cgroup_control_inotify_wd
= -1;
111 u
->cgroup_memory_inotify_wd
= -1;
112 u
->job_timeout
= USEC_INFINITY
;
113 u
->job_running_timeout
= USEC_INFINITY
;
114 u
->ref_uid
= UID_INVALID
;
115 u
->ref_gid
= GID_INVALID
;
116 u
->cpu_usage_last
= NSEC_INFINITY
;
117 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
118 u
->failure_action_exit_status
= u
->success_action_exit_status
= -1;
120 u
->ip_accounting_ingress_map_fd
= -EBADF
;
121 u
->ip_accounting_egress_map_fd
= -EBADF
;
122 for (CGroupIOAccountingMetric i
= 0; i
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; i
++)
123 u
->io_accounting_last
[i
] = UINT64_MAX
;
125 u
->ipv4_allow_map_fd
= -EBADF
;
126 u
->ipv6_allow_map_fd
= -EBADF
;
127 u
->ipv4_deny_map_fd
= -EBADF
;
128 u
->ipv6_deny_map_fd
= -EBADF
;
130 u
->last_section_private
= -1;
132 u
->start_ratelimit
= (RateLimit
) {
133 m
->defaults
.start_limit_interval
,
134 m
->defaults
.start_limit_burst
137 u
->auto_start_stop_ratelimit
= (const RateLimit
) {
145 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
146 _cleanup_(unit_freep
) Unit
*u
= NULL
;
149 u
= unit_new(m
, size
);
153 r
= unit_add_name(u
, name
);
162 bool unit_has_name(const Unit
*u
, const char *name
) {
166 return streq_ptr(name
, u
->id
) ||
167 set_contains(u
->aliases
, name
);
170 static void unit_init(Unit
*u
) {
177 assert(u
->type
>= 0);
179 cc
= unit_get_cgroup_context(u
);
181 cgroup_context_init(cc
);
183 /* Copy in the manager defaults into the cgroup
184 * context, _before_ the rest of the settings have
185 * been initialized */
187 cc
->cpu_accounting
= u
->manager
->defaults
.cpu_accounting
;
188 cc
->io_accounting
= u
->manager
->defaults
.io_accounting
;
189 cc
->blockio_accounting
= u
->manager
->defaults
.blockio_accounting
;
190 cc
->memory_accounting
= u
->manager
->defaults
.memory_accounting
;
191 cc
->tasks_accounting
= u
->manager
->defaults
.tasks_accounting
;
192 cc
->ip_accounting
= u
->manager
->defaults
.ip_accounting
;
194 if (u
->type
!= UNIT_SLICE
)
195 cc
->tasks_max
= u
->manager
->defaults
.tasks_max
;
197 cc
->memory_pressure_watch
= u
->manager
->defaults
.memory_pressure_watch
;
198 cc
->memory_pressure_threshold_usec
= u
->manager
->defaults
.memory_pressure_threshold_usec
;
201 ec
= unit_get_exec_context(u
);
203 exec_context_init(ec
);
205 if (u
->manager
->defaults
.oom_score_adjust_set
) {
206 ec
->oom_score_adjust
= u
->manager
->defaults
.oom_score_adjust
;
207 ec
->oom_score_adjust_set
= true;
210 if (MANAGER_IS_SYSTEM(u
->manager
))
211 ec
->keyring_mode
= EXEC_KEYRING_SHARED
;
213 ec
->keyring_mode
= EXEC_KEYRING_INHERIT
;
215 /* User manager might have its umask redefined by PAM or UMask=. In this
216 * case let the units it manages inherit this value by default. They can
217 * still tune this value through their own unit file */
218 (void) get_process_umask(0, &ec
->umask
);
222 kc
= unit_get_kill_context(u
);
224 kill_context_init(kc
);
226 if (UNIT_VTABLE(u
)->init
)
227 UNIT_VTABLE(u
)->init(u
);
230 static int unit_add_alias(Unit
*u
, char *donated_name
) {
233 /* Make sure that u->names is allocated. We may leave u->names
234 * empty if we fail later, but this is not a problem. */
235 r
= set_ensure_put(&u
->aliases
, &string_hash_ops
, donated_name
);
243 int unit_add_name(Unit
*u
, const char *text
) {
244 _cleanup_free_
char *name
= NULL
, *instance
= NULL
;
251 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
253 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
254 "instance is not set when adding name '%s': %m", text
);
256 r
= unit_name_replace_instance(text
, u
->instance
, &name
);
258 return log_unit_debug_errno(u
, r
,
259 "failed to build instance name from '%s': %m", text
);
266 if (unit_has_name(u
, name
))
269 if (hashmap_contains(u
->manager
->units
, name
))
270 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
271 "unit already exist when adding name '%s': %m", name
);
273 if (!unit_name_is_valid(name
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
274 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
275 "name '%s' is invalid: %m", name
);
277 t
= unit_name_to_type(name
);
279 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
280 "failed to derive unit type from name '%s': %m", name
);
282 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
283 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
284 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
287 r
= unit_name_to_instance(name
, &instance
);
289 return log_unit_debug_errno(u
, r
, "failed to extract instance from name '%s': %m", name
);
291 if (instance
&& !unit_type_may_template(t
))
292 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
), "templates are not allowed for name '%s': %m", name
);
294 /* Ensure that this unit either has no instance, or that the instance matches. */
295 if (u
->type
!= _UNIT_TYPE_INVALID
&& !streq_ptr(u
->instance
, instance
))
296 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
297 "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
298 name
, instance
, u
->instance
);
300 if (u
->id
&& !unit_type_may_alias(t
))
301 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
302 "cannot add name %s, aliases are not allowed for %s units.",
303 name
, unit_type_to_string(t
));
305 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
306 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(E2BIG
), "cannot add name, manager has too many units: %m");
308 /* Add name to the global hashmap first, because that's easier to undo */
309 r
= hashmap_put(u
->manager
->units
, name
, u
);
311 return log_unit_debug_errno(u
, r
, "add unit to hashmap failed for name '%s': %m", text
);
314 r
= unit_add_alias(u
, name
); /* unit_add_alias() takes ownership of the name on success */
316 hashmap_remove(u
->manager
->units
, name
);
322 /* A new name, we don't need the set yet. */
323 assert(u
->type
== _UNIT_TYPE_INVALID
);
324 assert(!u
->instance
);
327 u
->id
= TAKE_PTR(name
);
328 u
->instance
= TAKE_PTR(instance
);
330 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
334 unit_add_to_dbus_queue(u
);
338 int unit_choose_id(Unit
*u
, const char *name
) {
339 _cleanup_free_
char *t
= NULL
;
346 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
350 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
357 if (streq_ptr(u
->id
, name
))
358 return 0; /* Nothing to do. */
360 /* Selects one of the aliases of this unit as the id */
361 s
= set_get(u
->aliases
, (char*) name
);
366 r
= set_remove_and_put(u
->aliases
, name
, u
->id
);
370 assert_se(set_remove(u
->aliases
, name
)); /* see set_get() above… */
372 u
->id
= s
; /* Old u->id is now stored in the set, and s is not stored anywhere */
373 unit_add_to_dbus_queue(u
);
378 int unit_set_description(Unit
*u
, const char *description
) {
383 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
387 unit_add_to_dbus_queue(u
);
392 static bool unit_success_failure_handler_has_jobs(Unit
*unit
) {
395 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_SUCCESS
)
396 if (other
->job
|| other
->nop_job
)
399 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_FAILURE
)
400 if (other
->job
|| other
->nop_job
)
406 void unit_release_resources(Unit
*u
) {
407 UnitActiveState state
;
412 if (u
->job
|| u
->nop_job
)
418 state
= unit_active_state(u
);
419 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
422 if (unit_will_restart(u
))
425 ec
= unit_get_exec_context(u
);
426 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
427 exec_context_destroy_runtime_directory(ec
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
429 if (UNIT_VTABLE(u
)->release_resources
)
430 UNIT_VTABLE(u
)->release_resources(u
);
433 bool unit_may_gc(Unit
*u
) {
434 UnitActiveState state
;
439 /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
440 * unit may be collected, and false if there's some reason to keep it loaded.
442 * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
443 * using markers to properly collect dependency loops.
446 if (u
->job
|| u
->nop_job
)
452 /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
453 * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
454 * before we release the unit. */
455 if (u
->in_cgroup_empty_queue
|| u
->in_cgroup_oom_queue
)
458 /* Make sure to send out D-Bus events before we unload the unit */
459 if (u
->in_dbus_queue
)
462 if (sd_bus_track_count(u
->bus_track
) > 0)
465 state
= unit_active_state(u
);
467 /* But we keep the unit object around for longer when it is referenced or configured to not be
469 switch (u
->collect_mode
) {
471 case COLLECT_INACTIVE
:
472 if (state
!= UNIT_INACTIVE
)
477 case COLLECT_INACTIVE_OR_FAILED
:
478 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
484 assert_not_reached();
487 /* Check if any OnFailure= or OnSuccess= jobs may be pending */
488 if (unit_success_failure_handler_has_jobs(u
))
491 if (u
->cgroup_path
) {
492 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
493 * around. Units with active processes should never be collected. */
495 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
497 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u
->cgroup_path
));
502 if (!UNIT_VTABLE(u
)->may_gc
)
505 return UNIT_VTABLE(u
)->may_gc(u
);
508 void unit_add_to_load_queue(Unit
*u
) {
510 assert(u
->type
!= _UNIT_TYPE_INVALID
);
512 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
515 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
516 u
->in_load_queue
= true;
519 void unit_add_to_cleanup_queue(Unit
*u
) {
522 if (u
->in_cleanup_queue
)
525 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
526 u
->in_cleanup_queue
= true;
529 void unit_add_to_gc_queue(Unit
*u
) {
532 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
538 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
539 u
->in_gc_queue
= true;
542 void unit_add_to_dbus_queue(Unit
*u
) {
544 assert(u
->type
!= _UNIT_TYPE_INVALID
);
546 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
549 /* Shortcut things if nobody cares */
550 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
551 sd_bus_track_count(u
->bus_track
) <= 0 &&
552 set_isempty(u
->manager
->private_buses
)) {
553 u
->sent_dbus_new_signal
= true;
557 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
558 u
->in_dbus_queue
= true;
561 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
564 if (u
->in_stop_when_unneeded_queue
)
567 if (!u
->stop_when_unneeded
)
570 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
573 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
574 u
->in_stop_when_unneeded_queue
= true;
577 void unit_submit_to_start_when_upheld_queue(Unit
*u
) {
580 if (u
->in_start_when_upheld_queue
)
583 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)))
586 if (!unit_has_dependency(u
, UNIT_ATOM_START_STEADILY
, NULL
))
589 LIST_PREPEND(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
590 u
->in_start_when_upheld_queue
= true;
593 void unit_submit_to_stop_when_bound_queue(Unit
*u
) {
596 if (u
->in_stop_when_bound_queue
)
599 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
602 if (!unit_has_dependency(u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
, NULL
))
605 LIST_PREPEND(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
606 u
->in_stop_when_bound_queue
= true;
609 static bool unit_can_release_resources(Unit
*u
) {
614 if (UNIT_VTABLE(u
)->release_resources
)
617 ec
= unit_get_exec_context(u
);
618 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
624 void unit_submit_to_release_resources_queue(Unit
*u
) {
627 if (u
->in_release_resources_queue
)
630 if (u
->job
|| u
->nop_job
)
636 if (!unit_can_release_resources(u
))
639 LIST_PREPEND(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
640 u
->in_release_resources_queue
= true;
643 static void unit_clear_dependencies(Unit
*u
) {
646 /* Removes all dependencies configured on u and their reverse dependencies. */
648 for (Hashmap
*deps
; (deps
= hashmap_steal_first(u
->dependencies
));) {
650 for (Unit
*other
; (other
= hashmap_steal_first_key(deps
));) {
653 HASHMAP_FOREACH(other_deps
, other
->dependencies
)
654 hashmap_remove(other_deps
, u
);
656 unit_add_to_gc_queue(other
);
662 u
->dependencies
= hashmap_free(u
->dependencies
);
665 static void unit_remove_transient(Unit
*u
) {
671 if (u
->fragment_path
)
672 (void) unlink(u
->fragment_path
);
674 STRV_FOREACH(i
, u
->dropin_paths
) {
675 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
677 if (path_extract_directory(*i
, &p
) < 0) /* Get the drop-in directory from the drop-in file */
680 if (path_extract_directory(p
, &pp
) < 0) /* Get the config directory from the drop-in directory */
683 /* Only drop transient drop-ins */
684 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
692 static void unit_free_requires_mounts_for(Unit
*u
) {
696 _cleanup_free_
char *path
= NULL
;
698 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
702 char s
[strlen(path
) + 1];
704 PATH_FOREACH_PREFIX_MORE(s
, path
) {
708 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
712 (void) set_remove(x
, u
);
714 if (set_isempty(x
)) {
715 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
723 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
726 static void unit_done(Unit
*u
) {
735 if (UNIT_VTABLE(u
)->done
)
736 UNIT_VTABLE(u
)->done(u
);
738 ec
= unit_get_exec_context(u
);
740 exec_context_done(ec
);
742 cc
= unit_get_cgroup_context(u
);
744 cgroup_context_done(cc
);
747 Unit
* unit_free(Unit
*u
) {
754 sd_event_source_disable_unref(u
->auto_start_stop_event_source
);
756 u
->transient_file
= safe_fclose(u
->transient_file
);
758 if (!MANAGER_IS_RELOADING(u
->manager
))
759 unit_remove_transient(u
);
761 bus_unit_send_removed_signal(u
);
765 unit_dequeue_rewatch_pids(u
);
767 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
768 u
->bus_track
= sd_bus_track_unref(u
->bus_track
);
769 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
770 u
->pending_freezer_invocation
= sd_bus_message_unref(u
->pending_freezer_invocation
);
772 unit_free_requires_mounts_for(u
);
774 SET_FOREACH(t
, u
->aliases
)
775 hashmap_remove_value(u
->manager
->units
, t
, u
);
777 hashmap_remove_value(u
->manager
->units
, u
->id
, u
);
779 if (!sd_id128_is_null(u
->invocation_id
))
780 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
794 /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
795 * detach the unit from slice tree in order to eliminate its effect on controller masks. */
796 slice
= UNIT_GET_SLICE(u
);
797 unit_clear_dependencies(u
);
799 unit_add_family_to_cgroup_realize_queue(slice
);
802 manager_unref_console(u
->manager
);
804 fdset_free(u
->initial_socket_bind_link_fds
);
806 bpf_link_free(u
->ipv4_socket_bind_link
);
807 bpf_link_free(u
->ipv6_socket_bind_link
);
810 unit_release_cgroup(u
);
812 if (!MANAGER_IS_RELOADING(u
->manager
))
813 unit_unlink_state_files(u
);
815 unit_unref_uid_gid(u
, false);
817 (void) manager_update_failed_units(u
->manager
, u
, false);
818 set_remove(u
->manager
->startup_units
, u
);
820 unit_unwatch_all_pids(u
);
822 while (u
->refs_by_target
)
823 unit_ref_unset(u
->refs_by_target
);
825 if (u
->type
!= _UNIT_TYPE_INVALID
)
826 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
828 if (u
->in_load_queue
)
829 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
831 if (u
->in_dbus_queue
)
832 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
834 if (u
->in_cleanup_queue
)
835 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
838 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
840 if (u
->in_cgroup_realize_queue
)
841 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
843 if (u
->in_cgroup_empty_queue
)
844 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
846 if (u
->in_cgroup_oom_queue
)
847 LIST_REMOVE(cgroup_oom_queue
, u
->manager
->cgroup_oom_queue
, u
);
849 if (u
->in_target_deps_queue
)
850 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
852 if (u
->in_stop_when_unneeded_queue
)
853 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
855 if (u
->in_start_when_upheld_queue
)
856 LIST_REMOVE(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
858 if (u
->in_stop_when_bound_queue
)
859 LIST_REMOVE(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
861 if (u
->in_release_resources_queue
)
862 LIST_REMOVE(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
864 bpf_firewall_close(u
);
866 hashmap_free(u
->bpf_foreign_by_key
);
868 bpf_program_free(u
->bpf_device_control_installed
);
871 bpf_link_free(u
->restrict_ifaces_ingress_bpf_link
);
872 bpf_link_free(u
->restrict_ifaces_egress_bpf_link
);
874 fdset_free(u
->initial_restric_ifaces_link_fds
);
876 condition_free_list(u
->conditions
);
877 condition_free_list(u
->asserts
);
879 free(u
->description
);
880 strv_free(u
->documentation
);
881 free(u
->fragment_path
);
882 free(u
->source_path
);
883 strv_free(u
->dropin_paths
);
886 free(u
->job_timeout_reboot_arg
);
889 free(u
->access_selinux_context
);
891 set_free_free(u
->aliases
);
894 activation_details_unref(u
->activation_details
);
899 FreezerState
unit_freezer_state(Unit
*u
) {
902 return u
->freezer_state
;
905 int unit_freezer_state_kernel(Unit
*u
, FreezerState
*ret
) {
906 char *values
[1] = {};
911 r
= cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, "cgroup.events",
912 STRV_MAKE("frozen"), values
);
916 r
= _FREEZER_STATE_INVALID
;
919 if (streq(values
[0], "0"))
921 else if (streq(values
[0], "1"))
931 UnitActiveState
unit_active_state(Unit
*u
) {
934 if (u
->load_state
== UNIT_MERGED
)
935 return unit_active_state(unit_follow_merge(u
));
937 /* After a reload it might happen that a unit is not correctly
938 * loaded but still has a process around. That's why we won't
939 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
941 return UNIT_VTABLE(u
)->active_state(u
);
944 const char* unit_sub_state_to_string(Unit
*u
) {
947 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
950 static int unit_merge_names(Unit
*u
, Unit
*other
) {
957 r
= unit_add_alias(u
, other
->id
);
961 r
= set_move(u
->aliases
, other
->aliases
);
963 set_remove(u
->aliases
, other
->id
);
968 other
->aliases
= set_free_free(other
->aliases
);
970 SET_FOREACH(name
, u
->aliases
)
971 assert_se(hashmap_replace(u
->manager
->units
, name
, u
) == 0);
976 static int unit_reserve_dependencies(Unit
*u
, Unit
*other
) {
985 /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
988 * First make some room in the per dependency type hashmaps. Using the summed size of both units'
989 * hashmaps is an estimate that is likely too high since they probably use some of the same
990 * types. But it's never too low, and that's all we need. */
992 n_reserve
= MIN(hashmap_size(other
->dependencies
), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX
, hashmap_size(u
->dependencies
)));
994 r
= hashmap_ensure_allocated(&u
->dependencies
, NULL
);
998 r
= hashmap_reserve(u
->dependencies
, n_reserve
);
1003 /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
1004 * other unit's dependencies.
1006 * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
1007 * reserve anything for. In that case other's set will be transferred as a whole to u by
1008 * complete_move(). */
1010 HASHMAP_FOREACH_KEY(deps
, d
, u
->dependencies
) {
1011 Hashmap
*other_deps
;
1013 other_deps
= hashmap_get(other
->dependencies
, d
);
1015 r
= hashmap_reserve(deps
, hashmap_size(other_deps
));
1023 static bool unit_should_warn_about_dependency(UnitDependency dependency
) {
1024 /* Only warn about some unit types */
1025 return IN_SET(dependency
,
1036 static int unit_per_dependency_type_hashmap_update(
1039 UnitDependencyMask origin_mask
,
1040 UnitDependencyMask destination_mask
) {
1042 UnitDependencyInfo info
;
1046 assert_cc(sizeof(void*) == sizeof(info
));
1048 /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
1049 * exists, or insert it anew if not. */
1051 info
.data
= hashmap_get(per_type
, other
);
1053 /* Entry already exists. Add in our mask. */
1055 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
1056 FLAGS_SET(destination_mask
, info
.destination_mask
))
1059 info
.origin_mask
|= origin_mask
;
1060 info
.destination_mask
|= destination_mask
;
1062 r
= hashmap_update(per_type
, other
, info
.data
);
1064 info
= (UnitDependencyInfo
) {
1065 .origin_mask
= origin_mask
,
1066 .destination_mask
= destination_mask
,
1069 r
= hashmap_put(per_type
, other
, info
.data
);
1077 static void unit_merge_dependencies(Unit
*u
, Unit
*other
) {
1079 void *dt
; /* Actually of type UnitDependency, except that we don't bother casting it here,
1080 * since the hashmaps all want it as void pointer. */
1088 /* First, remove dependency to other. */
1089 HASHMAP_FOREACH_KEY(deps
, dt
, u
->dependencies
) {
1090 if (hashmap_remove(deps
, other
) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1091 log_unit_warning(u
, "Dependency %s=%s is dropped, as %s is merged into %s.",
1092 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1093 other
->id
, other
->id
, u
->id
);
1095 if (hashmap_isempty(deps
))
1096 hashmap_free(hashmap_remove(u
->dependencies
, dt
));
1100 _cleanup_hashmap_free_ Hashmap
*other_deps
= NULL
;
1101 UnitDependencyInfo di_back
;
1104 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1105 other_deps
= hashmap_steal_first_key_and_value(other
->dependencies
, &dt
);
1109 deps
= hashmap_get(u
->dependencies
, dt
);
1111 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1112 * referenced units as 'back'. */
1113 HASHMAP_FOREACH_KEY(di_back
.data
, back
, other_deps
) {
1118 /* This is a dependency pointing back to the unit we want to merge with?
1119 * Suppress it (but warn) */
1120 if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1121 log_unit_warning(u
, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
1122 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1123 u
->id
, other
->id
, other
->id
, u
->id
);
1125 hashmap_remove(other_deps
, back
);
1129 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1130 * point to 'u' instead. */
1131 HASHMAP_FOREACH_KEY(back_deps
, back_dt
, back
->dependencies
) {
1132 UnitDependencyInfo di_move
;
1134 di_move
.data
= hashmap_remove(back_deps
, other
);
1138 assert_se(unit_per_dependency_type_hashmap_update(
1141 di_move
.origin_mask
,
1142 di_move
.destination_mask
) >= 0);
1145 /* The target unit already has dependencies of this type, let's then merge this individually. */
1147 assert_se(unit_per_dependency_type_hashmap_update(
1150 di_back
.origin_mask
,
1151 di_back
.destination_mask
) >= 0);
1154 /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
1155 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
1156 * dependencies of this type, let's move them per type wholesale. */
1158 assert_se(hashmap_put(u
->dependencies
, dt
, TAKE_PTR(other_deps
)) >= 0);
1161 other
->dependencies
= hashmap_free(other
->dependencies
);
1164 int unit_merge(Unit
*u
, Unit
*other
) {
1169 assert(u
->manager
== other
->manager
);
1170 assert(u
->type
!= _UNIT_TYPE_INVALID
);
1172 other
= unit_follow_merge(other
);
1177 if (u
->type
!= other
->type
)
1180 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
1183 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
1186 if (!streq_ptr(u
->instance
, other
->instance
))
1195 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1198 /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
1199 * fail. We don't have a way to undo reservations. A reservation is not a leak. */
1200 r
= unit_reserve_dependencies(u
, other
);
1204 /* Redirect all references */
1205 while (other
->refs_by_target
)
1206 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
1208 /* Merge dependencies */
1209 unit_merge_dependencies(u
, other
);
1211 /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
1212 r
= unit_merge_names(u
, other
);
1216 other
->load_state
= UNIT_MERGED
;
1217 other
->merged_into
= u
;
1219 if (!u
->activation_details
)
1220 u
->activation_details
= activation_details_ref(other
->activation_details
);
1222 /* If there is still some data attached to the other node, we
1223 * don't need it anymore, and can free it. */
1224 if (other
->load_state
!= UNIT_STUB
)
1225 if (UNIT_VTABLE(other
)->done
)
1226 UNIT_VTABLE(other
)->done(other
);
1228 unit_add_to_dbus_queue(u
);
1229 unit_add_to_cleanup_queue(other
);
1234 int unit_merge_by_name(Unit
*u
, const char *name
) {
1235 _cleanup_free_
char *s
= NULL
;
1239 /* Either add name to u, or if a unit with name already exists, merge it with u.
1240 * If name is a template, do the same for name@instance, where instance is u's instance. */
1245 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
1249 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
1256 other
= manager_get_unit(u
->manager
, name
);
1258 return unit_merge(u
, other
);
1260 return unit_add_name(u
, name
);
1263 Unit
* unit_follow_merge(Unit
*u
) {
1266 while (u
->load_state
== UNIT_MERGED
)
1267 assert_se(u
= u
->merged_into
);
1272 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
1278 /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */
1280 if (c
->working_directory
&& !c
->working_directory_missing_ok
) {
1281 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
1286 if (c
->root_directory
) {
1287 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
1292 if (c
->root_image
) {
1293 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
1298 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
1299 if (!u
->manager
->prefix
[dt
])
1302 for (size_t i
= 0; i
< c
->directories
[dt
].n_items
; i
++) {
1303 _cleanup_free_
char *p
= NULL
;
1305 p
= path_join(u
->manager
->prefix
[dt
], c
->directories
[dt
].items
[i
].path
);
1309 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1315 if (!MANAGER_IS_SYSTEM(u
->manager
))
1318 /* For the following three directory types we need write access, and /var/ is possibly on the root
1319 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1320 if (c
->directories
[EXEC_DIRECTORY_STATE
].n_items
> 0 ||
1321 c
->directories
[EXEC_DIRECTORY_CACHE
].n_items
> 0 ||
1322 c
->directories
[EXEC_DIRECTORY_LOGS
].n_items
> 0) {
1323 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_REMOUNT_FS_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1328 if (c
->private_tmp
) {
1330 /* FIXME: for now we make a special case for /tmp and add a weak dependency on
1331 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
1332 * /tmp specifically and masking other mount units should be handled more
1333 * gracefully too, see PR#16894. */
1334 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "tmp.mount", true, UNIT_DEPENDENCY_FILE
);
1338 r
= unit_require_mounts_for(u
, "/var/tmp", UNIT_DEPENDENCY_FILE
);
1342 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1347 if (c
->root_image
) {
1348 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1349 * implicit dependency on udev */
1351 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_UDEVD_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1356 if (!IN_SET(c
->std_output
,
1357 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1358 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1359 !IN_SET(c
->std_error
,
1360 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1361 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1365 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1368 if (c
->log_namespace
) {
1369 _cleanup_free_
char *socket_unit
= NULL
, *varlink_socket_unit
= NULL
;
1371 r
= unit_name_build_from_type("systemd-journald", c
->log_namespace
, UNIT_SOCKET
, &socket_unit
);
1375 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1379 r
= unit_name_build_from_type("systemd-journald-varlink", c
->log_namespace
, UNIT_SOCKET
, &varlink_socket_unit
);
1383 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, varlink_socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1387 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1392 r
= unit_add_default_credential_dependencies(u
, c
);
1399 const char* unit_description(Unit
*u
) {
1403 return u
->description
;
1405 return strna(u
->id
);
1408 const char* unit_status_string(Unit
*u
, char **ret_combined_buffer
) {
1412 /* Return u->id, u->description, or "{u->id} - {u->description}".
1413 * Versions with u->description are only used if it is set.
1414 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1417 * Note that *ret_combined_buffer may be set to NULL. */
1419 if (!u
->description
||
1420 u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_NAME
||
1421 (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& !ret_combined_buffer
) ||
1422 streq(u
->description
, u
->id
)) {
1424 if (ret_combined_buffer
)
1425 *ret_combined_buffer
= NULL
;
1429 if (ret_combined_buffer
) {
1430 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
) {
1431 *ret_combined_buffer
= strjoin(u
->id
, " - ", u
->description
);
1432 if (*ret_combined_buffer
)
1433 return *ret_combined_buffer
;
1434 log_oom(); /* Fall back to ->description */
1436 *ret_combined_buffer
= NULL
;
1439 return u
->description
;
1442 /* Common implementation for multiple backends */
1443 int unit_load_fragment_and_dropin(Unit
*u
, bool fragment_required
) {
1448 /* Load a .{service,socket,...} file */
1449 r
= unit_load_fragment(u
);
1453 if (u
->load_state
== UNIT_STUB
) {
1454 if (fragment_required
)
1457 u
->load_state
= UNIT_LOADED
;
1460 /* Load drop-in directory data. If u is an alias, we might be reloading the
1461 * target unit needlessly. But we cannot be sure which drops-ins have already
1462 * been loaded and which not, at least without doing complicated book-keeping,
1463 * so let's always reread all drop-ins. */
1464 r
= unit_load_dropin(unit_follow_merge(u
));
1468 if (u
->source_path
) {
1471 if (stat(u
->source_path
, &st
) >= 0)
1472 u
->source_mtime
= timespec_load(&st
.st_mtim
);
1474 u
->source_mtime
= 0;
1480 void unit_add_to_target_deps_queue(Unit
*u
) {
1481 Manager
*m
= ASSERT_PTR(ASSERT_PTR(u
)->manager
);
1483 if (u
->in_target_deps_queue
)
1486 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1487 u
->in_target_deps_queue
= true;
1490 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1494 if (target
->type
!= UNIT_TARGET
)
1497 /* Only add the dependency if both units are loaded, so that
1498 * that loop check below is reliable */
1499 if (u
->load_state
!= UNIT_LOADED
||
1500 target
->load_state
!= UNIT_LOADED
)
1503 /* If either side wants no automatic dependencies, then let's
1505 if (!u
->default_dependencies
||
1506 !target
->default_dependencies
)
1509 /* Don't create loops */
1510 if (unit_has_dependency(target
, UNIT_ATOM_BEFORE
, u
))
1513 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1516 static int unit_add_slice_dependencies(Unit
*u
) {
1520 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1523 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1524 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1526 UnitDependencyMask mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1528 slice
= UNIT_GET_SLICE(u
);
1530 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, slice
, true, mask
);
1532 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1535 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1538 static int unit_add_mount_dependencies(Unit
*u
) {
1539 UnitDependencyInfo di
;
1541 bool changed
= false;
1546 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
) {
1547 char prefix
[strlen(path
) + 1];
1549 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1550 _cleanup_free_
char *p
= NULL
;
1553 r
= unit_name_from_path(prefix
, ".mount", &p
);
1555 continue; /* If the path cannot be converted to a mount unit name, then it's
1556 * not manageable as a unit by systemd, and hence we don't need a
1557 * dependency on it. Let's thus silently ignore the issue. */
1561 m
= manager_get_unit(u
->manager
, p
);
1563 /* Make sure to load the mount unit if it exists. If so the dependencies on
1564 * this unit will be added later during the loading of the mount unit. */
1565 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1571 if (m
->load_state
!= UNIT_LOADED
)
1574 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1577 changed
= changed
|| r
> 0;
1579 if (m
->fragment_path
) {
1580 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
1583 changed
= changed
|| r
> 0;
1591 static int unit_add_oomd_dependencies(Unit
*u
) {
1598 if (!u
->default_dependencies
)
1601 c
= unit_get_cgroup_context(u
);
1605 bool wants_oomd
= c
->moom_swap
== MANAGED_OOM_KILL
|| c
->moom_mem_pressure
== MANAGED_OOM_KILL
;
1609 if (!cg_all_unified())
1612 r
= cg_mask_supported(&mask
);
1614 return log_debug_errno(r
, "Failed to determine supported controllers: %m");
1616 if (!FLAGS_SET(mask
, CGROUP_MASK_MEMORY
))
1619 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE
);
1622 static int unit_add_startup_units(Unit
*u
) {
1623 if (!unit_has_startup_cgroup_constraints(u
))
1626 return set_ensure_put(&u
->manager
->startup_units
, NULL
, u
);
1629 static int unit_validate_on_failure_job_mode(
1631 const char *job_mode_setting
,
1633 const char *dependency_name
,
1634 UnitDependencyAtom atom
) {
1636 Unit
*other
, *found
= NULL
;
1638 if (job_mode
!= JOB_ISOLATE
)
1641 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
1644 else if (found
!= other
)
1645 return log_unit_error_errno(
1646 u
, SYNTHETIC_ERRNO(ENOEXEC
),
1647 "More than one %s dependencies specified but %sisolate set. Refusing.",
1648 dependency_name
, job_mode_setting
);
1654 int unit_load(Unit
*u
) {
1659 if (u
->in_load_queue
) {
1660 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1661 u
->in_load_queue
= false;
1664 if (u
->type
== _UNIT_TYPE_INVALID
)
1667 if (u
->load_state
!= UNIT_STUB
)
1670 if (u
->transient_file
) {
1671 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1672 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1674 r
= fflush_and_check(u
->transient_file
);
1678 u
->transient_file
= safe_fclose(u
->transient_file
);
1679 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1682 r
= UNIT_VTABLE(u
)->load(u
);
1686 assert(u
->load_state
!= UNIT_STUB
);
1688 if (u
->load_state
== UNIT_LOADED
) {
1689 unit_add_to_target_deps_queue(u
);
1691 r
= unit_add_slice_dependencies(u
);
1695 r
= unit_add_mount_dependencies(u
);
1699 r
= unit_add_oomd_dependencies(u
);
1703 r
= unit_add_startup_units(u
);
1707 r
= unit_validate_on_failure_job_mode(u
, "OnSuccessJobMode=", u
->on_success_job_mode
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
);
1711 r
= unit_validate_on_failure_job_mode(u
, "OnFailureJobMode=", u
->on_failure_job_mode
, "OnFailure=", UNIT_ATOM_ON_FAILURE
);
1715 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1716 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1718 /* We finished loading, let's ensure our parents recalculate the members mask */
1719 unit_invalidate_cgroup_members_masks(u
);
1722 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1724 unit_add_to_dbus_queue(unit_follow_merge(u
));
1725 unit_add_to_gc_queue(u
);
1726 (void) manager_varlink_send_managed_oom_update(u
);
1731 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
1732 * should hence return ENOEXEC to ensure units are placed in this state after loading. */
1734 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1735 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1739 /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
1740 * an attempt is made to load this unit, we know we need to check again. */
1741 if (u
->load_state
== UNIT_NOT_FOUND
)
1742 u
->fragment_not_found_timestamp_hash
= u
->manager
->unit_cache_timestamp_hash
;
1744 unit_add_to_dbus_queue(u
);
1745 unit_add_to_gc_queue(u
);
1747 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1751 static int log_unit_internal(void *userdata
, int level
, int error
, const char *file
, int line
, const char *func
, const char *format
, ...) {
1756 if (u
&& !unit_log_level_test(u
, level
))
1757 return -ERRNO_VALUE(error
);
1759 va_start(ap
, format
);
1761 r
= log_object_internalv(level
, error
, file
, line
, func
,
1762 u
->manager
->unit_log_field
,
1764 u
->manager
->invocation_log_field
,
1765 u
->invocation_id_string
,
1768 r
= log_internalv(level
, error
, file
, line
, func
, format
, ap
);
1774 static bool unit_test_condition(Unit
*u
) {
1775 _cleanup_strv_free_
char **env
= NULL
;
1780 dual_timestamp_get(&u
->condition_timestamp
);
1782 r
= manager_get_effective_environment(u
->manager
, &env
);
1784 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1785 u
->condition_result
= true;
1787 u
->condition_result
= condition_test_list(
1790 condition_type_to_string
,
1794 unit_add_to_dbus_queue(u
);
1795 return u
->condition_result
;
1798 static bool unit_test_assert(Unit
*u
) {
1799 _cleanup_strv_free_
char **env
= NULL
;
1804 dual_timestamp_get(&u
->assert_timestamp
);
1806 r
= manager_get_effective_environment(u
->manager
, &env
);
1808 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1809 u
->assert_result
= CONDITION_ERROR
;
1811 u
->assert_result
= condition_test_list(
1814 assert_type_to_string
,
1818 unit_add_to_dbus_queue(u
);
1819 return u
->assert_result
;
1822 void unit_status_printf(Unit
*u
, StatusType status_type
, const char *status
, const char *format
, const char *ident
) {
1823 if (log_get_show_color()) {
1824 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& strchr(ident
, ' '))
1825 ident
= strjoina(ANSI_HIGHLIGHT
, u
->id
, ANSI_NORMAL
, " - ", u
->description
);
1827 ident
= strjoina(ANSI_HIGHLIGHT
, ident
, ANSI_NORMAL
);
1830 DISABLE_WARNING_FORMAT_NONLITERAL
;
1831 manager_status_printf(u
->manager
, status_type
, status
, format
, ident
);
1835 int unit_test_start_limit(Unit
*u
) {
1840 if (ratelimit_below(&u
->start_ratelimit
)) {
1841 u
->start_limit_hit
= false;
1845 log_unit_warning(u
, "Start request repeated too quickly.");
1846 u
->start_limit_hit
= true;
1848 reason
= strjoina("unit ", u
->id
, " failed");
1850 emergency_action(u
->manager
, u
->start_limit_action
,
1851 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1852 u
->reboot_arg
, -1, reason
);
1857 static bool unit_verify_deps(Unit
*u
) {
1862 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1863 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1864 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1865 * that are not used in conjunction with After= as for them any such check would make things entirely
1868 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
1870 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
))
1873 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1874 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1882 /* Errors that aren't really errors:
1883 * -EALREADY: Unit is already started.
1884 * -ECOMM: Condition failed
1885 * -EAGAIN: An operation is already in progress. Retry later.
1887 * Errors that are real errors:
1888 * -EBADR: This unit type does not support starting.
1889 * -ECANCELED: Start limit hit, too many requests for now
1890 * -EPROTO: Assert failed
1891 * -EINVAL: Unit not loaded
1892 * -EOPNOTSUPP: Unit type not supported
1893 * -ENOLINK: The necessary dependencies are not fulfilled.
1894 * -ESTALE: This unit has been started before and can't be started a second time
1895 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1897 int unit_start(Unit
*u
, ActivationDetails
*details
) {
1898 UnitActiveState state
;
1904 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1905 if (UNIT_VTABLE(u
)->subsystem_ratelimited
) {
1906 r
= UNIT_VTABLE(u
)->subsystem_ratelimited(u
->manager
);
1913 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1914 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1915 * waiting is finished. */
1916 state
= unit_active_state(u
);
1917 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1919 if (state
== UNIT_MAINTENANCE
)
1922 /* Units that aren't loaded cannot be started */
1923 if (u
->load_state
!= UNIT_LOADED
)
1926 /* Refuse starting scope units more than once */
1927 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1930 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1931 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1932 * recheck the condition in that case. */
1933 if (state
!= UNIT_ACTIVATING
&&
1934 !unit_test_condition(u
))
1935 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(ECOMM
), "Starting requested but condition not met. Not starting unit.");
1937 /* If the asserts failed, fail the entire job */
1938 if (state
!= UNIT_ACTIVATING
&&
1939 !unit_test_assert(u
))
1940 return log_unit_notice_errno(u
, SYNTHETIC_ERRNO(EPROTO
), "Starting requested but asserts failed.");
1942 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1943 * condition checks, so that we rather return condition check errors (which are usually not
1944 * considered a true failure) than "not supported" errors (which are considered a failure).
1946 if (!unit_type_supported(u
->type
))
1949 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1950 * should have taken care of this already, but let's check this here again. After all, our
1951 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1952 if (!unit_verify_deps(u
))
1955 /* Forward to the main object, if we aren't it. */
1956 following
= unit_following(u
);
1958 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1959 return unit_start(following
, details
);
1962 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1963 if (UNIT_VTABLE(u
)->can_start
) {
1964 r
= UNIT_VTABLE(u
)->can_start(u
);
1969 /* If it is stopped, but we cannot start it, then fail */
1970 if (!UNIT_VTABLE(u
)->start
)
1973 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1974 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1975 * waits for a holdoff timer to elapse before it will start again. */
1977 unit_add_to_dbus_queue(u
);
1978 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
1980 if (!u
->activation_details
) /* Older details object wins */
1981 u
->activation_details
= activation_details_ref(details
);
1983 return UNIT_VTABLE(u
)->start(u
);
1986 bool unit_can_start(Unit
*u
) {
1989 if (u
->load_state
!= UNIT_LOADED
)
1992 if (!unit_type_supported(u
->type
))
1995 /* Scope units may be started only once */
1996 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1999 return !!UNIT_VTABLE(u
)->start
;
2002 bool unit_can_isolate(Unit
*u
) {
2005 return unit_can_start(u
) &&
2010 * -EBADR: This unit type does not support stopping.
2011 * -EALREADY: Unit is already stopped.
2012 * -EAGAIN: An operation is already in progress. Retry later.
2014 int unit_stop(Unit
*u
) {
2015 UnitActiveState state
;
2020 state
= unit_active_state(u
);
2021 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
2024 following
= unit_following(u
);
2026 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
2027 return unit_stop(following
);
2030 if (!UNIT_VTABLE(u
)->stop
)
2033 unit_add_to_dbus_queue(u
);
2034 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
2036 return UNIT_VTABLE(u
)->stop(u
);
2039 bool unit_can_stop(Unit
*u
) {
2042 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2043 * Extrinsic units follow external state and they may stop following external state changes
2044 * (hence we return true here), but an attempt to do this through the manager will fail. */
2046 if (!unit_type_supported(u
->type
))
2052 return !!UNIT_VTABLE(u
)->stop
;
2056 * -EBADR: This unit type does not support reloading.
2057 * -ENOEXEC: Unit is not started.
2058 * -EAGAIN: An operation is already in progress. Retry later.
2060 int unit_reload(Unit
*u
) {
2061 UnitActiveState state
;
2066 if (u
->load_state
!= UNIT_LOADED
)
2069 if (!unit_can_reload(u
))
2072 state
= unit_active_state(u
);
2073 if (state
== UNIT_RELOADING
)
2076 if (state
!= UNIT_ACTIVE
)
2077 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(ENOEXEC
), "Unit cannot be reloaded because it is inactive.");
2079 following
= unit_following(u
);
2081 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
2082 return unit_reload(following
);
2085 unit_add_to_dbus_queue(u
);
2087 if (!UNIT_VTABLE(u
)->reload
) {
2088 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2089 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), /* reload_success = */ true);
2093 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
2095 return UNIT_VTABLE(u
)->reload(u
);
2098 bool unit_can_reload(Unit
*u
) {
2101 if (UNIT_VTABLE(u
)->can_reload
)
2102 return UNIT_VTABLE(u
)->can_reload(u
);
2104 if (unit_has_dependency(u
, UNIT_ATOM_PROPAGATES_RELOAD_TO
, NULL
))
2107 return UNIT_VTABLE(u
)->reload
;
2110 bool unit_is_unneeded(Unit
*u
) {
2114 if (!u
->stop_when_unneeded
)
2117 /* Don't clean up while the unit is transitioning or is even inactive. */
2118 if (unit_active_state(u
) != UNIT_ACTIVE
)
2123 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED
) {
2124 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2125 * restart, then don't clean this one up. */
2130 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2133 if (unit_will_restart(other
))
2140 bool unit_is_upheld_by_active(Unit
*u
, Unit
**ret_culprit
) {
2145 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2146 * that is active declared an Uphold= dependencies on it */
2148 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)) || u
->job
) {
2150 *ret_culprit
= NULL
;
2154 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_START_STEADILY
) {
2158 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
2160 *ret_culprit
= other
;
2166 *ret_culprit
= NULL
;
2170 bool unit_is_bound_by_inactive(Unit
*u
, Unit
**ret_culprit
) {
2175 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2176 * because the other unit is down. */
2178 if (unit_active_state(u
) != UNIT_ACTIVE
|| u
->job
) {
2179 /* Don't clean up while the unit is transitioning or is even inactive. */
2181 *ret_culprit
= NULL
;
2185 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
2189 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
))) {
2191 *ret_culprit
= other
;
2198 *ret_culprit
= NULL
;
2202 static void check_unneeded_dependencies(Unit
*u
) {
2206 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2208 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE
)
2209 unit_submit_to_stop_when_unneeded_queue(other
);
2212 static void check_uphold_dependencies(Unit
*u
) {
2216 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2218 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE
)
2219 unit_submit_to_start_when_upheld_queue(other
);
2222 static void check_bound_by_dependencies(Unit
*u
) {
2226 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2228 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE
)
2229 unit_submit_to_stop_when_bound_queue(other
);
2232 static void retroactively_start_dependencies(Unit
*u
) {
2236 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2238 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_REPLACE
) /* Requires= + BindsTo= */
2239 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2240 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2241 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2243 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_FAIL
) /* Wants= */
2244 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2245 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2246 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
, NULL
);
2248 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_START
) /* Conflicts= (and inverse) */
2249 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2250 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2253 static void retroactively_stop_dependencies(Unit
*u
) {
2257 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2259 /* Pull down units which are bound to us recursively if enabled */
2260 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP
) /* BoundBy= */
2261 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2262 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2265 void unit_start_on_failure(
2267 const char *dependency_name
,
2268 UnitDependencyAtom atom
,
2276 assert(dependency_name
);
2277 assert(IN_SET(atom
, UNIT_ATOM_ON_SUCCESS
, UNIT_ATOM_ON_FAILURE
));
2279 /* Act on OnFailure= and OnSuccess= dependencies */
2281 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
2282 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2285 log_unit_info(u
, "Triggering %s dependencies.", dependency_name
);
2289 r
= manager_add_job(u
->manager
, JOB_START
, other
, job_mode
, NULL
, &error
, NULL
);
2291 log_unit_warning_errno(
2292 u
, r
, "Failed to enqueue %s job, ignoring: %s",
2293 dependency_name
, bus_error_message(&error
, r
));
2298 log_unit_debug(u
, "Triggering %s dependencies done (%i %s).",
2299 dependency_name
, n_jobs
, n_jobs
== 1 ? "job" : "jobs");
2302 void unit_trigger_notify(Unit
*u
) {
2307 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_TRIGGERED_BY
)
2308 if (UNIT_VTABLE(other
)->trigger_notify
)
2309 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
/* Raises (numerically lowers) the log level to NOTICE or INFO when the matching
 * resource-consumption threshold was crossed; never lowers the priority. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice && log_level > LOG_NOTICE)
                return LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                return LOG_INFO;
        return log_level;
}
2320 static int unit_log_resources(Unit
*u
) {
2321 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ _CGROUP_IO_ACCOUNTING_METRIC_MAX
+ 4];
2322 bool any_traffic
= false, have_ip_accounting
= false, any_io
= false, have_io_accounting
= false;
2323 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
, *rr
= NULL
, *wr
= NULL
;
2324 int log_level
= LOG_DEBUG
; /* May be raised if resources consumed over a threshold */
2325 size_t n_message_parts
= 0, n_iovec
= 0;
2326 char* message_parts
[1 + 2 + 2 + 1], *t
;
2327 nsec_t nsec
= NSEC_INFINITY
;
2329 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2330 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2331 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2332 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2333 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2335 const char* const io_fields
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
2336 [CGROUP_IO_READ_BYTES
] = "IO_METRIC_READ_BYTES",
2337 [CGROUP_IO_WRITE_BYTES
] = "IO_METRIC_WRITE_BYTES",
2338 [CGROUP_IO_READ_OPERATIONS
] = "IO_METRIC_READ_OPERATIONS",
2339 [CGROUP_IO_WRITE_OPERATIONS
] = "IO_METRIC_WRITE_OPERATIONS",
2344 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2345 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2346 * information and the complete data in structured fields. */
2348 (void) unit_get_cpu_usage(u
, &nsec
);
2349 if (nsec
!= NSEC_INFINITY
) {
2350 /* Format the CPU time for inclusion in the structured log message */
2351 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2355 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2357 /* Format the CPU time for inclusion in the human language message string */
2358 t
= strjoin("consumed ", FORMAT_TIMESPAN(nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
), " CPU time");
2364 message_parts
[n_message_parts
++] = t
;
2366 log_level
= raise_level(log_level
,
2367 nsec
> MENTIONWORTHY_CPU_NSEC
,
2368 nsec
> NOTICEWORTHY_CPU_NSEC
);
2371 for (CGroupIOAccountingMetric k
= 0; k
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; k
++) {
2372 uint64_t value
= UINT64_MAX
;
2374 assert(io_fields
[k
]);
2376 (void) unit_get_io_accounting(u
, k
, k
> 0, &value
);
2377 if (value
== UINT64_MAX
)
2380 have_io_accounting
= true;
2384 /* Format IO accounting data for inclusion in the structured log message */
2385 if (asprintf(&t
, "%s=%" PRIu64
, io_fields
[k
], value
) < 0) {
2389 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2391 /* Format the IO accounting data for inclusion in the human language message string, but only
2392 * for the bytes counters (and not for the operations counters) */
2393 if (k
== CGROUP_IO_READ_BYTES
) {
2395 rr
= strjoin("read ", strna(FORMAT_BYTES(value
)), " from disk");
2400 } else if (k
== CGROUP_IO_WRITE_BYTES
) {
2402 wr
= strjoin("written ", strna(FORMAT_BYTES(value
)), " to disk");
2409 if (IN_SET(k
, CGROUP_IO_READ_BYTES
, CGROUP_IO_WRITE_BYTES
))
2410 log_level
= raise_level(log_level
,
2411 value
> MENTIONWORTHY_IO_BYTES
,
2412 value
> NOTICEWORTHY_IO_BYTES
);
2415 if (have_io_accounting
) {
2418 message_parts
[n_message_parts
++] = TAKE_PTR(rr
);
2420 message_parts
[n_message_parts
++] = TAKE_PTR(wr
);
2425 k
= strdup("no IO");
2431 message_parts
[n_message_parts
++] = k
;
2435 for (CGroupIPAccountingMetric m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2436 uint64_t value
= UINT64_MAX
;
2438 assert(ip_fields
[m
]);
2440 (void) unit_get_ip_accounting(u
, m
, &value
);
2441 if (value
== UINT64_MAX
)
2444 have_ip_accounting
= true;
2448 /* Format IP accounting data for inclusion in the structured log message */
2449 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2453 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2455 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2456 * bytes counters (and not for the packets counters) */
2457 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2459 igress
= strjoin("received ", strna(FORMAT_BYTES(value
)), " IP traffic");
2464 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2466 egress
= strjoin("sent ", strna(FORMAT_BYTES(value
)), " IP traffic");
2473 if (IN_SET(m
, CGROUP_IP_INGRESS_BYTES
, CGROUP_IP_EGRESS_BYTES
))
2474 log_level
= raise_level(log_level
,
2475 value
> MENTIONWORTHY_IP_BYTES
,
2476 value
> NOTICEWORTHY_IP_BYTES
);
2479 /* This check is here because it is the earliest point following all possible log_level assignments. If
2480 * log_level is assigned anywhere after this point, move this check. */
2481 if (!unit_log_level_test(u
, log_level
)) {
2486 if (have_ip_accounting
) {
2489 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2491 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2496 k
= strdup("no IP traffic");
2502 message_parts
[n_message_parts
++] = k
;
2506 /* Is there any accounting data available at all? */
2512 if (n_message_parts
== 0)
2513 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2515 _cleanup_free_
char *joined
= NULL
;
2517 message_parts
[n_message_parts
] = NULL
;
2519 joined
= strv_join(message_parts
, ", ");
2525 joined
[0] = ascii_toupper(joined
[0]);
2526 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2529 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2530 * and hence don't increase n_iovec for them */
2531 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2532 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2534 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2535 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2537 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2538 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2540 log_unit_struct_iovec(u
, log_level
, iovec
, n_iovec
+ 4);
2544 free_many_charp(message_parts
, n_message_parts
);
2546 for (size_t i
= 0; i
< n_iovec
; i
++)
2547 free(iovec
[i
].iov_base
);
/* unit_update_on_console() — recompute whether this unit needs the console
 * (unit_needs_console()) and, when the answer changed, adjust the manager's
 * console refcount via manager_ref_console()/manager_unref_console().
 * NOTE(review): this region is a lossy extraction — several original lines
 * (the early return and the ref/unref branch selection) are missing; verify
 * control flow against the upstream file before relying on it. */
2553 static void unit_update_on_console(Unit
*u
) {
2558 b
= unit_needs_console(u
);
2559 if (u
->on_console
== b
)
2564 manager_ref_console(u
->manager
);
2566 manager_unref_console(u
->manager
);
/* unit_emit_audit_start() — emit an audit "start" record (success=true) for a
 * unit that just finished starting, but only for unit types whose vtable
 * declares a positive audit_start_message_type.
 * NOTE(review): lossy extraction — the early-return statement after the guard
 * is not visible here. */
2569 static void unit_emit_audit_start(Unit
*u
) {
2572 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2575 /* Write audit record if we have just finished starting up */
2576 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ true);
/* unit_emit_audit_stop() — emit an audit "stop" record when a unit finishes
 * shutting down; success is reported as (state == UNIT_INACTIVE). If no start
 * record was ever written (in_audit branch), a matching start record is
 * synthesized first so that start/stop records always come in pairs.
 * Note the guard intentionally tests audit_start_message_type: start/stop
 * message types are declared together, so a type with no start type emits
 * no stop records either — presumably; confirm against upstream.
 * NOTE(review): lossy extraction — the in_audit branch condition and early
 * returns are not visible here. */
2580 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2583 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2587 /* Write audit record if we have just finished shutting down */
2588 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2589 u
->in_audit
= false;
2591 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2592 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2594 if (state
== UNIT_INACTIVE
)
2595 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ true);
/* unit_process_job() — reconcile a pending/running job with the unit's new
 * active state `ns`: requeue a JOB_WAITING job, and finish-and-invalidate the
 * job (JOB_DONE/JOB_FAILED) when the new state satisfies or contradicts the
 * job type. Returns whether the state change was "unexpected", i.e. not the
 * result of this job (the `unexpected` flag visible at the top).
 * NOTE(review): lossy extraction — the switch statement header, several case
 * labels (e.g. the start/stop cases preceding JOB_VERIFY_ACTIVE), brace
 * closures and `unexpected = true` assignments are missing; verify the exact
 * per-case behavior against upstream before drawing conclusions. */
2599 static bool unit_process_job(Job
*j
, UnitActiveState ns
, bool reload_success
) {
2600 bool unexpected
= false;
2605 if (j
->state
== JOB_WAITING
)
2606 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2608 job_add_to_run_queue(j
);
2610 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2611 * hence needs to invalidate jobs. */
2616 case JOB_VERIFY_ACTIVE
:
2618 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2619 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2620 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2623 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2624 if (ns
== UNIT_FAILED
)
2625 result
= JOB_FAILED
;
2629 job_finish_and_invalidate(j
, result
, true, false);
2636 case JOB_RELOAD_OR_START
:
2637 case JOB_TRY_RELOAD
:
2639 if (j
->state
== JOB_RUNNING
) {
2640 if (ns
== UNIT_ACTIVE
)
2641 job_finish_and_invalidate(j
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2642 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2645 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2646 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2654 case JOB_TRY_RESTART
:
2656 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2657 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2658 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2660 job_finish_and_invalidate(j
, JOB_FAILED
, true, false);
2666 assert_not_reached();
/* unit_notify() — central state-change hook, invoked for every low-level unit
 * state transition (os → ns), including transitions where os == ns.
 * Visible responsibilities, in order: enqueue D-Bus change signal; notify
 * systemd-oomd via varlink on active/inactive transitions; maintain the four
 * state-change timestamps (skipped while reloading); track failed units;
 * prune cgroup + state files on inactive/failed; propagate the change to the
 * unit's job (unit_process_job) and retroactively start/stop dependencies;
 * fire OnFailure=/OnSuccess=; emit audit/plymouth/resource-log events;
 * trigger emergency_action() for failure_action/success_action; and finally
 * submit the unit to the various deferred re-check queues (unneeded,
 * bound-by, uphold, GC, release-resources, stop-when-unneeded/bound).
 * NOTE(review): lossy extraction — variable declarations (m, unexpected,
 * reason), several guard conditions (e.g. the `if (u->job)` wrapper around
 * unit_process_job, MANAGER_IS_TEST_RUN checks) and closing braces are
 * missing here; confirm exact nesting against the upstream file. */
2672 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
2677 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2678 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2680 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2681 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2682 * remounted this function will be called too! */
2686 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2687 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2688 unit_add_to_dbus_queue(u
);
2690 /* Update systemd-oomd on the property/state change */
2692 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2694 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2695 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2696 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2697 * have the information on the property. Thus, indiscriminately send an update. */
2698 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) || UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2699 (void) manager_varlink_send_managed_oom_update(u
);
2702 /* Update timestamps for state changes */
2703 if (!MANAGER_IS_RELOADING(m
)) {
2704 dual_timestamp_get(&u
->state_change_timestamp
);
2706 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2707 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2708 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2709 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2711 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2712 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2713 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2714 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2717 /* Keep track of failed units */
2718 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2720 /* Make sure the cgroup and state files are always removed when we become inactive */
2721 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2722 SET_FLAG(u
->markers
,
2723 (1u << UNIT_MARKER_NEEDS_RELOAD
)|(1u << UNIT_MARKER_NEEDS_RESTART
),
2725 unit_prune_cgroup(u
);
2726 unit_unlink_state_files(u
);
2727 } else if (ns
!= os
&& ns
== UNIT_RELOADING
)
2728 SET_FLAG(u
->markers
, 1u << UNIT_MARKER_NEEDS_RELOAD
, false);
2730 unit_update_on_console(u
);
2732 if (!MANAGER_IS_RELOADING(m
)) {
2735 /* Let's propagate state changes to the job */
2737 unexpected
= unit_process_job(u
->job
, ns
, reload_success
);
2741 /* If this state change happened without being requested by a job, then let's retroactively start or
2742 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2743 * additional jobs just because something is already activated. */
2746 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2747 retroactively_start_dependencies(u
);
2748 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2749 retroactively_stop_dependencies(u
);
2752 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2753 log_unit_debug(u
, "Unit entered failed state.");
2754 unit_start_on_failure(u
, "OnFailure=", UNIT_ATOM_ON_FAILURE
, u
->on_failure_job_mode
);
2757 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2758 /* This unit just finished starting up */
2760 unit_emit_audit_start(u
);
2761 manager_send_unit_plymouth(m
, u
);
2764 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2765 /* This unit just stopped/failed. */
2767 unit_emit_audit_stop(u
, ns
);
2768 unit_log_resources(u
);
2771 if (ns
== UNIT_INACTIVE
&& !IN_SET(os
, UNIT_FAILED
, UNIT_INACTIVE
, UNIT_MAINTENANCE
))
2772 unit_start_on_failure(u
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
, u
->on_success_job_mode
);
2775 manager_recheck_journal(m
);
2776 manager_recheck_dbus(m
);
2778 unit_trigger_notify(u
);
2780 if (!MANAGER_IS_RELOADING(m
)) {
2781 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2782 reason
= strjoina("unit ", u
->id
, " failed");
2783 emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2784 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2785 reason
= strjoina("unit ", u
->id
, " succeeded");
2786 emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2790 /* And now, add the unit or depending units to various queues that will act on the new situation if
2791 * needed. These queues generally check for continuous state changes rather than events (like most of
2792 * the state propagation above), and do work deferred instead of instantly, since they typically
2793 * don't want to run during reloading, and usually involve checking combined state of multiple units
2796 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2797 /* Stop unneeded units and bound-by units regardless if going down was expected or not */
2798 check_unneeded_dependencies(u
);
2799 check_bound_by_dependencies(u
);
2801 /* Maybe someone wants us to remain up? */
2802 unit_submit_to_start_when_upheld_queue(u
);
2804 /* Maybe the unit should be GC'ed now? */
2805 unit_add_to_gc_queue(u
);
2807 /* Maybe we can release some resources now? */
2808 unit_submit_to_release_resources_queue(u
);
2811 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2812 /* Start uphold units regardless if going up was expected or not */
2813 check_uphold_dependencies(u
);
2815 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2816 unit_submit_to_stop_when_unneeded_queue(u
);
2818 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2819 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
2820 * inactive, without ever entering started.) */
2821 unit_submit_to_stop_when_bound_queue(u
);
/* unit_watch_pidref() — register `pid` in the set of PIDs this unit watches.
 * Two-level bookkeeping is visible: (1) a duplicated PidRef is inserted into
 * the unit-local u->pids set; (2) the PID is mapped to the unit in the
 * manager-global watch_pids hashmap, and when that key is already claimed by
 * a different unit, a NULL-terminated Unit* array in watch_pids_more is grown
 * by one entry instead. `exclusive` first purges stale watchers via
 * manager_unwatch_pidref(). Returns 0/positive on success, negative errno on
 * failure — presumably; the error-check lines (if (r < 0) return r;) are not
 * visible in this lossy extraction, nor is the guard making the unwatch
 * conditional on `exclusive`; verify against upstream. */
2825 int unit_watch_pidref(Unit
*u
, PidRef
*pid
, bool exclusive
) {
2826 _cleanup_(pidref_freep
) PidRef
*pid_dup
= NULL
;
2829 /* Adds a specific PID to the set of PIDs this unit watches. */
2832 assert(pidref_is_set(pid
));
2834 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2835 * opportunity to remove any stalled references to this PID as they can be created
2836 * easily (when watching a process which is not our direct child). */
2838 manager_unwatch_pidref(u
->manager
, pid
);
2840 if (set_contains(u
->pids
, pid
)) /* early exit if already being watched */
2843 r
= pidref_dup(pid
, &pid_dup
);
2847 /* First, insert into the set of PIDs maintained by the unit */
2848 r
= set_ensure_put(&u
->pids
, &pidref_hash_ops
, pid_dup
);
2852 pid
= TAKE_PTR(pid_dup
); /* continue with our copy now that we have installed it properly in our set */
2854 /* Second, insert it into the simple global table, see if that works */
2855 r
= hashmap_ensure_put(&u
->manager
->watch_pids
, &pidref_hash_ops
, pid
, u
);
2859 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2860 * hashmap that points to an array. */
2862 PidRef
*old_pid
= NULL
;
2863 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &old_pid
);
2865 /* Count entries in array */
2867 for (; array
&& array
[n
]; n
++)
2870 /* Allocate a new array */
2871 _cleanup_free_ Unit
**new_array
= new(Unit
*, n
+ 2);
2875 /* Append us to the end */
2876 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2878 new_array
[n
+1] = NULL
;
2880 /* Make sure the hashmap is allocated */
2881 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids_more
, &pidref_hash_ops
);
2885 /* Add or replace the old array */
2886 r
= hashmap_replace(u
->manager
->watch_pids_more
, old_pid
?: pid
, new_array
);
2890 TAKE_PTR(new_array
); /* Now part of the hash table */
2891 free(array
); /* Which means we can now delete the old version */
/* unit_watch_pid() — pid_t convenience wrapper around unit_watch_pidref():
 * builds a stack PidRef via pidref_set_pid() and forwards it.
 * NOTE(review): lossy extraction — the `if (r < 0) return r;` after
 * pidref_set_pid() is not visible here. */
2895 int unit_watch_pid(Unit
*u
, pid_t pid
, bool exclusive
) {
2896 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
2900 assert(pid_is_valid(pid
));
2902 r
= pidref_set_pid(&pidref
, pid
);
2906 return unit_watch_pidref(u
, &pidref
, exclusive
);
/* unit_unwatch_pidref() — inverse of unit_watch_pidref(): drop `pid` from the
 * unit-local u->pids set, then remove the unit from whichever manager-global
 * table holds it — the simple watch_pids hashmap, or the Unit* array in
 * watch_pids_more. When the array shrinks to empty the whole entry is
 * removed; otherwise, if the entry was keyed by the PidRef being destroyed,
 * it is re-keyed by the equivalent PidRef owned by the array's new first
 * unit (set_get(array[0]->pids, pid)), so the hashmap key never dangles.
 * NOTE(review): lossy extraction — the early-return guard for pid1 == NULL,
 * the branch conditions around the two tables, and the self-skip inside the
 * compaction loop are not visible here; verify against upstream. */
2909 void unit_unwatch_pidref(Unit
*u
, PidRef
*pid
) {
2911 assert(pidref_is_set(pid
));
2913 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2914 _cleanup_(pidref_freep
) PidRef
*pid1
= set_remove(u
->pids
, pid
);
2916 return; /* Early exit if this PID was never watched by us */
2918 /* First let's drop the unit from the simple hash table, if it is included there */
2919 PidRef
*pid2
= NULL
;
2920 Unit
*uu
= hashmap_get2(u
->manager
->watch_pids
, pid
, (void**) &pid2
);
2922 /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
2923 assert((uu
== u
) == (pid1
== pid2
));
2926 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2927 assert_se(hashmap_remove_value(u
->manager
->watch_pids
, pid2
, uu
) == uu
);
2929 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2930 PidRef
*pid3
= NULL
;
2931 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &pid3
);
2933 /* Let's iterate through the array, dropping our own entry */
2934 size_t m
= 0, n
= 0;
2935 for (; array
&& array
[n
]; n
++)
2937 array
[m
++] = array
[n
];
2939 return; /* Not there */
2941 array
[m
] = NULL
; /* set trailing NULL marker on the new end */
2944 /* The array is now empty, remove the entire entry */
2945 assert_se(hashmap_remove_value(u
->manager
->watch_pids_more
, pid3
, array
) == array
);
2948 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2949 * we will delete, but by the PidRef object of the Unit that is now first in the
2952 PidRef
*new_pid3
= ASSERT_PTR(set_get(array
[0]->pids
, pid
));
2953 assert_se(hashmap_replace(u
->manager
->watch_pids_more
, new_pid3
, array
) >= 0);
/* unit_unwatch_pid() — pid_t convenience wrapper: wraps the numeric PID in a
 * temporary PidRef (PIDREF_MAKE_FROM_PID) and delegates to
 * unit_unwatch_pidref(). */
2958 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2959 return unit_unwatch_pidref(u
, &PIDREF_MAKE_FROM_PID(pid
));
/* unit_unwatch_all_pids() — drain u->pids by unwatching the first element
 * until the set is empty (each unit_unwatch_pidref() call also cleans up the
 * manager-global tables), then free the set itself. */
2962 void unit_unwatch_all_pids(Unit
*u
) {
2965 while (!set_isempty(u
->pids
))
2966 unit_unwatch_pidref(u
, set_first(u
->pids
));
2968 u
->pids
= set_free(u
->pids
);
/* unit_tidy_watch_pids() — prune dead PIDs from u->pids: skip the unit's main
 * and control PIDs, and unwatch every other entry whose process is no longer
 * waitable (pidref_is_unwaited(e) <= 0, i.e. gone or error).
 * NOTE(review): lossy extraction — the `continue` after the main/control-PID
 * check is not visible; iteration during removal relies on SET_FOREACH
 * semantics — confirm upstream. */
2971 static void unit_tidy_watch_pids(Unit
*u
) {
2972 PidRef
*except1
, *except2
, *e
;
2976 /* Cleans dead PIDs from our list */
2978 except1
= unit_main_pid(u
);
2979 except2
= unit_control_pid(u
);
2981 SET_FOREACH(e
, u
->pids
) {
2982 if (pidref_equal(except1
, e
) || pidref_equal(except2
, e
))
2985 if (pidref_is_unwaited(e
) <= 0)
2986 unit_unwatch_pidref(u
, e
);
/* on_rewatch_pids_event() — deferred event callback (see
 * unit_enqueue_rewatch_pids): tidy the watched-PID set, re-scan the cgroup
 * for new PIDs, and if nothing is left to watch, synthesize a cgroup-empty
 * event to let the unit finish its shutdown logic.
 * NOTE(review): lossy extraction — the emptiness check guarding the
 * synthesize call is not visible here. */
2990 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2991 Unit
*u
= ASSERT_PTR(userdata
);
2995 unit_tidy_watch_pids(u
);
2996 unit_watch_all_pids(u
);
2998 /* If the PID set is empty now, then let's finish this off. */
2999 unit_synthesize_cgroup_empty_event(u
);
/* unit_enqueue_rewatch_pids() — schedule a one-shot, idle-priority deferred
 * event (on_rewatch_pids_event) that re-scans and tidies the watched PIDs.
 * Skipped when the unit has no cgroup path, or on cgroup v2 (unified), where
 * proper cgroup-empty notifications make polling unnecessary. The event
 * source is created lazily and cached in u->rewatch_pids_event_source; each
 * call (re-)arms it as SD_EVENT_ONESHOT. Returns 0 or a logged negative
 * errno. */
3004 int unit_enqueue_rewatch_pids(Unit
*u
) {
3009 if (!u
->cgroup_path
)
3012 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
3015 if (r
> 0) /* On unified we can use proper notifications */
3018 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
3019 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
3020 * involves issuing kill(pid, 0) on all processes we watch. */
3022 if (!u
->rewatch_pids_event_source
) {
3023 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
3025 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
3027 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
3029 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
3031 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: %m");
3033 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
3035 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
3038 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
3040 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
/* unit_dequeue_rewatch_pids() — cancel a pending rewatch: disable the cached
 * event source (warning-only on failure) and release it via
 * sd_event_source_disable_unref(), leaving the field NULL. No-op when no
 * source exists. */
3045 void unit_dequeue_rewatch_pids(Unit
*u
) {
3049 if (!u
->rewatch_pids_event_source
)
3052 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
3054 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
3056 u
->rewatch_pids_event_source
= sd_event_source_disable_unref(u
->rewatch_pids_event_source
);
/* unit_job_is_applicable() — answer whether job type `j` makes sense for this
 * unit: stop-like jobs are refused for perpetual units, restart needs both
 * can_stop and can_start, reload needs can_reload, reload-or-start needs
 * both. Invalid job types hit assert_not_reached().
 * NOTE(review): lossy extraction — the switch header and several case labels
 * (start/stop/restart/reload and the `return true` bodies) are missing;
 * which cases share which return values cannot be fully confirmed here. */
3059 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
3061 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
3065 case JOB_VERIFY_ACTIVE
:
3068 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3069 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3074 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3075 * external events), hence it makes no sense to permit enqueuing such a request either. */
3076 return !u
->perpetual
;
3079 case JOB_TRY_RESTART
:
3080 return unit_can_stop(u
) && unit_can_start(u
);
3083 case JOB_TRY_RELOAD
:
3084 return unit_can_reload(u
);
3086 case JOB_RELOAD_OR_START
:
3087 return unit_can_reload(u
) && unit_can_start(u
);
3090 assert_not_reached();
/* unit_get_dependency_hashmap_per_type() — fetch (or lazily create) the inner
 * per-dependency-type hashmap (Unit* → UnitDependencyInfo) stored in
 * u->dependencies under the key UNIT_DEPENDENCY_TO_PTR(d). Returns NULL on
 * allocation failure — presumably; the return statements are not visible in
 * this lossy extraction. */
3094 static Hashmap
*unit_get_dependency_hashmap_per_type(Unit
*u
, UnitDependency d
) {
3098 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3100 deps
= hashmap_get(u
->dependencies
, UNIT_DEPENDENCY_TO_PTR(d
));
3102 _cleanup_hashmap_free_ Hashmap
*h
= NULL
;
3104 h
= hashmap_new(NULL
);
3108 if (hashmap_ensure_put(&u
->dependencies
, NULL
, UNIT_DEPENDENCY_TO_PTR(d
), h
) < 0)
/* NotifyDependencyFlags — bit flags reporting which side(s) of a dependency
 * edge actually changed, so callers know which unit(s) need a D-Bus
 * properties-changed notification (see unit_add_dependency()). */
3117 typedef enum NotifyDependencyFlags
 {
3118 NOTIFY_DEPENDENCY_UPDATE_FROM
= 1 << 0,
3119 NOTIFY_DEPENDENCY_UPDATE_TO
= 1 << 1,
3120 } NotifyDependencyFlags
;
/* unit_add_dependency_impl() — record the dependency edge u --d--> other and
 * the inverse edge other --inverse_table[d]--> u in both units' per-type
 * dependency hashmaps, OR-ing `mask` into origin_mask on u's side and
 * destination_mask on other's side. If installing the inverse edge fails,
 * u's side is rolled back (restore old info or remove the entry). Returns a
 * NotifyDependencyFlags value describing which sides changed — presumably
 * via a `return flags;` not visible in this lossy extraction.
 * NOTE(review): parameter declarations (u, d, other), error returns after
 * the hashmap calls, and closing braces are missing here; the visible
 * rollback logic should be verified against upstream before modification. */
3122 static int unit_add_dependency_impl(
3126 UnitDependencyMask mask
) {
3128 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
3129 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
3130 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
3131 [UNIT_WANTS
] = UNIT_WANTED_BY
,
3132 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
3133 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
3134 [UNIT_UPHOLDS
] = UNIT_UPHELD_BY
,
3135 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
3136 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
3137 [UNIT_WANTED_BY
] = UNIT_WANTS
,
3138 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
3139 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
3140 [UNIT_UPHELD_BY
] = UNIT_UPHOLDS
,
3141 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
3142 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
3143 [UNIT_BEFORE
] = UNIT_AFTER
,
3144 [UNIT_AFTER
] = UNIT_BEFORE
,
3145 [UNIT_ON_SUCCESS
] = UNIT_ON_SUCCESS_OF
,
3146 [UNIT_ON_SUCCESS_OF
] = UNIT_ON_SUCCESS
,
3147 [UNIT_ON_FAILURE
] = UNIT_ON_FAILURE_OF
,
3148 [UNIT_ON_FAILURE_OF
] = UNIT_ON_FAILURE
,
3149 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
3150 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
3151 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
3152 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
3153 [UNIT_PROPAGATES_STOP_TO
] = UNIT_STOP_PROPAGATED_FROM
,
3154 [UNIT_STOP_PROPAGATED_FROM
] = UNIT_PROPAGATES_STOP_TO
,
3155 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
, /* symmetric! 👓 */
3156 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
3157 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
3158 [UNIT_IN_SLICE
] = UNIT_SLICE_OF
,
3159 [UNIT_SLICE_OF
] = UNIT_IN_SLICE
,
3162 Hashmap
*u_deps
, *other_deps
;
3163 UnitDependencyInfo u_info
, u_info_old
, other_info
, other_info_old
;
3164 NotifyDependencyFlags flags
= 0;
3169 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3170 assert(inverse_table
[d
] >= 0 && inverse_table
[d
] < _UNIT_DEPENDENCY_MAX
);
3171 assert(mask
> 0 && mask
< _UNIT_DEPENDENCY_MASK_FULL
);
3173 /* Ensure the following two hashmaps for each unit exist:
3174 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3175 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3176 u_deps
= unit_get_dependency_hashmap_per_type(u
, d
);
3180 other_deps
= unit_get_dependency_hashmap_per_type(other
, inverse_table
[d
]);
3184 /* Save the original dependency info. */
3185 u_info
.data
= u_info_old
.data
= hashmap_get(u_deps
, other
);
3186 other_info
.data
= other_info_old
.data
= hashmap_get(other_deps
, u
);
3188 /* Update dependency info. */
3189 u_info
.origin_mask
|= mask
;
3190 other_info
.destination_mask
|= mask
;
3192 /* Save updated dependency info. */
3193 if (u_info
.data
!= u_info_old
.data
) {
3194 r
= hashmap_replace(u_deps
, other
, u_info
.data
);
3198 flags
= NOTIFY_DEPENDENCY_UPDATE_FROM
;
3201 if (other_info
.data
!= other_info_old
.data
) {
3202 r
= hashmap_replace(other_deps
, u
, other_info
.data
);
3204 if (u_info
.data
!= u_info_old
.data
) {
3205 /* Restore the old dependency. */
3206 if (u_info_old
.data
)
3207 (void) hashmap_update(u_deps
, other
, u_info_old
.data
);
3209 hashmap_remove(u_deps
, other
);
3214 flags
|= NOTIFY_DEPENDENCY_UPDATE_TO
;
/* unit_add_dependency() — public entry point for adding a dependency edge.
 * Resolves merged unit aliases (unit_follow_merge), maps the dependency to
 * its atom, then applies validity policy: self-dependencies are dropped with
 * an optional warning; Before= on .device units is ignored; OnFailure= on
 * units that cannot fail is ignored; Triggers=/TriggeredBy= on non-trigger
 * unit types, Slice=/SliceOf= on non-slice or non-cgroup units are refused
 * with -EINVAL. Dependencies are skipped entirely in
 * MANAGER_TEST_RUN_IGNORE_DEPENDENCIES mode. On success, also adds a
 * UNIT_REFERENCES edge when add_reference is set, queues D-Bus notifications
 * per the NotifyDependencyFlags result, and returns > 0 iff something
 * actually changed.
 * NOTE(review): lossy extraction — parameter declarations, the u == other
 * self-check condition, and several `return` lines are missing here. */
3220 int unit_add_dependency(
3225 UnitDependencyMask mask
) {
3227 UnitDependencyAtom a
;
3230 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3231 * there, no need to notify! */
3232 NotifyDependencyFlags notify_flags
;
3235 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3238 u
= unit_follow_merge(u
);
3239 other
= unit_follow_merge(other
);
3240 a
= unit_dependency_to_atom(d
);
3243 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3245 if (unit_should_warn_about_dependency(d
))
3246 log_unit_warning(u
, "Dependency %s=%s is dropped.",
3247 unit_dependency_to_string(d
), u
->id
);
3251 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3254 /* Note that ordering a device unit after a unit is permitted since it allows to start its job
3255 * running timeout at a specific time. */
3256 if (FLAGS_SET(a
, UNIT_ATOM_BEFORE
) && other
->type
== UNIT_DEVICE
) {
3257 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
3261 if (FLAGS_SET(a
, UNIT_ATOM_ON_FAILURE
) && !UNIT_VTABLE(u
)->can_fail
) {
3262 log_unit_warning(u
, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other
->id
, unit_type_to_string(u
->type
));
3266 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERS
) && !UNIT_VTABLE(u
)->can_trigger
)
3267 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3268 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(u
->type
));
3269 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERED_BY
) && !UNIT_VTABLE(other
)->can_trigger
)
3270 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3271 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(other
->type
));
3273 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && other
->type
!= UNIT_SLICE
)
3274 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3275 "Requested dependency Slice=%s refused (%s is not a slice unit).", other
->id
, other
->id
);
3276 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && u
->type
!= UNIT_SLICE
)
3277 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3278 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other
->id
, u
->id
);
3280 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && !UNIT_HAS_CGROUP_CONTEXT(u
))
3281 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3282 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other
->id
, u
->id
);
3284 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && !UNIT_HAS_CGROUP_CONTEXT(other
))
3285 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3286 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other
->id
, other
->id
);
3288 r
= unit_add_dependency_impl(u
, d
, other
, mask
);
3293 if (add_reference
) {
3294 r
= unit_add_dependency_impl(u
, UNIT_REFERENCES
, other
, mask
);
3300 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_FROM
))
3301 unit_add_to_dbus_queue(u
);
3302 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_TO
))
3303 unit_add_to_dbus_queue(other
);
3305 return notify_flags
!= 0;
/* unit_add_two_dependencies() — add dependencies d and e from u to other in
 * one call (at least one of d/e must be valid, per the assert). Returns > 0
 * iff either call reported a change.
 * NOTE(review): lossy extraction — the negative-dependency skip guards and
 * error propagation between the two calls are not visible here. */
3308 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
3312 assert(d
>= 0 || e
>= 0);
3315 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3321 s
= unit_add_dependency(u
, e
, other
, add_reference
, mask
);
3326 return r
> 0 || s
> 0;
/* resolve_template() — turn a template unit name (foo@.service) into a
 * concrete instance name: use u->instance when set, otherwise derive a
 * fallback instance from u's own prefix. Non-template names presumably pass
 * through unchanged via *ret (the pass-through branch is not visible in this
 * lossy extraction). *buf owns the allocated result; *ret points at it. */
3329 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
3337 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
3344 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
3346 _cleanup_free_
char *i
= NULL
;
3348 r
= unit_name_to_prefix(u
->id
, &i
);
3352 r
= unit_name_replace_instance(name
, i
, buf
);
/* unit_add_dependency_by_name() — like unit_add_dependency(), but takes a
 * unit name: resolve templates against u, skip in
 * MANAGER_TEST_RUN_IGNORE_DEPENDENCIES mode, load (or find) the target unit
 * via manager_load_unit(), then delegate.
 * NOTE(review): lossy extraction — the error-propagation lines after
 * resolve_template()/manager_load_unit() are not visible here. */
3361 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3362 _cleanup_free_
char *buf
= NULL
;
3369 r
= resolve_template(u
, name
, &buf
, &name
);
3373 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3376 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3380 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
/* unit_add_two_dependencies_by_name() — name-based variant of
 * unit_add_two_dependencies(): resolve templates, honor the test-run
 * ignore-dependencies flag, load the target unit, then add both dependency
 * types d and e. Mirrors unit_add_dependency_by_name() above. */
3383 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3384 _cleanup_free_
char *buf
= NULL
;
3391 r
= resolve_template(u
, name
, &buf
, &name
);
3395 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3398 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3402 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* set_unit_path() — debug helper: export $SYSTEMD_UNIT_PATH (overwriting any
 * existing value) so unit files are searched at `p`; returns 0 or -errno via
 * RET_NERRNO. */
3405 int set_unit_path(const char *p
) {
3406 /* This is mostly for debug purposes */
3407 return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p
, 1));
/* unit_dbus_path() — return a newly allocated D-Bus object path for this
 * unit, derived from its primary name (caller frees). */
3410 char *unit_dbus_path(Unit
*u
) {
3416 return unit_dbus_path_from_name(u
->id
);
/* unit_dbus_path_invocation_id() — like unit_dbus_path(), but keyed by the
 * unit's invocation ID string; returns NULL-equivalent early when no
 * invocation ID is set (the early return itself is not visible in this lossy
 * extraction). */
3419 char *unit_dbus_path_invocation_id(Unit
*u
) {
3422 if (sd_id128_is_null(u
->invocation_id
))
3425 return unit_dbus_path_from_name(u
->invocation_id_string
);
/* unit_set_invocation_id() — set (or clear, when `id` is the null ID) the
 * unit's invocation ID and keep the manager's units_by_invocation_id hashmap
 * in sync: remove any old mapping, store the new ID and its string form,
 * insert the new mapping. On failure the visible tail resets the ID and its
 * string to null state rather than rolling back (per the top comment).
 * NOTE(review): lossy extraction — returns and the failure-path label/brace
 * structure are not visible here. */
3428 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
3433 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3435 if (sd_id128_equal(u
->invocation_id
, id
))
3438 if (!sd_id128_is_null(u
->invocation_id
))
3439 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3441 if (sd_id128_is_null(id
)) {
3446 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
3450 u
->invocation_id
= id
;
3451 sd_id128_to_string(id
, u
->invocation_id_string
);
3453 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3460 u
->invocation_id
= SD_ID128_NULL
;
3461 u
->invocation_id_string
[0] = 0;
/* unit_set_slice() — assign `slice` as this unit's slice via a UNIT_IN_SLICE
 * dependency. Visible preconditions: u must have a cgroup context, must not
 * itself be a slice, must be inactive, the target must be a slice unit,
 * init.scope may only live in the root slice, a no-op when the slice is
 * unchanged, and re-assignment is refused once u's cgroup is realized. Any
 * previously assigned slice dependency is removed first.
 * NOTE(review): lossy extraction — the return values of the refused cases
 * (-EINVAL/-EBUSY etc.) are not visible here; confirm against upstream. */
3465 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3471 /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
3472 * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
3473 * slice is derived from the name). Make sure the unit we set is actually a slice. */
3475 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3478 if (u
->type
== UNIT_SLICE
)
3481 if (unit_active_state(u
) != UNIT_INACTIVE
)
3484 if (slice
->type
!= UNIT_SLICE
)
3487 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3488 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3491 if (UNIT_GET_SLICE(u
) == slice
)
3494 /* Disallow slice changes if @u is already bound to cgroups */
3495 if (UNIT_GET_SLICE(u
) && u
->cgroup_realized
)
3498 /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
3499 if (UNIT_GET_SLICE(u
))
3500 unit_remove_dependencies(u
, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3502 r
= unit_add_dependency(u
, UNIT_IN_SLICE
, slice
, true, UNIT_DEPENDENCY_SLICE_PROPERTY
);
/* unit_set_default_slice() — pick a default slice for a unit that has none:
 * instantiated units get an implicit per-template slice ("system-<prefix>"
 * or "app-<prefix>", with the prefix extra-escaped because "-" is special in
 * slice names); extrinsic units stay in the root slice; everything else goes
 * to system.slice (system manager) or app.slice (user manager). The chosen
 * slice is loaded and installed via unit_set_slice(). Skipped in
 * MANAGER_TEST_RUN_IGNORE_DEPENDENCIES mode or when a slice is already set.
 * NOTE(review): lossy extraction — the `if (u->instance)` branch head and
 * the `slice` variable declaration are not visible here. */
3509 int unit_set_default_slice(Unit
*u
) {
3510 const char *slice_name
;
3516 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3519 if (UNIT_GET_SLICE(u
))
3523 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3525 /* Implicitly place all instantiated units in their
3526 * own per-template slice */
3528 r
= unit_name_to_prefix(u
->id
, &prefix
);
3532 /* The prefix is already escaped, but it might include
3533 * "-" which has a special meaning for slice units,
3534 * hence escape it here extra. */
3535 escaped
= unit_name_escape(prefix
);
3539 if (MANAGER_IS_SYSTEM(u
->manager
))
3540 slice_name
= strjoina("system-", escaped
, ".slice");
3542 slice_name
= strjoina("app-", escaped
, ".slice");
3544 } else if (unit_is_extrinsic(u
))
3545 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3546 * the root slice. They don't really belong in one of the subslices. */
3547 slice_name
= SPECIAL_ROOT_SLICE
;
3549 else if (MANAGER_IS_SYSTEM(u
->manager
))
3550 slice_name
= SPECIAL_SYSTEM_SLICE
;
3552 slice_name
= SPECIAL_APP_SLICE
;
3554 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3558 return unit_set_slice(u
, slice
);
/* unit_slice_name() — return the name of the unit's slice, looked up via
 * UNIT_GET_SLICE(); presumably NULL when no slice is set (the return lines
 * are not visible in this lossy extraction). */
3561 const char *unit_slice_name(Unit
*u
) {
3565 slice
= UNIT_GET_SLICE(u
);
/* unit_load_related_unit() — load the unit that shares this unit's name but
 * carries suffix `type` (e.g. foo.service → foo.socket): build the renamed
 * name, refuse when it would resolve back to u itself, then load via
 * manager_load_unit() into *_found. The final assert documents that a
 * successful lookup never returns u. */
3572 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3573 _cleanup_free_
char *t
= NULL
;
3580 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3583 if (unit_has_name(u
, t
))
3586 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3587 assert(r
< 0 || *_found
!= u
);
/* signal_name_owner_changed() — sd-bus match callback for the D-Bus
 * NameOwnerChanged signal (signature "sss": name, old owner, new owner; only
 * the new owner is read). Forwards the new owner — normalized so "" becomes
 * NULL — to the unit type's bus_name_owner_change() hook if it has one.
 * Parse errors are logged but not propagated. */
3591 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3592 const char *new_owner
;
3593 Unit
*u
= ASSERT_PTR(userdata
);
3598 r
= sd_bus_message_read(message
, "sss", NULL
, NULL
, &new_owner
);
3600 bus_log_parse_error(r
);
3604 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3605 UNIT_VTABLE(u
)->bus_name_owner_change(u
, empty_to_null(new_owner
));
/* get_name_owner_handler() — async reply handler for the GetNameOwner D-Bus
 * call issued by unit_install_bus_match(). Releases the one-shot call slot
 * first, treats NameHasNoOwner as a normal "no owner" outcome, logs any
 * other error reply, and otherwise forwards the owner string to the unit
 * type's bus_name_owner_change() hook.
 * NOTE(review): lossy extraction — the `if (sd_bus_message_is_method_error)`
 * style branch heads and returns around the error handling are not fully
 * visible here. */
3610 static int get_name_owner_handler(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3611 const sd_bus_error
*e
;
3612 const char *new_owner
;
3613 Unit
*u
= ASSERT_PTR(userdata
);
3618 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3620 e
= sd_bus_message_get_error(message
);
3622 if (!sd_bus_error_has_name(e
, SD_BUS_ERROR_NAME_HAS_NO_OWNER
)) {
3623 r
= sd_bus_error_get_errno(e
);
3624 log_unit_error_errno(u
, r
,
3625 "Unexpected error response from GetNameOwner(): %s",
3626 bus_error_message(e
, r
));
3631 r
= sd_bus_message_read(message
, "s", &new_owner
);
3633 return bus_log_parse_error(r
);
3635 assert(!isempty(new_owner
));
3638 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3639 UNIT_VTABLE(u
)->bus_name_owner_change(u
, new_owner
);
/* unit_install_bus_match() — start watching D-Bus name `name` on behalf of
 * this unit: (1) install a NameOwnerChanged signal match (handled by
 * signal_name_owner_changed) filtered on arg0 == name, and (2) issue an
 * async GetNameOwner call (handled by get_name_owner_handler) to learn the
 * current owner. The call timeout is taken from the unit type's start
 * timeout when available. If the async call fails, the already-installed
 * match slot is released so the watch is all-or-nothing. Refused when a
 * match or pending call already exists.
 * NOTE(review): lossy extraction — many argument lines of the
 * bus_add_match_full()/sd_bus_call_async() calls (bus handle, slot pointer,
 * timeout) and several returns are missing here. */
3644 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3645 _cleanup_(sd_bus_message_unrefp
) sd_bus_message
*m
= NULL
;
3647 usec_t timeout_usec
= 0;
3654 if (u
->match_bus_slot
|| u
->get_name_owner_slot
)
3657 /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
3658 * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
3659 * value defined above. */
3660 if (UNIT_VTABLE(u
)->get_timeout_start_usec
)
3661 timeout_usec
= UNIT_VTABLE(u
)->get_timeout_start_usec(u
);
3663 match
= strjoina("type='signal',"
3664 "sender='org.freedesktop.DBus',"
3665 "path='/org/freedesktop/DBus',"
3666 "interface='org.freedesktop.DBus',"
3667 "member='NameOwnerChanged',"
3668 "arg0='", name
, "'");
3670 r
= bus_add_match_full(
3675 signal_name_owner_changed
,
3682 r
= sd_bus_message_new_method_call(
3685 "org.freedesktop.DBus",
3686 "/org/freedesktop/DBus",
3687 "org.freedesktop.DBus",
3692 r
= sd_bus_message_append(m
, "s", name
);
3696 r
= sd_bus_call_async(
3698 &u
->get_name_owner_slot
,
3700 get_name_owner_handler
,
3705 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3709 log_unit_debug(u
, "Watching D-Bus name '%s'.", name
);
/* unit_watch_bus_name() — register this unit as the (single) watcher of bus
 * name `name`: install the match immediately when the API bus is up,
 * otherwise only record the name in manager->watch_bus for bus_setup_api()
 * to handle later. On hashmap failure, any slots installed above are torn
 * down again before reporting the error. */
3713 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3719 /* Watch a specific name on the bus. We only support one unit
3720 * watching each name for now. */
3722 if (u
->manager
->api_bus
) {
3723 /* If the bus is already available, install the match directly.
3724 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3725 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3727 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3730 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3732 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3733 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3734 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
/* unit_unwatch_bus_name() — undo unit_watch_bus_name(): drop the watch_bus
 * hashmap entry (only if it maps to this unit) and release both the signal
 * match slot and any pending GetNameOwner call slot. */
3740 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3744 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3745 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3746 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
/* unit_add_node_dependency() — link this unit to the .device unit backing
 * device node `what`: skip non-device paths and environments without device
 * unit support (containers), translate the path to a .device unit name, load
 * it, upgrade Requires= to BindsTo= when the device asks to be bound, and
 * finally add After= plus the requested dependency (downgraded to Wants= in
 * per-user managers, which don't manage devices).
 * NOTE(review): lossy extraction — the `Unit *device` declaration and error
 * returns are not visible here. */
3749 int unit_add_node_dependency(Unit
*u
, const char *what
, UnitDependency dep
, UnitDependencyMask mask
) {
3750 _cleanup_free_
char *e
= NULL
;
3756 /* Adds in links to the device node that this unit is based on */
3760 if (!is_device_path(what
))
3763 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3764 if (!unit_type_supported(UNIT_DEVICE
))
3767 r
= unit_name_from_path(what
, ".device", &e
);
3771 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3775 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3776 dep
= UNIT_BINDS_TO
;
3778 return unit_add_two_dependencies(u
, UNIT_AFTER
,
3779 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3780 device
, true, mask
);
/* Order this unit after the blockdev@<escaped>.target instance for a /dev/
 * block device path. Skipped for non-/dev paths and when device units are
 * unsupported. */
3783 int unit_add_blockdev_dependency(Unit
*u
, const char *what
, UnitDependencyMask mask
) {
3784 _cleanup_free_
char *escaped
= NULL
, *target
= NULL
;
3792 if (!path_startswith(what
, "/dev/"))
3795 /* If we don't support devices, then also don't bother with blockdev@.target */
3796 if (!unit_type_supported(UNIT_DEVICE
))
3799 r
= unit_name_path_escape(what
, &escaped
);
3803 r
= unit_name_build("blockdev", escaped
, ".target", &target
);
3807 return unit_add_dependency_by_name(u
, UNIT_AFTER
, target
, true, mask
);
/* Re-establish runtime state after deserialization (daemon-reload/reexec):
 * restore bus-name tracking, run the per-type coldplug hook, coldplug queued
 * jobs, and re-add NFT set entries. Errors are gathered, not fatal per step. */
3810 int unit_coldplug(Unit
*u
) {
3815 /* Make sure we don't enter a loop, when coldplugging recursively. */
3819 u
->coldplugged
= true;
/* Re-register deserialized bus-track names, then drop the temporary list. */
3821 STRV_FOREACH(i
, u
->deserialized_refs
)
3822 RET_GATHER(r
, bus_unit_track_add_name(u
, *i
));
3824 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3826 if (UNIT_VTABLE(u
)->coldplug
)
3827 RET_GATHER(r
, UNIT_VTABLE(u
)->coldplug(u
));
3830 RET_GATHER(r
, job_coldplug(u
->job
));
3832 RET_GATHER(r
, job_coldplug(u
->nop_job
));
3834 unit_modify_nft_set(u
, /* add = */ true);
/* Let the unit catch up with external state changes that happened while the
 * manager was not running: per-type hook first, then cgroup state. */
3838 void unit_catchup(Unit
*u
) {
3841 if (UNIT_VTABLE(u
)->catchup
)
3842 UNIT_VTABLE(u
)->catchup(u
);
3844 unit_cgroup_catchup(u
);
/* Return true if the file at 'path' changed since 'mtime' (or its masked state
 * changed, per 'path_masked'). Virtual FS paths are treated as never stale.
 * NOTE(review): the stat buffer declaration and some branches are missing from
 * this extraction. */
3847 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3853 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3854 * are never out-of-date. */
3855 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3858 if (stat(path
, &st
) < 0)
3859 /* What, cannot access this anymore? */
3863 /* For masked files check if they are still so */
3864 return !null_or_empty(&st
);
3866 /* For non-empty files check the mtime */
3867 return timespec_load(&st
.st_mtim
) > mtime
;
/* Return true if the on-disk configuration of this unit changed since it was
 * loaded, i.e. a daemon-reload would pick up differences: fragment, source
 * path, or the set/mtimes of drop-in files. */
3872 bool unit_need_daemon_reload(Unit
*u
) {
3873 _cleanup_strv_free_
char **dropins
= NULL
;
3878 if (u
->manager
->unit_file_state_outdated
)
3881 /* For unit files, we allow masking… */
3882 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3883 u
->load_state
== UNIT_MASKED
))
3886 /* Source paths should not be masked… */
3887 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
/* Recompute the drop-in list and compare with what was seen at load time. */
3890 if (u
->load_state
== UNIT_LOADED
)
3891 (void) unit_find_dropin_paths(u
, &dropins
);
3892 if (!strv_equal(u
->dropin_paths
, dropins
))
3895 /* … any drop-ins that are masked are simply omitted from the list. */
3896 STRV_FOREACH(path
, u
->dropin_paths
)
3897 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
/* Clear the unit's failure state: per-type hook, then the start rate limiter
 * and its hit flag, so the unit may be started again. */
3903 void unit_reset_failed(Unit
*u
) {
3906 if (UNIT_VTABLE(u
)->reset_failed
)
3907 UNIT_VTABLE(u
)->reset_failed(u
);
3909 ratelimit_reset(&u
->start_ratelimit
);
3910 u
->start_limit_hit
= false;
/* Return the unit whose state this unit follows (per-type hook), or presumably
 * NULL when there is none — the fallback return is not visible in this chunk. */
3913 Unit
*unit_following(Unit
*u
) {
3916 if (UNIT_VTABLE(u
)->following
)
3917 return UNIT_VTABLE(u
)->following(u
);
/* Return true if a stop job is queued for this unit (job type check only). */
3922 bool unit_stop_pending(Unit
*u
) {
3925 /* This call does check the current state of the unit. It's
3926 * hence useful to be called from state change calls of the
3927 * unit itself, where the state isn't updated yet. This is
3928 * different from unit_inactive_or_pending() which checks both
3929 * the current state and for a queued job. */
3931 return unit_has_job_type(u
, JOB_STOP
);
/* True when the unit is inactive/deactivating, or has a stop job queued. */
3934 bool unit_inactive_or_pending(Unit
*u
) {
3937 /* Returns true if the unit is inactive or going down */
3939 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3942 if (unit_stop_pending(u
))
/* True when the unit is active/activating, or has a start-like job
 * (start, reload-or-start, restart) queued. */
3948 bool unit_active_or_pending(Unit
*u
) {
3951 /* Returns true if the unit is active or going up */
3953 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3957 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
/* Default will_restart implementation: a queued start job means restart. */
3963 bool unit_will_restart_default(Unit
*u
) {
3966 return unit_has_job_type(u
, JOB_START
);
/* Dispatch to the per-type will_restart hook; the path taken when the hook is
 * absent is not visible in this chunk. */
3969 bool unit_will_restart(Unit
*u
) {
3972 if (!UNIT_VTABLE(u
)->will_restart
)
3975 return UNIT_VTABLE(u
)->will_restart(u
);
/* Forward a cgroup OOM notification to the unit type's handler, if any.
 * 'managed_oom' distinguishes systemd-oomd kills from kernel OOM kills. */
3978 void unit_notify_cgroup_oom(Unit
*u
, bool managed_oom
) {
3981 if (UNIT_VTABLE(u
)->notify_cgroup_oom
)
3982 UNIT_VTABLE(u
)->notify_cgroup_oom(u
, managed_oom
);
/* Build a Set of the given main/control PIDs (as PID_TO_PTR values), used to
 * exclude them from cgroup-wide kills. Returns ownership via TAKE_PTR;
 * error-return lines are not visible in this extraction. */
3985 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3986 _cleanup_set_free_ Set
*pid_set
= NULL
;
3989 pid_set
= set_new(NULL
);
3993 /* Exclude the main/control pids from being killed via the cgroup */
3995 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
4000 if (control_pid
> 0) {
4001 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
4006 return TAKE_PTR(pid_set
);
/* cg_kill_recursive() logging callback for user-requested kills: logs each PID
 * (with its comm, if readable) at INFO level. 'userdata' is the Unit. */
4009 static int kill_common_log(const PidRef
*pid
, int signo
, void *userdata
) {
4010 _cleanup_free_
char *comm
= NULL
;
4011 Unit
*u
= ASSERT_PTR(userdata
);
4013 (void) pidref_get_comm(pid
, &comm
);
4015 log_unit_info(u
, "Sending signal SIG%s to process " PID_FMT
" (%s) on client request.",
4016 signal_to_string(signo
), pid
->pid
, strna(comm
));
/* Deliver a signal either as a plain kill() or as a queued value (sigqueue()),
 * presumably switched on 'code' being SI_USER vs SI_QUEUE — the switch/case
 * lines are not visible in this extraction (TODO confirm). */
4021 static int kill_or_sigqueue(PidRef
* pidref
, int signo
, int code
, int value
) {
4022 assert(pidref_is_set(pidref
));
4023 assert(SIGNAL_VALID(signo
));
4028 log_debug("Killing " PID_FMT
" with signal SIG%s.", pidref
->pid
, signal_to_string(signo
));
4029 return pidref_kill(pidref
, signo
);
4032 log_debug("Enqueuing value %i to " PID_FMT
" on signal SIG%s.", value
, pidref
->pid
, signal_to_string(signo
));
4033 return pidref_sigqueue(pidref
, signo
, value
);
4036 assert_not_reached();
/* Common implementation of user-requested (D-Bus Kill()) signalling of a
 * unit's processes: main PID, control PID, and/or the whole cgroup, per 'who'.
 * NOTE(review): the function signature and many intermediate lines (r < 0
 * checks, 'killed' updates) are missing from this extraction — presumably this
 * is unit_kill_common(); comments annotate visible fragments only. */
4046 sd_bus_error
*error
) {
4048 PidRef
*main_pid
, *control_pid
;
4049 bool killed
= false;
4052 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4053 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4054 * stop a service ourselves. */
4058 assert(who
< _KILL_WHO_MAX
);
4059 assert(SIGNAL_VALID(signo
));
4060 assert(IN_SET(code
, SI_USER
, SI_QUEUE
));
4062 main_pid
= unit_main_pid(u
);
4063 control_pid
= unit_control_pid(u
);
/* Units with neither a cgroup nor tracked PIDs cannot be killed at all. */
4065 if (!UNIT_HAS_CGROUP_CONTEXT(u
) && !main_pid
&& !control_pid
)
4066 return sd_bus_error_setf(error
, SD_BUS_ERROR_NOT_SUPPORTED
, "Unit type does not support process killing.");
4068 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
4070 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
4071 if (!pidref_is_set(main_pid
))
4072 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
4075 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
4077 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
4078 if (!pidref_is_set(control_pid
))
4079 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
/* Signal the control process when requested (directly or via *ALL*). */
4082 if (pidref_is_set(control_pid
) &&
4083 IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4084 _cleanup_free_
char *comm
= NULL
;
4085 (void) pidref_get_comm(control_pid
, &comm
);
4087 r
= kill_or_sigqueue(control_pid
, signo
, code
, value
);
4091 /* Report this failure both to the logs and to the client */
4092 sd_bus_error_set_errnof(
4094 "Failed to send signal SIG%s to control process " PID_FMT
" (%s): %m",
4095 signal_to_string(signo
), control_pid
->pid
, strna(comm
));
4096 log_unit_warning_errno(
4098 "Failed to send signal SIG%s to control process " PID_FMT
" (%s) on client request: %m",
4099 signal_to_string(signo
), control_pid
->pid
, strna(comm
));
4101 log_unit_info(u
, "Sent signal SIG%s to control process " PID_FMT
" (%s) on client request.",
4102 signal_to_string(signo
), control_pid
->pid
, strna(comm
));
/* Signal the main process when requested (directly or via *ALL*). */
4107 if (pidref_is_set(main_pid
) &&
4108 IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4109 _cleanup_free_
char *comm
= NULL
;
4110 (void) pidref_get_comm(main_pid
, &comm
);
4112 r
= kill_or_sigqueue(main_pid
, signo
, code
, value
);
4117 sd_bus_error_set_errnof(
4119 "Failed to send signal SIG%s to main process " PID_FMT
" (%s): %m",
4120 signal_to_string(signo
), main_pid
->pid
, strna(comm
));
4123 log_unit_warning_errno(
4125 "Failed to send signal SIG%s to main process " PID_FMT
" (%s) on client request: %m",
4126 signal_to_string(signo
), main_pid
->pid
, strna(comm
));
4129 log_unit_info(u
, "Sent signal SIG%s to main process " PID_FMT
" (%s) on client request.",
4130 signal_to_string(signo
), main_pid
->pid
, strna(comm
));
4135 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4136 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4137 * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
4138 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
&& code
== SI_USER
) {
4139 _cleanup_set_free_ Set
*pid_set
= NULL
;
4141 /* Exclude the main/control pids from being killed via the cgroup */
4142 pid_set
= unit_pid_set(main_pid
? main_pid
->pid
: 0, control_pid
? control_pid
->pid
: 0);
4146 r
= cg_kill_recursive(u
->cgroup_path
, signo
, 0, pid_set
, kill_common_log
, u
);
/* ESRCH/ENOENT just mean the cgroup is already empty/gone — not an error. */
4148 if (!IN_SET(r
, -ESRCH
, -ENOENT
)) {
4152 sd_bus_error_set_errnof(
4154 "Failed to send signal SIG%s to auxiliary processes: %m",
4155 signal_to_string(signo
));
4158 log_unit_warning_errno(
4160 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4161 signal_to_string(signo
));
4167 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4168 if (ret
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
, KILL_MAIN_FAIL
))
4169 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No matching processes to kill");
/* Collect into *s the set of units that follow this one, via the per-type
 * hook; the no-hook fallback is not visible in this chunk. */
4174 int unit_following_set(Unit
*u
, Set
**s
) {
4178 if (UNIT_VTABLE(u
)->following_set
)
4179 return UNIT_VTABLE(u
)->following_set(u
, s
);
/* Return the cached enable/disable state of the unit file, computing it
 * lazily on first call (negative cache value means "not yet determined").
 * Falls back to UNIT_FILE_BAD when the lookup fails. */
4185 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4190 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4191 r
= unit_file_get_state(
4192 u
->manager
->runtime_scope
,
4195 &u
->unit_file_state
);
4197 u
->unit_file_state
= UNIT_FILE_BAD
;
4200 return u
->unit_file_state
;
/* Return the cached preset (enable/disable) policy for this unit file,
 * computing it lazily from the fragment filename on first call. Negative
 * errno values are cached too (including -EISDIR for directory paths). */
4203 PresetAction
unit_get_unit_file_preset(Unit
*u
) {
4208 if (u
->unit_file_preset
< 0 && u
->fragment_path
) {
4209 _cleanup_free_
char *bn
= NULL
;
4211 r
= path_extract_filename(u
->fragment_path
, &bn
);
4213 return (u
->unit_file_preset
= r
);
/* path_extract_filename() returns O_DIRECTORY for paths ending in '/'. */
4215 if (r
== O_DIRECTORY
)
4216 return (u
->unit_file_preset
= -EISDIR
);
4218 u
->unit_file_preset
= unit_file_query_preset(
4219 u
->manager
->runtime_scope
,
4225 return u
->unit_file_preset
;
/* Point 'ref' (owned by 'source') at 'target', unsetting any previous target
 * first, and link it into the target's refs_by_target list. */
4228 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4234 unit_ref_unset(ref
);
4236 ref
->source
= source
;
4237 ref
->target
= target
;
4238 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
/* Clear a UnitRef: queue the target for GC (it may now be unreferenced),
 * unlink from the target's refs_by_target list, and null out both ends. */
4242 void unit_ref_unset(UnitRef
*ref
) {
4248 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4249 * be unreferenced now. */
4250 unit_add_to_gc_queue(ref
->target
);
4252 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4253 ref
->source
= ref
->target
= NULL
;
/* Derive a DynamicUser= user name from the unit name: use the unit prefix
 * directly when it is a valid user/group name, otherwise fall back to a
 * "_du" + 64-bit siphash of the prefix (fixed hash key for stability). */
4256 static int user_from_unit_name(Unit
*u
, char **ret
) {
4258 static const uint8_t hash_key
[] = {
4259 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4260 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4263 _cleanup_free_
char *n
= NULL
;
4266 r
= unit_name_to_prefix(u
->id
, &n
);
4270 if (valid_user_group_name(n
, 0)) {
4275 /* If we can't use the unit name as a user name, then let's hash it and use that */
4276 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
/* After a unit's settings are fully loaded, patch manager defaults and derived
 * settings into its ExecContext and CGroupContext: default rlimits, user-manager
 * working directory, capability restrictions implied by Protect*= options,
 * DynamicUser= hardening, and device-allow entries implied by other settings.
 * NOTE(review): many error-check and brace lines are missing from this
 * extraction; comments annotate visible fragments only. */
4282 int unit_patch_contexts(Unit
*u
) {
4289 /* Patch in the manager defaults into the exec and cgroup
4290 * contexts, _after_ the rest of the settings have been
4293 ec
= unit_get_exec_context(u
);
4295 /* This only copies in the ones that need memory */
4296 for (unsigned i
= 0; i
< _RLIMIT_MAX
; i
++)
4297 if (u
->manager
->defaults
.rlimit
[i
] && !ec
->rlimit
[i
]) {
4298 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->defaults
.rlimit
[i
], 1);
/* User managers default the working directory to the invoking user's home. */
4303 if (MANAGER_IS_USER(u
->manager
) &&
4304 !ec
->working_directory
) {
4306 r
= get_home_dir(&ec
->working_directory
);
4310 /* Allow user services to run, even if the
4311 * home directory is missing */
4312 ec
->working_directory_missing_ok
= true;
/* Protect*= options imply dropping the matching capabilities. */
4315 if (ec
->private_devices
)
4316 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4318 if (ec
->protect_kernel_modules
)
4319 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4321 if (ec
->protect_kernel_logs
)
4322 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYSLOG
);
4324 if (ec
->protect_clock
)
4325 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_SYS_TIME
) | (UINT64_C(1) << CAP_WAKE_ALARM
));
4327 if (ec
->dynamic_user
) {
4329 r
= user_from_unit_name(u
, &ec
->user
);
4335 ec
->group
= strdup(ec
->user
);
4340 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4341 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4344 ec
->private_tmp
= true;
4345 ec
->remove_ipc
= true;
4346 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4347 if (ec
->protect_home
== PROTECT_HOME_NO
)
4348 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4350 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4352 ec
->no_new_privileges
= true;
4353 ec
->restrict_suid_sgid
= true;
4356 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++)
4357 exec_directory_sort(ec
->directories
+ dt
);
4360 cc
= unit_get_cgroup_context(u
);
/* PrivateDevices= tightens the default device policy. */
4363 if (ec
->private_devices
&&
4364 cc
->device_policy
== CGROUP_DEVICE_POLICY_AUTO
)
4365 cc
->device_policy
= CGROUP_DEVICE_POLICY_CLOSED
;
4367 /* Only add these if needed, as they imply that everything else is blocked. */
4368 if (cc
->device_policy
!= CGROUP_DEVICE_POLICY_AUTO
|| cc
->device_allow
) {
4369 if (ec
->root_image
|| ec
->mount_images
) {
4371 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4372 FOREACH_STRING(p
, "/dev/loop-control", "/dev/mapper/control") {
4373 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
4377 FOREACH_STRING(p
, "block-loop", "block-blkext", "block-device-mapper") {
4378 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
|CGROUP_DEVICE_MKNOD
);
4383 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4384 * Same for mapper and verity. */
4385 FOREACH_STRING(p
, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4386 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, p
, true, UNIT_DEPENDENCY_FILE
);
4392 if (ec
->protect_clock
) {
4393 r
= cgroup_context_add_device_allow(cc
, "char-rtc", CGROUP_DEVICE_READ
);
4398 /* If there are encrypted credentials we might need to access the TPM. */
4399 if (exec_context_has_encrypted_credentials(ec
)) {
4400 r
= cgroup_context_add_device_allow(cc
, "char-tpm", CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
/* Return the unit type's embedded ExecContext via the vtable-declared byte
 * offset into the type-specific unit struct (NULL-offset handling not visible). */
4410 ExecContext
*unit_get_exec_context(const Unit
*u
) {
4417 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4421 return (ExecContext
*) ((uint8_t*) u
+ offset
);
/* Return the unit type's embedded KillContext via its vtable byte offset. */
4424 KillContext
*unit_get_kill_context(Unit
*u
) {
4431 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4435 return (KillContext
*) ((uint8_t*) u
+ offset
);
/* Return the unit type's embedded CGroupContext via its vtable byte offset. */
4438 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4444 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4448 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
/* Return the unit type's ExecRuntime pointer; unlike the contexts above the
 * offset locates a pointer field, hence the extra dereference. */
4451 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4457 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4461 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
/* Choose the directory a drop-in for this unit should be written to, based on
 * write flags: transient units always use the transient dir, otherwise
 * persistent or runtime control dirs; NOOP flags yield no directory. */
4464 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4467 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4470 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4471 return u
->manager
->lookup_paths
.transient
;
4473 if (flags
& UNIT_PERSISTENT
)
4474 return u
->manager
->lookup_paths
.persistent_control
;
4476 if (flags
& UNIT_RUNTIME
)
4477 return u
->manager
->lookup_paths
.runtime_control
;
/* Escape a setting value for writing into a unit file. At most one of the
 * exec-syntax/C escaping modes may be combined with specifier escaping. */
4482 const char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4484 assert(popcount(flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
| UNIT_ESCAPE_C
)) <= 1);
4487 _cleanup_free_
char *t
= NULL
;
4489 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4490 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4491 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4492 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4495 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4496 t
= specifier_escape(s
);
4503 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4504 * ExecStart= and friends, i.e. '$' and quotes. */
4506 if (flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
)) {
/* ENV variant additionally doubles '$' so it survives environment expansion. */
4509 if (flags
& UNIT_ESCAPE_EXEC_SYNTAX_ENV
) {
4510 t2
= strreplace(s
, "$", "$$");
4513 free_and_replace(t
, t2
);
4516 t2
= shell_escape(t
?: s
, "\"");
4519 free_and_replace(t
, t2
);
4523 } else if (flags
& UNIT_ESCAPE_C
) {
4529 free_and_replace(t
, t2
);
/* Escape each string in 'l' per 'flags' and join them space-separated (each
 * entry quoted), suitable for an ExecStart= line. Returns a newly allocated
 * string; intermediate append/quote lines are not all visible in this chunk. */
4538 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4539 _cleanup_free_
char *result
= NULL
;
4542 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4543 * lines in a way suitable for ExecStart= stanzas. */
4545 STRV_FOREACH(i
, l
) {
4546 _cleanup_free_
char *buf
= NULL
;
4551 p
= unit_escape_setting(*i
, flags
, &buf
);
4555 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4556 if (!GREEDY_REALLOC(result
, n
+ a
+ 1))
4570 if (!GREEDY_REALLOC(result
, n
+ 1))
4575 return TAKE_PTR(result
);
/* Persist a single setting for this unit: either append it to the transient
 * unit file currently being created, or write it as a 50-<name>.conf drop-in
 * (with a "do not edit" header) in the directory chosen by unit_drop_in_dir().
 * Section headers ([Unit] or the type's private section) are prefixed as
 * needed. NOTE(review): some error-return lines are missing from this chunk. */
4578 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4579 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4580 const char *dir
, *wrapped
;
4587 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4590 data
= unit_escape_setting(data
, flags
, &escaped
);
4594 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4595 * previous section header is the same */
4597 if (flags
& UNIT_PRIVATE
) {
4598 if (!UNIT_VTABLE(u
)->private_section
)
4601 if (!u
->transient_file
|| u
->last_section_private
< 0)
4602 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4603 else if (u
->last_section_private
== 0)
4604 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4606 if (!u
->transient_file
|| u
->last_section_private
< 0)
4607 data
= strjoina("[Unit]\n", data
);
4608 else if (u
->last_section_private
> 0)
4609 data
= strjoina("\n[Unit]\n", data
);
4612 if (u
->transient_file
) {
4613 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4614 * write to the transient unit file. */
4615 fputs(data
, u
->transient_file
);
4617 if (!endswith(data
, "\n"))
4618 fputc('\n', u
->transient_file
);
4620 /* Remember which section we wrote this entry to */
4621 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4625 dir
= unit_drop_in_dir(u
, flags
);
4629 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4630 "# or an equivalent operation. Do not edit.\n",
4634 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4638 (void) mkdir_p_label(p
, 0755);
4640 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4641 * recreate the cache after every drop-in we write. */
4642 if (u
->manager
->unit_path_cache
) {
4643 r
= set_put_strdup(&u
->manager
->unit_path_cache
, p
);
4648 r
= write_string_file_atomic_label(q
, wrapped
);
4652 r
= strv_push(&u
->dropin_paths
, q
);
4657 strv_uniq(u
->dropin_paths
);
/* Record when we last wrote a drop-in, used by unit_need_daemon_reload(). */
4659 u
->dropin_mtime
= now(CLOCK_REALTIME
);
/* printf-style convenience wrapper around unit_write_setting(): formats the
 * value, then delegates. NOOP flags short-circuit before formatting. */
4664 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4665 _cleanup_free_
char *p
= NULL
;
4673 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4676 va_start(ap
, format
);
4677 r
= vasprintf(&p
, format
, ap
);
4683 return unit_write_setting(u
, flags
, name
, p
);
/* Turn this unit into a transient one: open a fresh unit file in the transient
 * lookup path (kept open until unit_load()), reset all fragment/source/drop-in
 * bookkeeping, mark the unit as a stub to be reloaded, and write the standard
 * "do not edit" header. Fails for unit types without can_transient. */
4686 int unit_make_transient(Unit
*u
) {
4687 _cleanup_free_
char *path
= NULL
;
4692 if (!UNIT_VTABLE(u
)->can_transient
)
4695 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4697 path
= path_join(u
->manager
->lookup_paths
.transient
, u
->id
);
4701 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4702 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
/* "we": write + O_CLOEXEC. */
4705 f
= fopen(path
, "we");
4710 safe_fclose(u
->transient_file
);
4711 u
->transient_file
= f
;
4713 free_and_replace(u
->fragment_path
, path
);
4715 u
->source_path
= mfree(u
->source_path
);
4716 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4717 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4719 u
->load_state
= UNIT_STUB
;
4721 u
->transient
= true;
4723 unit_add_to_dbus_queue(u
);
4724 unit_add_to_gc_queue(u
);
4726 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
/* cg_kill_recursive() logging callback used from unit_kill_context(): logs
 * each killed PID at NOTICE level, skipping bracketed (temporary) comm names
 * while still counting them as killed. 'userdata' is the Unit. */
4732 static int log_kill(const PidRef
*pid
, int sig
, void *userdata
) {
4733 _cleanup_free_
char *comm
= NULL
;
4735 assert(pidref_is_set(pid
));
4737 (void) pidref_get_comm(pid
, &comm
);
4739 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4740 only, like for example systemd's own PAM stub process. */
4741 if (comm
&& comm
[0] == '(')
4742 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4743 * here to let the manager know that a process was killed. */
4746 log_unit_notice(userdata
,
4747 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4750 signal_to_string(sig
));
/* Map a kill operation to the signal configured in the KillContext, and report
 * via *ret_noteworthy whether sending it deserves louder logging (final-kill
 * and watchdog signals do; plain terminate/restart do not). The switch header
 * and some case labels are not visible in this extraction. */
4755 static int operation_to_signal(
4756 const KillContext
*c
,
4758 bool *ret_noteworthy
) {
4764 case KILL_TERMINATE
:
4765 case KILL_TERMINATE_AND_LOG
:
4766 *ret_noteworthy
= false;
4767 return c
->kill_signal
;
4770 *ret_noteworthy
= false;
4771 return restart_kill_signal(c
);
4774 *ret_noteworthy
= true;
4775 return c
->final_kill_signal
;
4778 *ret_noteworthy
= true;
4779 return c
->watchdog_signal
;
4782 assert_not_reached();
/* Kill a unit's processes as part of manager-initiated shutdown: signal the
 * main and control PIDs (plus optional SIGHUP), then — depending on KillMode=
 * — sweep the whole cgroup. Returns > 0 if something worth waiting for was
 * killed. NOTE(review): signature lines and several checks are missing from
 * this extraction. NOTE(review): "¬eworthy" on line 4809 looks like mojibake
 * for "&noteworthy" (HTML entity &not; mis-decoded) — fix in the real source. */
4786 int unit_kill_context(
4791 PidRef
* control_pid
,
4792 bool main_pid_alien
) {
4794 bool wait_for_exit
= false, send_sighup
;
4795 cg_kill_log_func_t log_func
= NULL
;
4801 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4802 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
4803 * which is used for user-requested killing of unit processes. */
4805 if (c
->kill_mode
== KILL_NONE
)
4809 sig
= operation_to_signal(c
, k
, ¬eworthy
);
4811 log_func
= log_kill
;
4815 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4818 if (pidref_is_set(main_pid
)) {
4820 log_func(main_pid
, sig
, u
);
/* SIGCONT is sent along so a stopped process can handle the signal. */
4822 r
= pidref_kill_and_sigcont(main_pid
, sig
);
4823 if (r
< 0 && r
!= -ESRCH
) {
4824 _cleanup_free_
char *comm
= NULL
;
4825 (void) pidref_get_comm(main_pid
, &comm
);
4827 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
->pid
, strna(comm
));
4829 if (!main_pid_alien
)
4830 wait_for_exit
= true;
4832 if (r
!= -ESRCH
&& send_sighup
)
4833 (void) pidref_kill(main_pid
, SIGHUP
);
4837 if (pidref_is_set(control_pid
)) {
4839 log_func(control_pid
, sig
, u
);
4841 r
= pidref_kill_and_sigcont(control_pid
, sig
);
4842 if (r
< 0 && r
!= -ESRCH
) {
4843 _cleanup_free_
char *comm
= NULL
;
4844 (void) pidref_get_comm(control_pid
, &comm
);
4846 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
->pid
, strna(comm
));
4848 wait_for_exit
= true;
4850 if (r
!= -ESRCH
&& send_sighup
)
4851 (void) pidref_kill(control_pid
, SIGHUP
);
/* Sweep the whole cgroup in control-group mode, or in mixed mode on final kill. */
4855 if (u
->cgroup_path
&&
4856 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4857 _cleanup_set_free_ Set
*pid_set
= NULL
;
4859 /* Exclude the main/control pids from being killed via the cgroup */
4860 pid_set
= unit_pid_set(main_pid
? main_pid
->pid
: 0, control_pid
? control_pid
->pid
: 0);
4864 r
= cg_kill_recursive(
4867 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4871 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4872 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", empty_to_root(u
->cgroup_path
));
4876 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4877 * we are running in a container or if this is a delegation unit, simply because cgroup
4878 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4879 * of containers it can be confused easily by left-over directories in the cgroup — which
4880 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4881 * there we get proper events. Hence rely on them. */
4883 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4884 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4885 wait_for_exit
= true;
4890 pid_set
= unit_pid_set(main_pid
? main_pid
->pid
: 0, control_pid
? control_pid
->pid
: 0);
4894 (void) cg_kill_recursive(
4899 /* kill_log= */ NULL
,
4900 /* userdata= */ NULL
);
4905 return wait_for_exit
;
/* Register that this unit requires the mounts covering 'path': store the
 * simplified path in the unit's requires_mounts_for hashmap and add the unit
 * to the manager's per-prefix table so newly appearing mount units can find
 * their dependents. NOTE(review): several lines (Set creation, error paths)
 * are missing from this extraction. */
4908 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4914 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4915 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
4916 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4917 * appearing mount units can easily determine which units to make themselves a dependency of. */
4919 if (!path_is_absolute(path
))
4922 if (hashmap_contains(u
->requires_mounts_for
, path
)) /* Exit quickly if the path is already covered. */
4925 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4926 * only after simplification, since path_is_normalized() rejects paths with '.'.
4927 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4928 _cleanup_free_
char *p
= NULL
;
4929 r
= path_simplify_alloc(path
, &p
);
4934 if (!path_is_normalized(path
))
4937 UnitDependencyInfo di
= {
4941 r
= hashmap_ensure_put(&u
->requires_mounts_for
, &path_hash_ops
, p
, di
.data
);
4945 TAKE_PTR(p
); /* path remains a valid pointer to the string stored in the hashmap */
/* VLA sized by the path; PATH_MAX was verified above by path_is_normalized(). */
4947 char prefix
[strlen(path
) + 1];
4948 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4951 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4953 _cleanup_free_
char *q
= NULL
;
4955 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4967 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
/* Set up the ExecRuntime for this unit: reuse a shared runtime from a unit we
 * join namespaces with if one exists, otherwise allocate one; create dynamic
 * credentials for DynamicUser= units; then build the per-unit runtime object.
 * NOTE(review): several early returns and checks are missing from this chunk. */
4983 int unit_setup_exec_runtime(Unit
*u
) {
4984 _cleanup_(exec_shared_runtime_unrefp
) ExecSharedRuntime
*esr
= NULL
;
4985 _cleanup_(dynamic_creds_unrefp
) DynamicCreds
*dcreds
= NULL
;
4986 _cleanup_set_free_ Set
*units
= NULL
;
4993 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4996 /* Check if there already is an ExecRuntime for this unit? */
4997 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
5001 ec
= unit_get_exec_context(u
);
5004 r
= unit_get_transitive_dependency_set(u
, UNIT_ATOM_JOINS_NAMESPACE_OF
, &units
);
5008 /* Try to get it from somebody else */
5009 SET_FOREACH(other
, units
) {
5010 r
= exec_shared_runtime_acquire(u
->manager
, NULL
, other
->id
, false, &esr
);
/* Nobody to share with: create (create=true) a shared runtime of our own. */
5018 r
= exec_shared_runtime_acquire(u
->manager
, ec
, u
->id
, true, &esr
);
5023 if (ec
->dynamic_user
) {
5024 r
= dynamic_creds_make(u
->manager
, ec
->user
, ec
->group
, &dcreds
);
5029 r
= exec_runtime_make(u
, ec
, esr
, dcreds
, rt
);
/* Return whether a unit type is usable here. Honors a $SYSTEMD_SUPPORT_<TYPE>
 * override (cached per type), then falls back to the vtable's supported()
 * hook; types without a hook are presumably always supported (the return for
 * that branch is not visible here). */
5039 bool unit_type_supported(UnitType t
) {
5040 static int8_t cache
[_UNIT_TYPE_MAX
] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5043 if (_unlikely_(t
< 0))
5045 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
5048 if (cache
[t
] == 0) {
5051 e
= strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t
));
5053 r
= getenv_bool(ascii_strupper(e
));
5054 if (r
< 0 && r
!= -ENXIO
)
5055 log_debug_errno(r
, "Failed to parse $%s, ignoring: %m", e
);
/* Only an explicit "0"/false disables; unset or parse errors mean enabled. */
5057 cache
[t
] = r
== 0 ? -1 : 1;
5062 if (!unit_vtable
[t
]->supported
)
5065 return unit_vtable
[t
]->supported();
/* Log a structured NOTICE (overmounting message ID) if the directory 'where'
 * that is about to be mounted over is not empty. Quietly returns when NOTICE
 * logging is disabled for the unit or the directory is empty/not a dir. */
5068 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
5074 if (!unit_log_level_test(u
, LOG_NOTICE
))
5077 r
= dir_is_empty(where
, /* ignore_hidden_or_backup= */ false);
5078 if (r
> 0 || r
== -ENOTDIR
)
5081 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
5085 log_unit_struct(u
, LOG_NOTICE
,
5086 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5087 LOG_UNIT_INVOCATION_ID(u
),
5088 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
/* Verify a mount path is canonical (contains no symlinks) by chasing it and
 * comparing with the input; logs a structured error when it is not. Chase
 * failures are logged at debug and presumably tolerated (return not visible). */
5092 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
5093 _cleanup_free_
char *canonical_where
= NULL
;
5099 r
= chase(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
, NULL
);
5101 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
5105 /* We will happily ignore a trailing slash (or any redundant slashes) */
5106 if (path_equal(where
, canonical_where
))
5109 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5110 log_unit_struct(u
, LOG_ERR
,
5111 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5112 LOG_UNIT_INVOCATION_ID(u
),
5113 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
5119 bool unit_is_pristine(Unit
*u
) {
5122 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5123 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5124 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5126 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5127 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5128 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5131 return IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) &&
5132 !u
->fragment_path
&&
5138 PidRef
* unit_control_pid(Unit
*u
) {
5141 if (UNIT_VTABLE(u
)->control_pid
)
5142 return UNIT_VTABLE(u
)->control_pid(u
);
5147 PidRef
* unit_main_pid(Unit
*u
) {
5150 if (UNIT_VTABLE(u
)->main_pid
)
5151 return UNIT_VTABLE(u
)->main_pid(u
);
5156 static void unit_modify_user_nft_set(Unit
*u
, bool add
, NFTSetSource source
, uint32_t element
) {
5161 if (!MANAGER_IS_SYSTEM(u
->manager
))
5165 c
= unit_get_cgroup_context(u
);
5169 if (!u
->manager
->fw_ctx
) {
5170 r
= fw_ctx_new_full(&u
->manager
->fw_ctx
, /* init_tables= */ false);
5174 assert(u
->manager
->fw_ctx
);
5177 FOREACH_ARRAY(nft_set
, c
->nft_set_context
.sets
, c
->nft_set_context
.n_sets
) {
5178 if (nft_set
->source
!= source
)
5181 r
= nft_set_element_modify_any(u
->manager
->fw_ctx
, add
, nft_set
->nfproto
, nft_set
->table
, nft_set
->set
, &element
, sizeof(element
));
5183 log_warning_errno(r
, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5184 add
? "add" : "delete", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5186 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5187 add
? "Added" : "Deleted", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5191 static void unit_unref_uid_internal(
5195 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
5199 assert(_manager_unref_uid
);
5201 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5202 * gid_t are actually the same time, with the same validity rules.
5204 * Drops a reference to UID/GID from a unit. */
5206 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5207 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5209 if (!uid_is_valid(*ref_uid
))
5212 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
5213 *ref_uid
= UID_INVALID
;
5216 static void unit_unref_uid(Unit
*u
, bool destroy_now
) {
5219 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_USER
, u
->ref_uid
);
5221 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
5224 static void unit_unref_gid(Unit
*u
, bool destroy_now
) {
5227 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_GROUP
, u
->ref_gid
);
5229 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
5232 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
5235 unit_unref_uid(u
, destroy_now
);
5236 unit_unref_gid(u
, destroy_now
);
5239 static int unit_ref_uid_internal(
5244 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
5250 assert(uid_is_valid(uid
));
5251 assert(_manager_ref_uid
);
5253 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5254 * are actually the same type, and have the same validity rules.
5256 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5257 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5260 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5261 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5263 if (*ref_uid
== uid
)
5266 if (uid_is_valid(*ref_uid
)) /* Already set? */
5269 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
5277 static int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
5278 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
5281 static int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
5282 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
5285 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
5290 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5292 if (uid_is_valid(uid
)) {
5293 r
= unit_ref_uid(u
, uid
, clean_ipc
);
5298 if (gid_is_valid(gid
)) {
5299 q
= unit_ref_gid(u
, gid
, clean_ipc
);
5302 unit_unref_uid(u
, false);
5308 return r
> 0 || q
> 0;
5311 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
5317 c
= unit_get_exec_context(u
);
5319 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
5321 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5323 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_USER
, uid
);
5324 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_GROUP
, gid
);
5329 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
5334 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5335 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5336 * objects when no service references the UID/GID anymore. */
5338 r
= unit_ref_uid_gid(u
, uid
, gid
);
5340 unit_add_to_dbus_queue(u
);
5343 int unit_acquire_invocation_id(Unit
*u
) {
5349 r
= sd_id128_randomize(&id
);
5351 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5353 r
= unit_set_invocation_id(u
, id
);
5355 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5357 unit_add_to_dbus_queue(u
);
5361 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5362 const char *confirm_spawn
;
5368 /* Copy parameters from manager */
5369 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5373 p
->runtime_scope
= u
->manager
->runtime_scope
;
5375 confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5376 if (confirm_spawn
) {
5377 p
->confirm_spawn
= strdup(confirm_spawn
);
5378 if (!p
->confirm_spawn
)
5382 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5383 p
->prefix
= u
->manager
->prefix
;
5384 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5386 /* Copy parameters from unit */
5387 p
->cgroup_path
= u
->cgroup_path
;
5388 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5390 p
->received_credentials_directory
= u
->manager
->received_credentials_directory
;
5391 p
->received_encrypted_credentials_directory
= u
->manager
->received_encrypted_credentials_directory
;
5393 p
->shall_confirm_spawn
= !!u
->manager
->confirm_spawn
;
5395 p
->fallback_smack_process_label
= u
->manager
->defaults
.smack_process_label
;
5397 if (u
->manager
->restrict_fs
&& p
->bpf_outer_map_fd
< 0) {
5398 int fd
= lsm_bpf_map_restrict_fs_fd(u
);
5402 p
->bpf_outer_map_fd
= fd
;
5405 p
->user_lookup_fd
= u
->manager
->user_lookup_fds
[1];
5407 p
->cgroup_id
= u
->cgroup_id
;
5408 p
->invocation_id
= u
->invocation_id
;
5409 sd_id128_to_string(p
->invocation_id
, p
->invocation_id_string
);
5410 p
->unit_id
= strdup(u
->id
);
5417 int unit_fork_helper_process(Unit
*u
, const char *name
, PidRef
*ret
) {
5424 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5425 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5427 (void) unit_realize_cgroup(u
);
5429 r
= safe_fork(name
, FORK_REOPEN_LOG
|FORK_DEATHSIG
, &pid
);
5433 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
5438 q
= pidref_set_pid(&pidref
, pid
);
5442 *ret
= TAKE_PIDREF(pidref
);
5448 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
);
5449 (void) ignore_signals(SIGPIPE
);
5451 if (u
->cgroup_path
) {
5452 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5454 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", empty_to_root(u
->cgroup_path
));
5462 int unit_fork_and_watch_rm_rf(Unit
*u
, char **paths
, PidRef
*ret_pid
) {
5463 _cleanup_(pidref_done
) PidRef pid
= PIDREF_NULL
;
5469 r
= unit_fork_helper_process(u
, "(sd-rmrf)", &pid
);
5473 int ret
= EXIT_SUCCESS
;
5475 STRV_FOREACH(i
, paths
) {
5476 r
= rm_rf(*i
, REMOVE_ROOT
|REMOVE_PHYSICAL
|REMOVE_MISSING_OK
);
5478 log_error_errno(r
, "Failed to remove '%s': %m", *i
);
5486 r
= unit_watch_pidref(u
, &pid
, /* exclusive= */ true);
5490 *ret_pid
= TAKE_PIDREF(pid
);
5494 static void unit_update_dependency_mask(Hashmap
*deps
, Unit
*other
, UnitDependencyInfo di
) {
5498 if (di
.origin_mask
== 0 && di
.destination_mask
== 0)
5499 /* No bit set anymore, let's drop the whole entry */
5500 assert_se(hashmap_remove(deps
, other
));
5502 /* Mask was reduced, let's update the entry */
5503 assert_se(hashmap_update(deps
, other
, di
.data
) == 0);
5506 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5510 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5515 HASHMAP_FOREACH(deps
, u
->dependencies
) {
5519 UnitDependencyInfo di
;
5524 HASHMAP_FOREACH_KEY(di
.data
, other
, deps
) {
5525 Hashmap
*other_deps
;
5527 if (FLAGS_SET(~mask
, di
.origin_mask
))
5530 di
.origin_mask
&= ~mask
;
5531 unit_update_dependency_mask(deps
, other
, di
);
5533 /* We updated the dependency from our unit to the other unit now. But most
5534 * dependencies imply a reverse dependency. Hence, let's delete that one
5535 * too. For that we go through all dependency types on the other unit and
5536 * delete all those which point to us and have the right mask set. */
5538 HASHMAP_FOREACH(other_deps
, other
->dependencies
) {
5539 UnitDependencyInfo dj
;
5541 dj
.data
= hashmap_get(other_deps
, u
);
5542 if (FLAGS_SET(~mask
, dj
.destination_mask
))
5545 dj
.destination_mask
&= ~mask
;
5546 unit_update_dependency_mask(other_deps
, u
, dj
);
5549 unit_add_to_gc_queue(other
);
5551 /* The unit 'other' may not be wanted by the unit 'u'. */
5552 unit_submit_to_stop_when_unneeded_queue(other
);
5562 static int unit_get_invocation_path(Unit
*u
, char **ret
) {
5569 if (MANAGER_IS_SYSTEM(u
->manager
))
5570 p
= strjoin("/run/systemd/units/invocation:", u
->id
);
5572 _cleanup_free_
char *user_path
= NULL
;
5573 r
= xdg_user_runtime_dir(&user_path
, "/systemd/units/invocation:");
5576 p
= strjoin(user_path
, u
->id
);
5586 static int unit_export_invocation_id(Unit
*u
) {
5587 _cleanup_free_
char *p
= NULL
;
5592 if (u
->exported_invocation_id
)
5595 if (sd_id128_is_null(u
->invocation_id
))
5598 r
= unit_get_invocation_path(u
, &p
);
5600 return log_unit_debug_errno(u
, r
, "Failed to get invocation path: %m");
5602 r
= symlink_atomic_label(u
->invocation_id_string
, p
);
5604 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5606 u
->exported_invocation_id
= true;
5610 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5618 if (u
->exported_log_level_max
)
5621 if (c
->log_level_max
< 0)
5624 assert(c
->log_level_max
<= 7);
5626 buf
[0] = '0' + c
->log_level_max
;
5629 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5630 r
= symlink_atomic(buf
, p
);
5632 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5634 u
->exported_log_level_max
= true;
5638 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5639 _cleanup_close_
int fd
= -EBADF
;
5640 struct iovec
*iovec
;
5647 if (u
->exported_log_extra_fields
)
5650 if (c
->n_log_extra_fields
<= 0)
5653 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5654 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5656 for (size_t i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5657 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5659 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5660 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5663 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5664 pattern
= strjoina(p
, ".XXXXXX");
5666 fd
= mkostemp_safe(pattern
);
5668 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5670 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5672 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5676 (void) fchmod(fd
, 0644);
5678 if (rename(pattern
, p
) < 0) {
5679 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5683 u
->exported_log_extra_fields
= true;
5687 (void) unlink(pattern
);
5691 static int unit_export_log_ratelimit_interval(Unit
*u
, const ExecContext
*c
) {
5692 _cleanup_free_
char *buf
= NULL
;
5699 if (u
->exported_log_ratelimit_interval
)
5702 if (c
->log_ratelimit_interval_usec
== 0)
5705 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5707 if (asprintf(&buf
, "%" PRIu64
, c
->log_ratelimit_interval_usec
) < 0)
5710 r
= symlink_atomic(buf
, p
);
5712 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5714 u
->exported_log_ratelimit_interval
= true;
5718 static int unit_export_log_ratelimit_burst(Unit
*u
, const ExecContext
*c
) {
5719 _cleanup_free_
char *buf
= NULL
;
5726 if (u
->exported_log_ratelimit_burst
)
5729 if (c
->log_ratelimit_burst
== 0)
5732 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5734 if (asprintf(&buf
, "%u", c
->log_ratelimit_burst
) < 0)
5737 r
= symlink_atomic(buf
, p
);
5739 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5741 u
->exported_log_ratelimit_burst
= true;
5745 void unit_export_state_files(Unit
*u
) {
5746 const ExecContext
*c
;
5753 if (MANAGER_IS_TEST_RUN(u
->manager
))
5756 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5757 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5758 * the IPC system itself and PID 1 also log to the journal.
5760 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5761 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5762 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5763 * namespace at least.
5765 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5766 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5769 (void) unit_export_invocation_id(u
);
5771 if (!MANAGER_IS_SYSTEM(u
->manager
))
5774 c
= unit_get_exec_context(u
);
5776 (void) unit_export_log_level_max(u
, c
);
5777 (void) unit_export_log_extra_fields(u
, c
);
5778 (void) unit_export_log_ratelimit_interval(u
, c
);
5779 (void) unit_export_log_ratelimit_burst(u
, c
);
5783 void unit_unlink_state_files(Unit
*u
) {
5791 /* Undoes the effect of unit_export_state() */
5793 if (u
->exported_invocation_id
) {
5794 _cleanup_free_
char *invocation_path
= NULL
;
5795 int r
= unit_get_invocation_path(u
, &invocation_path
);
5797 (void) unlink(invocation_path
);
5798 u
->exported_invocation_id
= false;
5802 if (!MANAGER_IS_SYSTEM(u
->manager
))
5805 if (u
->exported_log_level_max
) {
5806 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5809 u
->exported_log_level_max
= false;
5812 if (u
->exported_log_extra_fields
) {
5813 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5816 u
->exported_log_extra_fields
= false;
5819 if (u
->exported_log_ratelimit_interval
) {
5820 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5823 u
->exported_log_ratelimit_interval
= false;
5826 if (u
->exported_log_ratelimit_burst
) {
5827 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5830 u
->exported_log_ratelimit_burst
= false;
5834 int unit_prepare_exec(Unit
*u
) {
5839 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5840 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5841 r
= bpf_firewall_load_custom(u
);
5845 /* Prepares everything so that we can fork of a process for this unit */
5847 (void) unit_realize_cgroup(u
);
5849 if (u
->reset_accounting
) {
5850 (void) unit_reset_accounting(u
);
5851 u
->reset_accounting
= false;
5854 unit_export_state_files(u
);
5856 r
= unit_setup_exec_runtime(u
);
/* Decide whether a left-over process found in a unit's cgroup shall be
 * silently skipped when logging. Returns true for processes whose comm
 * name starts with '(' — most likely one of our own helper processes
 * (PAM?) — and for a missing comm name. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return comm[0] == '(';
}
5867 int unit_log_leftover_process_start(const PidRef
*pid
, int sig
, void *userdata
) {
5868 _cleanup_free_
char *comm
= NULL
;
5870 assert(pidref_is_set(pid
));
5872 (void) pidref_get_comm(pid
, &comm
);
5874 if (ignore_leftover_process(comm
))
5877 /* During start we print a warning */
5879 log_unit_warning(userdata
,
5880 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5881 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5882 pid
->pid
, strna(comm
));
5887 int unit_log_leftover_process_stop(const PidRef
*pid
, int sig
, void *userdata
) {
5888 _cleanup_free_
char *comm
= NULL
;
5890 assert(pidref_is_set(pid
));
5892 (void) pidref_get_comm(pid
, &comm
);
5894 if (ignore_leftover_process(comm
))
5897 /* During stop we only print an informational message */
5899 log_unit_info(userdata
,
5900 "Unit process " PID_FMT
" (%s) remains running after unit stopped.",
5901 pid
->pid
, strna(comm
));
5906 int unit_warn_leftover_processes(Unit
*u
, cg_kill_log_func_t log_func
) {
5909 (void) unit_pick_cgroup_path(u
);
5911 if (!u
->cgroup_path
)
5914 return cg_kill_recursive(
5923 bool unit_needs_console(Unit
*u
) {
5925 UnitActiveState state
;
5929 state
= unit_active_state(u
);
5931 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5934 if (UNIT_VTABLE(u
)->needs_console
)
5935 return UNIT_VTABLE(u
)->needs_console(u
);
5937 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5938 ec
= unit_get_exec_context(u
);
5942 return exec_context_may_touch_console(ec
);
5945 int unit_pid_attachable(Unit
*u
, PidRef
*pid
, sd_bus_error
*error
) {
5950 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5951 * and not a kernel thread either */
5953 /* First, a simple range check */
5954 if (!pidref_is_set(pid
))
5955 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier is not valid.");
5957 /* Some extra safety check */
5958 if (pid
->pid
== 1 || pidref_is_self(pid
))
5959 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
->pid
);
5961 /* Don't even begin to bother with kernel threads */
5962 r
= pidref_is_kernel_thread(pid
);
5964 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
->pid
);
5966 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
->pid
);
5968 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
->pid
);
5973 void unit_log_success(Unit
*u
) {
5976 /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
5977 * This message has low information value for regular users and it might be a bit overwhelming on a system with
5978 * a lot of devices. */
5980 MANAGER_IS_USER(u
->manager
) ? LOG_DEBUG
: LOG_INFO
,
5981 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5982 LOG_UNIT_INVOCATION_ID(u
),
5983 LOG_UNIT_MESSAGE(u
, "Deactivated successfully."));
5986 void unit_log_failure(Unit
*u
, const char *result
) {
5990 log_unit_struct(u
, LOG_WARNING
,
5991 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5992 LOG_UNIT_INVOCATION_ID(u
),
5993 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5994 "UNIT_RESULT=%s", result
);
5997 void unit_log_skip(Unit
*u
, const char *result
) {
6001 log_unit_struct(u
, LOG_INFO
,
6002 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR
,
6003 LOG_UNIT_INVOCATION_ID(u
),
6004 LOG_UNIT_MESSAGE(u
, "Skipped due to '%s'.", result
),
6005 "UNIT_RESULT=%s", result
);
6008 void unit_log_process_exit(
6011 const char *command
,
6021 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
6022 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
6023 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
6027 else if (code
== CLD_EXITED
)
6030 level
= LOG_WARNING
;
6032 log_unit_struct(u
, level
,
6033 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
6034 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s%s",
6036 sigchld_code_to_string(code
), status
,
6037 strna(code
== CLD_EXITED
6038 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
6039 : signal_to_string(status
)),
6040 success
? " (success)" : ""),
6041 "EXIT_CODE=%s", sigchld_code_to_string(code
),
6042 "EXIT_STATUS=%i", status
,
6043 "COMMAND=%s", strna(command
),
6044 LOG_UNIT_INVOCATION_ID(u
));
6047 int unit_exit_status(Unit
*u
) {
6050 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
6051 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
6052 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
6053 * service process has exited abnormally (signal/coredump). */
6055 if (!UNIT_VTABLE(u
)->exit_status
)
6058 return UNIT_VTABLE(u
)->exit_status(u
);
6061 int unit_failure_action_exit_status(Unit
*u
) {
6066 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6068 if (u
->failure_action_exit_status
>= 0)
6069 return u
->failure_action_exit_status
;
6071 r
= unit_exit_status(u
);
6072 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6078 int unit_success_action_exit_status(Unit
*u
) {
6083 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6085 if (u
->success_action_exit_status
>= 0)
6086 return u
->success_action_exit_status
;
6088 r
= unit_exit_status(u
);
6089 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6095 int unit_test_trigger_loaded(Unit
*u
) {
6098 /* Tests whether the unit to trigger is loaded */
6100 trigger
= UNIT_TRIGGER(u
);
6102 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6103 "Refusing to start, no unit to trigger.");
6104 if (trigger
->load_state
!= UNIT_LOADED
)
6105 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6106 "Refusing to start, unit %s to trigger not loaded.", trigger
->id
);
6111 void unit_destroy_runtime_data(Unit
*u
, const ExecContext
*context
) {
6115 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6116 if (context
->runtime_directory_preserve_mode
== EXEC_PRESERVE_NO
)
6117 exec_context_destroy_runtime_directory(context
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
6119 exec_context_destroy_credentials(u
);
6120 exec_context_destroy_mount_ns_dir(u
);
6123 int unit_clean(Unit
*u
, ExecCleanMask mask
) {
6124 UnitActiveState state
;
6128 /* Special return values:
6130 * -EOPNOTSUPP → cleaning not supported for this unit type
6131 * -EUNATCH → cleaning not defined for this resource type
6132 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6133 * a job queued or similar
6136 if (!UNIT_VTABLE(u
)->clean
)
6142 if (u
->load_state
!= UNIT_LOADED
)
6148 state
= unit_active_state(u
);
6149 if (state
!= UNIT_INACTIVE
)
6152 return UNIT_VTABLE(u
)->clean(u
, mask
);
6155 int unit_can_clean(Unit
*u
, ExecCleanMask
*ret
) {
6158 if (!UNIT_VTABLE(u
)->clean
||
6159 u
->load_state
!= UNIT_LOADED
) {
6164 /* When the clean() method is set, can_clean() really should be set too */
6165 assert(UNIT_VTABLE(u
)->can_clean
);
6167 return UNIT_VTABLE(u
)->can_clean(u
, ret
);
6170 bool unit_can_start_refuse_manual(Unit
*u
) {
6171 return unit_can_start(u
) && !u
->refuse_manual_start
;
6174 bool unit_can_stop_refuse_manual(Unit
*u
) {
6175 return unit_can_stop(u
) && !u
->refuse_manual_stop
;
6178 bool unit_can_isolate_refuse_manual(Unit
*u
) {
6179 return unit_can_isolate(u
) && !u
->refuse_manual_start
;
6182 bool unit_can_freeze(Unit
*u
) {
6185 if (UNIT_VTABLE(u
)->can_freeze
)
6186 return UNIT_VTABLE(u
)->can_freeze(u
);
6188 return UNIT_VTABLE(u
)->freeze
;
6191 void unit_frozen(Unit
*u
) {
6194 u
->freezer_state
= FREEZER_FROZEN
;
6196 bus_unit_send_pending_freezer_message(u
, false);
6199 void unit_thawed(Unit
*u
) {
6202 u
->freezer_state
= FREEZER_RUNNING
;
6204 bus_unit_send_pending_freezer_message(u
, false);
6207 static int unit_freezer_action(Unit
*u
, FreezerAction action
) {
6209 int (*method
)(Unit
*);
6213 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_THAW
));
6215 method
= action
== FREEZER_FREEZE
? UNIT_VTABLE(u
)->freeze
: UNIT_VTABLE(u
)->thaw
;
6216 if (!method
|| !cg_freezer_supported())
6222 if (u
->load_state
!= UNIT_LOADED
)
6225 s
= unit_active_state(u
);
6226 if (s
!= UNIT_ACTIVE
)
6229 if ((IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_THAWING
) && action
== FREEZER_FREEZE
) ||
6230 (u
->freezer_state
== FREEZER_THAWING
&& action
== FREEZER_THAW
))
6237 assert(IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_THAWING
));
6242 int unit_freeze(Unit
*u
) {
6243 return unit_freezer_action(u
, FREEZER_FREEZE
);
6246 int unit_thaw(Unit
*u
) {
6247 return unit_freezer_action(u
, FREEZER_THAW
);
6250 /* Wrappers around low-level cgroup freezer operations common for service and scope units */
6251 int unit_freeze_vtable_common(Unit
*u
) {
6252 return unit_cgroup_freezer_action(u
, FREEZER_FREEZE
);
6255 int unit_thaw_vtable_common(Unit
*u
) {
6256 return unit_cgroup_freezer_action(u
, FREEZER_THAW
);
6259 Condition
*unit_find_failed_condition(Unit
*u
) {
6260 Condition
*failed_trigger
= NULL
;
6261 bool has_succeeded_trigger
= false;
6263 if (u
->condition_result
)
6266 LIST_FOREACH(conditions
, c
, u
->conditions
)
6268 if (c
->result
== CONDITION_SUCCEEDED
)
6269 has_succeeded_trigger
= true;
6270 else if (!failed_trigger
)
6272 } else if (c
->result
!= CONDITION_SUCCEEDED
)
6275 return failed_trigger
&& !has_succeeded_trigger
? failed_trigger
: NULL
;
6278 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
6279 [COLLECT_INACTIVE
] = "inactive",
6280 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
6283 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);
6285 Unit
* unit_has_dependency(const Unit
*u
, UnitDependencyAtom atom
, Unit
*other
) {
6290 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6291 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6292 * is NULL the first entry found), or NULL if not found. */
6294 UNIT_FOREACH_DEPENDENCY(i
, u
, atom
)
6295 if (!other
|| other
== i
)
6301 int unit_get_dependency_array(const Unit
*u
, UnitDependencyAtom atom
, Unit
***ret_array
) {
6302 _cleanup_free_ Unit
**array
= NULL
;
6309 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
6310 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
6311 * while the dependency table is continuously updated. */
6313 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6314 if (!GREEDY_REALLOC(array
, n
+ 1))
6320 *ret_array
= TAKE_PTR(array
);
6322 assert(n
<= INT_MAX
);
6326 int unit_get_transitive_dependency_set(Unit
*u
, UnitDependencyAtom atom
, Set
**ret
) {
6327 _cleanup_set_free_ Set
*units
= NULL
, *queue
= NULL
;
6334 /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */
6337 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6338 r
= set_ensure_put(&units
, NULL
, other
);
6343 r
= set_ensure_put(&queue
, NULL
, other
);
6347 } while ((u
= set_steal_first(queue
)));
6349 *ret
= TAKE_PTR(units
);
6355 sd_event_source
**source
,
6358 sd_event_time_handler_t handler
) {
6367 if (usec
== USEC_INFINITY
)
6368 return sd_event_source_set_enabled(*source
, SD_EVENT_OFF
);
6370 r
= (relative
? sd_event_source_set_time_relative
: sd_event_source_set_time
)(*source
, usec
);
6374 return sd_event_source_set_enabled(*source
, SD_EVENT_ONESHOT
);
6377 if (usec
== USEC_INFINITY
)
6380 r
= (relative
? sd_event_add_time_relative
: sd_event_add_time
)(
6390 const char *d
= strjoina(unit_type_to_string(u
->type
), "-timer");
6391 (void) sd_event_source_set_description(*source
, d
);
6396 static int unit_get_nice(Unit
*u
) {
6399 ec
= unit_get_exec_context(u
);
6400 return ec
? ec
->nice
: 0;
6403 static uint64_t unit_get_cpu_weight(Unit
*u
) {
6406 cc
= unit_get_cgroup_context(u
);
6407 return cc
? cgroup_context_cpu_weight(cc
, manager_state(u
->manager
)) : CGROUP_WEIGHT_DEFAULT
;
6410 int unit_compare_priority(Unit
*a
, Unit
*b
) {
6413 ret
= CMP(a
->type
, b
->type
);
6417 ret
= CMP(unit_get_cpu_weight(a
), unit_get_cpu_weight(b
));
6421 ret
= CMP(unit_get_nice(a
), unit_get_nice(b
));
6425 return strcmp(a
->id
, b
->id
);
6428 const ActivationDetailsVTable
* const activation_details_vtable
[_UNIT_TYPE_MAX
] = {
6429 [UNIT_PATH
] = &activation_details_path_vtable
,
6430 [UNIT_TIMER
] = &activation_details_timer_vtable
,
6433 ActivationDetails
*activation_details_new(Unit
*trigger_unit
) {
6434 _cleanup_free_ ActivationDetails
*details
= NULL
;
6436 assert(trigger_unit
);
6437 assert(trigger_unit
->type
!= _UNIT_TYPE_INVALID
);
6438 assert(trigger_unit
->id
);
6440 details
= malloc0(activation_details_vtable
[trigger_unit
->type
]->object_size
);
6444 *details
= (ActivationDetails
) {
6446 .trigger_unit_type
= trigger_unit
->type
,
6449 details
->trigger_unit_name
= strdup(trigger_unit
->id
);
6450 if (!details
->trigger_unit_name
)
6453 if (ACTIVATION_DETAILS_VTABLE(details
)->init
)
6454 ACTIVATION_DETAILS_VTABLE(details
)->init(details
, trigger_unit
);
6456 return TAKE_PTR(details
);
6459 static ActivationDetails
*activation_details_free(ActivationDetails
*details
) {
6463 if (ACTIVATION_DETAILS_VTABLE(details
)->done
)
6464 ACTIVATION_DETAILS_VTABLE(details
)->done(details
);
6466 free(details
->trigger_unit_name
);
6468 return mfree(details
);
/* Writes the activation details as "activation-details-*" items to the
 * serialization stream f, so they survive a daemon re-execution. No-op when
 * there is nothing meaningful to save. Serialization failures are
 * deliberately ignored (best effort). */
void activation_details_serialize(ActivationDetails *details, FILE *f) {
        if (!details || details->trigger_unit_type == _UNIT_TYPE_INVALID)
                return;

        (void) serialize_item(f, "activation-details-unit-type", unit_type_to_string(details->trigger_unit_type));

        if (details->trigger_unit_name)
                (void) serialize_item(f, "activation-details-unit-name", details->trigger_unit_name);

        /* Type-specific extra fields, if the unit type defines any. */
        if (ACTIVATION_DETAILS_VTABLE(details)->serialize)
                ACTIVATION_DETAILS_VTABLE(details)->serialize(details, f);
}
/* Consumes one serialized key/value pair, the inverse of
 * activation_details_serialize(). The first pair must be
 * "activation-details-unit-type", which allocates *details sized for that
 * unit type; later pairs fill in the unit name or are forwarded to the
 * type-specific deserialize() hook. Returns 0 on success, negative errno
 * on failure. */
int activation_details_deserialize(const char *key, const char *value, ActivationDetails **details) {
        int r;

        assert(key);
        assert(value);
        assert(details);

        if (!*details) {
                UnitType t;

                /* Nothing allocated yet: only the type key may start a record. */
                if (!streq(key, "activation-details-unit-type"))
                        return -EINVAL;

                t = unit_type_from_string(value);
                if (t < 0)
                        return t;

                /* The activation details vtable has defined ops only for path and timer units */
                if (!activation_details_vtable[t])
                        return -EINVAL;

                *details = malloc0(activation_details_vtable[t]->object_size);
                if (!*details)
                        return -ENOMEM;

                **details = (ActivationDetails) {
                        .n_ref = 1,
                        .trigger_unit_type = t,
                };

                return 0;
        }

        if (streq(key, "activation-details-unit-name")) {
                r = free_and_strdup(&(*details)->trigger_unit_name, value);
                if (r < 0)
                        return r;

                return 0;
        }

        /* Unknown key: give the type-specific implementation a chance. */
        if (ACTIVATION_DETAILS_VTABLE(*details)->deserialize)
                return ACTIVATION_DETAILS_VTABLE(*details)->deserialize(key, value, details);

        return -EINVAL;
}
/* Appends "TRIGGER_UNIT=<name>" plus any type-specific variables to the
 * environment strv. Returns the number of variables added (type-specific
 * hook's count plus one for TRIGGER_UNIT when set), or negative errno. */
int activation_details_append_env(ActivationDetails *details, char ***strv) {
        int r = 0;

        assert(strv);

        if (!details)
                return 0;

        if (!isempty(details->trigger_unit_name)) {
                char *s = strjoin("TRIGGER_UNIT=", details->trigger_unit_name);
                if (!s)
                        return -ENOMEM;

                /* strv takes ownership of s on success. */
                r = strv_consume(strv, TAKE_PTR(s));
                if (r < 0)
                        return r;
        }

        if (ACTIVATION_DETAILS_VTABLE(details)->append_env) {
                r = ACTIVATION_DETAILS_VTABLE(details)->append_env(details, strv);
                if (r < 0)
                        return r;
        }

        return r + !isempty(details->trigger_unit_name); /* Return the number of variables added to the env block */
}
6556 int activation_details_append_pair(ActivationDetails
*details
, char ***strv
) {
6564 if (!isempty(details
->trigger_unit_name
)) {
6565 r
= strv_extend(strv
, "trigger_unit");
6569 r
= strv_extend(strv
, details
->trigger_unit_name
);
6574 if (ACTIVATION_DETAILS_VTABLE(details
)->append_env
) {
6575 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_pair(details
, strv
);
6580 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of pairs added to the strv */
/* Generates activation_details_ref()/activation_details_unref() with trivial
 * reference counting, dispatching to activation_details_free() when the last
 * reference is dropped. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);