1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
9 #include "sd-messages.h"
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
18 #include "cgroup-setup.h"
19 #include "cgroup-util.h"
21 #include "core-varlink.h"
22 #include "dbus-unit.h"
29 #include "fileio-label.h"
31 #include "format-util.h"
32 #include "id128-util.h"
36 #include "load-dropin.h"
37 #include "load-fragment.h"
40 #include "missing_audit.h"
41 #include "mkdir-label.h"
42 #include "path-util.h"
43 #include "process-util.h"
45 #include "serialize.h"
47 #include "signal-util.h"
48 #include "sparse-endian.h"
50 #include "specifier.h"
51 #include "stat-util.h"
52 #include "stdio-util.h"
53 #include "string-table.h"
54 #include "string-util.h"
56 #include "terminal-util.h"
57 #include "tmpfile-util.h"
58 #include "umask-util.h"
59 #include "unit-name.h"
61 #include "user-util.h"
67 /* Thresholds for logging at INFO level about resource consumption */
68 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
69 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
70 #define MENTIONWORTHY_IP_BYTES (0ULL)
72 /* Thresholds for logging at NOTICE level about resource consumption */
73 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
74 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
75 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
/* Dispatch table: maps each concrete UnitType to the vtable that implements it.
 * Indexed by the UNIT_* enum; consulted via the UNIT_VTABLE() accessor.
 * NOTE(review): fragment — the closing "};" of this initializer is not visible here. */
77 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
78 [UNIT_SERVICE
] = &service_vtable
,
79 [UNIT_SOCKET
] = &socket_vtable
,
80 [UNIT_TARGET
] = &target_vtable
,
81 [UNIT_DEVICE
] = &device_vtable
,
82 [UNIT_MOUNT
] = &mount_vtable
,
83 [UNIT_AUTOMOUNT
] = &automount_vtable
,
84 [UNIT_SWAP
] = &swap_vtable
,
85 [UNIT_TIMER
] = &timer_vtable
,
86 [UNIT_PATH
] = &path_vtable
,
87 [UNIT_SLICE
] = &slice_vtable
,
88 [UNIT_SCOPE
] = &scope_vtable
,
/* Allocate a new Unit object of 'size' bytes (the type-specific struct embedding
 * a Unit as its first member, hence the assert below) and fill in manager-wide
 * defaults: invalid/unset markers for ids and file state, -EBADF for all BPF map
 * fds, infinities for timeouts and accounting baselines, and the start rate limits
 * taken from the Manager configuration.
 * NOTE(review): fragment — the actual allocation, error handling and return of 'u'
 * are elided from this view. */
91 Unit
* unit_new(Manager
*m
, size_t size
) {
95 assert(size
>= sizeof(Unit
));
/* Mark everything "not yet known/configured". */
102 u
->type
= _UNIT_TYPE_INVALID
;
103 u
->default_dependencies
= true;
104 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
105 u
->unit_file_preset
= -1;
106 u
->on_failure_job_mode
= JOB_REPLACE
;
107 u
->on_success_job_mode
= JOB_FAIL
;
108 u
->cgroup_control_inotify_wd
= -1;
109 u
->cgroup_memory_inotify_wd
= -1;
110 u
->job_timeout
= USEC_INFINITY
;
111 u
->job_running_timeout
= USEC_INFINITY
;
112 u
->ref_uid
= UID_INVALID
;
113 u
->ref_gid
= GID_INVALID
;
114 u
->cpu_usage_last
= NSEC_INFINITY
;
115 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
116 u
->failure_action_exit_status
= u
->success_action_exit_status
= -1;
/* BPF map/link fds start out closed (-EBADF), accounting baselines unset. */
118 u
->ip_accounting_ingress_map_fd
= -EBADF
;
119 u
->ip_accounting_egress_map_fd
= -EBADF
;
120 for (CGroupIOAccountingMetric i
= 0; i
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; i
++)
121 u
->io_accounting_last
[i
] = UINT64_MAX
;
123 u
->ipv4_allow_map_fd
= -EBADF
;
124 u
->ipv6_allow_map_fd
= -EBADF
;
125 u
->ipv4_deny_map_fd
= -EBADF
;
126 u
->ipv6_deny_map_fd
= -EBADF
;
128 u
->last_section_private
= -1;
/* Start limits come from the manager's configured defaults; the auto
 * start/stop ratelimit is a fixed 16 events per 10 seconds. */
130 u
->start_ratelimit
= (RateLimit
) { m
->default_start_limit_interval
, m
->default_start_limit_burst
};
131 u
->auto_start_stop_ratelimit
= (const RateLimit
) { 10 * USEC_PER_SEC
, 16 };
/* Convenience constructor: allocate a unit via unit_new() and register 'name' on
 * it via unit_add_name(). The _cleanup_(unit_freep) guard frees the unit on any
 * early error return; presumably ownership is transferred to *ret on success
 * (the TAKE_PTR/return path is elided from this view — confirm against full source). */
136 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
137 _cleanup_(unit_freep
) Unit
*u
= NULL
;
140 u
= unit_new(m
, size
);
144 r
= unit_add_name(u
, name
);
/* Returns true if 'name' is the unit's primary id or one of its aliases.
 * streq_ptr() tolerates a NULL u->id (freshly created unit). */
153 bool unit_has_name(const Unit
*u
, const char *name
) {
157 return streq_ptr(name
, u
->id
) ||
158 set_contains(u
->aliases
, name
);
161 static void unit_init(Unit
*u
) {
168 assert(u
->type
>= 0);
170 cc
= unit_get_cgroup_context(u
);
172 cgroup_context_init(cc
);
174 /* Copy in the manager defaults into the cgroup
175 * context, _before_ the rest of the settings have
176 * been initialized */
178 cc
->cpu_accounting
= u
->manager
->default_cpu_accounting
;
179 cc
->io_accounting
= u
->manager
->default_io_accounting
;
180 cc
->blockio_accounting
= u
->manager
->default_blockio_accounting
;
181 cc
->memory_accounting
= u
->manager
->default_memory_accounting
;
182 cc
->tasks_accounting
= u
->manager
->default_tasks_accounting
;
183 cc
->ip_accounting
= u
->manager
->default_ip_accounting
;
185 if (u
->type
!= UNIT_SLICE
)
186 cc
->tasks_max
= u
->manager
->default_tasks_max
;
188 cc
->memory_pressure_watch
= u
->manager
->default_memory_pressure_watch
;
189 cc
->memory_pressure_threshold_usec
= u
->manager
->default_memory_pressure_threshold_usec
;
192 ec
= unit_get_exec_context(u
);
194 exec_context_init(ec
);
196 if (u
->manager
->default_oom_score_adjust_set
) {
197 ec
->oom_score_adjust
= u
->manager
->default_oom_score_adjust
;
198 ec
->oom_score_adjust_set
= true;
201 if (MANAGER_IS_SYSTEM(u
->manager
))
202 ec
->keyring_mode
= EXEC_KEYRING_SHARED
;
204 ec
->keyring_mode
= EXEC_KEYRING_INHERIT
;
206 /* User manager might have its umask redefined by PAM or UMask=. In this
207 * case let the units it manages inherit this value by default. They can
208 * still tune this value through their own unit file */
209 (void) get_process_umask(getpid_cached(), &ec
->umask
);
213 kc
= unit_get_kill_context(u
);
215 kill_context_init(kc
);
217 if (UNIT_VTABLE(u
)->init
)
218 UNIT_VTABLE(u
)->init(u
);
221 static int unit_add_alias(Unit
*u
, char *donated_name
) {
224 /* Make sure that u->names is allocated. We may leave u->names
225 * empty if we fail later, but this is not a problem. */
226 r
= set_ensure_put(&u
->aliases
, &string_hash_ops
, donated_name
);
234 int unit_add_name(Unit
*u
, const char *text
) {
235 _cleanup_free_
char *name
= NULL
, *instance
= NULL
;
242 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
244 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
245 "instance is not set when adding name '%s': %m", text
);
247 r
= unit_name_replace_instance(text
, u
->instance
, &name
);
249 return log_unit_debug_errno(u
, r
,
250 "failed to build instance name from '%s': %m", text
);
257 if (unit_has_name(u
, name
))
260 if (hashmap_contains(u
->manager
->units
, name
))
261 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
262 "unit already exist when adding name '%s': %m", name
);
264 if (!unit_name_is_valid(name
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
265 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
266 "name '%s' is invalid: %m", name
);
268 t
= unit_name_to_type(name
);
270 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
271 "failed to derive unit type from name '%s': %m", name
);
273 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
274 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
275 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
278 r
= unit_name_to_instance(name
, &instance
);
280 return log_unit_debug_errno(u
, r
, "failed to extract instance from name '%s': %m", name
);
282 if (instance
&& !unit_type_may_template(t
))
283 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
), "templates are not allowed for name '%s': %m", name
);
285 /* Ensure that this unit either has no instance, or that the instance matches. */
286 if (u
->type
!= _UNIT_TYPE_INVALID
&& !streq_ptr(u
->instance
, instance
))
287 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
288 "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
289 name
, instance
, u
->instance
);
291 if (u
->id
&& !unit_type_may_alias(t
))
292 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
293 "cannot add name %s, aliases are not allowed for %s units.",
294 name
, unit_type_to_string(t
));
296 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
297 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(E2BIG
), "cannot add name, manager has too many units: %m");
299 /* Add name to the global hashmap first, because that's easier to undo */
300 r
= hashmap_put(u
->manager
->units
, name
, u
);
302 return log_unit_debug_errno(u
, r
, "add unit to hashmap failed for name '%s': %m", text
);
305 r
= unit_add_alias(u
, name
); /* unit_add_alias() takes ownership of the name on success */
307 hashmap_remove(u
->manager
->units
, name
);
313 /* A new name, we don't need the set yet. */
314 assert(u
->type
== _UNIT_TYPE_INVALID
);
315 assert(!u
->instance
);
318 u
->id
= TAKE_PTR(name
);
319 u
->instance
= TAKE_PTR(instance
);
321 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
325 unit_add_to_dbus_queue(u
);
329 int unit_choose_id(Unit
*u
, const char *name
) {
330 _cleanup_free_
char *t
= NULL
;
337 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
341 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
348 if (streq_ptr(u
->id
, name
))
349 return 0; /* Nothing to do. */
351 /* Selects one of the aliases of this unit as the id */
352 s
= set_get(u
->aliases
, (char*) name
);
357 r
= set_remove_and_put(u
->aliases
, name
, u
->id
);
361 assert_se(set_remove(u
->aliases
, name
)); /* see set_get() above… */
363 u
->id
= s
; /* Old u->id is now stored in the set, and s is not stored anywhere */
364 unit_add_to_dbus_queue(u
);
/* Replace the unit's description string (empty string is normalized to NULL via
 * empty_to_null()), then queue a D-Bus PropertiesChanged-style update. */
369 int unit_set_description(Unit
*u
, const char *description
) {
374 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
378 unit_add_to_dbus_queue(u
);
383 static bool unit_success_failure_handler_has_jobs(Unit
*unit
) {
386 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_SUCCESS
)
387 if (other
->job
|| other
->nop_job
)
390 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_FAILURE
)
391 if (other
->job
|| other
->nop_job
)
397 bool unit_may_gc(Unit
*u
) {
398 UnitActiveState state
;
403 /* Checks whether the unit is ready to be unloaded for garbage collection.
404 * Returns true when the unit may be collected, and false if there's some
405 * reason to keep it loaded.
407 * References from other units are *not* checked here. Instead, this is done
408 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
411 if (u
->job
|| u
->nop_job
)
414 state
= unit_active_state(u
);
416 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
417 if (UNIT_IS_INACTIVE_OR_FAILED(state
) &&
418 UNIT_VTABLE(u
)->release_resources
)
419 UNIT_VTABLE(u
)->release_resources(u
);
424 if (sd_bus_track_count(u
->bus_track
) > 0)
427 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
428 switch (u
->collect_mode
) {
430 case COLLECT_INACTIVE
:
431 if (state
!= UNIT_INACTIVE
)
436 case COLLECT_INACTIVE_OR_FAILED
:
437 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
443 assert_not_reached();
446 /* Check if any OnFailure= or OnSuccess= jobs may be pending */
447 if (unit_success_failure_handler_has_jobs(u
))
450 if (u
->cgroup_path
) {
451 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
452 * around. Units with active processes should never be collected. */
454 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
456 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u
->cgroup_path
));
461 if (UNIT_VTABLE(u
)->may_gc
&& !UNIT_VTABLE(u
)->may_gc(u
))
/* Queue the unit for loading. No-op unless the unit is still a stub, and
 * idempotent via the in_load_queue flag. */
467 void unit_add_to_load_queue(Unit
*u
) {
469 assert(u
->type
!= _UNIT_TYPE_INVALID
);
471 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
474 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
475 u
->in_load_queue
= true;
/* Queue the unit for destruction; idempotent via the in_cleanup_queue flag. */
478 void unit_add_to_cleanup_queue(Unit
*u
) {
481 if (u
->in_cleanup_queue
)
484 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
485 u
->in_cleanup_queue
= true;
/* Queue the unit for garbage-collection consideration. Skipped if already
 * queued for GC, or if it is queued for cleanup (cleanup supersedes GC). */
488 void unit_add_to_gc_queue(Unit
*u
) {
491 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
497 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
498 u
->in_gc_queue
= true;
/* Queue the unit for a D-Bus change notification. Skipped for stubs and when
 * already queued. If no peer is subscribed (no bus trackers, no private buses)
 * the signal is marked as sent without queueing, avoiding useless work. */
501 void unit_add_to_dbus_queue(Unit
*u
) {
503 assert(u
->type
!= _UNIT_TYPE_INVALID
);
505 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
508 /* Shortcut things if nobody cares */
509 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
510 sd_bus_track_count(u
->bus_track
) <= 0 &&
511 set_isempty(u
->manager
->private_buses
)) {
512 u
->sent_dbus_new_signal
= true;
516 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
517 u
->in_dbus_queue
= true;
520 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
523 if (u
->in_stop_when_unneeded_queue
)
526 if (!u
->stop_when_unneeded
)
529 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
532 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
533 u
->in_stop_when_unneeded_queue
= true;
536 void unit_submit_to_start_when_upheld_queue(Unit
*u
) {
539 if (u
->in_start_when_upheld_queue
)
542 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)))
545 if (!unit_has_dependency(u
, UNIT_ATOM_START_STEADILY
, NULL
))
548 LIST_PREPEND(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
549 u
->in_start_when_upheld_queue
= true;
552 void unit_submit_to_stop_when_bound_queue(Unit
*u
) {
555 if (u
->in_stop_when_bound_queue
)
558 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
561 if (!unit_has_dependency(u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
, NULL
))
564 LIST_PREPEND(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
565 u
->in_stop_when_bound_queue
= true;
568 static void unit_clear_dependencies(Unit
*u
) {
571 /* Removes all dependencies configured on u and their reverse dependencies. */
573 for (Hashmap
*deps
; (deps
= hashmap_steal_first(u
->dependencies
));) {
575 for (Unit
*other
; (other
= hashmap_steal_first_key(deps
));) {
578 HASHMAP_FOREACH(other_deps
, other
->dependencies
)
579 hashmap_remove(other_deps
, u
);
581 unit_add_to_gc_queue(other
);
587 u
->dependencies
= hashmap_free(u
->dependencies
);
590 static void unit_remove_transient(Unit
*u
) {
596 if (u
->fragment_path
)
597 (void) unlink(u
->fragment_path
);
599 STRV_FOREACH(i
, u
->dropin_paths
) {
600 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
602 if (path_extract_directory(*i
, &p
) < 0) /* Get the drop-in directory from the drop-in file */
605 if (path_extract_directory(p
, &pp
) < 0) /* Get the config directory from the drop-in directory */
608 /* Only drop transient drop-ins */
609 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
617 static void unit_free_requires_mounts_for(Unit
*u
) {
621 _cleanup_free_
char *path
= NULL
;
623 path
= hashmap_steal_first_key(u
->requires_mounts_for
);
627 char s
[strlen(path
) + 1];
629 PATH_FOREACH_PREFIX_MORE(s
, path
) {
633 x
= hashmap_get2(u
->manager
->units_requiring_mounts_for
, s
, (void**) &y
);
637 (void) set_remove(x
, u
);
639 if (set_isempty(x
)) {
640 (void) hashmap_remove(u
->manager
->units_requiring_mounts_for
, y
);
648 u
->requires_mounts_for
= hashmap_free(u
->requires_mounts_for
);
651 static void unit_done(Unit
*u
) {
660 if (UNIT_VTABLE(u
)->done
)
661 UNIT_VTABLE(u
)->done(u
);
663 ec
= unit_get_exec_context(u
);
665 exec_context_done(ec
);
667 cc
= unit_get_cgroup_context(u
);
669 cgroup_context_done(cc
);
672 Unit
* unit_free(Unit
*u
) {
679 u
->transient_file
= safe_fclose(u
->transient_file
);
681 if (!MANAGER_IS_RELOADING(u
->manager
))
682 unit_remove_transient(u
);
684 bus_unit_send_removed_signal(u
);
688 unit_dequeue_rewatch_pids(u
);
690 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
691 u
->bus_track
= sd_bus_track_unref(u
->bus_track
);
692 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
693 u
->pending_freezer_invocation
= sd_bus_message_unref(u
->pending_freezer_invocation
);
695 unit_free_requires_mounts_for(u
);
697 SET_FOREACH(t
, u
->aliases
)
698 hashmap_remove_value(u
->manager
->units
, t
, u
);
700 hashmap_remove_value(u
->manager
->units
, u
->id
, u
);
702 if (!sd_id128_is_null(u
->invocation_id
))
703 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
717 /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
718 * detach the unit from slice tree in order to eliminate its effect on controller masks. */
719 slice
= UNIT_GET_SLICE(u
);
720 unit_clear_dependencies(u
);
722 unit_add_family_to_cgroup_realize_queue(slice
);
725 manager_unref_console(u
->manager
);
727 fdset_free(u
->initial_socket_bind_link_fds
);
729 bpf_link_free(u
->ipv4_socket_bind_link
);
730 bpf_link_free(u
->ipv6_socket_bind_link
);
733 unit_release_cgroup(u
);
735 if (!MANAGER_IS_RELOADING(u
->manager
))
736 unit_unlink_state_files(u
);
738 unit_unref_uid_gid(u
, false);
740 (void) manager_update_failed_units(u
->manager
, u
, false);
741 set_remove(u
->manager
->startup_units
, u
);
743 unit_unwatch_all_pids(u
);
745 while (u
->refs_by_target
)
746 unit_ref_unset(u
->refs_by_target
);
748 if (u
->type
!= _UNIT_TYPE_INVALID
)
749 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
751 if (u
->in_load_queue
)
752 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
754 if (u
->in_dbus_queue
)
755 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
757 if (u
->in_cleanup_queue
)
758 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
761 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
763 if (u
->in_cgroup_realize_queue
)
764 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
766 if (u
->in_cgroup_empty_queue
)
767 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
769 if (u
->in_cgroup_oom_queue
)
770 LIST_REMOVE(cgroup_oom_queue
, u
->manager
->cgroup_oom_queue
, u
);
772 if (u
->in_target_deps_queue
)
773 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
775 if (u
->in_stop_when_unneeded_queue
)
776 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
778 if (u
->in_start_when_upheld_queue
)
779 LIST_REMOVE(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
781 if (u
->in_stop_when_bound_queue
)
782 LIST_REMOVE(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
784 bpf_firewall_close(u
);
786 hashmap_free(u
->bpf_foreign_by_key
);
788 bpf_program_free(u
->bpf_device_control_installed
);
791 bpf_link_free(u
->restrict_ifaces_ingress_bpf_link
);
792 bpf_link_free(u
->restrict_ifaces_egress_bpf_link
);
794 fdset_free(u
->initial_restric_ifaces_link_fds
);
796 condition_free_list(u
->conditions
);
797 condition_free_list(u
->asserts
);
799 free(u
->description
);
800 strv_free(u
->documentation
);
801 free(u
->fragment_path
);
802 free(u
->source_path
);
803 strv_free(u
->dropin_paths
);
806 free(u
->job_timeout_reboot_arg
);
809 free(u
->access_selinux_context
);
811 set_free_free(u
->aliases
);
814 activation_details_unref(u
->activation_details
);
/* Trivial accessor for the unit's cached freezer state. */
819 FreezerState
unit_freezer_state(Unit
*u
) {
822 return u
->freezer_state
;
825 int unit_freezer_state_kernel(Unit
*u
, FreezerState
*ret
) {
826 char *values
[1] = {};
831 r
= cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, "cgroup.events",
832 STRV_MAKE("frozen"), values
);
836 r
= _FREEZER_STATE_INVALID
;
839 if (streq(values
[0], "0"))
841 else if (streq(values
[0], "1"))
/* Return the unit's active state, following merge redirections first so that a
 * merged stub reports the state of the unit it was merged into. The actual
 * state query is delegated to the type-specific vtable. */
851 UnitActiveState
unit_active_state(Unit
*u
) {
854 if (u
->load_state
== UNIT_MERGED
)
855 return unit_active_state(unit_follow_merge(u
));
857 /* After a reload it might happen that a unit is not correctly
858 * loaded but still has a process around. That's why we won't
859 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
861 return UNIT_VTABLE(u
)->active_state(u
);
/* Return the human-readable sub-state string via the type-specific vtable. */
864 const char* unit_sub_state_to_string(Unit
*u
) {
867 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
870 static int unit_merge_names(Unit
*u
, Unit
*other
) {
877 r
= unit_add_alias(u
, other
->id
);
881 r
= set_move(u
->aliases
, other
->aliases
);
883 set_remove(u
->aliases
, other
->id
);
888 other
->aliases
= set_free_free(other
->aliases
);
890 SET_FOREACH(name
, u
->aliases
)
891 assert_se(hashmap_replace(u
->manager
->units
, name
, u
) == 0);
896 static int unit_reserve_dependencies(Unit
*u
, Unit
*other
) {
905 /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
908 * First make some room in the per dependency type hashmaps. Using the summed size of both units'
909 * hashmaps is an estimate that is likely too high since they probably use some of the same
910 * types. But it's never too low, and that's all we need. */
912 n_reserve
= MIN(hashmap_size(other
->dependencies
), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX
, hashmap_size(u
->dependencies
)));
914 r
= hashmap_ensure_allocated(&u
->dependencies
, NULL
);
918 r
= hashmap_reserve(u
->dependencies
, n_reserve
);
923 /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
924 * other unit's dependencies.
926 * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
927 * reserve anything for. In that case other's set will be transferred as a whole to u by
928 * complete_move(). */
930 HASHMAP_FOREACH_KEY(deps
, d
, u
->dependencies
) {
933 other_deps
= hashmap_get(other
->dependencies
, d
);
935 r
= hashmap_reserve(deps
, hashmap_size(other_deps
));
943 static bool unit_should_warn_about_dependency(UnitDependency dependency
) {
944 /* Only warn about some unit types */
945 return IN_SET(dependency
,
956 static int unit_per_dependency_type_hashmap_update(
959 UnitDependencyMask origin_mask
,
960 UnitDependencyMask destination_mask
) {
962 UnitDependencyInfo info
;
966 assert_cc(sizeof(void*) == sizeof(info
));
968 /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
969 * exists, or insert it anew if not. */
971 info
.data
= hashmap_get(per_type
, other
);
973 /* Entry already exists. Add in our mask. */
975 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
976 FLAGS_SET(destination_mask
, info
.destination_mask
))
979 info
.origin_mask
|= origin_mask
;
980 info
.destination_mask
|= destination_mask
;
982 r
= hashmap_update(per_type
, other
, info
.data
);
984 info
= (UnitDependencyInfo
) {
985 .origin_mask
= origin_mask
,
986 .destination_mask
= destination_mask
,
989 r
= hashmap_put(per_type
, other
, info
.data
);
998 static int unit_add_dependency_hashmap(
999 Hashmap
**dependencies
,
1002 UnitDependencyMask origin_mask
,
1003 UnitDependencyMask destination_mask
) {
1008 assert(dependencies
);
1010 assert(origin_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
1011 assert(destination_mask
< _UNIT_DEPENDENCY_MASK_FULL
);
1012 assert(origin_mask
> 0 || destination_mask
> 0);
1014 /* Ensure the top-level dependency hashmap exists that maps UnitDependency → Hashmap(Unit* →
1015 * UnitDependencyInfo) */
1016 r
= hashmap_ensure_allocated(dependencies
, NULL
);
1020 /* Acquire the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency
1021 * type, and if it's missing allocate it and insert it. */
1022 per_type
= hashmap_get(*dependencies
, UNIT_DEPENDENCY_TO_PTR(d
));
1024 per_type
= hashmap_new(NULL
);
1028 r
= hashmap_put(*dependencies
, UNIT_DEPENDENCY_TO_PTR(d
), per_type
);
1030 hashmap_free(per_type
);
1035 return unit_per_dependency_type_hashmap_update(per_type
, other
, origin_mask
, destination_mask
);
1038 static void unit_merge_dependencies(Unit
*u
, Unit
*other
) {
1040 void *dt
; /* Actually of type UnitDependency, except that we don't bother casting it here,
1041 * since the hashmaps all want it as void pointer. */
1049 /* First, remove dependency to other. */
1050 HASHMAP_FOREACH_KEY(deps
, dt
, u
->dependencies
) {
1051 if (hashmap_remove(deps
, other
) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1052 log_unit_warning(u
, "Dependency %s=%s is dropped, as %s is merged into %s.",
1053 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1054 other
->id
, other
->id
, u
->id
);
1056 if (hashmap_isempty(deps
))
1057 hashmap_free(hashmap_remove(u
->dependencies
, dt
));
1061 _cleanup_(hashmap_freep
) Hashmap
*other_deps
= NULL
;
1062 UnitDependencyInfo di_back
;
1065 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1066 other_deps
= hashmap_steal_first_key_and_value(other
->dependencies
, &dt
);
1070 deps
= hashmap_get(u
->dependencies
, dt
);
1072 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1073 * referenced units as 'back'. */
1074 HASHMAP_FOREACH_KEY(di_back
.data
, back
, other_deps
) {
1079 /* This is a dependency pointing back to the unit we want to merge with?
1080 * Suppress it (but warn) */
1081 if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1082 log_unit_warning(u
, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
1083 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1084 u
->id
, other
->id
, other
->id
, u
->id
);
1086 hashmap_remove(other_deps
, back
);
1090 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1091 * point to 'u' instead. */
1092 HASHMAP_FOREACH_KEY(back_deps
, back_dt
, back
->dependencies
) {
1093 UnitDependencyInfo di_move
;
1095 di_move
.data
= hashmap_remove(back_deps
, other
);
1099 assert_se(unit_per_dependency_type_hashmap_update(
1102 di_move
.origin_mask
,
1103 di_move
.destination_mask
) >= 0);
1106 /* The target unit already has dependencies of this type, let's then merge this individually. */
1108 assert_se(unit_per_dependency_type_hashmap_update(
1111 di_back
.origin_mask
,
1112 di_back
.destination_mask
) >= 0);
1115 /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
1116 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
1117 * dependencies of this type, let's move them per type wholesale. */
1119 assert_se(hashmap_put(u
->dependencies
, dt
, TAKE_PTR(other_deps
)) >= 0);
1122 other
->dependencies
= hashmap_free(other
->dependencies
);
1125 int unit_merge(Unit
*u
, Unit
*other
) {
1130 assert(u
->manager
== other
->manager
);
1131 assert(u
->type
!= _UNIT_TYPE_INVALID
);
1133 other
= unit_follow_merge(other
);
1138 if (u
->type
!= other
->type
)
1141 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
1144 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
1147 if (!streq_ptr(u
->instance
, other
->instance
))
1156 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1159 /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
1160 * fail. We don't have a way to undo reservations. A reservation is not a leak. */
1161 r
= unit_reserve_dependencies(u
, other
);
1165 /* Redirect all references */
1166 while (other
->refs_by_target
)
1167 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
1169 /* Merge dependencies */
1170 unit_merge_dependencies(u
, other
);
1172 /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
1173 r
= unit_merge_names(u
, other
);
1177 other
->load_state
= UNIT_MERGED
;
1178 other
->merged_into
= u
;
1180 if (!u
->activation_details
)
1181 u
->activation_details
= activation_details_ref(other
->activation_details
);
1183 /* If there is still some data attached to the other node, we
1184 * don't need it anymore, and can free it. */
1185 if (other
->load_state
!= UNIT_STUB
)
1186 if (UNIT_VTABLE(other
)->done
)
1187 UNIT_VTABLE(other
)->done(other
);
1189 unit_add_to_dbus_queue(u
);
1190 unit_add_to_cleanup_queue(other
);
1195 int unit_merge_by_name(Unit
*u
, const char *name
) {
1196 _cleanup_free_
char *s
= NULL
;
1200 /* Either add name to u, or if a unit with name already exists, merge it with u.
1201 * If name is a template, do the same for name@instance, where instance is u's instance. */
1206 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
1210 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
1217 other
= manager_get_unit(u
->manager
, name
);
1219 return unit_merge(u
, other
);
1221 return unit_add_name(u
, name
);
/* Follow the merged_into chain until reaching a unit that was not merged away.
 * assert_se() both performs the assignment and insists the link is non-NULL,
 * i.e. a UNIT_MERGED unit must always point at its merge target. */
1224 Unit
* unit_follow_merge(Unit
*u
) {
1227 while (u
->load_state
== UNIT_MERGED
)
1228 assert_se(u
= u
->merged_into
);
1233 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
1239 /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */
1241 if (c
->working_directory
&& !c
->working_directory_missing_ok
) {
1242 r
= unit_require_mounts_for(u
, c
->working_directory
, UNIT_DEPENDENCY_FILE
);
1247 if (c
->root_directory
) {
1248 r
= unit_require_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
);
1253 if (c
->root_image
) {
1254 r
= unit_require_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
);
1259 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
1260 if (!u
->manager
->prefix
[dt
])
1263 for (size_t i
= 0; i
< c
->directories
[dt
].n_items
; i
++) {
1264 _cleanup_free_
char *p
= NULL
;
1266 p
= path_join(u
->manager
->prefix
[dt
], c
->directories
[dt
].items
[i
].path
);
1270 r
= unit_require_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
);
1276 if (!MANAGER_IS_SYSTEM(u
->manager
))
1279 /* For the following three directory types we need write access, and /var/ is possibly on the root
1280 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1281 if (c
->directories
[EXEC_DIRECTORY_STATE
].n_items
> 0 ||
1282 c
->directories
[EXEC_DIRECTORY_CACHE
].n_items
> 0 ||
1283 c
->directories
[EXEC_DIRECTORY_LOGS
].n_items
> 0) {
1284 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_REMOUNT_FS_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1289 if (c
->private_tmp
) {
1291 /* FIXME: for now we make a special case for /tmp and add a weak dependency on
1292 * tmp.mount so /tmp being masked is supported. However there's no reason to treat
1293 * /tmp specifically and masking other mount units should be handled more
1294 * gracefully too, see PR#16894. */
1295 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "tmp.mount", true, UNIT_DEPENDENCY_FILE
);
1299 r
= unit_require_mounts_for(u
, "/var/tmp", UNIT_DEPENDENCY_FILE
);
1303 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1308 if (c
->root_image
) {
1309 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1310 * implicit dependency on udev */
1312 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_UDEVD_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1317 if (!IN_SET(c
->std_output
,
1318 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1319 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1320 !IN_SET(c
->std_error
,
1321 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1322 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1326 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1329 if (c
->log_namespace
) {
1330 _cleanup_free_
char *socket_unit
= NULL
, *varlink_socket_unit
= NULL
;
1332 r
= unit_name_build_from_type("systemd-journald", c
->log_namespace
, UNIT_SOCKET
, &socket_unit
);
1336 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1340 r
= unit_name_build_from_type("systemd-journald-varlink", c
->log_namespace
, UNIT_SOCKET
, &varlink_socket_unit
);
1344 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, varlink_socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1348 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1355 const char* unit_description(Unit
*u
) {
1359 return u
->description
;
1361 return strna(u
->id
);
1364 const char* unit_status_string(Unit
*u
, char **ret_combined_buffer
) {
1368 /* Return u->id, u->description, or "{u->id} - {u->description}".
1369 * Versions with u->description are only used if it is set.
1370 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1373 * Note that *ret_combined_buffer may be set to NULL. */
1375 if (!u
->description
||
1376 u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_NAME
||
1377 (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& !ret_combined_buffer
) ||
1378 streq(u
->description
, u
->id
)) {
1380 if (ret_combined_buffer
)
1381 *ret_combined_buffer
= NULL
;
1385 if (ret_combined_buffer
) {
1386 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
) {
1387 *ret_combined_buffer
= strjoin(u
->id
, " - ", u
->description
);
1388 if (*ret_combined_buffer
)
1389 return *ret_combined_buffer
;
1390 log_oom(); /* Fall back to ->description */
1392 *ret_combined_buffer
= NULL
;
1395 return u
->description
;
1398 /* Common implementation for multiple backends */
1399 int unit_load_fragment_and_dropin(Unit
*u
, bool fragment_required
) {
1404 /* Load a .{service,socket,...} file */
1405 r
= unit_load_fragment(u
);
1409 if (u
->load_state
== UNIT_STUB
) {
1410 if (fragment_required
)
1413 u
->load_state
= UNIT_LOADED
;
1416 /* Load drop-in directory data. If u is an alias, we might be reloading the
1417 * target unit needlessly. But we cannot be sure which drops-ins have already
1418 * been loaded and which not, at least without doing complicated book-keeping,
1419 * so let's always reread all drop-ins. */
1420 r
= unit_load_dropin(unit_follow_merge(u
));
1424 if (u
->source_path
) {
1427 if (stat(u
->source_path
, &st
) >= 0)
1428 u
->source_mtime
= timespec_load(&st
.st_mtim
);
1430 u
->source_mtime
= 0;
1436 void unit_add_to_target_deps_queue(Unit
*u
) {
1437 Manager
*m
= ASSERT_PTR(ASSERT_PTR(u
)->manager
);
1439 if (u
->in_target_deps_queue
)
1442 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1443 u
->in_target_deps_queue
= true;
1446 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1450 if (target
->type
!= UNIT_TARGET
)
1453 /* Only add the dependency if both units are loaded, so that
1454 * that loop check below is reliable */
1455 if (u
->load_state
!= UNIT_LOADED
||
1456 target
->load_state
!= UNIT_LOADED
)
1459 /* If either side wants no automatic dependencies, then let's
1461 if (!u
->default_dependencies
||
1462 !target
->default_dependencies
)
1465 /* Don't create loops */
1466 if (unit_has_dependency(target
, UNIT_ATOM_BEFORE
, u
))
1469 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1472 static int unit_add_slice_dependencies(Unit
*u
) {
1476 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1479 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1480 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1482 UnitDependencyMask mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1484 slice
= UNIT_GET_SLICE(u
);
1486 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, slice
, true, mask
);
1488 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1491 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1494 static int unit_add_mount_dependencies(Unit
*u
) {
1495 UnitDependencyInfo di
;
1497 bool changed
= false;
1502 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->requires_mounts_for
) {
1503 char prefix
[strlen(path
) + 1];
1505 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1506 _cleanup_free_
char *p
= NULL
;
1509 r
= unit_name_from_path(prefix
, ".mount", &p
);
1511 continue; /* If the path cannot be converted to a mount unit name, then it's
1512 * not manageable as a unit by systemd, and hence we don't need a
1513 * dependency on it. Let's thus silently ignore the issue. */
1517 m
= manager_get_unit(u
->manager
, p
);
1519 /* Make sure to load the mount unit if it exists. If so the dependencies on
1520 * this unit will be added later during the loading of the mount unit. */
1521 (void) manager_load_unit_prepare(u
->manager
, p
, NULL
, NULL
, &m
);
1527 if (m
->load_state
!= UNIT_LOADED
)
1530 r
= unit_add_dependency(u
, UNIT_AFTER
, m
, true, di
.origin_mask
);
1533 changed
= changed
|| r
> 0;
1535 if (m
->fragment_path
) {
1536 r
= unit_add_dependency(u
, UNIT_REQUIRES
, m
, true, di
.origin_mask
);
1539 changed
= changed
|| r
> 0;
1547 static int unit_add_oomd_dependencies(Unit
*u
) {
1554 if (!u
->default_dependencies
)
1557 c
= unit_get_cgroup_context(u
);
1561 bool wants_oomd
= c
->moom_swap
== MANAGED_OOM_KILL
|| c
->moom_mem_pressure
== MANAGED_OOM_KILL
;
1565 if (!cg_all_unified())
1568 r
= cg_mask_supported(&mask
);
1570 return log_debug_errno(r
, "Failed to determine supported controllers: %m");
1572 if (!FLAGS_SET(mask
, CGROUP_MASK_MEMORY
))
1575 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE
);
1578 static int unit_add_startup_units(Unit
*u
) {
1579 if (!unit_has_startup_cgroup_constraints(u
))
1582 return set_ensure_put(&u
->manager
->startup_units
, NULL
, u
);
1585 static int unit_validate_on_failure_job_mode(
1587 const char *job_mode_setting
,
1589 const char *dependency_name
,
1590 UnitDependencyAtom atom
) {
1592 Unit
*other
, *found
= NULL
;
1594 if (job_mode
!= JOB_ISOLATE
)
1597 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
1600 else if (found
!= other
)
1601 return log_unit_error_errno(
1602 u
, SYNTHETIC_ERRNO(ENOEXEC
),
1603 "More than one %s dependencies specified but %sisolate set. Refusing.",
1604 dependency_name
, job_mode_setting
);
1610 int unit_load(Unit
*u
) {
1615 if (u
->in_load_queue
) {
1616 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1617 u
->in_load_queue
= false;
1620 if (u
->type
== _UNIT_TYPE_INVALID
)
1623 if (u
->load_state
!= UNIT_STUB
)
1626 if (u
->transient_file
) {
1627 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1628 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1630 r
= fflush_and_check(u
->transient_file
);
1634 u
->transient_file
= safe_fclose(u
->transient_file
);
1635 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1638 r
= UNIT_VTABLE(u
)->load(u
);
1642 assert(u
->load_state
!= UNIT_STUB
);
1644 if (u
->load_state
== UNIT_LOADED
) {
1645 unit_add_to_target_deps_queue(u
);
1647 r
= unit_add_slice_dependencies(u
);
1651 r
= unit_add_mount_dependencies(u
);
1655 r
= unit_add_oomd_dependencies(u
);
1659 r
= unit_add_startup_units(u
);
1663 r
= unit_validate_on_failure_job_mode(u
, "OnSuccessJobMode=", u
->on_success_job_mode
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
);
1667 r
= unit_validate_on_failure_job_mode(u
, "OnFailureJobMode=", u
->on_failure_job_mode
, "OnFailure=", UNIT_ATOM_ON_FAILURE
);
1671 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1672 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1674 /* We finished loading, let's ensure our parents recalculate the members mask */
1675 unit_invalidate_cgroup_members_masks(u
);
1678 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1680 unit_add_to_dbus_queue(unit_follow_merge(u
));
1681 unit_add_to_gc_queue(u
);
1682 (void) manager_varlink_send_managed_oom_update(u
);
1687 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
1688 * should hence return ENOEXEC to ensure units are placed in this state after loading. */
1690 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1691 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1695 /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
1696 * an attempt is made to load this unit, we know we need to check again. */
1697 if (u
->load_state
== UNIT_NOT_FOUND
)
1698 u
->fragment_not_found_timestamp_hash
= u
->manager
->unit_cache_timestamp_hash
;
1700 unit_add_to_dbus_queue(u
);
1701 unit_add_to_gc_queue(u
);
1703 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1707 static int log_unit_internal(void *userdata
, int level
, int error
, const char *file
, int line
, const char *func
, const char *format
, ...) {
1712 if (u
&& !unit_log_level_test(u
, level
))
1713 return -ERRNO_VALUE(error
);
1715 va_start(ap
, format
);
1717 r
= log_object_internalv(level
, error
, file
, line
, func
,
1718 u
->manager
->unit_log_field
,
1720 u
->manager
->invocation_log_field
,
1721 u
->invocation_id_string
,
1724 r
= log_internalv(level
, error
, file
, line
, func
, format
, ap
);
1730 static bool unit_test_condition(Unit
*u
) {
1731 _cleanup_strv_free_
char **env
= NULL
;
1736 dual_timestamp_get(&u
->condition_timestamp
);
1738 r
= manager_get_effective_environment(u
->manager
, &env
);
1740 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1741 u
->condition_result
= true;
1743 u
->condition_result
= condition_test_list(
1746 condition_type_to_string
,
1750 unit_add_to_dbus_queue(u
);
1751 return u
->condition_result
;
1754 static bool unit_test_assert(Unit
*u
) {
1755 _cleanup_strv_free_
char **env
= NULL
;
1760 dual_timestamp_get(&u
->assert_timestamp
);
1762 r
= manager_get_effective_environment(u
->manager
, &env
);
1764 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1765 u
->assert_result
= CONDITION_ERROR
;
1767 u
->assert_result
= condition_test_list(
1770 assert_type_to_string
,
1774 unit_add_to_dbus_queue(u
);
1775 return u
->assert_result
;
1778 void unit_status_printf(Unit
*u
, StatusType status_type
, const char *status
, const char *format
, const char *ident
) {
1779 if (log_get_show_color()) {
1780 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& strchr(ident
, ' '))
1781 ident
= strjoina(ANSI_HIGHLIGHT
, u
->id
, ANSI_NORMAL
, " - ", u
->description
);
1783 ident
= strjoina(ANSI_HIGHLIGHT
, ident
, ANSI_NORMAL
);
1786 DISABLE_WARNING_FORMAT_NONLITERAL
;
1787 manager_status_printf(u
->manager
, status_type
, status
, format
, ident
);
1791 int unit_test_start_limit(Unit
*u
) {
1796 if (ratelimit_below(&u
->start_ratelimit
)) {
1797 u
->start_limit_hit
= false;
1801 log_unit_warning(u
, "Start request repeated too quickly.");
1802 u
->start_limit_hit
= true;
1804 reason
= strjoina("unit ", u
->id
, " failed");
1806 emergency_action(u
->manager
, u
->start_limit_action
,
1807 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1808 u
->reboot_arg
, -1, reason
);
1813 bool unit_shall_confirm_spawn(Unit
*u
) {
1816 if (manager_is_confirm_spawn_disabled(u
->manager
))
1819 /* For some reasons units remaining in the same process group
1820 * as PID 1 fail to acquire the console even if it's not used
1821 * by any process. So skip the confirmation question for them. */
1822 return !unit_get_exec_context(u
)->same_pgrp
;
1825 static bool unit_verify_deps(Unit
*u
) {
1830 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1831 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1832 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1833 * that are not used in conjunction with After= as for them any such check would make things entirely
1836 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
1838 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
))
1841 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1842 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1850 /* Errors that aren't really errors:
1851 * -EALREADY: Unit is already started.
1852 * -ECOMM: Condition failed
1853 * -EAGAIN: An operation is already in progress. Retry later.
1855 * Errors that are real errors:
1856 * -EBADR: This unit type does not support starting.
1857 * -ECANCELED: Start limit hit, too many requests for now
1858 * -EPROTO: Assert failed
1859 * -EINVAL: Unit not loaded
1860 * -EOPNOTSUPP: Unit type not supported
1861 * -ENOLINK: The necessary dependencies are not fulfilled.
1862 * -ESTALE: This unit has been started before and can't be started a second time
1863 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1865 int unit_start(Unit
*u
, ActivationDetails
*details
) {
1866 UnitActiveState state
;
1872 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is rate limited. */
1873 if (u
->type
== UNIT_MOUNT
&& sd_event_source_is_ratelimited(u
->manager
->mount_event_source
))
1876 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1877 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1878 * waiting is finished. */
1879 state
= unit_active_state(u
);
1880 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1882 if (state
== UNIT_MAINTENANCE
)
1885 /* Units that aren't loaded cannot be started */
1886 if (u
->load_state
!= UNIT_LOADED
)
1889 /* Refuse starting scope units more than once */
1890 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1893 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1894 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1895 * recheck the condition in that case. */
1896 if (state
!= UNIT_ACTIVATING
&&
1897 !unit_test_condition(u
))
1898 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(ECOMM
), "Starting requested but condition failed. Not starting unit.");
1900 /* If the asserts failed, fail the entire job */
1901 if (state
!= UNIT_ACTIVATING
&&
1902 !unit_test_assert(u
))
1903 return log_unit_notice_errno(u
, SYNTHETIC_ERRNO(EPROTO
), "Starting requested but asserts failed.");
1905 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1906 * condition checks, so that we rather return condition check errors (which are usually not
1907 * considered a true failure) than "not supported" errors (which are considered a failure).
1909 if (!unit_type_supported(u
->type
))
1912 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1913 * should have taken care of this already, but let's check this here again. After all, our
1914 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1915 if (!unit_verify_deps(u
))
1918 /* Forward to the main object, if we aren't it. */
1919 following
= unit_following(u
);
1921 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1922 return unit_start(following
, details
);
1925 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1926 if (UNIT_VTABLE(u
)->can_start
) {
1927 r
= UNIT_VTABLE(u
)->can_start(u
);
1932 /* If it is stopped, but we cannot start it, then fail */
1933 if (!UNIT_VTABLE(u
)->start
)
1936 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1937 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1938 * waits for a holdoff timer to elapse before it will start again. */
1940 unit_add_to_dbus_queue(u
);
1941 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
1943 if (!u
->activation_details
) /* Older details object wins */
1944 u
->activation_details
= activation_details_ref(details
);
1946 return UNIT_VTABLE(u
)->start(u
);
1949 bool unit_can_start(Unit
*u
) {
1952 if (u
->load_state
!= UNIT_LOADED
)
1955 if (!unit_type_supported(u
->type
))
1958 /* Scope units may be started only once */
1959 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1962 return !!UNIT_VTABLE(u
)->start
;
1965 bool unit_can_isolate(Unit
*u
) {
1968 return unit_can_start(u
) &&
1973 * -EBADR: This unit type does not support stopping.
1974 * -EALREADY: Unit is already stopped.
1975 * -EAGAIN: An operation is already in progress. Retry later.
1977 int unit_stop(Unit
*u
) {
1978 UnitActiveState state
;
1983 state
= unit_active_state(u
);
1984 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
1987 following
= unit_following(u
);
1989 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
1990 return unit_stop(following
);
1993 if (!UNIT_VTABLE(u
)->stop
)
1996 unit_add_to_dbus_queue(u
);
1997 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
1999 return UNIT_VTABLE(u
)->stop(u
);
2002 bool unit_can_stop(Unit
*u
) {
2005 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2006 * Extrinsic units follow external state and they may stop following external state changes
2007 * (hence we return true here), but an attempt to do this through the manager will fail. */
2009 if (!unit_type_supported(u
->type
))
2015 return !!UNIT_VTABLE(u
)->stop
;
2019 * -EBADR: This unit type does not support reloading.
2020 * -ENOEXEC: Unit is not started.
2021 * -EAGAIN: An operation is already in progress. Retry later.
2023 int unit_reload(Unit
*u
) {
2024 UnitActiveState state
;
2029 if (u
->load_state
!= UNIT_LOADED
)
2032 if (!unit_can_reload(u
))
2035 state
= unit_active_state(u
);
2036 if (state
== UNIT_RELOADING
)
2039 if (state
!= UNIT_ACTIVE
)
2040 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(ENOEXEC
), "Unit cannot be reloaded because it is inactive.");
2042 following
= unit_following(u
);
2044 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
2045 return unit_reload(following
);
2048 unit_add_to_dbus_queue(u
);
2050 if (!UNIT_VTABLE(u
)->reload
) {
2051 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2052 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), 0);
2056 unit_cgroup_freezer_action(u
, FREEZER_THAW
);
2058 return UNIT_VTABLE(u
)->reload(u
);
2061 bool unit_can_reload(Unit
*u
) {
2064 if (UNIT_VTABLE(u
)->can_reload
)
2065 return UNIT_VTABLE(u
)->can_reload(u
);
2067 if (unit_has_dependency(u
, UNIT_ATOM_PROPAGATES_RELOAD_TO
, NULL
))
2070 return UNIT_VTABLE(u
)->reload
;
2073 bool unit_is_unneeded(Unit
*u
) {
2077 if (!u
->stop_when_unneeded
)
2080 /* Don't clean up while the unit is transitioning or is even inactive. */
2081 if (unit_active_state(u
) != UNIT_ACTIVE
)
2086 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED
) {
2087 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2088 * restart, then don't clean this one up. */
2093 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2096 if (unit_will_restart(other
))
2103 bool unit_is_upheld_by_active(Unit
*u
, Unit
**ret_culprit
) {
2108 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2109 * that is active declared an Uphold= dependencies on it */
2111 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)) || u
->job
) {
2113 *ret_culprit
= NULL
;
2117 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_START_STEADILY
) {
2121 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
2123 *ret_culprit
= other
;
2129 *ret_culprit
= NULL
;
2133 bool unit_is_bound_by_inactive(Unit
*u
, Unit
**ret_culprit
) {
2138 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2139 * because the other unit is down. */
2141 if (unit_active_state(u
) != UNIT_ACTIVE
|| u
->job
) {
2142 /* Don't clean up while the unit is transitioning or is even inactive. */
2144 *ret_culprit
= NULL
;
2148 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
2152 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
))) {
2154 *ret_culprit
= other
;
2161 *ret_culprit
= NULL
;
2165 static void check_unneeded_dependencies(Unit
*u
) {
2169 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2171 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE
)
2172 unit_submit_to_stop_when_unneeded_queue(other
);
2175 static void check_uphold_dependencies(Unit
*u
) {
2179 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2181 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE
)
2182 unit_submit_to_start_when_upheld_queue(other
);
2185 static void check_bound_by_dependencies(Unit
*u
) {
2189 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2191 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE
)
2192 unit_submit_to_stop_when_bound_queue(other
);
2195 static void retroactively_start_dependencies(Unit
*u
) {
2199 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2201 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_REPLACE
) /* Requires= + BindsTo= */
2202 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2203 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2204 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2206 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_FAIL
) /* Wants= */
2207 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2208 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2209 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
, NULL
);
2211 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_START
) /* Conflicts= (and inverse) */
2212 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2213 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2216 static void retroactively_stop_dependencies(Unit
*u
) {
2220 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2222 /* Pull down units which are bound to us recursively if enabled */
2223 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP
) /* BoundBy= */
2224 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2225 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2228 void unit_start_on_failure(
2230 const char *dependency_name
,
2231 UnitDependencyAtom atom
,
2239 assert(dependency_name
);
2240 assert(IN_SET(atom
, UNIT_ATOM_ON_SUCCESS
, UNIT_ATOM_ON_FAILURE
));
2242 /* Act on OnFailure= and OnSuccess= dependencies */
2244 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
2245 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2248 log_unit_info(u
, "Triggering %s dependencies.", dependency_name
);
2252 r
= manager_add_job(u
->manager
, JOB_START
, other
, job_mode
, NULL
, &error
, NULL
);
2254 log_unit_warning_errno(
2255 u
, r
, "Failed to enqueue %s job, ignoring: %s",
2256 dependency_name
, bus_error_message(&error
, r
));
2261 log_unit_debug(u
, "Triggering %s dependencies done (%i %s).",
2262 dependency_name
, n_jobs
, n_jobs
== 1 ? "job" : "jobs");
2265 void unit_trigger_notify(Unit
*u
) {
2270 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_TRIGGERED_BY
)
2271 if (UNIT_VTABLE(other
)->trigger_notify
)
2272 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
2275 static int raise_level(int log_level
, bool condition_info
, bool condition_notice
) {
2276 if (condition_notice
&& log_level
> LOG_NOTICE
)
2278 if (condition_info
&& log_level
> LOG_INFO
)
2283 static int unit_log_resources(Unit
*u
) {
2284 struct iovec iovec
[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ _CGROUP_IO_ACCOUNTING_METRIC_MAX
+ 4];
2285 bool any_traffic
= false, have_ip_accounting
= false, any_io
= false, have_io_accounting
= false;
2286 _cleanup_free_
char *igress
= NULL
, *egress
= NULL
, *rr
= NULL
, *wr
= NULL
;
2287 int log_level
= LOG_DEBUG
; /* May be raised if resources consumed over a threshold */
2288 size_t n_message_parts
= 0, n_iovec
= 0;
2289 char* message_parts
[1 + 2 + 2 + 1], *t
;
2290 nsec_t nsec
= NSEC_INFINITY
;
2292 const char* const ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2293 [CGROUP_IP_INGRESS_BYTES
] = "IP_METRIC_INGRESS_BYTES",
2294 [CGROUP_IP_INGRESS_PACKETS
] = "IP_METRIC_INGRESS_PACKETS",
2295 [CGROUP_IP_EGRESS_BYTES
] = "IP_METRIC_EGRESS_BYTES",
2296 [CGROUP_IP_EGRESS_PACKETS
] = "IP_METRIC_EGRESS_PACKETS",
2298 const char* const io_fields
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
2299 [CGROUP_IO_READ_BYTES
] = "IO_METRIC_READ_BYTES",
2300 [CGROUP_IO_WRITE_BYTES
] = "IO_METRIC_WRITE_BYTES",
2301 [CGROUP_IO_READ_OPERATIONS
] = "IO_METRIC_READ_OPERATIONS",
2302 [CGROUP_IO_WRITE_OPERATIONS
] = "IO_METRIC_WRITE_OPERATIONS",
2307 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2308 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2309 * information and the complete data in structured fields. */
2311 (void) unit_get_cpu_usage(u
, &nsec
);
2312 if (nsec
!= NSEC_INFINITY
) {
2313 /* Format the CPU time for inclusion in the structured log message */
2314 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, nsec
) < 0) {
2318 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2320 /* Format the CPU time for inclusion in the human language message string */
2321 t
= strjoin("consumed ", FORMAT_TIMESPAN(nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
), " CPU time");
2327 message_parts
[n_message_parts
++] = t
;
2329 log_level
= raise_level(log_level
,
2330 nsec
> MENTIONWORTHY_CPU_NSEC
,
2331 nsec
> NOTICEWORTHY_CPU_NSEC
);
2334 for (CGroupIOAccountingMetric k
= 0; k
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; k
++) {
2335 uint64_t value
= UINT64_MAX
;
2337 assert(io_fields
[k
]);
2339 (void) unit_get_io_accounting(u
, k
, k
> 0, &value
);
2340 if (value
== UINT64_MAX
)
2343 have_io_accounting
= true;
2347 /* Format IO accounting data for inclusion in the structured log message */
2348 if (asprintf(&t
, "%s=%" PRIu64
, io_fields
[k
], value
) < 0) {
2352 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2354 /* Format the IO accounting data for inclusion in the human language message string, but only
2355 * for the bytes counters (and not for the operations counters) */
2356 if (k
== CGROUP_IO_READ_BYTES
) {
2358 rr
= strjoin("read ", strna(FORMAT_BYTES(value
)), " from disk");
2363 } else if (k
== CGROUP_IO_WRITE_BYTES
) {
2365 wr
= strjoin("written ", strna(FORMAT_BYTES(value
)), " to disk");
2372 if (IN_SET(k
, CGROUP_IO_READ_BYTES
, CGROUP_IO_WRITE_BYTES
))
2373 log_level
= raise_level(log_level
,
2374 value
> MENTIONWORTHY_IO_BYTES
,
2375 value
> NOTICEWORTHY_IO_BYTES
);
2378 if (have_io_accounting
) {
2381 message_parts
[n_message_parts
++] = TAKE_PTR(rr
);
2383 message_parts
[n_message_parts
++] = TAKE_PTR(wr
);
2388 k
= strdup("no IO");
2394 message_parts
[n_message_parts
++] = k
;
2398 for (CGroupIPAccountingMetric m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2399 uint64_t value
= UINT64_MAX
;
2401 assert(ip_fields
[m
]);
2403 (void) unit_get_ip_accounting(u
, m
, &value
);
2404 if (value
== UINT64_MAX
)
2407 have_ip_accounting
= true;
2411 /* Format IP accounting data for inclusion in the structured log message */
2412 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
], value
) < 0) {
2416 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(t
);
2418 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2419 * bytes counters (and not for the packets counters) */
2420 if (m
== CGROUP_IP_INGRESS_BYTES
) {
2422 igress
= strjoin("received ", strna(FORMAT_BYTES(value
)), " IP traffic");
2427 } else if (m
== CGROUP_IP_EGRESS_BYTES
) {
2429 egress
= strjoin("sent ", strna(FORMAT_BYTES(value
)), " IP traffic");
2436 if (IN_SET(m
, CGROUP_IP_INGRESS_BYTES
, CGROUP_IP_EGRESS_BYTES
))
2437 log_level
= raise_level(log_level
,
2438 value
> MENTIONWORTHY_IP_BYTES
,
2439 value
> NOTICEWORTHY_IP_BYTES
);
2442 /* This check is here because it is the earliest point following all possible log_level assignments. If
2443 * log_level is assigned anywhere after this point, move this check. */
2444 if (!unit_log_level_test(u
, log_level
)) {
2449 if (have_ip_accounting
) {
2452 message_parts
[n_message_parts
++] = TAKE_PTR(igress
);
2454 message_parts
[n_message_parts
++] = TAKE_PTR(egress
);
2459 k
= strdup("no IP traffic");
2465 message_parts
[n_message_parts
++] = k
;
2469 /* Is there any accounting data available at all? */
2475 if (n_message_parts
== 0)
2476 t
= strjoina("MESSAGE=", u
->id
, ": Completed.");
2478 _cleanup_free_
char *joined
= NULL
;
2480 message_parts
[n_message_parts
] = NULL
;
2482 joined
= strv_join(message_parts
, ", ");
2488 joined
[0] = ascii_toupper(joined
[0]);
2489 t
= strjoina("MESSAGE=", u
->id
, ": ", joined
, ".");
2492 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2493 * and hence don't increase n_iovec for them */
2494 iovec
[n_iovec
] = IOVEC_MAKE_STRING(t
);
2495 iovec
[n_iovec
+ 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR
);
2497 t
= strjoina(u
->manager
->unit_log_field
, u
->id
);
2498 iovec
[n_iovec
+ 2] = IOVEC_MAKE_STRING(t
);
2500 t
= strjoina(u
->manager
->invocation_log_field
, u
->invocation_id_string
);
2501 iovec
[n_iovec
+ 3] = IOVEC_MAKE_STRING(t
);
2503 log_unit_struct_iovec(u
, log_level
, iovec
, n_iovec
+ 4);
2507 for (size_t i
= 0; i
< n_message_parts
; i
++)
2508 free(message_parts
[i
]);
2510 for (size_t i
= 0; i
< n_iovec
; i
++)
2511 free(iovec
[i
].iov_base
);
2517 static void unit_update_on_console(Unit
*u
) {
2522 b
= unit_needs_console(u
);
2523 if (u
->on_console
== b
)
2528 manager_ref_console(u
->manager
);
2530 manager_unref_console(u
->manager
);
2533 static void unit_emit_audit_start(Unit
*u
) {
2536 if (u
->type
!= UNIT_SERVICE
)
2539 /* Write audit record if we have just finished starting up */
2540 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, true);
2544 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2547 if (u
->type
!= UNIT_SERVICE
)
2551 /* Write audit record if we have just finished shutting down */
2552 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, state
== UNIT_INACTIVE
);
2553 u
->in_audit
= false;
2555 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2556 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_START
, state
== UNIT_INACTIVE
);
2558 if (state
== UNIT_INACTIVE
)
2559 manager_send_unit_audit(u
->manager
, u
, AUDIT_SERVICE_STOP
, true);
2563 static bool unit_process_job(Job
*j
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2564 bool unexpected
= false;
2569 if (j
->state
== JOB_WAITING
)
2571 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2573 job_add_to_run_queue(j
);
2575 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2576 * hence needs to invalidate jobs. */
2581 case JOB_VERIFY_ACTIVE
:
2583 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2584 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2585 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2588 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2589 if (ns
== UNIT_FAILED
)
2590 result
= JOB_FAILED
;
2594 job_finish_and_invalidate(j
, result
, true, false);
2601 case JOB_RELOAD_OR_START
:
2602 case JOB_TRY_RELOAD
:
2604 if (j
->state
== JOB_RUNNING
) {
2605 if (ns
== UNIT_ACTIVE
)
2606 job_finish_and_invalidate(j
, (flags
& UNIT_NOTIFY_RELOAD_FAILURE
) ? JOB_FAILED
: JOB_DONE
, true, false);
2607 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2610 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2611 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2619 case JOB_TRY_RESTART
:
2621 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2622 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2623 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2625 job_finish_and_invalidate(j
, JOB_FAILED
, true, false);
2631 assert_not_reached();
2637 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, UnitNotifyFlags flags
) {
2642 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2643 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2645 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2646 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2647 * remounted this function will be called too! */
2651 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2652 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2653 unit_add_to_dbus_queue(u
);
2655 /* Update systemd-oomd on the property/state change */
2657 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2659 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2660 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2661 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2662 * have the information on the property. Thus, indiscriminately send an update. */
2663 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) || UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2664 (void) manager_varlink_send_managed_oom_update(u
);
2667 /* Update timestamps for state changes */
2668 if (!MANAGER_IS_RELOADING(m
)) {
2669 dual_timestamp_get(&u
->state_change_timestamp
);
2671 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2672 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2673 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2674 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2676 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2677 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2678 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2679 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2682 /* Keep track of failed units */
2683 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2685 /* Make sure the cgroup and state files are always removed when we become inactive */
2686 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2687 SET_FLAG(u
->markers
,
2688 (1u << UNIT_MARKER_NEEDS_RELOAD
)|(1u << UNIT_MARKER_NEEDS_RESTART
),
2690 unit_prune_cgroup(u
);
2691 unit_unlink_state_files(u
);
2692 } else if (ns
!= os
&& ns
== UNIT_RELOADING
)
2693 SET_FLAG(u
->markers
, 1u << UNIT_MARKER_NEEDS_RELOAD
, false);
2695 unit_update_on_console(u
);
2697 if (!MANAGER_IS_RELOADING(m
)) {
2700 /* Let's propagate state changes to the job */
2702 unexpected
= unit_process_job(u
->job
, ns
, flags
);
2706 /* If this state change happened without being requested by a job, then let's retroactively start or
2707 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2708 * additional jobs just because something is already activated. */
2711 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2712 retroactively_start_dependencies(u
);
2713 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2714 retroactively_stop_dependencies(u
);
2717 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2718 log_unit_debug(u
, "Unit entered failed state.");
2720 if (!(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2721 unit_start_on_failure(u
, "OnFailure=", UNIT_ATOM_ON_FAILURE
, u
->on_failure_job_mode
);
2724 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2725 /* This unit just finished starting up */
2727 unit_emit_audit_start(u
);
2728 manager_send_unit_plymouth(m
, u
);
2731 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2732 /* This unit just stopped/failed. */
2734 unit_emit_audit_stop(u
, ns
);
2735 unit_log_resources(u
);
2738 if (ns
== UNIT_INACTIVE
&& !IN_SET(os
, UNIT_FAILED
, UNIT_INACTIVE
, UNIT_MAINTENANCE
) &&
2739 !(flags
& UNIT_NOTIFY_WILL_AUTO_RESTART
))
2740 unit_start_on_failure(u
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
, u
->on_success_job_mode
);
2743 manager_recheck_journal(m
);
2744 manager_recheck_dbus(m
);
2746 unit_trigger_notify(u
);
2748 if (!MANAGER_IS_RELOADING(m
)) {
2749 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2750 reason
= strjoina("unit ", u
->id
, " failed");
2751 emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2752 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2753 reason
= strjoina("unit ", u
->id
, " succeeded");
2754 emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2758 /* And now, add the unit or depending units to various queues that will act on the new situation if
2759 * needed. These queues generally check for continuous state changes rather than events (like most of
2760 * the state propagation above), and do work deferred instead of instantly, since they typically
2761 * don't want to run during reloading, and usually involve checking combined state of multiple units
2764 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2765 /* Stop unneeded units and bound-by units regardless if going down was expected or not */
2766 check_unneeded_dependencies(u
);
2767 check_bound_by_dependencies(u
);
2769 /* Maybe someone wants us to remain up? */
2770 unit_submit_to_start_when_upheld_queue(u
);
2772 /* Maybe the unit should be GC'ed now? */
2773 unit_add_to_gc_queue(u
);
2776 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2777 /* Start uphold units regardless if going up was expected or not */
2778 check_uphold_dependencies(u
);
2780 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2781 unit_submit_to_stop_when_unneeded_queue(u
);
2783 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2784 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
2785 * inactive, without ever entering started.) */
2786 unit_submit_to_stop_when_bound_queue(u
);
2790 int unit_watch_pid(Unit
*u
, pid_t pid
, bool exclusive
) {
2794 assert(pid_is_valid(pid
));
2796 /* Watch a specific PID */
2798 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2799 * opportunity to remove any stalled references to this PID as they can be created
2800 * easily (when watching a process which is not our direct child). */
2802 manager_unwatch_pid(u
->manager
, pid
);
2804 r
= set_ensure_allocated(&u
->pids
, NULL
);
2808 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids
, NULL
);
2812 /* First try, let's add the unit keyed by "pid". */
2813 r
= hashmap_put(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2819 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2820 * to an array of Units rather than just a Unit), lists us already. */
2822 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2824 for (; array
[n
]; n
++)
2828 if (found
) /* Found it already? if so, do nothing */
2833 /* Allocate a new array */
2834 new_array
= new(Unit
*, n
+ 2);
2838 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2840 new_array
[n
+1] = NULL
;
2842 /* Add or replace the old array */
2843 r
= hashmap_replace(u
->manager
->watch_pids
, PID_TO_PTR(-pid
), new_array
);
2854 r
= set_put(u
->pids
, PID_TO_PTR(pid
));
2861 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2865 assert(pid_is_valid(pid
));
2867 /* First let's drop the unit in case it's keyed as "pid". */
2868 (void) hashmap_remove_value(u
->manager
->watch_pids
, PID_TO_PTR(pid
), u
);
2870 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2871 array
= hashmap_get(u
->manager
->watch_pids
, PID_TO_PTR(-pid
));
2873 /* Let's iterate through the array, dropping our own entry */
2876 for (size_t n
= 0; array
[n
]; n
++)
2878 array
[m
++] = array
[n
];
2882 /* The array is now empty, remove the entire entry */
2883 assert_se(hashmap_remove(u
->manager
->watch_pids
, PID_TO_PTR(-pid
)) == array
);
2888 (void) set_remove(u
->pids
, PID_TO_PTR(pid
));
2891 void unit_unwatch_all_pids(Unit
*u
) {
2894 while (!set_isempty(u
->pids
))
2895 unit_unwatch_pid(u
, PTR_TO_PID(set_first(u
->pids
)));
2897 u
->pids
= set_free(u
->pids
);
2900 static void unit_tidy_watch_pids(Unit
*u
) {
2901 pid_t except1
, except2
;
2906 /* Cleans dead PIDs from our list */
2908 except1
= unit_main_pid(u
);
2909 except2
= unit_control_pid(u
);
2911 SET_FOREACH(e
, u
->pids
) {
2912 pid_t pid
= PTR_TO_PID(e
);
2914 if (pid
== except1
|| pid
== except2
)
2917 if (!pid_is_unwaited(pid
))
2918 unit_unwatch_pid(u
, pid
);
2922 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2923 Unit
*u
= ASSERT_PTR(userdata
);
2927 unit_tidy_watch_pids(u
);
2928 unit_watch_all_pids(u
);
2930 /* If the PID set is empty now, then let's finish this off. */
2931 unit_synthesize_cgroup_empty_event(u
);
2936 int unit_enqueue_rewatch_pids(Unit
*u
) {
2941 if (!u
->cgroup_path
)
2944 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2947 if (r
> 0) /* On unified we can use proper notifications */
2950 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2951 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2952 * involves issuing kill(pid, 0) on all processes we watch. */
2954 if (!u
->rewatch_pids_event_source
) {
2955 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2957 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2959 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2961 r
= sd_event_source_set_priority(s
, SD_EVENT_PRIORITY_IDLE
);
2963 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2965 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2967 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2970 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
2972 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
2977 void unit_dequeue_rewatch_pids(Unit
*u
) {
2981 if (!u
->rewatch_pids_event_source
)
2984 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
2986 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2988 u
->rewatch_pids_event_source
= sd_event_source_disable_unref(u
->rewatch_pids_event_source
);
2991 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
2993 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
2997 case JOB_VERIFY_ACTIVE
:
3000 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3001 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3006 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3007 * external events), hence it makes no sense to permit enqueuing such a request either. */
3008 return !u
->perpetual
;
3011 case JOB_TRY_RESTART
:
3012 return unit_can_stop(u
) && unit_can_start(u
);
3015 case JOB_TRY_RELOAD
:
3016 return unit_can_reload(u
);
3018 case JOB_RELOAD_OR_START
:
3019 return unit_can_reload(u
) && unit_can_start(u
);
3022 assert_not_reached();
3026 int unit_add_dependency(
3031 UnitDependencyMask mask
) {
3033 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
3034 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
3035 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
3036 [UNIT_WANTS
] = UNIT_WANTED_BY
,
3037 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
3038 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
3039 [UNIT_UPHOLDS
] = UNIT_UPHELD_BY
,
3040 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
3041 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
3042 [UNIT_WANTED_BY
] = UNIT_WANTS
,
3043 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
3044 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
3045 [UNIT_UPHELD_BY
] = UNIT_UPHOLDS
,
3046 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
3047 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
3048 [UNIT_BEFORE
] = UNIT_AFTER
,
3049 [UNIT_AFTER
] = UNIT_BEFORE
,
3050 [UNIT_ON_SUCCESS
] = UNIT_ON_SUCCESS_OF
,
3051 [UNIT_ON_SUCCESS_OF
] = UNIT_ON_SUCCESS
,
3052 [UNIT_ON_FAILURE
] = UNIT_ON_FAILURE_OF
,
3053 [UNIT_ON_FAILURE_OF
] = UNIT_ON_FAILURE
,
3054 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
3055 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
3056 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
3057 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
3058 [UNIT_PROPAGATES_STOP_TO
] = UNIT_STOP_PROPAGATED_FROM
,
3059 [UNIT_STOP_PROPAGATED_FROM
] = UNIT_PROPAGATES_STOP_TO
,
3060 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
, /* symmetric! 👓 */
3061 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
3062 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
3063 [UNIT_IN_SLICE
] = UNIT_SLICE_OF
,
3064 [UNIT_SLICE_OF
] = UNIT_IN_SLICE
,
3066 UnitDependencyAtom a
;
3069 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3070 * there, no need to notify! */
3071 bool notify
, notify_other
= false;
3074 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3077 u
= unit_follow_merge(u
);
3078 other
= unit_follow_merge(other
);
3079 a
= unit_dependency_to_atom(d
);
3082 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3084 if (unit_should_warn_about_dependency(d
))
3085 log_unit_warning(u
, "Dependency %s=%s is dropped.",
3086 unit_dependency_to_string(d
), u
->id
);
3090 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3093 /* Note that ordering a device unit after a unit is permitted since it allows to start its job
3094 * running timeout at a specific time. */
3095 if (FLAGS_SET(a
, UNIT_ATOM_BEFORE
) && other
->type
== UNIT_DEVICE
) {
3096 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
3100 if (FLAGS_SET(a
, UNIT_ATOM_ON_FAILURE
) && !UNIT_VTABLE(u
)->can_fail
) {
3101 log_unit_warning(u
, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other
->id
, unit_type_to_string(u
->type
));
3105 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERS
) && !UNIT_VTABLE(u
)->can_trigger
)
3106 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3107 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(u
->type
));
3108 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERED_BY
) && !UNIT_VTABLE(other
)->can_trigger
)
3109 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3110 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(other
->type
));
3112 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && other
->type
!= UNIT_SLICE
)
3113 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3114 "Requested dependency Slice=%s refused (%s is not a slice unit).", other
->id
, other
->id
);
3115 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && u
->type
!= UNIT_SLICE
)
3116 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3117 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other
->id
, u
->id
);
3119 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && !UNIT_HAS_CGROUP_CONTEXT(u
))
3120 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3121 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other
->id
, u
->id
);
3123 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && !UNIT_HAS_CGROUP_CONTEXT(other
))
3124 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3125 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other
->id
, other
->id
);
3127 r
= unit_add_dependency_hashmap(&u
->dependencies
, d
, other
, mask
, 0);
3132 if (inverse_table
[d
] != _UNIT_DEPENDENCY_INVALID
&& inverse_table
[d
] != d
) {
3133 r
= unit_add_dependency_hashmap(&other
->dependencies
, inverse_table
[d
], u
, 0, mask
);
3136 notify_other
= r
> 0;
3139 if (add_reference
) {
3140 r
= unit_add_dependency_hashmap(&u
->dependencies
, UNIT_REFERENCES
, other
, mask
, 0);
3143 notify
= notify
|| r
> 0;
3145 r
= unit_add_dependency_hashmap(&other
->dependencies
, UNIT_REFERENCED_BY
, u
, 0, mask
);
3148 notify_other
= notify_other
|| r
> 0;
3152 unit_add_to_dbus_queue(u
);
3154 unit_add_to_dbus_queue(other
);
3156 return notify
|| notify_other
;
3159 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
3164 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3168 s
= unit_add_dependency(u
, e
, other
, add_reference
, mask
);
3172 return r
> 0 || s
> 0;
3175 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
3183 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
3190 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
3192 _cleanup_free_
char *i
= NULL
;
3194 r
= unit_name_to_prefix(u
->id
, &i
);
3198 r
= unit_name_replace_instance(name
, i
, buf
);
3207 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3208 _cleanup_free_
char *buf
= NULL
;
3215 r
= resolve_template(u
, name
, &buf
, &name
);
3219 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3222 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3226 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3229 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3230 _cleanup_free_
char *buf
= NULL
;
3237 r
= resolve_template(u
, name
, &buf
, &name
);
3241 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3244 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3248 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
/* Override the unit search path via the environment; returns 0 or -errno. */
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes */
        return RET_NERRNO(setenv("SYSTEMD_UNIT_PATH", p, 1));
}
3256 char *unit_dbus_path(Unit
*u
) {
3262 return unit_dbus_path_from_name(u
->id
);
3265 char *unit_dbus_path_invocation_id(Unit
*u
) {
3268 if (sd_id128_is_null(u
->invocation_id
))
3271 return unit_dbus_path_from_name(u
->invocation_id_string
);
3274 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
3279 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3281 if (sd_id128_equal(u
->invocation_id
, id
))
3284 if (!sd_id128_is_null(u
->invocation_id
))
3285 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3287 if (sd_id128_is_null(id
)) {
3292 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
3296 u
->invocation_id
= id
;
3297 sd_id128_to_string(id
, u
->invocation_id_string
);
3299 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3306 u
->invocation_id
= SD_ID128_NULL
;
3307 u
->invocation_id_string
[0] = 0;
3311 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3317 /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
3318 * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
3319 * slice is derived from the name). Make sure the unit we set is actually a slice. */
3321 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3324 if (u
->type
== UNIT_SLICE
)
3327 if (unit_active_state(u
) != UNIT_INACTIVE
)
3330 if (slice
->type
!= UNIT_SLICE
)
3333 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3334 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3337 if (UNIT_GET_SLICE(u
) == slice
)
3340 /* Disallow slice changes if @u is already bound to cgroups */
3341 if (UNIT_GET_SLICE(u
) && u
->cgroup_realized
)
3344 /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
3345 if (UNIT_GET_SLICE(u
))
3346 unit_remove_dependencies(u
, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3348 r
= unit_add_dependency(u
, UNIT_IN_SLICE
, slice
, true, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3355 int unit_set_default_slice(Unit
*u
) {
3356 const char *slice_name
;
3362 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3365 if (UNIT_GET_SLICE(u
))
3369 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3371 /* Implicitly place all instantiated units in their
3372 * own per-template slice */
3374 r
= unit_name_to_prefix(u
->id
, &prefix
);
3378 /* The prefix is already escaped, but it might include
3379 * "-" which has a special meaning for slice units,
3380 * hence escape it here extra. */
3381 escaped
= unit_name_escape(prefix
);
3385 if (MANAGER_IS_SYSTEM(u
->manager
))
3386 slice_name
= strjoina("system-", escaped
, ".slice");
3388 slice_name
= strjoina("app-", escaped
, ".slice");
3390 } else if (unit_is_extrinsic(u
))
3391 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3392 * the root slice. They don't really belong in one of the subslices. */
3393 slice_name
= SPECIAL_ROOT_SLICE
;
3395 else if (MANAGER_IS_SYSTEM(u
->manager
))
3396 slice_name
= SPECIAL_SYSTEM_SLICE
;
3398 slice_name
= SPECIAL_APP_SLICE
;
3400 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3404 return unit_set_slice(u
, slice
);
3407 const char *unit_slice_name(Unit
*u
) {
3411 slice
= UNIT_GET_SLICE(u
);
3418 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3419 _cleanup_free_
char *t
= NULL
;
3426 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3429 if (unit_has_name(u
, t
))
3432 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3433 assert(r
< 0 || *_found
!= u
);
3437 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3438 const char *new_owner
;
3439 Unit
*u
= ASSERT_PTR(userdata
);
3444 r
= sd_bus_message_read(message
, "sss", NULL
, NULL
, &new_owner
);
3446 bus_log_parse_error(r
);
3450 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3451 UNIT_VTABLE(u
)->bus_name_owner_change(u
, empty_to_null(new_owner
));
3456 static int get_name_owner_handler(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3457 const sd_bus_error
*e
;
3458 const char *new_owner
;
3459 Unit
*u
= ASSERT_PTR(userdata
);
3464 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3466 e
= sd_bus_message_get_error(message
);
3468 if (!sd_bus_error_has_name(e
, SD_BUS_ERROR_NAME_HAS_NO_OWNER
)) {
3469 r
= sd_bus_error_get_errno(e
);
3470 log_unit_error_errno(u
, r
,
3471 "Unexpected error response from GetNameOwner(): %s",
3472 bus_error_message(e
, r
));
3477 r
= sd_bus_message_read(message
, "s", &new_owner
);
3479 return bus_log_parse_error(r
);
3481 assert(!isempty(new_owner
));
3484 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3485 UNIT_VTABLE(u
)->bus_name_owner_change(u
, new_owner
);
3490 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3498 if (u
->match_bus_slot
|| u
->get_name_owner_slot
)
3501 match
= strjoina("type='signal',"
3502 "sender='org.freedesktop.DBus',"
3503 "path='/org/freedesktop/DBus',"
3504 "interface='org.freedesktop.DBus',"
3505 "member='NameOwnerChanged',"
3506 "arg0='", name
, "'");
3508 r
= sd_bus_add_match_async(bus
, &u
->match_bus_slot
, match
, signal_name_owner_changed
, NULL
, u
);
3512 r
= sd_bus_call_method_async(
3514 &u
->get_name_owner_slot
,
3515 "org.freedesktop.DBus",
3516 "/org/freedesktop/DBus",
3517 "org.freedesktop.DBus",
3519 get_name_owner_handler
,
3523 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3527 log_unit_debug(u
, "Watching D-Bus name '%s'.", name
);
3531 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3537 /* Watch a specific name on the bus. We only support one unit
3538 * watching each name for now. */
3540 if (u
->manager
->api_bus
) {
3541 /* If the bus is already available, install the match directly.
3542 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3543 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3545 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3548 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3550 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3551 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3552 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3558 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3562 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3563 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3564 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3567 int unit_add_node_dependency(Unit
*u
, const char *what
, UnitDependency dep
, UnitDependencyMask mask
) {
3568 _cleanup_free_
char *e
= NULL
;
3574 /* Adds in links to the device node that this unit is based on */
3578 if (!is_device_path(what
))
3581 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3582 if (!unit_type_supported(UNIT_DEVICE
))
3585 r
= unit_name_from_path(what
, ".device", &e
);
3589 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3593 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3594 dep
= UNIT_BINDS_TO
;
3596 return unit_add_two_dependencies(u
, UNIT_AFTER
,
3597 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3598 device
, true, mask
);
3601 int unit_add_blockdev_dependency(Unit
*u
, const char *what
, UnitDependencyMask mask
) {
3602 _cleanup_free_
char *escaped
= NULL
, *target
= NULL
;
3610 if (!path_startswith(what
, "/dev/"))
3613 /* If we don't support devices, then also don't bother with blockdev@.target */
3614 if (!unit_type_supported(UNIT_DEVICE
))
3617 r
= unit_name_path_escape(what
, &escaped
);
3621 r
= unit_name_build("blockdev", escaped
, ".target", &target
);
3625 return unit_add_dependency_by_name(u
, UNIT_AFTER
, target
, true, mask
);
3628 int unit_coldplug(Unit
*u
) {
3633 /* Make sure we don't enter a loop, when coldplugging recursively. */
3637 u
->coldplugged
= true;
3639 STRV_FOREACH(i
, u
->deserialized_refs
) {
3640 q
= bus_unit_track_add_name(u
, *i
);
3641 if (q
< 0 && r
>= 0)
3644 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3646 if (UNIT_VTABLE(u
)->coldplug
) {
3647 q
= UNIT_VTABLE(u
)->coldplug(u
);
3648 if (q
< 0 && r
>= 0)
3653 q
= job_coldplug(u
->job
);
3654 if (q
< 0 && r
>= 0)
3658 q
= job_coldplug(u
->nop_job
);
3659 if (q
< 0 && r
>= 0)
3666 void unit_catchup(Unit
*u
) {
3669 if (UNIT_VTABLE(u
)->catchup
)
3670 UNIT_VTABLE(u
)->catchup(u
);
3672 unit_cgroup_catchup(u
);
3675 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3681 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3682 * are never out-of-date. */
3683 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3686 if (stat(path
, &st
) < 0)
3687 /* What, cannot access this anymore? */
3691 /* For masked files check if they are still so */
3692 return !null_or_empty(&st
);
3694 /* For non-empty files check the mtime */
3695 return timespec_load(&st
.st_mtim
) > mtime
;
3700 bool unit_need_daemon_reload(Unit
*u
) {
3701 _cleanup_strv_free_
char **t
= NULL
;
3705 /* For unit files, we allow masking… */
3706 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3707 u
->load_state
== UNIT_MASKED
))
3710 /* Source paths should not be masked… */
3711 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3714 if (u
->load_state
== UNIT_LOADED
)
3715 (void) unit_find_dropin_paths(u
, &t
);
3716 if (!strv_equal(u
->dropin_paths
, t
))
3719 /* … any drop-ins that are masked are simply omitted from the list. */
3720 STRV_FOREACH(path
, u
->dropin_paths
)
3721 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3727 void unit_reset_failed(Unit
*u
) {
3730 if (UNIT_VTABLE(u
)->reset_failed
)
3731 UNIT_VTABLE(u
)->reset_failed(u
);
3733 ratelimit_reset(&u
->start_ratelimit
);
3734 u
->start_limit_hit
= false;
3737 Unit
*unit_following(Unit
*u
) {
3740 if (UNIT_VTABLE(u
)->following
)
3741 return UNIT_VTABLE(u
)->following(u
);
3746 bool unit_stop_pending(Unit
*u
) {
3749 /* This call does check the current state of the unit. It's
3750 * hence useful to be called from state change calls of the
3751 * unit itself, where the state isn't updated yet. This is
3752 * different from unit_inactive_or_pending() which checks both
3753 * the current state and for a queued job. */
3755 return unit_has_job_type(u
, JOB_STOP
);
3758 bool unit_inactive_or_pending(Unit
*u
) {
3761 /* Returns true if the unit is inactive or going down */
3763 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3766 if (unit_stop_pending(u
))
3772 bool unit_active_or_pending(Unit
*u
) {
3775 /* Returns true if the unit is active or going up */
3777 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3781 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3787 bool unit_will_restart_default(Unit
*u
) {
3790 return unit_has_job_type(u
, JOB_START
);
3793 bool unit_will_restart(Unit
*u
) {
3796 if (!UNIT_VTABLE(u
)->will_restart
)
3799 return UNIT_VTABLE(u
)->will_restart(u
);
3802 int unit_kill(Unit
*u
, KillWho w
, int signo
, int code
, int value
, sd_bus_error
*error
) {
3804 assert(w
>= 0 && w
< _KILL_WHO_MAX
);
3805 assert(SIGNAL_VALID(signo
));
3806 assert(IN_SET(code
, SI_USER
, SI_QUEUE
));
3808 if (!UNIT_VTABLE(u
)->kill
)
3811 return UNIT_VTABLE(u
)->kill(u
, w
, signo
, code
, value
, error
);
3814 void unit_notify_cgroup_oom(Unit
*u
, bool managed_oom
) {
3817 if (UNIT_VTABLE(u
)->notify_cgroup_oom
)
3818 UNIT_VTABLE(u
)->notify_cgroup_oom(u
, managed_oom
);
3821 static Set
*unit_pid_set(pid_t main_pid
, pid_t control_pid
) {
3822 _cleanup_set_free_ Set
*pid_set
= NULL
;
3825 pid_set
= set_new(NULL
);
3829 /* Exclude the main/control pids from being killed via the cgroup */
3831 r
= set_put(pid_set
, PID_TO_PTR(main_pid
));
3836 if (control_pid
> 0) {
3837 r
= set_put(pid_set
, PID_TO_PTR(control_pid
));
3842 return TAKE_PTR(pid_set
);
3845 static int kill_common_log(pid_t pid
, int signo
, void *userdata
) {
3846 _cleanup_free_
char *comm
= NULL
;
3847 Unit
*u
= ASSERT_PTR(userdata
);
3849 (void) get_process_comm(pid
, &comm
);
3850 log_unit_info(u
, "Sending signal SIG%s to process " PID_FMT
" (%s) on client request.",
3851 signal_to_string(signo
), pid
, strna(comm
));
3856 static int kill_or_sigqueue(pid_t pid
, int signo
, int code
, int value
) {
3858 assert(SIGNAL_VALID(signo
));
3863 log_debug("Killing " PID_FMT
" with signal SIG%s.", pid
, signal_to_string(signo
));
3864 return RET_NERRNO(kill(pid
, signo
));
3867 log_debug("Enqueuing value %i to " PID_FMT
" on signal SIG%s.", value
, pid
, signal_to_string(signo
));
3868 return RET_NERRNO(sigqueue(pid
, signo
, (const union sigval
) { .sival_int
= value
}));
3871 assert_not_reached();
3875 int unit_kill_common(
3883 sd_bus_error
*error
) {
3885 bool killed
= false;
3888 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
3889 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
3890 * stop a service ourselves. */
3894 assert(who
< _KILL_WHO_MAX
);
3895 assert(SIGNAL_VALID(signo
));
3896 assert(IN_SET(code
, SI_USER
, SI_QUEUE
));
3898 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
3900 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
3902 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
3905 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
3906 if (control_pid
< 0)
3907 return sd_bus_error_setf(error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
3908 if (control_pid
== 0)
3909 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
3912 if (control_pid
> 0 &&
3913 IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
3914 _cleanup_free_
char *comm
= NULL
;
3915 (void) get_process_comm(control_pid
, &comm
);
3917 r
= kill_or_sigqueue(control_pid
, signo
, code
, value
);
3921 /* Report this failure both to the logs and to the client */
3922 sd_bus_error_set_errnof(
3924 "Failed to send signal SIG%s to control process " PID_FMT
" (%s): %m",
3925 signal_to_string(signo
), control_pid
, strna(comm
));
3926 log_unit_warning_errno(
3928 "Failed to send signal SIG%s to control process " PID_FMT
" (%s) on client request: %m",
3929 signal_to_string(signo
), control_pid
, strna(comm
));
3931 log_unit_info(u
, "Sent signal SIG%s to control process " PID_FMT
" (%s) on client request.",
3932 signal_to_string(signo
), control_pid
, strna(comm
));
3938 IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
3940 _cleanup_free_
char *comm
= NULL
;
3941 (void) get_process_comm(main_pid
, &comm
);
3943 r
= kill_or_sigqueue(main_pid
, signo
, code
, value
);
3948 sd_bus_error_set_errnof(
3950 "Failed to send signal SIG%s to main process " PID_FMT
" (%s): %m",
3951 signal_to_string(signo
), main_pid
, strna(comm
));
3954 log_unit_warning_errno(
3956 "Failed to send signal SIG%s to main process " PID_FMT
" (%s) on client request: %m",
3957 signal_to_string(signo
), main_pid
, strna(comm
));
3960 log_unit_info(u
, "Sent signal SIG%s to main process " PID_FMT
" (%s) on client request.",
3961 signal_to_string(signo
), main_pid
, strna(comm
));
3966 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
3967 * doesn't really make much sense (and given that enqueued values are a relatively expensive
3968 * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
3969 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
&& code
== SI_USER
) {
3970 _cleanup_set_free_ Set
*pid_set
= NULL
;
3972 /* Exclude the main/control pids from being killed via the cgroup */
3973 pid_set
= unit_pid_set(main_pid
, control_pid
);
3977 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, signo
, 0, pid_set
, kill_common_log
, u
);
3979 if (!IN_SET(r
, -ESRCH
, -ENOENT
)) {
3983 sd_bus_error_set_errnof(
3985 "Failed to send signal SIG%s to auxiliary processes: %m",
3986 signal_to_string(signo
));
3989 log_unit_warning_errno(
3991 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
3992 signal_to_string(signo
));
3998 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
3999 if (ret
== 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
, KILL_MAIN_FAIL
))
4000 return sd_bus_error_set_const(error
, BUS_ERROR_NO_SUCH_PROCESS
, "No matching processes to kill");
4005 int unit_following_set(Unit
*u
, Set
**s
) {
4009 if (UNIT_VTABLE(u
)->following_set
)
4010 return UNIT_VTABLE(u
)->following_set(u
, s
);
4016 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4021 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4022 r
= unit_file_get_state(
4023 u
->manager
->runtime_scope
,
4026 &u
->unit_file_state
);
4028 u
->unit_file_state
= UNIT_FILE_BAD
;
4031 return u
->unit_file_state
;
4034 int unit_get_unit_file_preset(Unit
*u
) {
4039 if (u
->unit_file_preset
< 0 && u
->fragment_path
) {
4040 _cleanup_free_
char *bn
= NULL
;
4042 r
= path_extract_filename(u
->fragment_path
, &bn
);
4044 return (u
->unit_file_preset
= r
);
4046 if (r
== O_DIRECTORY
)
4047 return (u
->unit_file_preset
= -EISDIR
);
4049 u
->unit_file_preset
= unit_file_query_preset(
4050 u
->manager
->runtime_scope
,
4056 return u
->unit_file_preset
;
4059 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4065 unit_ref_unset(ref
);
4067 ref
->source
= source
;
4068 ref
->target
= target
;
4069 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
4073 void unit_ref_unset(UnitRef
*ref
) {
4079 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4080 * be unreferenced now. */
4081 unit_add_to_gc_queue(ref
->target
);
4083 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4084 ref
->source
= ref
->target
= NULL
;
4087 static int user_from_unit_name(Unit
*u
, char **ret
) {
4089 static const uint8_t hash_key
[] = {
4090 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4091 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4094 _cleanup_free_
char *n
= NULL
;
4097 r
= unit_name_to_prefix(u
->id
, &n
);
4101 if (valid_user_group_name(n
, 0)) {
4106 /* If we can't use the unit name as a user name, then let's hash it and use that */
4107 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4113 int unit_patch_contexts(Unit
*u
) {
4120 /* Patch in the manager defaults into the exec and cgroup
4121 * contexts, _after_ the rest of the settings have been
4124 ec
= unit_get_exec_context(u
);
4126 /* This only copies in the ones that need memory */
4127 for (unsigned i
= 0; i
< _RLIMIT_MAX
; i
++)
4128 if (u
->manager
->rlimit
[i
] && !ec
->rlimit
[i
]) {
4129 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->rlimit
[i
], 1);
4134 if (MANAGER_IS_USER(u
->manager
) &&
4135 !ec
->working_directory
) {
4137 r
= get_home_dir(&ec
->working_directory
);
4141 /* Allow user services to run, even if the
4142 * home directory is missing */
4143 ec
->working_directory_missing_ok
= true;
4146 if (ec
->private_devices
)
4147 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4149 if (ec
->protect_kernel_modules
)
4150 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4152 if (ec
->protect_kernel_logs
)
4153 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYSLOG
);
4155 if (ec
->protect_clock
)
4156 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_SYS_TIME
) | (UINT64_C(1) << CAP_WAKE_ALARM
));
4158 if (ec
->dynamic_user
) {
4160 r
= user_from_unit_name(u
, &ec
->user
);
4166 ec
->group
= strdup(ec
->user
);
4171 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4172 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4175 ec
->private_tmp
= true;
4176 ec
->remove_ipc
= true;
4177 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4178 if (ec
->protect_home
== PROTECT_HOME_NO
)
4179 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4181 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4183 ec
->no_new_privileges
= true;
4184 ec
->restrict_suid_sgid
= true;
4187 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++)
4188 exec_directory_sort(ec
->directories
+ dt
);
4191 cc
= unit_get_cgroup_context(u
);
4194 if (ec
->private_devices
&&
4195 cc
->device_policy
== CGROUP_DEVICE_POLICY_AUTO
)
4196 cc
->device_policy
= CGROUP_DEVICE_POLICY_CLOSED
;
4198 /* Only add these if needed, as they imply that everything else is blocked. */
4199 if (cc
->device_policy
!= CGROUP_DEVICE_POLICY_AUTO
|| cc
->device_allow
) {
4200 if (ec
->root_image
|| ec
->mount_images
) {
4202 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4203 FOREACH_STRING(p
, "/dev/loop-control", "/dev/mapper/control") {
4204 r
= cgroup_add_device_allow(cc
, p
, "rw");
4208 FOREACH_STRING(p
, "block-loop", "block-blkext", "block-device-mapper") {
4209 r
= cgroup_add_device_allow(cc
, p
, "rwm");
4214 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4215 * Same for mapper and verity. */
4216 FOREACH_STRING(p
, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4217 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, p
, true, UNIT_DEPENDENCY_FILE
);
4223 if (ec
->protect_clock
) {
4224 r
= cgroup_add_device_allow(cc
, "char-rtc", "r");
4229 /* If there are encrypted credentials we might need to access the TPM. */
4230 if (exec_context_has_encrypted_credentials(ec
)) {
4231 r
= cgroup_add_device_allow(cc
, "char-tpm", "rw");
4241 ExecContext
*unit_get_exec_context(const Unit
*u
) {
4248 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4252 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4255 KillContext
*unit_get_kill_context(Unit
*u
) {
4262 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4266 return (KillContext
*) ((uint8_t*) u
+ offset
);
4269 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4275 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4279 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4282 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4288 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4292 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4295 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4298 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4301 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4302 return u
->manager
->lookup_paths
.transient
;
4304 if (flags
& UNIT_PERSISTENT
)
4305 return u
->manager
->lookup_paths
.persistent_control
;
4307 if (flags
& UNIT_RUNTIME
)
4308 return u
->manager
->lookup_paths
.runtime_control
;
4313 const char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4315 assert(!FLAGS_SET(flags
, UNIT_ESCAPE_EXEC_SYNTAX
| UNIT_ESCAPE_C
));
4318 _cleanup_free_
char *t
= NULL
;
4320 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4321 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4322 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4323 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4326 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4327 t
= specifier_escape(s
);
4334 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4335 * ExecStart= and friends, i.e. '$' and ';' and quotes. */
4337 if (flags
& UNIT_ESCAPE_EXEC_SYNTAX
) {
4338 char *t2
= shell_escape(s
, "$;'\"");
4341 free_and_replace(t
, t2
);
4345 } else if (flags
& UNIT_ESCAPE_C
) {
4346 char *t2
= cescape(s
);
4349 free_and_replace(t
, t2
);
4358 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4359 _cleanup_free_
char *result
= NULL
;
4362 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4363 * lines in a way suitable for ExecStart= stanzas. */
4365 STRV_FOREACH(i
, l
) {
4366 _cleanup_free_
char *buf
= NULL
;
4371 p
= unit_escape_setting(*i
, flags
, &buf
);
4375 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4376 if (!GREEDY_REALLOC(result
, n
+ a
+ 1))
4390 if (!GREEDY_REALLOC(result
, n
+ 1))
4395 return TAKE_PTR(result
);
4398 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4399 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4400 const char *dir
, *wrapped
;
4407 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4410 data
= unit_escape_setting(data
, flags
, &escaped
);
4414 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4415 * previous section header is the same */
4417 if (flags
& UNIT_PRIVATE
) {
4418 if (!UNIT_VTABLE(u
)->private_section
)
4421 if (!u
->transient_file
|| u
->last_section_private
< 0)
4422 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4423 else if (u
->last_section_private
== 0)
4424 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4426 if (!u
->transient_file
|| u
->last_section_private
< 0)
4427 data
= strjoina("[Unit]\n", data
);
4428 else if (u
->last_section_private
> 0)
4429 data
= strjoina("\n[Unit]\n", data
);
4432 if (u
->transient_file
) {
4433 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4434 * write to the transient unit file. */
4435 fputs(data
, u
->transient_file
);
4437 if (!endswith(data
, "\n"))
4438 fputc('\n', u
->transient_file
);
4440 /* Remember which section we wrote this entry to */
4441 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4445 dir
= unit_drop_in_dir(u
, flags
);
4449 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4450 "# or an equivalent operation. Do not edit.\n",
4454 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4458 (void) mkdir_p_label(p
, 0755);
4460 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4461 * recreate the cache after every drop-in we write. */
4462 if (u
->manager
->unit_path_cache
) {
4463 r
= set_put_strdup(&u
->manager
->unit_path_cache
, p
);
4468 r
= write_string_file_atomic_label(q
, wrapped
);
4472 r
= strv_push(&u
->dropin_paths
, q
);
4477 strv_uniq(u
->dropin_paths
);
4479 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4484 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4485 _cleanup_free_
char *p
= NULL
;
4493 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4496 va_start(ap
, format
);
4497 r
= vasprintf(&p
, format
, ap
);
4503 return unit_write_setting(u
, flags
, name
, p
);
4506 int unit_make_transient(Unit
*u
) {
4507 _cleanup_free_
char *path
= NULL
;
4512 if (!UNIT_VTABLE(u
)->can_transient
)
4515 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4517 path
= path_join(u
->manager
->lookup_paths
.transient
, u
->id
);
4521 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4522 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4525 f
= fopen(path
, "we");
4530 safe_fclose(u
->transient_file
);
4531 u
->transient_file
= f
;
4533 free_and_replace(u
->fragment_path
, path
);
4535 u
->source_path
= mfree(u
->source_path
);
4536 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4537 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4539 u
->load_state
= UNIT_STUB
;
4541 u
->transient
= true;
4543 unit_add_to_dbus_queue(u
);
4544 unit_add_to_gc_queue(u
);
4546 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4552 static int log_kill(pid_t pid
, int sig
, void *userdata
) {
4553 _cleanup_free_
char *comm
= NULL
;
4555 (void) get_process_comm(pid
, &comm
);
4557 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4558 only, like for example systemd's own PAM stub process. */
4559 if (comm
&& comm
[0] == '(')
4560 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4561 * here to let the manager know that a process was killed. */
4564 log_unit_notice(userdata
,
4565 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4568 signal_to_string(sig
));
4573 static int operation_to_signal(
4574 const KillContext
*c
,
4576 bool *ret_noteworthy
) {
4582 case KILL_TERMINATE
:
4583 case KILL_TERMINATE_AND_LOG
:
4584 *ret_noteworthy
= false;
4585 return c
->kill_signal
;
4588 *ret_noteworthy
= false;
4589 return restart_kill_signal(c
);
4592 *ret_noteworthy
= true;
4593 return c
->final_kill_signal
;
4596 *ret_noteworthy
= true;
4597 return c
->watchdog_signal
;
4600 assert_not_reached();
4604 int unit_kill_context(
4610 bool main_pid_alien
) {
4612 bool wait_for_exit
= false, send_sighup
;
4613 cg_kill_log_func_t log_func
= NULL
;
4619 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4620 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
4621 * which is used for user-requested killing of unit processes. */
4623 if (c
->kill_mode
== KILL_NONE
)
4627 sig
= operation_to_signal(c
, k
, ¬eworthy
);
4629 log_func
= log_kill
;
4633 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4638 log_func(main_pid
, sig
, u
);
4640 r
= kill_and_sigcont(main_pid
, sig
);
4641 if (r
< 0 && r
!= -ESRCH
) {
4642 _cleanup_free_
char *comm
= NULL
;
4643 (void) get_process_comm(main_pid
, &comm
);
4645 log_unit_warning_errno(u
, r
, "Failed to kill main process " PID_FMT
" (%s), ignoring: %m", main_pid
, strna(comm
));
4647 if (!main_pid_alien
)
4648 wait_for_exit
= true;
4650 if (r
!= -ESRCH
&& send_sighup
)
4651 (void) kill(main_pid
, SIGHUP
);
4655 if (control_pid
> 0) {
4657 log_func(control_pid
, sig
, u
);
4659 r
= kill_and_sigcont(control_pid
, sig
);
4660 if (r
< 0 && r
!= -ESRCH
) {
4661 _cleanup_free_
char *comm
= NULL
;
4662 (void) get_process_comm(control_pid
, &comm
);
4664 log_unit_warning_errno(u
, r
, "Failed to kill control process " PID_FMT
" (%s), ignoring: %m", control_pid
, strna(comm
));
4666 wait_for_exit
= true;
4668 if (r
!= -ESRCH
&& send_sighup
)
4669 (void) kill(control_pid
, SIGHUP
);
4673 if (u
->cgroup_path
&&
4674 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4675 _cleanup_set_free_ Set
*pid_set
= NULL
;
4677 /* Exclude the main/control pids from being killed via the cgroup */
4678 pid_set
= unit_pid_set(main_pid
, control_pid
);
4682 r
= cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4684 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4688 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4689 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", empty_to_root(u
->cgroup_path
));
4693 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4694 * we are running in a container or if this is a delegation unit, simply because cgroup
4695 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4696 * of containers it can be confused easily by left-over directories in the cgroup — which
4697 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4698 * there we get proper events. Hence rely on them. */
4700 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4701 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4702 wait_for_exit
= true;
4707 pid_set
= unit_pid_set(main_pid
, control_pid
);
4711 (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
,
4720 return wait_for_exit
;
4723 int unit_require_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
) {
4729 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4730 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
4731 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4732 * appearing mount units can easily determine which units to make themselves a dependency of. */
4734 if (!path_is_absolute(path
))
4737 if (hashmap_contains(u
->requires_mounts_for
, path
)) /* Exit quickly if the path is already covered. */
4740 _cleanup_free_
char *p
= strdup(path
);
4744 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4745 * only after simplification, since path_is_normalized() rejects paths with '.'.
4746 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4747 path
= path_simplify(p
);
4749 if (!path_is_normalized(path
))
4752 UnitDependencyInfo di
= {
4756 r
= hashmap_ensure_put(&u
->requires_mounts_for
, &path_hash_ops
, p
, di
.data
);
4760 TAKE_PTR(p
); /* path remains a valid pointer to the string stored in the hashmap */
4762 char prefix
[strlen(path
) + 1];
4763 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4766 x
= hashmap_get(u
->manager
->units_requiring_mounts_for
, prefix
);
4768 _cleanup_free_
char *q
= NULL
;
4770 r
= hashmap_ensure_allocated(&u
->manager
->units_requiring_mounts_for
, &path_hash_ops
);
4782 r
= hashmap_put(u
->manager
->units_requiring_mounts_for
, q
, x
);
4798 int unit_setup_exec_runtime(Unit
*u
) {
4799 _cleanup_(exec_shared_runtime_unrefp
) ExecSharedRuntime
*esr
= NULL
;
4800 _cleanup_(dynamic_creds_unrefp
) DynamicCreds
*dcreds
= NULL
;
4807 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4810 /* Check if there already is an ExecRuntime for this unit? */
4811 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4815 ec
= unit_get_exec_context(u
);
4818 /* Try to get it from somebody else */
4819 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_JOINS_NAMESPACE_OF
) {
4820 r
= exec_shared_runtime_acquire(u
->manager
, NULL
, other
->id
, false, &esr
);
4828 r
= exec_shared_runtime_acquire(u
->manager
, ec
, u
->id
, true, &esr
);
4833 if (ec
->dynamic_user
) {
4834 r
= dynamic_creds_make(u
->manager
, ec
->user
, ec
->group
, &dcreds
);
4839 r
= exec_runtime_make(esr
, dcreds
, rt
);
4849 bool unit_type_supported(UnitType t
) {
4850 static int8_t cache
[_UNIT_TYPE_MAX
] = {}; /* -1: disabled, 1: enabled: 0: don't know */
4853 if (_unlikely_(t
< 0))
4855 if (_unlikely_(t
>= _UNIT_TYPE_MAX
))
4858 if (cache
[t
] == 0) {
4861 e
= strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t
));
4863 r
= getenv_bool(ascii_strupper(e
));
4864 if (r
< 0 && r
!= -ENXIO
)
4865 log_debug_errno(r
, "Failed to parse $%s, ignoring: %m", e
);
4867 cache
[t
] = r
== 0 ? -1 : 1;
4872 if (!unit_vtable
[t
]->supported
)
4875 return unit_vtable
[t
]->supported();
4878 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
4884 if (!unit_log_level_test(u
, LOG_NOTICE
))
4887 r
= dir_is_empty(where
, /* ignore_hidden_or_backup= */ false);
4888 if (r
> 0 || r
== -ENOTDIR
)
4891 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
4895 log_unit_struct(u
, LOG_NOTICE
,
4896 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4897 LOG_UNIT_INVOCATION_ID(u
),
4898 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
4902 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
4903 _cleanup_free_
char *canonical_where
= NULL
;
4909 r
= chase(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
, NULL
);
4911 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
4915 /* We will happily ignore a trailing slash (or any redundant slashes) */
4916 if (path_equal(where
, canonical_where
))
4919 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4920 log_unit_struct(u
, LOG_ERR
,
4921 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
4922 LOG_UNIT_INVOCATION_ID(u
),
4923 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
4929 bool unit_is_pristine(Unit
*u
) {
4932 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
4933 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
4934 * even though nothing was actually loaded, as those unit types don't require a file on disk.
4936 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
4937 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
4938 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
4941 return IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) &&
4942 !u
->fragment_path
&&
4948 pid_t
unit_control_pid(Unit
*u
) {
4951 if (UNIT_VTABLE(u
)->control_pid
)
4952 return UNIT_VTABLE(u
)->control_pid(u
);
4957 pid_t
unit_main_pid(Unit
*u
) {
4960 if (UNIT_VTABLE(u
)->main_pid
)
4961 return UNIT_VTABLE(u
)->main_pid(u
);
4966 static void unit_unref_uid_internal(
4970 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
4974 assert(_manager_unref_uid
);
4976 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
4977 * gid_t are actually the same time, with the same validity rules.
4979 * Drops a reference to UID/GID from a unit. */
4981 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4982 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4984 if (!uid_is_valid(*ref_uid
))
4987 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
4988 *ref_uid
= UID_INVALID
;
4991 static void unit_unref_uid(Unit
*u
, bool destroy_now
) {
4992 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
4995 static void unit_unref_gid(Unit
*u
, bool destroy_now
) {
4996 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
4999 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
5002 unit_unref_uid(u
, destroy_now
);
5003 unit_unref_gid(u
, destroy_now
);
5006 static int unit_ref_uid_internal(
5011 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
5017 assert(uid_is_valid(uid
));
5018 assert(_manager_ref_uid
);
5020 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5021 * are actually the same type, and have the same validity rules.
5023 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5024 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5027 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5028 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5030 if (*ref_uid
== uid
)
5033 if (uid_is_valid(*ref_uid
)) /* Already set? */
5036 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
5044 static int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
5045 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
5048 static int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
5049 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
5052 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
5057 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5059 if (uid_is_valid(uid
)) {
5060 r
= unit_ref_uid(u
, uid
, clean_ipc
);
5065 if (gid_is_valid(gid
)) {
5066 q
= unit_ref_gid(u
, gid
, clean_ipc
);
5069 unit_unref_uid(u
, false);
5075 return r
> 0 || q
> 0;
5078 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
5084 c
= unit_get_exec_context(u
);
5086 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
5088 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5093 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
5098 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5099 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5100 * objects when no service references the UID/GID anymore. */
5102 r
= unit_ref_uid_gid(u
, uid
, gid
);
5104 unit_add_to_dbus_queue(u
);
5107 int unit_acquire_invocation_id(Unit
*u
) {
5113 r
= sd_id128_randomize(&id
);
5115 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5117 r
= unit_set_invocation_id(u
, id
);
5119 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5121 unit_add_to_dbus_queue(u
);
5125 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5131 /* Copy parameters from manager */
5132 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5136 p
->confirm_spawn
= manager_get_confirm_spawn(u
->manager
);
5137 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5138 p
->prefix
= u
->manager
->prefix
;
5139 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5141 /* Copy parameters from unit */
5142 p
->cgroup_path
= u
->cgroup_path
;
5143 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5145 p
->received_credentials_directory
= u
->manager
->received_credentials_directory
;
5146 p
->received_encrypted_credentials_directory
= u
->manager
->received_encrypted_credentials_directory
;
5151 int unit_fork_helper_process(Unit
*u
, const char *name
, pid_t
*ret
) {
5157 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5158 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5160 (void) unit_realize_cgroup(u
);
5162 r
= safe_fork(name
, FORK_REOPEN_LOG
, ret
);
5166 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
);
5167 (void) ignore_signals(SIGPIPE
);
5169 (void) prctl(PR_SET_PDEATHSIG
, SIGTERM
);
5171 if (u
->cgroup_path
) {
5172 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5174 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", empty_to_root(u
->cgroup_path
));
5182 int unit_fork_and_watch_rm_rf(Unit
*u
, char **paths
, pid_t
*ret_pid
) {
5189 r
= unit_fork_helper_process(u
, "(sd-rmrf)", &pid
);
5193 int ret
= EXIT_SUCCESS
;
5195 STRV_FOREACH(i
, paths
) {
5196 r
= rm_rf(*i
, REMOVE_ROOT
|REMOVE_PHYSICAL
|REMOVE_MISSING_OK
);
5198 log_error_errno(r
, "Failed to remove '%s': %m", *i
);
5206 r
= unit_watch_pid(u
, pid
, true);
5214 static void unit_update_dependency_mask(Hashmap
*deps
, Unit
*other
, UnitDependencyInfo di
) {
5218 if (di
.origin_mask
== 0 && di
.destination_mask
== 0)
5219 /* No bit set anymore, let's drop the whole entry */
5220 assert_se(hashmap_remove(deps
, other
));
5222 /* Mask was reduced, let's update the entry */
5223 assert_se(hashmap_update(deps
, other
, di
.data
) == 0);
5226 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5230 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5235 HASHMAP_FOREACH(deps
, u
->dependencies
) {
5239 UnitDependencyInfo di
;
5244 HASHMAP_FOREACH_KEY(di
.data
, other
, deps
) {
5245 Hashmap
*other_deps
;
5247 if (FLAGS_SET(~mask
, di
.origin_mask
))
5250 di
.origin_mask
&= ~mask
;
5251 unit_update_dependency_mask(deps
, other
, di
);
5253 /* We updated the dependency from our unit to the other unit now. But most
5254 * dependencies imply a reverse dependency. Hence, let's delete that one
5255 * too. For that we go through all dependency types on the other unit and
5256 * delete all those which point to us and have the right mask set. */
5258 HASHMAP_FOREACH(other_deps
, other
->dependencies
) {
5259 UnitDependencyInfo dj
;
5261 dj
.data
= hashmap_get(other_deps
, u
);
5262 if (FLAGS_SET(~mask
, dj
.destination_mask
))
5265 dj
.destination_mask
&= ~mask
;
5266 unit_update_dependency_mask(other_deps
, u
, dj
);
5269 unit_add_to_gc_queue(other
);
5271 /* The unit 'other' may not be wanted by the unit 'u'. */
5272 unit_submit_to_stop_when_unneeded_queue(other
);
5282 static int unit_get_invocation_path(Unit
*u
, char **ret
) {
5289 if (MANAGER_IS_SYSTEM(u
->manager
))
5290 p
= strjoin("/run/systemd/units/invocation:", u
->id
);
5292 _cleanup_free_
char *user_path
= NULL
;
5293 r
= xdg_user_runtime_dir(&user_path
, "/systemd/units/invocation:");
5296 p
= strjoin(user_path
, u
->id
);
5306 static int unit_export_invocation_id(Unit
*u
) {
5307 _cleanup_free_
char *p
= NULL
;
5312 if (u
->exported_invocation_id
)
5315 if (sd_id128_is_null(u
->invocation_id
))
5318 r
= unit_get_invocation_path(u
, &p
);
5320 return log_unit_debug_errno(u
, r
, "Failed to get invocation path: %m");
5322 r
= symlink_atomic_label(u
->invocation_id_string
, p
);
5324 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5326 u
->exported_invocation_id
= true;
5330 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5338 if (u
->exported_log_level_max
)
5341 if (c
->log_level_max
< 0)
5344 assert(c
->log_level_max
<= 7);
5346 buf
[0] = '0' + c
->log_level_max
;
5349 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5350 r
= symlink_atomic(buf
, p
);
5352 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5354 u
->exported_log_level_max
= true;
5358 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5359 _cleanup_close_
int fd
= -EBADF
;
5360 struct iovec
*iovec
;
5367 if (u
->exported_log_extra_fields
)
5370 if (c
->n_log_extra_fields
<= 0)
5373 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5374 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5376 for (size_t i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5377 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5379 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5380 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5383 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5384 pattern
= strjoina(p
, ".XXXXXX");
5386 fd
= mkostemp_safe(pattern
);
5388 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5390 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5392 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5396 (void) fchmod(fd
, 0644);
5398 if (rename(pattern
, p
) < 0) {
5399 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5403 u
->exported_log_extra_fields
= true;
5407 (void) unlink(pattern
);
5411 static int unit_export_log_ratelimit_interval(Unit
*u
, const ExecContext
*c
) {
5412 _cleanup_free_
char *buf
= NULL
;
5419 if (u
->exported_log_ratelimit_interval
)
5422 if (c
->log_ratelimit_interval_usec
== 0)
5425 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5427 if (asprintf(&buf
, "%" PRIu64
, c
->log_ratelimit_interval_usec
) < 0)
5430 r
= symlink_atomic(buf
, p
);
5432 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5434 u
->exported_log_ratelimit_interval
= true;
5438 static int unit_export_log_ratelimit_burst(Unit
*u
, const ExecContext
*c
) {
5439 _cleanup_free_
char *buf
= NULL
;
5446 if (u
->exported_log_ratelimit_burst
)
5449 if (c
->log_ratelimit_burst
== 0)
5452 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5454 if (asprintf(&buf
, "%u", c
->log_ratelimit_burst
) < 0)
5457 r
= symlink_atomic(buf
, p
);
5459 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5461 u
->exported_log_ratelimit_burst
= true;
5465 void unit_export_state_files(Unit
*u
) {
5466 const ExecContext
*c
;
5473 if (MANAGER_IS_TEST_RUN(u
->manager
))
5476 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5477 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5478 * the IPC system itself and PID 1 also log to the journal.
5480 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5481 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5482 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5483 * namespace at least.
5485 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5486 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5489 (void) unit_export_invocation_id(u
);
5491 if (!MANAGER_IS_SYSTEM(u
->manager
))
5494 c
= unit_get_exec_context(u
);
5496 (void) unit_export_log_level_max(u
, c
);
5497 (void) unit_export_log_extra_fields(u
, c
);
5498 (void) unit_export_log_ratelimit_interval(u
, c
);
5499 (void) unit_export_log_ratelimit_burst(u
, c
);
5503 void unit_unlink_state_files(Unit
*u
) {
5511 /* Undoes the effect of unit_export_state() */
5513 if (u
->exported_invocation_id
) {
5514 _cleanup_free_
char *invocation_path
= NULL
;
5515 int r
= unit_get_invocation_path(u
, &invocation_path
);
5517 (void) unlink(invocation_path
);
5518 u
->exported_invocation_id
= false;
5522 if (!MANAGER_IS_SYSTEM(u
->manager
))
5525 if (u
->exported_log_level_max
) {
5526 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5529 u
->exported_log_level_max
= false;
5532 if (u
->exported_log_extra_fields
) {
5533 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5536 u
->exported_log_extra_fields
= false;
5539 if (u
->exported_log_ratelimit_interval
) {
5540 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5543 u
->exported_log_ratelimit_interval
= false;
5546 if (u
->exported_log_ratelimit_burst
) {
5547 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5550 u
->exported_log_ratelimit_burst
= false;
5554 int unit_prepare_exec(Unit
*u
) {
5559 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5560 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5561 r
= bpf_firewall_load_custom(u
);
5565 /* Prepares everything so that we can fork of a process for this unit */
5567 (void) unit_realize_cgroup(u
);
5569 if (u
->reset_accounting
) {
5570 (void) unit_reset_accounting(u
);
5571 u
->reset_accounting
= false;
5574 unit_export_state_files(u
);
5576 r
= unit_setup_exec_runtime(u
);
/* Heuristic filter for leftover-process reports: a comm beginning with '(' is
 * most likely one of our own short-lived helper processes (PAM helper etc.),
 * so it is not worth warning about. A NULL comm is not ignored. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return comm[0] == '(';
}
5587 int unit_log_leftover_process_start(pid_t pid
, int sig
, void *userdata
) {
5588 _cleanup_free_
char *comm
= NULL
;
5590 (void) get_process_comm(pid
, &comm
);
5592 if (ignore_leftover_process(comm
))
5595 /* During start we print a warning */
5597 log_unit_warning(userdata
,
5598 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5599 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5605 int unit_log_leftover_process_stop(pid_t pid
, int sig
, void *userdata
) {
5606 _cleanup_free_
char *comm
= NULL
;
5608 (void) get_process_comm(pid
, &comm
);
5610 if (ignore_leftover_process(comm
))
5613 /* During stop we only print an informational message */
5615 log_unit_info(userdata
,
5616 "Unit process " PID_FMT
" (%s) remains running after unit stopped.",
5622 int unit_warn_leftover_processes(Unit
*u
, cg_kill_log_func_t log_func
) {
5625 (void) unit_pick_cgroup_path(u
);
5627 if (!u
->cgroup_path
)
5630 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
, 0, 0, NULL
, log_func
, u
);
5633 bool unit_needs_console(Unit
*u
) {
5635 UnitActiveState state
;
5639 state
= unit_active_state(u
);
5641 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5644 if (UNIT_VTABLE(u
)->needs_console
)
5645 return UNIT_VTABLE(u
)->needs_console(u
);
5647 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5648 ec
= unit_get_exec_context(u
);
5652 return exec_context_may_touch_console(ec
);
5655 int unit_pid_attachable(Unit
*u
, pid_t pid
, sd_bus_error
*error
) {
5660 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5661 * and not a kernel thread either */
5663 /* First, a simple range check */
5664 if (!pid_is_valid(pid
))
5665 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier " PID_FMT
" is not valid.", pid
);
5667 /* Some extra safety check */
5668 if (pid
== 1 || pid
== getpid_cached())
5669 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
);
5671 /* Don't even begin to bother with kernel threads */
5672 r
= is_kernel_thread(pid
);
5674 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
);
5676 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
);
5678 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
);
5683 void unit_log_success(Unit
*u
) {
5686 /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
5687 * This message has low information value for regular users and it might be a bit overwhelming on a system with
5688 * a lot of devices. */
5690 MANAGER_IS_USER(u
->manager
) ? LOG_DEBUG
: LOG_INFO
,
5691 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5692 LOG_UNIT_INVOCATION_ID(u
),
5693 LOG_UNIT_MESSAGE(u
, "Deactivated successfully."));
5696 void unit_log_failure(Unit
*u
, const char *result
) {
5700 log_unit_struct(u
, LOG_WARNING
,
5701 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5702 LOG_UNIT_INVOCATION_ID(u
),
5703 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5704 "UNIT_RESULT=%s", result
);
5707 void unit_log_skip(Unit
*u
, const char *result
) {
5711 log_unit_struct(u
, LOG_INFO
,
5712 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR
,
5713 LOG_UNIT_INVOCATION_ID(u
),
5714 LOG_UNIT_MESSAGE(u
, "Skipped due to '%s'.", result
),
5715 "UNIT_RESULT=%s", result
);
5718 void unit_log_process_exit(
5721 const char *command
,
5731 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5732 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5733 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5737 else if (code
== CLD_EXITED
)
5740 level
= LOG_WARNING
;
5742 log_unit_struct(u
, level
,
5743 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
5744 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s%s",
5746 sigchld_code_to_string(code
), status
,
5747 strna(code
== CLD_EXITED
5748 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
5749 : signal_to_string(status
)),
5750 success
? " (success)" : ""),
5751 "EXIT_CODE=%s", sigchld_code_to_string(code
),
5752 "EXIT_STATUS=%i", status
,
5753 "COMMAND=%s", strna(command
),
5754 LOG_UNIT_INVOCATION_ID(u
));
5757 int unit_exit_status(Unit
*u
) {
5760 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5761 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5762 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5763 * service process has exited abnormally (signal/coredump). */
5765 if (!UNIT_VTABLE(u
)->exit_status
)
5768 return UNIT_VTABLE(u
)->exit_status(u
);
5771 int unit_failure_action_exit_status(Unit
*u
) {
5776 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5778 if (u
->failure_action_exit_status
>= 0)
5779 return u
->failure_action_exit_status
;
5781 r
= unit_exit_status(u
);
5782 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5788 int unit_success_action_exit_status(Unit
*u
) {
5793 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5795 if (u
->success_action_exit_status
>= 0)
5796 return u
->success_action_exit_status
;
5798 r
= unit_exit_status(u
);
5799 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
5805 int unit_test_trigger_loaded(Unit
*u
) {
5808 /* Tests whether the unit to trigger is loaded */
5810 trigger
= UNIT_TRIGGER(u
);
5812 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
5813 "Refusing to start, no unit to trigger.");
5814 if (trigger
->load_state
!= UNIT_LOADED
)
5815 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
5816 "Refusing to start, unit %s to trigger not loaded.", trigger
->id
);
5821 void unit_destroy_runtime_data(Unit
*u
, const ExecContext
*context
) {
5825 if (context
->runtime_directory_preserve_mode
== EXEC_PRESERVE_NO
||
5826 (context
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
&& !unit_will_restart(u
)))
5827 exec_context_destroy_runtime_directory(context
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
5829 exec_context_destroy_credentials(context
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
], u
->id
);
5830 exec_context_destroy_mount_ns_dir(u
);
5833 int unit_clean(Unit
*u
, ExecCleanMask mask
) {
5834 UnitActiveState state
;
5838 /* Special return values:
5840 * -EOPNOTSUPP → cleaning not supported for this unit type
5841 * -EUNATCH → cleaning not defined for this resource type
5842 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5843 * a job queued or similar
5846 if (!UNIT_VTABLE(u
)->clean
)
5852 if (u
->load_state
!= UNIT_LOADED
)
5858 state
= unit_active_state(u
);
5859 if (state
!= UNIT_INACTIVE
)
5862 return UNIT_VTABLE(u
)->clean(u
, mask
);
5865 int unit_can_clean(Unit
*u
, ExecCleanMask
*ret
) {
5868 if (!UNIT_VTABLE(u
)->clean
||
5869 u
->load_state
!= UNIT_LOADED
) {
5874 /* When the clean() method is set, can_clean() really should be set too */
5875 assert(UNIT_VTABLE(u
)->can_clean
);
5877 return UNIT_VTABLE(u
)->can_clean(u
, ret
);
5880 bool unit_can_freeze(Unit
*u
) {
5883 if (UNIT_VTABLE(u
)->can_freeze
)
5884 return UNIT_VTABLE(u
)->can_freeze(u
);
5886 return UNIT_VTABLE(u
)->freeze
;
5889 void unit_frozen(Unit
*u
) {
5892 u
->freezer_state
= FREEZER_FROZEN
;
5894 bus_unit_send_pending_freezer_message(u
, false);
5897 void unit_thawed(Unit
*u
) {
5900 u
->freezer_state
= FREEZER_RUNNING
;
5902 bus_unit_send_pending_freezer_message(u
, false);
5905 static int unit_freezer_action(Unit
*u
, FreezerAction action
) {
5907 int (*method
)(Unit
*);
5911 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_THAW
));
5913 method
= action
== FREEZER_FREEZE
? UNIT_VTABLE(u
)->freeze
: UNIT_VTABLE(u
)->thaw
;
5914 if (!method
|| !cg_freezer_supported())
5920 if (u
->load_state
!= UNIT_LOADED
)
5923 s
= unit_active_state(u
);
5924 if (s
!= UNIT_ACTIVE
)
5927 if ((IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_THAWING
) && action
== FREEZER_FREEZE
) ||
5928 (u
->freezer_state
== FREEZER_THAWING
&& action
== FREEZER_THAW
))
5935 assert(IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_THAWING
));
5940 int unit_freeze(Unit
*u
) {
5941 return unit_freezer_action(u
, FREEZER_FREEZE
);
5944 int unit_thaw(Unit
*u
) {
5945 return unit_freezer_action(u
, FREEZER_THAW
);
5948 /* Wrappers around low-level cgroup freezer operations common for service and scope units */
5949 int unit_freeze_vtable_common(Unit
*u
) {
5950 return unit_cgroup_freezer_action(u
, FREEZER_FREEZE
);
5953 int unit_thaw_vtable_common(Unit
*u
) {
5954 return unit_cgroup_freezer_action(u
, FREEZER_THAW
);
5957 Condition
*unit_find_failed_condition(Unit
*u
) {
5958 Condition
*failed_trigger
= NULL
;
5959 bool has_succeeded_trigger
= false;
5961 if (u
->condition_result
)
5964 LIST_FOREACH(conditions
, c
, u
->conditions
)
5966 if (c
->result
== CONDITION_SUCCEEDED
)
5967 has_succeeded_trigger
= true;
5968 else if (!failed_trigger
)
5970 } else if (c
->result
!= CONDITION_SUCCEEDED
)
5973 return failed_trigger
&& !has_succeeded_trigger
? failed_trigger
: NULL
;
5976 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
5977 [COLLECT_INACTIVE
] = "inactive",
5978 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
5981 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);
5983 Unit
* unit_has_dependency(const Unit
*u
, UnitDependencyAtom atom
, Unit
*other
) {
5988 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
5989 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
5990 * is NULL the first entry found), or NULL if not found. */
5992 UNIT_FOREACH_DEPENDENCY(i
, u
, atom
)
5993 if (!other
|| other
== i
)
5999 int unit_get_dependency_array(const Unit
*u
, UnitDependencyAtom atom
, Unit
***ret_array
) {
6000 _cleanup_free_ Unit
**array
= NULL
;
6007 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
6008 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
6009 * while the dependency table is continuously updated. */
6011 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6012 if (!GREEDY_REALLOC(array
, n
+ 1))
6018 *ret_array
= TAKE_PTR(array
);
6020 assert(n
<= INT_MAX
);
6024 const ActivationDetailsVTable
* const activation_details_vtable
[_UNIT_TYPE_MAX
] = {
6025 [UNIT_PATH
] = &activation_details_path_vtable
,
6026 [UNIT_TIMER
] = &activation_details_timer_vtable
,
6029 ActivationDetails
*activation_details_new(Unit
*trigger_unit
) {
6030 _cleanup_free_ ActivationDetails
*details
= NULL
;
6032 assert(trigger_unit
);
6033 assert(trigger_unit
->type
!= _UNIT_TYPE_INVALID
);
6034 assert(trigger_unit
->id
);
6036 details
= malloc0(activation_details_vtable
[trigger_unit
->type
]->object_size
);
6040 *details
= (ActivationDetails
) {
6042 .trigger_unit_type
= trigger_unit
->type
,
6045 details
->trigger_unit_name
= strdup(trigger_unit
->id
);
6046 if (!details
->trigger_unit_name
)
6049 if (ACTIVATION_DETAILS_VTABLE(details
)->init
)
6050 ACTIVATION_DETAILS_VTABLE(details
)->init(details
, trigger_unit
);
6052 return TAKE_PTR(details
);
6055 static ActivationDetails
*activation_details_free(ActivationDetails
*details
) {
6059 if (ACTIVATION_DETAILS_VTABLE(details
)->done
)
6060 ACTIVATION_DETAILS_VTABLE(details
)->done(details
);
6062 free(details
->trigger_unit_name
);
6064 return mfree(details
);
6067 void activation_details_serialize(ActivationDetails
*details
, FILE *f
) {
6068 if (!details
|| details
->trigger_unit_type
== _UNIT_TYPE_INVALID
)
6071 (void) serialize_item(f
, "activation-details-unit-type", unit_type_to_string(details
->trigger_unit_type
));
6072 if (details
->trigger_unit_name
)
6073 (void) serialize_item(f
, "activation-details-unit-name", details
->trigger_unit_name
);
6074 if (ACTIVATION_DETAILS_VTABLE(details
)->serialize
)
6075 ACTIVATION_DETAILS_VTABLE(details
)->serialize(details
, f
);
6078 int activation_details_deserialize(const char *key
, const char *value
, ActivationDetails
**details
) {
6086 if (!streq(key
, "activation-details-unit-type"))
6089 t
= unit_type_from_string(value
);
6090 if (t
== _UNIT_TYPE_INVALID
)
6093 *details
= malloc0(activation_details_vtable
[t
]->object_size
);
6097 **details
= (ActivationDetails
) {
6099 .trigger_unit_type
= t
,
6105 if (streq(key
, "activation-details-unit-name")) {
6106 (*details
)->trigger_unit_name
= strdup(value
);
6107 if (!(*details
)->trigger_unit_name
)
6113 if (ACTIVATION_DETAILS_VTABLE(*details
)->deserialize
)
6114 return ACTIVATION_DETAILS_VTABLE(*details
)->deserialize(key
, value
, details
);
6119 int activation_details_append_env(ActivationDetails
*details
, char ***strv
) {
6127 if (!isempty(details
->trigger_unit_name
)) {
6128 char *s
= strjoin("TRIGGER_UNIT=", details
->trigger_unit_name
);
6132 r
= strv_consume(strv
, TAKE_PTR(s
));
6137 if (ACTIVATION_DETAILS_VTABLE(details
)->append_env
) {
6138 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_env(details
, strv
);
6143 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of variables added to the env block */
6146 int activation_details_append_pair(ActivationDetails
*details
, char ***strv
) {
6154 if (!isempty(details
->trigger_unit_name
)) {
6155 r
= strv_extend(strv
, "trigger_unit");
6159 r
= strv_extend(strv
, details
->trigger_unit_name
);
6164 if (ACTIVATION_DETAILS_VTABLE(details
)->append_env
) {
6165 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_pair(details
, strv
);
6170 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of pairs added to the strv */
/* Generates activation_details_ref()/activation_details_unref(), with
 * activation_details_free() as the destructor once n_ref drops to zero. */
DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails, activation_details, activation_details_free);