1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
9 #include "sd-messages.h"
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bpf-foreign.h"
15 #include "bpf-socket-bind.h"
16 #include "bus-common-errors.h"
17 #include "bus-internal.h"
19 #include "cgroup-setup.h"
20 #include "cgroup-util.h"
22 #include "core-varlink.h"
23 #include "dbus-unit.h"
28 #include "exec-credential.h"
31 #include "fileio-label.h"
33 #include "format-util.h"
34 #include "id128-util.h"
36 #include "iovec-util.h"
37 #include "label-util.h"
38 #include "load-dropin.h"
39 #include "load-fragment.h"
41 #include "logarithm.h"
43 #include "mkdir-label.h"
44 #include "path-util.h"
45 #include "process-util.h"
47 #include "serialize.h"
49 #include "signal-util.h"
50 #include "sparse-endian.h"
52 #include "specifier.h"
53 #include "stat-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
58 #include "terminal-util.h"
59 #include "tmpfile-util.h"
60 #include "umask-util.h"
61 #include "unit-name.h"
63 #include "user-util.h"
69 /* Thresholds for logging at INFO level about resource consumption */
70 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
71 #define MENTIONWORTHY_MEMORY_BYTES (64 * U64_MB)
72 #define MENTIONWORTHY_IO_BYTES (1 * U64_MB)
73 #define MENTIONWORTHY_IP_BYTES UINT64_C(0)
75 /* Thresholds for logging at NOTICE level about resource consumption */
76 #define NOTICEWORTHY_CPU_NSEC (10 * NSEC_PER_MINUTE)
77 #define NOTICEWORTHY_MEMORY_BYTES (512 * U64_MB)
78 #define NOTICEWORTHY_IO_BYTES (10 * U64_MB)
79 #define NOTICEWORTHY_IP_BYTES (128 * U64_MB)
81 const UnitVTable
* const unit_vtable
[_UNIT_TYPE_MAX
] = {
82 [UNIT_SERVICE
] = &service_vtable
,
83 [UNIT_SOCKET
] = &socket_vtable
,
84 [UNIT_TARGET
] = &target_vtable
,
85 [UNIT_DEVICE
] = &device_vtable
,
86 [UNIT_MOUNT
] = &mount_vtable
,
87 [UNIT_AUTOMOUNT
] = &automount_vtable
,
88 [UNIT_SWAP
] = &swap_vtable
,
89 [UNIT_TIMER
] = &timer_vtable
,
90 [UNIT_PATH
] = &path_vtable
,
91 [UNIT_SLICE
] = &slice_vtable
,
92 [UNIT_SCOPE
] = &scope_vtable
,
95 Unit
* unit_new(Manager
*m
, size_t size
) {
99 assert(size
>= sizeof(Unit
));
106 u
->type
= _UNIT_TYPE_INVALID
;
107 u
->default_dependencies
= true;
108 u
->unit_file_state
= _UNIT_FILE_STATE_INVALID
;
109 u
->unit_file_preset
= -1;
110 u
->on_failure_job_mode
= JOB_REPLACE
;
111 u
->on_success_job_mode
= JOB_FAIL
;
112 u
->cgroup_control_inotify_wd
= -1;
113 u
->cgroup_memory_inotify_wd
= -1;
114 u
->job_timeout
= USEC_INFINITY
;
115 u
->job_running_timeout
= USEC_INFINITY
;
116 u
->ref_uid
= UID_INVALID
;
117 u
->ref_gid
= GID_INVALID
;
118 u
->cpu_usage_last
= NSEC_INFINITY
;
120 unit_reset_memory_accounting_last(u
);
122 unit_reset_io_accounting_last(u
);
124 u
->cgroup_invalidated_mask
|= CGROUP_MASK_BPF_FIREWALL
;
125 u
->failure_action_exit_status
= u
->success_action_exit_status
= -1;
127 u
->ip_accounting_ingress_map_fd
= -EBADF
;
128 u
->ip_accounting_egress_map_fd
= -EBADF
;
130 u
->ipv4_allow_map_fd
= -EBADF
;
131 u
->ipv6_allow_map_fd
= -EBADF
;
132 u
->ipv4_deny_map_fd
= -EBADF
;
133 u
->ipv6_deny_map_fd
= -EBADF
;
135 u
->last_section_private
= -1;
137 u
->start_ratelimit
= (const RateLimit
) {
138 m
->defaults
.start_limit_interval
,
139 m
->defaults
.start_limit_burst
,
142 u
->auto_start_stop_ratelimit
= (const RateLimit
) { .interval
= 10 * USEC_PER_SEC
, .burst
= 16 };
147 int unit_new_for_name(Manager
*m
, size_t size
, const char *name
, Unit
**ret
) {
148 _cleanup_(unit_freep
) Unit
*u
= NULL
;
151 u
= unit_new(m
, size
);
155 r
= unit_add_name(u
, name
);
164 bool unit_has_name(const Unit
*u
, const char *name
) {
168 return streq_ptr(name
, u
->id
) ||
169 set_contains(u
->aliases
, name
);
172 static void unit_init(Unit
*u
) {
179 assert(u
->type
>= 0);
181 cc
= unit_get_cgroup_context(u
);
183 cgroup_context_init(cc
);
185 /* Copy in the manager defaults into the cgroup
186 * context, _before_ the rest of the settings have
187 * been initialized */
189 cc
->cpu_accounting
= u
->manager
->defaults
.cpu_accounting
;
190 cc
->io_accounting
= u
->manager
->defaults
.io_accounting
;
191 cc
->blockio_accounting
= u
->manager
->defaults
.blockio_accounting
;
192 cc
->memory_accounting
= u
->manager
->defaults
.memory_accounting
;
193 cc
->tasks_accounting
= u
->manager
->defaults
.tasks_accounting
;
194 cc
->ip_accounting
= u
->manager
->defaults
.ip_accounting
;
196 if (u
->type
!= UNIT_SLICE
)
197 cc
->tasks_max
= u
->manager
->defaults
.tasks_max
;
199 cc
->memory_pressure_watch
= u
->manager
->defaults
.memory_pressure_watch
;
200 cc
->memory_pressure_threshold_usec
= u
->manager
->defaults
.memory_pressure_threshold_usec
;
203 ec
= unit_get_exec_context(u
);
205 exec_context_init(ec
);
207 if (u
->manager
->defaults
.oom_score_adjust_set
) {
208 ec
->oom_score_adjust
= u
->manager
->defaults
.oom_score_adjust
;
209 ec
->oom_score_adjust_set
= true;
212 if (MANAGER_IS_SYSTEM(u
->manager
))
213 ec
->keyring_mode
= EXEC_KEYRING_SHARED
;
215 ec
->keyring_mode
= EXEC_KEYRING_INHERIT
;
217 /* User manager might have its umask redefined by PAM or UMask=. In this
218 * case let the units it manages inherit this value by default. They can
219 * still tune this value through their own unit file */
220 (void) get_process_umask(0, &ec
->umask
);
224 kc
= unit_get_kill_context(u
);
226 kill_context_init(kc
);
228 if (UNIT_VTABLE(u
)->init
)
229 UNIT_VTABLE(u
)->init(u
);
232 static int unit_add_alias(Unit
*u
, char *donated_name
) {
235 /* Make sure that u->names is allocated. We may leave u->names
236 * empty if we fail later, but this is not a problem. */
237 r
= set_ensure_put(&u
->aliases
, &string_hash_ops
, donated_name
);
245 int unit_add_name(Unit
*u
, const char *text
) {
246 _cleanup_free_
char *name
= NULL
, *instance
= NULL
;
253 if (unit_name_is_valid(text
, UNIT_NAME_TEMPLATE
)) {
255 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
256 "instance is not set when adding name '%s': %m", text
);
258 r
= unit_name_replace_instance(text
, u
->instance
, &name
);
260 return log_unit_debug_errno(u
, r
,
261 "failed to build instance name from '%s': %m", text
);
268 if (unit_has_name(u
, name
))
271 if (hashmap_contains(u
->manager
->units
, name
))
272 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
273 "unit already exist when adding name '%s': %m", name
);
275 if (!unit_name_is_valid(name
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
))
276 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
277 "name '%s' is invalid: %m", name
);
279 t
= unit_name_to_type(name
);
281 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
282 "failed to derive unit type from name '%s': %m", name
);
284 if (u
->type
!= _UNIT_TYPE_INVALID
&& t
!= u
->type
)
285 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
286 "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
289 r
= unit_name_to_instance(name
, &instance
);
291 return log_unit_debug_errno(u
, r
, "failed to extract instance from name '%s': %m", name
);
293 if (instance
&& !unit_type_may_template(t
))
294 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
), "templates are not allowed for name '%s': %m", name
);
296 /* Ensure that this unit either has no instance, or that the instance matches. */
297 if (u
->type
!= _UNIT_TYPE_INVALID
&& !streq_ptr(u
->instance
, instance
))
298 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
299 "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
300 name
, instance
, u
->instance
);
302 if (u
->id
&& !unit_type_may_alias(t
))
303 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(EEXIST
),
304 "cannot add name %s, aliases are not allowed for %s units.",
305 name
, unit_type_to_string(t
));
307 if (hashmap_size(u
->manager
->units
) >= MANAGER_MAX_NAMES
)
308 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(E2BIG
), "cannot add name, manager has too many units: %m");
310 /* Add name to the global hashmap first, because that's easier to undo */
311 r
= hashmap_put(u
->manager
->units
, name
, u
);
313 return log_unit_debug_errno(u
, r
, "add unit to hashmap failed for name '%s': %m", text
);
316 r
= unit_add_alias(u
, name
); /* unit_add_alias() takes ownership of the name on success */
318 hashmap_remove(u
->manager
->units
, name
);
324 /* A new name, we don't need the set yet. */
325 assert(u
->type
== _UNIT_TYPE_INVALID
);
326 assert(!u
->instance
);
329 u
->id
= TAKE_PTR(name
);
330 u
->instance
= TAKE_PTR(instance
);
332 LIST_PREPEND(units_by_type
, u
->manager
->units_by_type
[t
], u
);
336 unit_add_to_dbus_queue(u
);
340 int unit_choose_id(Unit
*u
, const char *name
) {
341 _cleanup_free_
char *t
= NULL
;
348 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
352 r
= unit_name_replace_instance(name
, u
->instance
, &t
);
359 if (streq_ptr(u
->id
, name
))
360 return 0; /* Nothing to do. */
362 /* Selects one of the aliases of this unit as the id */
363 s
= set_get(u
->aliases
, (char*) name
);
368 r
= set_remove_and_put(u
->aliases
, name
, u
->id
);
372 assert_se(set_remove(u
->aliases
, name
)); /* see set_get() above… */
374 u
->id
= s
; /* Old u->id is now stored in the set, and s is not stored anywhere */
375 unit_add_to_dbus_queue(u
);
380 int unit_set_description(Unit
*u
, const char *description
) {
385 r
= free_and_strdup(&u
->description
, empty_to_null(description
));
389 unit_add_to_dbus_queue(u
);
394 static bool unit_success_failure_handler_has_jobs(Unit
*unit
) {
397 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_SUCCESS
)
398 if (other
->job
|| other
->nop_job
)
401 UNIT_FOREACH_DEPENDENCY(other
, unit
, UNIT_ATOM_ON_FAILURE
)
402 if (other
->job
|| other
->nop_job
)
408 void unit_release_resources(Unit
*u
) {
409 UnitActiveState state
;
414 if (u
->job
|| u
->nop_job
)
420 state
= unit_active_state(u
);
421 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
424 if (unit_will_restart(u
))
427 ec
= unit_get_exec_context(u
);
428 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
429 exec_context_destroy_runtime_directory(ec
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
431 if (UNIT_VTABLE(u
)->release_resources
)
432 UNIT_VTABLE(u
)->release_resources(u
);
435 bool unit_may_gc(Unit
*u
) {
436 UnitActiveState state
;
441 /* Checks whether the unit is ready to be unloaded for garbage collection. Returns true when the
442 * unit may be collected, and false if there's some reason to keep it loaded.
444 * References from other units are *not* checked here. Instead, this is done in unit_gc_sweep(), but
445 * using markers to properly collect dependency loops.
448 if (u
->job
|| u
->nop_job
)
454 /* if we saw a cgroup empty event for this unit, stay around until we processed it so that we remove
455 * the empty cgroup if possible. Similar, process any pending OOM events if they are already queued
456 * before we release the unit. */
457 if (u
->in_cgroup_empty_queue
|| u
->in_cgroup_oom_queue
)
460 /* Make sure to send out D-Bus events before we unload the unit */
461 if (u
->in_dbus_queue
)
464 if (sd_bus_track_count(u
->bus_track
) > 0)
467 state
= unit_active_state(u
);
469 /* But we keep the unit object around for longer when it is referenced or configured to not be
471 switch (u
->collect_mode
) {
473 case COLLECT_INACTIVE
:
474 if (state
!= UNIT_INACTIVE
)
479 case COLLECT_INACTIVE_OR_FAILED
:
480 if (!IN_SET(state
, UNIT_INACTIVE
, UNIT_FAILED
))
486 assert_not_reached();
489 /* Check if any OnFailure= or on Success= jobs may be pending */
490 if (unit_success_failure_handler_has_jobs(u
))
493 if (u
->cgroup_path
) {
494 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
495 * around. Units with active processes should never be collected. */
497 r
= cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER
, u
->cgroup_path
);
499 log_unit_debug_errno(u
, r
, "Failed to determine whether cgroup %s is empty: %m", empty_to_root(u
->cgroup_path
));
504 if (!UNIT_VTABLE(u
)->may_gc
)
507 return UNIT_VTABLE(u
)->may_gc(u
);
510 void unit_add_to_load_queue(Unit
*u
) {
512 assert(u
->type
!= _UNIT_TYPE_INVALID
);
514 if (u
->load_state
!= UNIT_STUB
|| u
->in_load_queue
)
517 LIST_PREPEND(load_queue
, u
->manager
->load_queue
, u
);
518 u
->in_load_queue
= true;
521 void unit_add_to_cleanup_queue(Unit
*u
) {
524 if (u
->in_cleanup_queue
)
527 LIST_PREPEND(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
528 u
->in_cleanup_queue
= true;
531 void unit_add_to_gc_queue(Unit
*u
) {
534 if (u
->in_gc_queue
|| u
->in_cleanup_queue
)
540 LIST_PREPEND(gc_queue
, u
->manager
->gc_unit_queue
, u
);
541 u
->in_gc_queue
= true;
544 void unit_add_to_dbus_queue(Unit
*u
) {
546 assert(u
->type
!= _UNIT_TYPE_INVALID
);
548 if (u
->load_state
== UNIT_STUB
|| u
->in_dbus_queue
)
551 /* Shortcut things if nobody cares */
552 if (sd_bus_track_count(u
->manager
->subscribed
) <= 0 &&
553 sd_bus_track_count(u
->bus_track
) <= 0 &&
554 set_isempty(u
->manager
->private_buses
)) {
555 u
->sent_dbus_new_signal
= true;
559 LIST_PREPEND(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
560 u
->in_dbus_queue
= true;
563 void unit_submit_to_stop_when_unneeded_queue(Unit
*u
) {
566 if (u
->in_stop_when_unneeded_queue
)
569 if (!u
->stop_when_unneeded
)
572 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
575 LIST_PREPEND(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
576 u
->in_stop_when_unneeded_queue
= true;
579 void unit_submit_to_start_when_upheld_queue(Unit
*u
) {
582 if (u
->in_start_when_upheld_queue
)
585 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)))
588 if (!unit_has_dependency(u
, UNIT_ATOM_START_STEADILY
, NULL
))
591 LIST_PREPEND(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
592 u
->in_start_when_upheld_queue
= true;
595 void unit_submit_to_stop_when_bound_queue(Unit
*u
) {
598 if (u
->in_stop_when_bound_queue
)
601 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
604 if (!unit_has_dependency(u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
, NULL
))
607 LIST_PREPEND(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
608 u
->in_stop_when_bound_queue
= true;
611 static bool unit_can_release_resources(Unit
*u
) {
616 if (UNIT_VTABLE(u
)->release_resources
)
619 ec
= unit_get_exec_context(u
);
620 if (ec
&& ec
->runtime_directory_preserve_mode
== EXEC_PRESERVE_RESTART
)
626 void unit_submit_to_release_resources_queue(Unit
*u
) {
629 if (u
->in_release_resources_queue
)
632 if (u
->job
|| u
->nop_job
)
638 if (!unit_can_release_resources(u
))
641 LIST_PREPEND(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
642 u
->in_release_resources_queue
= true;
645 static void unit_clear_dependencies(Unit
*u
) {
648 /* Removes all dependencies configured on u and their reverse dependencies. */
650 for (Hashmap
*deps
; (deps
= hashmap_steal_first(u
->dependencies
));) {
652 for (Unit
*other
; (other
= hashmap_steal_first_key(deps
));) {
655 HASHMAP_FOREACH(other_deps
, other
->dependencies
)
656 hashmap_remove(other_deps
, u
);
658 unit_add_to_gc_queue(other
);
664 u
->dependencies
= hashmap_free(u
->dependencies
);
667 static void unit_remove_transient(Unit
*u
) {
673 if (u
->fragment_path
)
674 (void) unlink(u
->fragment_path
);
676 STRV_FOREACH(i
, u
->dropin_paths
) {
677 _cleanup_free_
char *p
= NULL
, *pp
= NULL
;
679 if (path_extract_directory(*i
, &p
) < 0) /* Get the drop-in directory from the drop-in file */
682 if (path_extract_directory(p
, &pp
) < 0) /* Get the config directory from the drop-in directory */
685 /* Only drop transient drop-ins */
686 if (!path_equal(u
->manager
->lookup_paths
.transient
, pp
))
694 static void unit_free_mounts_for(Unit
*u
) {
697 for (UnitMountDependencyType t
= 0; t
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
; ++t
) {
699 _cleanup_free_
char *path
= NULL
;
701 path
= hashmap_steal_first_key(u
->mounts_for
[t
]);
705 char s
[strlen(path
) + 1];
707 PATH_FOREACH_PREFIX_MORE(s
, path
) {
711 x
= hashmap_get2(u
->manager
->units_needing_mounts_for
[t
], s
, (void**) &y
);
715 (void) set_remove(x
, u
);
717 if (set_isempty(x
)) {
718 assert_se(hashmap_remove(u
->manager
->units_needing_mounts_for
[t
], y
));
725 u
->mounts_for
[t
] = hashmap_free(u
->mounts_for
[t
]);
729 static void unit_done(Unit
*u
) {
738 if (UNIT_VTABLE(u
)->done
)
739 UNIT_VTABLE(u
)->done(u
);
741 ec
= unit_get_exec_context(u
);
743 exec_context_done(ec
);
745 cc
= unit_get_cgroup_context(u
);
747 cgroup_context_done(cc
);
750 Unit
* unit_free(Unit
*u
) {
757 sd_event_source_disable_unref(u
->auto_start_stop_event_source
);
759 u
->transient_file
= safe_fclose(u
->transient_file
);
761 if (!MANAGER_IS_RELOADING(u
->manager
))
762 unit_remove_transient(u
);
764 bus_unit_send_removed_signal(u
);
768 unit_dequeue_rewatch_pids(u
);
770 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
771 u
->bus_track
= sd_bus_track_unref(u
->bus_track
);
772 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
773 u
->pending_freezer_invocation
= sd_bus_message_unref(u
->pending_freezer_invocation
);
775 unit_free_mounts_for(u
);
777 SET_FOREACH(t
, u
->aliases
)
778 hashmap_remove_value(u
->manager
->units
, t
, u
);
780 hashmap_remove_value(u
->manager
->units
, u
->id
, u
);
782 if (!sd_id128_is_null(u
->invocation_id
))
783 hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
797 /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
798 * detach the unit from slice tree in order to eliminate its effect on controller masks. */
799 slice
= UNIT_GET_SLICE(u
);
800 unit_clear_dependencies(u
);
802 unit_add_family_to_cgroup_realize_queue(slice
);
805 manager_unref_console(u
->manager
);
807 fdset_free(u
->initial_socket_bind_link_fds
);
809 bpf_link_free(u
->ipv4_socket_bind_link
);
810 bpf_link_free(u
->ipv6_socket_bind_link
);
813 unit_release_cgroup(u
);
815 if (!MANAGER_IS_RELOADING(u
->manager
))
816 unit_unlink_state_files(u
);
818 unit_unref_uid_gid(u
, false);
820 (void) manager_update_failed_units(u
->manager
, u
, false);
821 set_remove(u
->manager
->startup_units
, u
);
823 unit_unwatch_all_pids(u
);
825 while (u
->refs_by_target
)
826 unit_ref_unset(u
->refs_by_target
);
828 if (u
->type
!= _UNIT_TYPE_INVALID
)
829 LIST_REMOVE(units_by_type
, u
->manager
->units_by_type
[u
->type
], u
);
831 if (u
->in_load_queue
)
832 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
834 if (u
->in_dbus_queue
)
835 LIST_REMOVE(dbus_queue
, u
->manager
->dbus_unit_queue
, u
);
837 if (u
->in_cleanup_queue
)
838 LIST_REMOVE(cleanup_queue
, u
->manager
->cleanup_queue
, u
);
841 LIST_REMOVE(gc_queue
, u
->manager
->gc_unit_queue
, u
);
843 if (u
->in_cgroup_realize_queue
)
844 LIST_REMOVE(cgroup_realize_queue
, u
->manager
->cgroup_realize_queue
, u
);
846 if (u
->in_cgroup_empty_queue
)
847 LIST_REMOVE(cgroup_empty_queue
, u
->manager
->cgroup_empty_queue
, u
);
849 if (u
->in_cgroup_oom_queue
)
850 LIST_REMOVE(cgroup_oom_queue
, u
->manager
->cgroup_oom_queue
, u
);
852 if (u
->in_target_deps_queue
)
853 LIST_REMOVE(target_deps_queue
, u
->manager
->target_deps_queue
, u
);
855 if (u
->in_stop_when_unneeded_queue
)
856 LIST_REMOVE(stop_when_unneeded_queue
, u
->manager
->stop_when_unneeded_queue
, u
);
858 if (u
->in_start_when_upheld_queue
)
859 LIST_REMOVE(start_when_upheld_queue
, u
->manager
->start_when_upheld_queue
, u
);
861 if (u
->in_stop_when_bound_queue
)
862 LIST_REMOVE(stop_when_bound_queue
, u
->manager
->stop_when_bound_queue
, u
);
864 if (u
->in_release_resources_queue
)
865 LIST_REMOVE(release_resources_queue
, u
->manager
->release_resources_queue
, u
);
867 bpf_firewall_close(u
);
869 hashmap_free(u
->bpf_foreign_by_key
);
871 bpf_program_free(u
->bpf_device_control_installed
);
874 bpf_link_free(u
->restrict_ifaces_ingress_bpf_link
);
875 bpf_link_free(u
->restrict_ifaces_egress_bpf_link
);
877 fdset_free(u
->initial_restric_ifaces_link_fds
);
879 condition_free_list(u
->conditions
);
880 condition_free_list(u
->asserts
);
882 free(u
->description
);
883 strv_free(u
->documentation
);
884 free(u
->fragment_path
);
885 free(u
->source_path
);
886 strv_free(u
->dropin_paths
);
889 free(u
->job_timeout_reboot_arg
);
892 free(u
->access_selinux_context
);
894 set_free_free(u
->aliases
);
897 activation_details_unref(u
->activation_details
);
902 FreezerState
unit_freezer_state(Unit
*u
) {
905 return u
->freezer_state
;
908 UnitActiveState
unit_active_state(Unit
*u
) {
911 if (u
->load_state
== UNIT_MERGED
)
912 return unit_active_state(unit_follow_merge(u
));
914 /* After a reload it might happen that a unit is not correctly
915 * loaded but still has a process around. That's why we won't
916 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
918 return UNIT_VTABLE(u
)->active_state(u
);
921 const char* unit_sub_state_to_string(Unit
*u
) {
924 return UNIT_VTABLE(u
)->sub_state_to_string(u
);
927 static int unit_merge_names(Unit
*u
, Unit
*other
) {
934 r
= unit_add_alias(u
, other
->id
);
938 r
= set_move(u
->aliases
, other
->aliases
);
940 set_remove(u
->aliases
, other
->id
);
945 other
->aliases
= set_free_free(other
->aliases
);
947 SET_FOREACH(name
, u
->aliases
)
948 assert_se(hashmap_replace(u
->manager
->units
, name
, u
) == 0);
953 static int unit_reserve_dependencies(Unit
*u
, Unit
*other
) {
962 /* Let's reserve some space in the dependency hashmaps so that later on merging the units cannot
965 * First make some room in the per dependency type hashmaps. Using the summed size of both units'
966 * hashmaps is an estimate that is likely too high since they probably use some of the same
967 * types. But it's never too low, and that's all we need. */
969 n_reserve
= MIN(hashmap_size(other
->dependencies
), LESS_BY((size_t) _UNIT_DEPENDENCY_MAX
, hashmap_size(u
->dependencies
)));
971 r
= hashmap_ensure_allocated(&u
->dependencies
, NULL
);
975 r
= hashmap_reserve(u
->dependencies
, n_reserve
);
980 /* Now, enlarge our per dependency type hashmaps by the number of entries in the same hashmap of the
981 * other unit's dependencies.
983 * NB: If u does not have a dependency set allocated for some dependency type, there is no need to
984 * reserve anything for. In that case other's set will be transferred as a whole to u by
985 * complete_move(). */
987 HASHMAP_FOREACH_KEY(deps
, d
, u
->dependencies
) {
990 other_deps
= hashmap_get(other
->dependencies
, d
);
992 r
= hashmap_reserve(deps
, hashmap_size(other_deps
));
1000 static bool unit_should_warn_about_dependency(UnitDependency dependency
) {
1001 /* Only warn about some unit types */
1002 return IN_SET(dependency
,
1013 static int unit_per_dependency_type_hashmap_update(
1016 UnitDependencyMask origin_mask
,
1017 UnitDependencyMask destination_mask
) {
1019 UnitDependencyInfo info
;
1023 assert_cc(sizeof(void*) == sizeof(info
));
1025 /* Acquire the UnitDependencyInfo entry for the Unit* we are interested in, and update it if it
1026 * exists, or insert it anew if not. */
1028 info
.data
= hashmap_get(per_type
, other
);
1030 /* Entry already exists. Add in our mask. */
1032 if (FLAGS_SET(origin_mask
, info
.origin_mask
) &&
1033 FLAGS_SET(destination_mask
, info
.destination_mask
))
1036 info
.origin_mask
|= origin_mask
;
1037 info
.destination_mask
|= destination_mask
;
1039 r
= hashmap_update(per_type
, other
, info
.data
);
1041 info
= (UnitDependencyInfo
) {
1042 .origin_mask
= origin_mask
,
1043 .destination_mask
= destination_mask
,
1046 r
= hashmap_put(per_type
, other
, info
.data
);
1054 static void unit_merge_dependencies(Unit
*u
, Unit
*other
) {
1056 void *dt
; /* Actually of type UnitDependency, except that we don't bother casting it here,
1057 * since the hashmaps all want it as void pointer. */
1065 /* First, remove dependency to other. */
1066 HASHMAP_FOREACH_KEY(deps
, dt
, u
->dependencies
) {
1067 if (hashmap_remove(deps
, other
) && unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1068 log_unit_warning(u
, "Dependency %s=%s is dropped, as %s is merged into %s.",
1069 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1070 other
->id
, other
->id
, u
->id
);
1072 if (hashmap_isempty(deps
))
1073 hashmap_free(hashmap_remove(u
->dependencies
, dt
));
1077 _cleanup_hashmap_free_ Hashmap
*other_deps
= NULL
;
1078 UnitDependencyInfo di_back
;
1081 /* Let's focus on one dependency type at a time, that 'other' has defined. */
1082 other_deps
= hashmap_steal_first_key_and_value(other
->dependencies
, &dt
);
1086 deps
= hashmap_get(u
->dependencies
, dt
);
1088 /* Now iterate through all dependencies of this dependency type, of 'other'. We refer to the
1089 * referenced units as 'back'. */
1090 HASHMAP_FOREACH_KEY(di_back
.data
, back
, other_deps
) {
1095 /* This is a dependency pointing back to the unit we want to merge with?
1096 * Suppress it (but warn) */
1097 if (unit_should_warn_about_dependency(UNIT_DEPENDENCY_FROM_PTR(dt
)))
1098 log_unit_warning(u
, "Dependency %s=%s in %s is dropped, as %s is merged into %s.",
1099 unit_dependency_to_string(UNIT_DEPENDENCY_FROM_PTR(dt
)),
1100 u
->id
, other
->id
, other
->id
, u
->id
);
1102 hashmap_remove(other_deps
, back
);
1106 /* Now iterate through all deps of 'back', and fix the ones pointing to 'other' to
1107 * point to 'u' instead. */
1108 HASHMAP_FOREACH_KEY(back_deps
, back_dt
, back
->dependencies
) {
1109 UnitDependencyInfo di_move
;
1111 di_move
.data
= hashmap_remove(back_deps
, other
);
1115 assert_se(unit_per_dependency_type_hashmap_update(
1118 di_move
.origin_mask
,
1119 di_move
.destination_mask
) >= 0);
1122 /* The target unit already has dependencies of this type, let's then merge this individually. */
1124 assert_se(unit_per_dependency_type_hashmap_update(
1127 di_back
.origin_mask
,
1128 di_back
.destination_mask
) >= 0);
1131 /* Now all references towards 'other' of the current type 'dt' are corrected to point to 'u'.
1132 * Lets's now move the deps of type 'dt' from 'other' to 'u'. If the unit does not have
1133 * dependencies of this type, let's move them per type wholesale. */
1135 assert_se(hashmap_put(u
->dependencies
, dt
, TAKE_PTR(other_deps
)) >= 0);
1138 other
->dependencies
= hashmap_free(other
->dependencies
);
1141 int unit_merge(Unit
*u
, Unit
*other
) {
1146 assert(u
->manager
== other
->manager
);
1147 assert(u
->type
!= _UNIT_TYPE_INVALID
);
1149 other
= unit_follow_merge(other
);
1154 if (u
->type
!= other
->type
)
1157 if (!unit_type_may_alias(u
->type
)) /* Merging only applies to unit names that support aliases */
1160 if (!IN_SET(other
->load_state
, UNIT_STUB
, UNIT_NOT_FOUND
))
1163 if (!streq_ptr(u
->instance
, other
->instance
))
1172 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
1175 /* Make reservations to ensure merge_dependencies() won't fail. We don't rollback reservations if we
1176 * fail. We don't have a way to undo reservations. A reservation is not a leak. */
1177 r
= unit_reserve_dependencies(u
, other
);
1181 /* Redirect all references */
1182 while (other
->refs_by_target
)
1183 unit_ref_set(other
->refs_by_target
, other
->refs_by_target
->source
, u
);
1185 /* Merge dependencies */
1186 unit_merge_dependencies(u
, other
);
1188 /* Merge names. It is better to do that after merging deps, otherwise the log message contains n/a. */
1189 r
= unit_merge_names(u
, other
);
1193 other
->load_state
= UNIT_MERGED
;
1194 other
->merged_into
= u
;
1196 if (!u
->activation_details
)
1197 u
->activation_details
= activation_details_ref(other
->activation_details
);
1199 /* If there is still some data attached to the other node, we
1200 * don't need it anymore, and can free it. */
1201 if (other
->load_state
!= UNIT_STUB
)
1202 if (UNIT_VTABLE(other
)->done
)
1203 UNIT_VTABLE(other
)->done(other
);
1205 unit_add_to_dbus_queue(u
);
1206 unit_add_to_cleanup_queue(other
);
1211 int unit_merge_by_name(Unit
*u
, const char *name
) {
1212 _cleanup_free_
char *s
= NULL
;
1216 /* Either add name to u, or if a unit with name already exists, merge it with u.
1217 * If name is a template, do the same for name@instance, where instance is u's instance. */
1222 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
1226 r
= unit_name_replace_instance(name
, u
->instance
, &s
);
1233 other
= manager_get_unit(u
->manager
, name
);
1235 return unit_merge(u
, other
);
1237 return unit_add_name(u
, name
);
1240 Unit
* unit_follow_merge(Unit
*u
) {
1243 while (u
->load_state
== UNIT_MERGED
)
1244 assert_se(u
= u
->merged_into
);
1249 int unit_add_exec_dependencies(Unit
*u
, ExecContext
*c
) {
1255 /* Unlike unit_add_dependency() or friends, this always returns 0 on success. */
1257 if (c
->working_directory
) {
1258 r
= unit_add_mounts_for(
1260 c
->working_directory
,
1261 UNIT_DEPENDENCY_FILE
,
1262 c
->working_directory_missing_ok
? UNIT_MOUNT_WANTS
: UNIT_MOUNT_REQUIRES
);
1267 if (c
->root_directory
) {
1268 r
= unit_add_mounts_for(u
, c
->root_directory
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1273 if (c
->root_image
) {
1274 r
= unit_add_mounts_for(u
, c
->root_image
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1279 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++) {
1280 if (!u
->manager
->prefix
[dt
])
1283 for (size_t i
= 0; i
< c
->directories
[dt
].n_items
; i
++) {
1284 _cleanup_free_
char *p
= NULL
;
1286 p
= path_join(u
->manager
->prefix
[dt
], c
->directories
[dt
].items
[i
].path
);
1290 r
= unit_add_mounts_for(u
, p
, UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_REQUIRES
);
1296 if (!MANAGER_IS_SYSTEM(u
->manager
))
1299 /* For the following three directory types we need write access, and /var/ is possibly on the root
1300 * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
1301 if (c
->directories
[EXEC_DIRECTORY_STATE
].n_items
> 0 ||
1302 c
->directories
[EXEC_DIRECTORY_CACHE
].n_items
> 0 ||
1303 c
->directories
[EXEC_DIRECTORY_LOGS
].n_items
> 0) {
1304 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_REMOUNT_FS_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1309 if (c
->private_tmp
) {
1310 r
= unit_add_mounts_for(u
, "/tmp", UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1314 r
= unit_add_mounts_for(u
, "/var/tmp", UNIT_DEPENDENCY_FILE
, UNIT_MOUNT_WANTS
);
1318 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_TMPFILES_SETUP_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1323 if (c
->root_image
) {
1324 /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
1325 * implicit dependency on udev */
1327 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_UDEVD_SERVICE
, true, UNIT_DEPENDENCY_FILE
);
1332 if (!IN_SET(c
->std_output
,
1333 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1334 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1335 !IN_SET(c
->std_error
,
1336 EXEC_OUTPUT_JOURNAL
, EXEC_OUTPUT_JOURNAL_AND_CONSOLE
,
1337 EXEC_OUTPUT_KMSG
, EXEC_OUTPUT_KMSG_AND_CONSOLE
) &&
1341 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1344 if (c
->log_namespace
) {
1345 _cleanup_free_
char *socket_unit
= NULL
, *varlink_socket_unit
= NULL
;
1347 r
= unit_name_build_from_type("systemd-journald", c
->log_namespace
, UNIT_SOCKET
, &socket_unit
);
1351 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1355 r
= unit_name_build_from_type("systemd-journald-varlink", c
->log_namespace
, UNIT_SOCKET
, &varlink_socket_unit
);
1359 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, varlink_socket_unit
, true, UNIT_DEPENDENCY_FILE
);
1363 r
= unit_add_dependency_by_name(u
, UNIT_AFTER
, SPECIAL_JOURNALD_SOCKET
, true, UNIT_DEPENDENCY_FILE
);
1368 r
= unit_add_default_credential_dependencies(u
, c
);
1375 const char* unit_description(Unit
*u
) {
1379 return u
->description
;
1381 return strna(u
->id
);
1384 const char* unit_status_string(Unit
*u
, char **ret_combined_buffer
) {
1388 /* Return u->id, u->description, or "{u->id} - {u->description}".
1389 * Versions with u->description are only used if it is set.
1390 * The last option is used if configured and the caller provided the 'ret_combined_buffer'
1393 * Note that *ret_combined_buffer may be set to NULL. */
1395 if (!u
->description
||
1396 u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_NAME
||
1397 (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& !ret_combined_buffer
) ||
1398 streq(u
->description
, u
->id
)) {
1400 if (ret_combined_buffer
)
1401 *ret_combined_buffer
= NULL
;
1405 if (ret_combined_buffer
) {
1406 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
) {
1407 *ret_combined_buffer
= strjoin(u
->id
, " - ", u
->description
);
1408 if (*ret_combined_buffer
)
1409 return *ret_combined_buffer
;
1410 log_oom(); /* Fall back to ->description */
1412 *ret_combined_buffer
= NULL
;
1415 return u
->description
;
1418 /* Common implementation for multiple backends */
1419 int unit_load_fragment_and_dropin(Unit
*u
, bool fragment_required
) {
1424 /* Load a .{service,socket,...} file */
1425 r
= unit_load_fragment(u
);
1429 if (u
->load_state
== UNIT_STUB
) {
1430 if (fragment_required
)
1433 u
->load_state
= UNIT_LOADED
;
1436 /* Load drop-in directory data. If u is an alias, we might be reloading the
1437 * target unit needlessly. But we cannot be sure which drops-ins have already
1438 * been loaded and which not, at least without doing complicated book-keeping,
1439 * so let's always reread all drop-ins. */
1440 r
= unit_load_dropin(unit_follow_merge(u
));
1444 if (u
->source_path
) {
1447 if (stat(u
->source_path
, &st
) >= 0)
1448 u
->source_mtime
= timespec_load(&st
.st_mtim
);
1450 u
->source_mtime
= 0;
1456 void unit_add_to_target_deps_queue(Unit
*u
) {
1457 Manager
*m
= ASSERT_PTR(ASSERT_PTR(u
)->manager
);
1459 if (u
->in_target_deps_queue
)
1462 LIST_PREPEND(target_deps_queue
, m
->target_deps_queue
, u
);
1463 u
->in_target_deps_queue
= true;
1466 int unit_add_default_target_dependency(Unit
*u
, Unit
*target
) {
1470 if (target
->type
!= UNIT_TARGET
)
1473 /* Only add the dependency if both units are loaded, so that
1474 * that loop check below is reliable */
1475 if (u
->load_state
!= UNIT_LOADED
||
1476 target
->load_state
!= UNIT_LOADED
)
1479 /* If either side wants no automatic dependencies, then let's
1481 if (!u
->default_dependencies
||
1482 !target
->default_dependencies
)
1485 /* Don't create loops */
1486 if (unit_has_dependency(target
, UNIT_ATOM_BEFORE
, u
))
1489 return unit_add_dependency(target
, UNIT_AFTER
, u
, true, UNIT_DEPENDENCY_DEFAULT
);
1492 static int unit_add_slice_dependencies(Unit
*u
) {
1496 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
1499 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1500 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1502 UnitDependencyMask mask
= u
->type
== UNIT_SLICE
? UNIT_DEPENDENCY_IMPLICIT
: UNIT_DEPENDENCY_FILE
;
1504 slice
= UNIT_GET_SLICE(u
);
1506 return unit_add_two_dependencies(u
, UNIT_AFTER
, UNIT_REQUIRES
, slice
, true, mask
);
1508 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
))
1511 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_REQUIRES
, SPECIAL_ROOT_SLICE
, true, mask
);
1514 static int unit_add_mount_dependencies(Unit
*u
) {
1515 bool changed
= false;
1520 for (UnitMountDependencyType t
= 0; t
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
; ++t
) {
1521 UnitDependencyInfo di
;
1524 HASHMAP_FOREACH_KEY(di
.data
, path
, u
->mounts_for
[t
]) {
1526 char prefix
[strlen(ASSERT_PTR(path
)) + 1];
1528 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
1529 _cleanup_free_
char *p
= NULL
;
1532 r
= unit_name_from_path(prefix
, ".mount", &p
);
1534 continue; /* If the path cannot be converted to a mount unit name,
1535 * then it's not manageable as a unit by systemd, and
1536 * hence we don't need a dependency on it. Let's thus
1537 * silently ignore the issue. */
1541 m
= manager_get_unit(u
->manager
, p
);
1543 /* Make sure to load the mount unit if it exists. If so the
1544 * dependencies on this unit will be added later during the loading
1545 * of the mount unit. */
1546 (void) manager_load_unit_prepare(
1557 if (m
->load_state
!= UNIT_LOADED
)
1560 r
= unit_add_dependency(
1564 /* add_reference= */ true,
1568 changed
= changed
|| r
> 0;
1570 if (m
->fragment_path
) {
1571 r
= unit_add_dependency(
1573 unit_mount_dependency_type_to_dependency_type(t
),
1575 /* add_reference= */ true,
1579 changed
= changed
|| r
> 0;
1588 static int unit_add_oomd_dependencies(Unit
*u
) {
1595 if (!u
->default_dependencies
)
1598 c
= unit_get_cgroup_context(u
);
1602 bool wants_oomd
= c
->moom_swap
== MANAGED_OOM_KILL
|| c
->moom_mem_pressure
== MANAGED_OOM_KILL
;
1606 if (!cg_all_unified())
1609 r
= cg_mask_supported(&mask
);
1611 return log_debug_errno(r
, "Failed to determine supported controllers: %m");
1613 if (!FLAGS_SET(mask
, CGROUP_MASK_MEMORY
))
1616 return unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE
);
1619 static int unit_add_startup_units(Unit
*u
) {
1620 if (!unit_has_startup_cgroup_constraints(u
))
1623 return set_ensure_put(&u
->manager
->startup_units
, NULL
, u
);
1626 static int unit_validate_on_failure_job_mode(
1628 const char *job_mode_setting
,
1630 const char *dependency_name
,
1631 UnitDependencyAtom atom
) {
1633 Unit
*other
, *found
= NULL
;
1635 if (job_mode
!= JOB_ISOLATE
)
1638 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
1641 else if (found
!= other
)
1642 return log_unit_error_errno(
1643 u
, SYNTHETIC_ERRNO(ENOEXEC
),
1644 "More than one %s dependencies specified but %sisolate set. Refusing.",
1645 dependency_name
, job_mode_setting
);
1651 int unit_load(Unit
*u
) {
1656 if (u
->in_load_queue
) {
1657 LIST_REMOVE(load_queue
, u
->manager
->load_queue
, u
);
1658 u
->in_load_queue
= false;
1661 if (u
->type
== _UNIT_TYPE_INVALID
)
1664 if (u
->load_state
!= UNIT_STUB
)
1667 if (u
->transient_file
) {
1668 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1669 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1671 r
= fflush_and_check(u
->transient_file
);
1675 u
->transient_file
= safe_fclose(u
->transient_file
);
1676 u
->fragment_mtime
= now(CLOCK_REALTIME
);
1679 r
= UNIT_VTABLE(u
)->load(u
);
1683 assert(u
->load_state
!= UNIT_STUB
);
1685 if (u
->load_state
== UNIT_LOADED
) {
1686 unit_add_to_target_deps_queue(u
);
1688 r
= unit_add_slice_dependencies(u
);
1692 r
= unit_add_mount_dependencies(u
);
1696 r
= unit_add_oomd_dependencies(u
);
1700 r
= unit_add_startup_units(u
);
1704 r
= unit_validate_on_failure_job_mode(u
, "OnSuccessJobMode=", u
->on_success_job_mode
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
);
1708 r
= unit_validate_on_failure_job_mode(u
, "OnFailureJobMode=", u
->on_failure_job_mode
, "OnFailure=", UNIT_ATOM_ON_FAILURE
);
1712 if (u
->job_running_timeout
!= USEC_INFINITY
&& u
->job_running_timeout
> u
->job_timeout
)
1713 log_unit_warning(u
, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1715 /* We finished loading, let's ensure our parents recalculate the members mask */
1716 unit_invalidate_cgroup_members_masks(u
);
1719 assert((u
->load_state
!= UNIT_MERGED
) == !u
->merged_into
);
1721 unit_add_to_dbus_queue(unit_follow_merge(u
));
1722 unit_add_to_gc_queue(u
);
1723 (void) manager_varlink_send_managed_oom_update(u
);
1728 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
1729 * should hence return ENOEXEC to ensure units are placed in this state after loading. */
1731 u
->load_state
= u
->load_state
== UNIT_STUB
? UNIT_NOT_FOUND
:
1732 r
== -ENOEXEC
? UNIT_BAD_SETTING
:
1736 /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
1737 * an attempt is made to load this unit, we know we need to check again. */
1738 if (u
->load_state
== UNIT_NOT_FOUND
)
1739 u
->fragment_not_found_timestamp_hash
= u
->manager
->unit_cache_timestamp_hash
;
1741 unit_add_to_dbus_queue(u
);
1742 unit_add_to_gc_queue(u
);
1744 return log_unit_debug_errno(u
, r
, "Failed to load configuration: %m");
1748 static int log_unit_internal(void *userdata
, int level
, int error
, const char *file
, int line
, const char *func
, const char *format
, ...) {
1753 if (u
&& !unit_log_level_test(u
, level
))
1754 return -ERRNO_VALUE(error
);
1756 va_start(ap
, format
);
1758 r
= log_object_internalv(level
, error
, file
, line
, func
,
1759 u
->manager
->unit_log_field
,
1761 u
->manager
->invocation_log_field
,
1762 u
->invocation_id_string
,
1765 r
= log_internalv(level
, error
, file
, line
, func
, format
, ap
);
1771 static bool unit_test_condition(Unit
*u
) {
1772 _cleanup_strv_free_
char **env
= NULL
;
1777 dual_timestamp_now(&u
->condition_timestamp
);
1779 r
= manager_get_effective_environment(u
->manager
, &env
);
1781 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1782 u
->condition_result
= true;
1784 u
->condition_result
= condition_test_list(
1787 condition_type_to_string
,
1791 unit_add_to_dbus_queue(u
);
1792 return u
->condition_result
;
1795 static bool unit_test_assert(Unit
*u
) {
1796 _cleanup_strv_free_
char **env
= NULL
;
1801 dual_timestamp_now(&u
->assert_timestamp
);
1803 r
= manager_get_effective_environment(u
->manager
, &env
);
1805 log_unit_error_errno(u
, r
, "Failed to determine effective environment: %m");
1806 u
->assert_result
= CONDITION_ERROR
;
1808 u
->assert_result
= condition_test_list(
1811 assert_type_to_string
,
1815 unit_add_to_dbus_queue(u
);
1816 return u
->assert_result
;
1819 void unit_status_printf(Unit
*u
, StatusType status_type
, const char *status
, const char *format
, const char *ident
) {
1820 if (log_get_show_color()) {
1821 if (u
->manager
->status_unit_format
== STATUS_UNIT_FORMAT_COMBINED
&& strchr(ident
, ' '))
1822 ident
= strjoina(ANSI_HIGHLIGHT
, u
->id
, ANSI_NORMAL
, " - ", u
->description
);
1824 ident
= strjoina(ANSI_HIGHLIGHT
, ident
, ANSI_NORMAL
);
1827 DISABLE_WARNING_FORMAT_NONLITERAL
;
1828 manager_status_printf(u
->manager
, status_type
, status
, format
, ident
);
1832 int unit_test_start_limit(Unit
*u
) {
1837 if (ratelimit_below(&u
->start_ratelimit
)) {
1838 u
->start_limit_hit
= false;
1842 log_unit_warning(u
, "Start request repeated too quickly.");
1843 u
->start_limit_hit
= true;
1845 reason
= strjoina("unit ", u
->id
, " failed");
1847 emergency_action(u
->manager
, u
->start_limit_action
,
1848 EMERGENCY_ACTION_IS_WATCHDOG
|EMERGENCY_ACTION_WARN
,
1849 u
->reboot_arg
, -1, reason
);
1854 static bool unit_verify_deps(Unit
*u
) {
1859 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined
1860 * with After=. We do not check Requires= or Requisite= here as they only should have an effect on
1861 * the job processing, but do not have any effect afterwards. We don't check BindsTo= dependencies
1862 * that are not used in conjunction with After= as for them any such check would make things entirely
1865 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
1867 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
))
1870 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
1871 log_unit_notice(u
, "Bound to unit %s, but unit isn't active.", other
->id
);
1879 /* Errors that aren't really errors:
1880 * -EALREADY: Unit is already started.
1881 * -ECOMM: Condition failed
1882 * -EAGAIN: An operation is already in progress. Retry later.
1884 * Errors that are real errors:
1885 * -EBADR: This unit type does not support starting.
1886 * -ECANCELED: Start limit hit, too many requests for now
1887 * -EPROTO: Assert failed
1888 * -EINVAL: Unit not loaded
1889 * -EOPNOTSUPP: Unit type not supported
1890 * -ENOLINK: The necessary dependencies are not fulfilled.
1891 * -ESTALE: This unit has been started before and can't be started a second time
1892 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1894 int unit_start(Unit
*u
, ActivationDetails
*details
) {
1895 UnitActiveState state
;
1901 /* Let's hold off running start jobs for mount units when /proc/self/mountinfo monitor is ratelimited. */
1902 if (UNIT_VTABLE(u
)->subsystem_ratelimited
) {
1903 r
= UNIT_VTABLE(u
)->subsystem_ratelimited(u
->manager
);
1910 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1911 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1912 * waiting is finished. */
1913 state
= unit_active_state(u
);
1914 if (UNIT_IS_ACTIVE_OR_RELOADING(state
))
1916 if (state
== UNIT_MAINTENANCE
)
1919 /* Units that aren't loaded cannot be started */
1920 if (u
->load_state
!= UNIT_LOADED
)
1923 /* Refuse starting scope units more than once */
1924 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_enter_timestamp
))
1927 /* If the conditions were unmet, don't do anything at all. If we already are activating this call might
1928 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1929 * recheck the condition in that case. */
1930 if (state
!= UNIT_ACTIVATING
&&
1931 !unit_test_condition(u
))
1932 return log_unit_debug_errno(u
, SYNTHETIC_ERRNO(ECOMM
), "Starting requested but condition not met. Not starting unit.");
1934 /* If the asserts failed, fail the entire job */
1935 if (state
!= UNIT_ACTIVATING
&&
1936 !unit_test_assert(u
))
1937 return log_unit_notice_errno(u
, SYNTHETIC_ERRNO(EPROTO
), "Starting requested but asserts failed.");
1939 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1940 * condition checks, so that we rather return condition check errors (which are usually not
1941 * considered a true failure) than "not supported" errors (which are considered a failure).
1943 if (!unit_type_supported(u
->type
))
1946 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1947 * should have taken care of this already, but let's check this here again. After all, our
1948 * dependencies might not be in effect anymore, due to a reload or due to an unmet condition. */
1949 if (!unit_verify_deps(u
))
1952 /* Forward to the main object, if we aren't it. */
1953 following
= unit_following(u
);
1955 log_unit_debug(u
, "Redirecting start request from %s to %s.", u
->id
, following
->id
);
1956 return unit_start(following
, details
);
1959 /* Check to make sure the unit isn't frozen */
1960 if (u
->freezer_state
!= FREEZER_RUNNING
)
1963 /* Check our ability to start early so that failure conditions don't cause us to enter a busy loop. */
1964 if (UNIT_VTABLE(u
)->can_start
) {
1965 r
= UNIT_VTABLE(u
)->can_start(u
);
1970 /* If it is stopped, but we cannot start it, then fail */
1971 if (!UNIT_VTABLE(u
)->start
)
1974 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1975 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1976 * waits for a holdoff timer to elapse before it will start again. */
1978 unit_add_to_dbus_queue(u
);
1980 if (!u
->activation_details
) /* Older details object wins */
1981 u
->activation_details
= activation_details_ref(details
);
1983 return UNIT_VTABLE(u
)->start(u
);
1986 bool unit_can_start(Unit
*u
) {
1989 if (u
->load_state
!= UNIT_LOADED
)
1992 if (!unit_type_supported(u
->type
))
1995 /* Scope units may be started only once */
1996 if (UNIT_VTABLE(u
)->once_only
&& dual_timestamp_is_set(&u
->inactive_exit_timestamp
))
1999 return !!UNIT_VTABLE(u
)->start
;
2002 bool unit_can_isolate(Unit
*u
) {
2005 return unit_can_start(u
) &&
2010 * -EBADR: This unit type does not support stopping.
2011 * -EALREADY: Unit is already stopped.
2012 * -EAGAIN: An operation is already in progress. Retry later.
2013 * -EDEADLK: Unit is frozen
2015 int unit_stop(Unit
*u
) {
2016 UnitActiveState state
;
2021 state
= unit_active_state(u
);
2022 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
2025 following
= unit_following(u
);
2027 log_unit_debug(u
, "Redirecting stop request from %s to %s.", u
->id
, following
->id
);
2028 return unit_stop(following
);
2031 /* Check to make sure the unit isn't frozen */
2032 if (u
->freezer_state
!= FREEZER_RUNNING
)
2035 if (!UNIT_VTABLE(u
)->stop
)
2038 unit_add_to_dbus_queue(u
);
2040 return UNIT_VTABLE(u
)->stop(u
);
2043 bool unit_can_stop(Unit
*u
) {
2046 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
2047 * Extrinsic units follow external state and they may stop following external state changes
2048 * (hence we return true here), but an attempt to do this through the manager will fail. */
2050 if (!unit_type_supported(u
->type
))
2056 return !!UNIT_VTABLE(u
)->stop
;
2060 * -EBADR: This unit type does not support reloading.
2061 * -ENOEXEC: Unit is not started.
2062 * -EAGAIN: An operation is already in progress. Retry later.
2063 * -EDEADLK: Unit is frozen.
2065 int unit_reload(Unit
*u
) {
2066 UnitActiveState state
;
2071 if (u
->load_state
!= UNIT_LOADED
)
2074 if (!unit_can_reload(u
))
2077 state
= unit_active_state(u
);
2078 if (state
== UNIT_RELOADING
)
2081 if (state
!= UNIT_ACTIVE
)
2082 return log_unit_warning_errno(u
, SYNTHETIC_ERRNO(ENOEXEC
), "Unit cannot be reloaded because it is inactive.");
2084 following
= unit_following(u
);
2086 log_unit_debug(u
, "Redirecting reload request from %s to %s.", u
->id
, following
->id
);
2087 return unit_reload(following
);
2090 /* Check to make sure the unit isn't frozen */
2091 if (u
->freezer_state
!= FREEZER_RUNNING
)
2094 unit_add_to_dbus_queue(u
);
2096 if (!UNIT_VTABLE(u
)->reload
) {
2097 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
2098 unit_notify(u
, unit_active_state(u
), unit_active_state(u
), /* reload_success = */ true);
2102 return UNIT_VTABLE(u
)->reload(u
);
2105 bool unit_can_reload(Unit
*u
) {
2108 if (UNIT_VTABLE(u
)->can_reload
)
2109 return UNIT_VTABLE(u
)->can_reload(u
);
2111 if (unit_has_dependency(u
, UNIT_ATOM_PROPAGATES_RELOAD_TO
, NULL
))
2114 return UNIT_VTABLE(u
)->reload
;
2117 bool unit_is_unneeded(Unit
*u
) {
2121 if (!u
->stop_when_unneeded
)
2124 /* Don't clean up while the unit is transitioning or is even inactive. */
2125 if (unit_active_state(u
) != UNIT_ACTIVE
)
2130 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_PINS_STOP_WHEN_UNNEEDED
) {
2131 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
2132 * restart, then don't clean this one up. */
2137 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
)))
2140 if (unit_will_restart(other
))
2147 bool unit_is_upheld_by_active(Unit
*u
, Unit
**ret_culprit
) {
2152 /* Checks if the unit needs to be started because it currently is not running, but some other unit
2153 * that is active declared an Uphold= dependencies on it */
2155 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u
)) || u
->job
) {
2157 *ret_culprit
= NULL
;
2161 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_START_STEADILY
) {
2165 if (UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other
))) {
2167 *ret_culprit
= other
;
2173 *ret_culprit
= NULL
;
2177 bool unit_is_bound_by_inactive(Unit
*u
, Unit
**ret_culprit
) {
2182 /* Checks whether this unit is bound to another unit that is inactive, i.e. whether we should stop
2183 * because the other unit is down. */
2185 if (unit_active_state(u
) != UNIT_ACTIVE
|| u
->job
) {
2186 /* Don't clean up while the unit is transitioning or is even inactive. */
2188 *ret_culprit
= NULL
;
2192 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_CANNOT_BE_ACTIVE_WITHOUT
) {
2196 if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other
))) {
2198 *ret_culprit
= other
;
2205 *ret_culprit
= NULL
;
2209 static void check_unneeded_dependencies(Unit
*u
) {
2213 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2215 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_STOP_WHEN_UNNEEDED_QUEUE
)
2216 unit_submit_to_stop_when_unneeded_queue(other
);
2219 static void check_uphold_dependencies(Unit
*u
) {
2223 /* Add all units this unit depends on to the queue that processes Uphold= behaviour. */
2225 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_START_WHEN_UPHELD_QUEUE
)
2226 unit_submit_to_start_when_upheld_queue(other
);
2229 static void check_bound_by_dependencies(Unit
*u
) {
2233 /* Add all units this unit depends on to the queue that processes BindsTo= stop behaviour. */
2235 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_ADD_CANNOT_BE_ACTIVE_WITHOUT_QUEUE
)
2236 unit_submit_to_stop_when_bound_queue(other
);
2239 static void retroactively_start_dependencies(Unit
*u
) {
2243 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)));
2245 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_REPLACE
) /* Requires= + BindsTo= */
2246 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2247 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2248 manager_add_job(u
->manager
, JOB_START
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2250 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_START_FAIL
) /* Wants= */
2251 if (!unit_has_dependency(u
, UNIT_ATOM_AFTER
, other
) &&
2252 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other
)))
2253 manager_add_job(u
->manager
, JOB_START
, other
, JOB_FAIL
, NULL
, NULL
, NULL
);
2255 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_START
) /* Conflicts= (and inverse) */
2256 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2257 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2260 static void retroactively_stop_dependencies(Unit
*u
) {
2264 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)));
2266 /* Pull down units which are bound to us recursively if enabled */
2267 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_RETROACTIVE_STOP_ON_STOP
) /* BoundBy= */
2268 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other
)))
2269 manager_add_job(u
->manager
, JOB_STOP
, other
, JOB_REPLACE
, NULL
, NULL
, NULL
);
2272 void unit_start_on_failure(
2274 const char *dependency_name
,
2275 UnitDependencyAtom atom
,
2283 assert(dependency_name
);
2284 assert(IN_SET(atom
, UNIT_ATOM_ON_SUCCESS
, UNIT_ATOM_ON_FAILURE
));
2286 /* Act on OnFailure= and OnSuccess= dependencies */
2288 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
2289 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2292 log_unit_info(u
, "Triggering %s dependencies.", dependency_name
);
2296 r
= manager_add_job(u
->manager
, JOB_START
, other
, job_mode
, NULL
, &error
, NULL
);
2298 log_unit_warning_errno(
2299 u
, r
, "Failed to enqueue %s job, ignoring: %s",
2300 dependency_name
, bus_error_message(&error
, r
));
2305 log_unit_debug(u
, "Triggering %s dependencies done (%i %s).",
2306 dependency_name
, n_jobs
, n_jobs
== 1 ? "job" : "jobs");
2309 void unit_trigger_notify(Unit
*u
) {
2314 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_TRIGGERED_BY
)
2315 if (UNIT_VTABLE(other
)->trigger_notify
)
2316 UNIT_VTABLE(other
)->trigger_notify(other
, u
);
/* Raises (numerically lowers) a syslog log level to LOG_NOTICE or LOG_INFO
 * when the corresponding consumption threshold was crossed. Never lowers the
 * severity of an already-higher level. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        if (condition_notice && log_level > LOG_NOTICE)
                return LOG_NOTICE;
        if (condition_info && log_level > LOG_INFO)
                return LOG_INFO;
        return log_level;
}
2327 static int unit_log_resources(Unit
*u
) {
2329 static const struct {
2330 const char *journal_field
;
2331 const char *message_suffix
;
2332 } memory_fields
[_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
+ 1] = {
2333 [CGROUP_MEMORY_PEAK
] = { "MEMORY_PEAK", "memory peak" },
2334 [CGROUP_MEMORY_SWAP_PEAK
] = { "MEMORY_SWAP_PEAK", "memory swap peak" },
2335 }, ip_fields
[_CGROUP_IP_ACCOUNTING_METRIC_MAX
] = {
2336 [CGROUP_IP_INGRESS_BYTES
] = { "IP_METRIC_INGRESS_BYTES", "incoming IP traffic" },
2337 [CGROUP_IP_EGRESS_BYTES
] = { "IP_METRIC_EGRESS_BYTES", "outgoing IP traffic" },
2338 [CGROUP_IP_INGRESS_PACKETS
] = { "IP_METRIC_INGRESS_PACKETS", NULL
},
2339 [CGROUP_IP_EGRESS_PACKETS
] = { "IP_METRIC_EGRESS_PACKETS", NULL
},
2340 }, io_fields
[_CGROUP_IO_ACCOUNTING_METRIC_MAX
] = {
2341 [CGROUP_IO_READ_BYTES
] = { "IO_METRIC_READ_BYTES", "read from disk" },
2342 [CGROUP_IO_WRITE_BYTES
] = { "IO_METRIC_WRITE_BYTES", "written to disk" },
2343 [CGROUP_IO_READ_OPERATIONS
] = { "IO_METRIC_READ_OPERATIONS", NULL
},
2344 [CGROUP_IO_WRITE_OPERATIONS
] = { "IO_METRIC_WRITE_OPERATIONS", NULL
},
2347 struct iovec
*iovec
= NULL
;
2349 _cleanup_free_
char *message
= NULL
, *t
= NULL
;
2350 nsec_t cpu_nsec
= NSEC_INFINITY
;
2351 int log_level
= LOG_DEBUG
; /* May be raised if resources consumed over a threshold */
2355 CLEANUP_ARRAY(iovec
, n_iovec
, iovec_array_free
);
2357 iovec
= new(struct iovec
, 1 + (_CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
+ 1) +
2358 _CGROUP_IP_ACCOUNTING_METRIC_MAX
+ _CGROUP_IO_ACCOUNTING_METRIC_MAX
+ 4);
2362 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2363 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2364 * information and the complete data in structured fields. */
2366 (void) unit_get_cpu_usage(u
, &cpu_nsec
);
2367 if (cpu_nsec
!= NSEC_INFINITY
) {
2368 /* Format the CPU time for inclusion in the structured log message */
2369 if (asprintf(&t
, "CPU_USAGE_NSEC=%" PRIu64
, cpu_nsec
) < 0)
2371 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2373 /* Format the CPU time for inclusion in the human language message string */
2374 if (strextendf_with_separator(&message
, ", ",
2375 "Consumed %s CPU time",
2376 FORMAT_TIMESPAN(cpu_nsec
/ NSEC_PER_USEC
, USEC_PER_MSEC
)) < 0)
2379 log_level
= raise_level(log_level
,
2380 cpu_nsec
> MENTIONWORTHY_CPU_NSEC
,
2381 cpu_nsec
> NOTICEWORTHY_CPU_NSEC
);
2384 for (CGroupMemoryAccountingMetric metric
= 0; metric
<= _CGROUP_MEMORY_ACCOUNTING_METRIC_CACHED_LAST
; metric
++) {
2385 uint64_t value
= UINT64_MAX
;
2387 assert(memory_fields
[metric
].journal_field
);
2388 assert(memory_fields
[metric
].message_suffix
);
2390 (void) unit_get_memory_accounting(u
, metric
, &value
);
2391 if (value
== UINT64_MAX
)
2394 if (asprintf(&t
, "%s=%" PRIu64
, memory_fields
[metric
].journal_field
, value
) < 0)
2396 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2398 /* If value is 0, we don't log it in the MESSAGE= field. */
2402 if (strextendf_with_separator(&message
, ", ", "%s %s",
2403 FORMAT_BYTES(value
), memory_fields
[metric
].message_suffix
) < 0)
2406 log_level
= raise_level(log_level
,
2407 value
> MENTIONWORTHY_MEMORY_BYTES
,
2408 value
> NOTICEWORTHY_MEMORY_BYTES
);
2411 for (CGroupIOAccountingMetric k
= 0; k
< _CGROUP_IO_ACCOUNTING_METRIC_MAX
; k
++) {
2412 uint64_t value
= UINT64_MAX
;
2414 assert(io_fields
[k
].journal_field
);
2416 (void) unit_get_io_accounting(u
, k
, k
> 0, &value
);
2417 if (value
== UINT64_MAX
)
2420 /* Format IO accounting data for inclusion in the structured log message */
2421 if (asprintf(&t
, "%s=%" PRIu64
, io_fields
[k
].journal_field
, value
) < 0)
2423 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2425 /* If value is 0, we don't log it in the MESSAGE= field. */
2429 /* Format the IO accounting data for inclusion in the human language message string, but only
2430 * for the bytes counters (and not for the operations counters) */
2431 if (io_fields
[k
].message_suffix
) {
2432 if (strextendf_with_separator(&message
, ", ", "%s %s",
2433 FORMAT_BYTES(value
), io_fields
[k
].message_suffix
) < 0)
2436 log_level
= raise_level(log_level
,
2437 value
> MENTIONWORTHY_IO_BYTES
,
2438 value
> NOTICEWORTHY_IO_BYTES
);
2442 for (CGroupIPAccountingMetric m
= 0; m
< _CGROUP_IP_ACCOUNTING_METRIC_MAX
; m
++) {
2443 uint64_t value
= UINT64_MAX
;
2445 assert(ip_fields
[m
].journal_field
);
2447 (void) unit_get_ip_accounting(u
, m
, &value
);
2448 if (value
== UINT64_MAX
)
2451 /* Format IP accounting data for inclusion in the structured log message */
2452 if (asprintf(&t
, "%s=%" PRIu64
, ip_fields
[m
].journal_field
, value
) < 0)
2454 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2456 /* If value is 0, we don't log it in the MESSAGE= field. */
2460 /* Format the IP accounting data for inclusion in the human language message string, but only
2461 * for the bytes counters (and not for the packets counters) */
2462 if (ip_fields
[m
].message_suffix
) {
2463 if (strextendf_with_separator(&message
, ", ", "%s %s",
2464 FORMAT_BYTES(value
), ip_fields
[m
].message_suffix
) < 0)
2467 log_level
= raise_level(log_level
,
2468 value
> MENTIONWORTHY_IP_BYTES
,
2469 value
> NOTICEWORTHY_IP_BYTES
);
2473 /* This check is here because it is the earliest point following all possible log_level assignments.
2474 * (If log_level is assigned anywhere after this point, move this check.) */
2475 if (!unit_log_level_test(u
, log_level
))
2478 /* Is there any accounting data available at all? */
2484 t
= strjoin("MESSAGE=", u
->id
, ": ", message
?: "Completed", ".");
2487 iovec
[n_iovec
++] = IOVEC_MAKE_STRING(TAKE_PTR(t
));
2489 if (!set_iovec_string_field(iovec
, &n_iovec
, "MESSAGE_ID=", SD_MESSAGE_UNIT_RESOURCES_STR
))
2492 if (!set_iovec_string_field(iovec
, &n_iovec
, u
->manager
->unit_log_field
, u
->id
))
2495 if (!set_iovec_string_field(iovec
, &n_iovec
, u
->manager
->invocation_log_field
, u
->invocation_id_string
))
2498 log_unit_struct_iovec(u
, log_level
, iovec
, n_iovec
);
2503 static void unit_update_on_console(Unit
*u
) {
2508 b
= unit_needs_console(u
);
2509 if (u
->on_console
== b
)
2514 manager_ref_console(u
->manager
);
2516 manager_unref_console(u
->manager
);
2519 static void unit_emit_audit_start(Unit
*u
) {
2522 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2525 /* Write audit record if we have just finished starting up */
2526 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ true);
2530 static void unit_emit_audit_stop(Unit
*u
, UnitActiveState state
) {
2533 if (UNIT_VTABLE(u
)->audit_start_message_type
<= 0)
2537 /* Write audit record if we have just finished shutting down */
2538 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2539 u
->in_audit
= false;
2541 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2542 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_start_message_type
, /* success= */ state
== UNIT_INACTIVE
);
2544 if (state
== UNIT_INACTIVE
)
2545 manager_send_unit_audit(u
->manager
, u
, UNIT_VTABLE(u
)->audit_stop_message_type
, /* success= */ true);
2549 static bool unit_process_job(Job
*j
, UnitActiveState ns
, bool reload_success
) {
2550 bool unexpected
= false;
2555 if (j
->state
== JOB_WAITING
)
2556 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2558 job_add_to_run_queue(j
);
2560 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2561 * hence needs to invalidate jobs. */
2566 case JOB_VERIFY_ACTIVE
:
2568 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2569 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2570 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_ACTIVATING
) {
2573 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2574 if (ns
== UNIT_FAILED
)
2575 result
= JOB_FAILED
;
2579 job_finish_and_invalidate(j
, result
, true, false);
2586 case JOB_RELOAD_OR_START
:
2587 case JOB_TRY_RELOAD
:
2589 if (j
->state
== JOB_RUNNING
) {
2590 if (ns
== UNIT_ACTIVE
)
2591 job_finish_and_invalidate(j
, reload_success
? JOB_DONE
: JOB_FAILED
, true, false);
2592 else if (!IN_SET(ns
, UNIT_ACTIVATING
, UNIT_RELOADING
)) {
2595 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2596 job_finish_and_invalidate(j
, ns
== UNIT_FAILED
? JOB_FAILED
: JOB_DONE
, true, false);
2604 case JOB_TRY_RESTART
:
2606 if (UNIT_IS_INACTIVE_OR_FAILED(ns
))
2607 job_finish_and_invalidate(j
, JOB_DONE
, true, false);
2608 else if (j
->state
== JOB_RUNNING
&& ns
!= UNIT_DEACTIVATING
) {
2610 job_finish_and_invalidate(j
, JOB_FAILED
, true, false);
2616 assert_not_reached();
2622 void unit_notify(Unit
*u
, UnitActiveState os
, UnitActiveState ns
, bool reload_success
) {
2627 assert(os
< _UNIT_ACTIVE_STATE_MAX
);
2628 assert(ns
< _UNIT_ACTIVE_STATE_MAX
);
2630 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2631 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2632 * remounted this function will be called too! */
2636 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2637 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2638 unit_add_to_dbus_queue(u
);
2640 /* Update systemd-oomd on the property/state change */
2642 /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
2644 * Also send an update whenever the unit goes active; this is to handle a case where an override file
2645 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
2646 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
2647 * have the information on the property. Thus, indiscriminately send an update. */
2648 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) || UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2649 (void) manager_varlink_send_managed_oom_update(u
);
2652 /* Update timestamps for state changes */
2653 if (!MANAGER_IS_RELOADING(m
)) {
2654 dual_timestamp_now(&u
->state_change_timestamp
);
2656 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && !UNIT_IS_INACTIVE_OR_FAILED(ns
))
2657 u
->inactive_exit_timestamp
= u
->state_change_timestamp
;
2658 else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_INACTIVE_OR_FAILED(ns
))
2659 u
->inactive_enter_timestamp
= u
->state_change_timestamp
;
2661 if (!UNIT_IS_ACTIVE_OR_RELOADING(os
) && UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2662 u
->active_enter_timestamp
= u
->state_change_timestamp
;
2663 else if (UNIT_IS_ACTIVE_OR_RELOADING(os
) && !UNIT_IS_ACTIVE_OR_RELOADING(ns
))
2664 u
->active_exit_timestamp
= u
->state_change_timestamp
;
2667 /* Keep track of failed units */
2668 (void) manager_update_failed_units(m
, u
, ns
== UNIT_FAILED
);
2670 /* Make sure the cgroup and state files are always removed when we become inactive */
2671 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2672 SET_FLAG(u
->markers
,
2673 (1u << UNIT_MARKER_NEEDS_RELOAD
)|(1u << UNIT_MARKER_NEEDS_RESTART
),
2675 unit_prune_cgroup(u
);
2676 unit_unlink_state_files(u
);
2677 } else if (ns
!= os
&& ns
== UNIT_RELOADING
)
2678 SET_FLAG(u
->markers
, 1u << UNIT_MARKER_NEEDS_RELOAD
, false);
2680 unit_update_on_console(u
);
2682 if (!MANAGER_IS_RELOADING(m
)) {
2685 /* Let's propagate state changes to the job */
2687 unexpected
= unit_process_job(u
->job
, ns
, reload_success
);
2691 /* If this state change happened without being requested by a job, then let's retroactively start or
2692 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2693 * additional jobs just because something is already activated. */
2696 if (UNIT_IS_INACTIVE_OR_FAILED(os
) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns
))
2697 retroactively_start_dependencies(u
);
2698 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os
) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns
))
2699 retroactively_stop_dependencies(u
);
2702 if (ns
!= os
&& ns
== UNIT_FAILED
) {
2703 log_unit_debug(u
, "Unit entered failed state.");
2704 unit_start_on_failure(u
, "OnFailure=", UNIT_ATOM_ON_FAILURE
, u
->on_failure_job_mode
);
2707 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
) && !UNIT_IS_ACTIVE_OR_RELOADING(os
)) {
2708 /* This unit just finished starting up */
2710 unit_emit_audit_start(u
);
2711 manager_send_unit_plymouth(m
, u
);
2714 if (UNIT_IS_INACTIVE_OR_FAILED(ns
) && !UNIT_IS_INACTIVE_OR_FAILED(os
)) {
2715 /* This unit just stopped/failed. */
2717 unit_emit_audit_stop(u
, ns
);
2718 unit_log_resources(u
);
2721 if (ns
== UNIT_INACTIVE
&& !IN_SET(os
, UNIT_FAILED
, UNIT_INACTIVE
, UNIT_MAINTENANCE
))
2722 unit_start_on_failure(u
, "OnSuccess=", UNIT_ATOM_ON_SUCCESS
, u
->on_success_job_mode
);
2725 manager_recheck_journal(m
);
2726 manager_recheck_dbus(m
);
2728 unit_trigger_notify(u
);
2730 if (!MANAGER_IS_RELOADING(m
)) {
2731 if (os
!= UNIT_FAILED
&& ns
== UNIT_FAILED
) {
2732 reason
= strjoina("unit ", u
->id
, " failed");
2733 emergency_action(m
, u
->failure_action
, 0, u
->reboot_arg
, unit_failure_action_exit_status(u
), reason
);
2734 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os
) && ns
== UNIT_INACTIVE
) {
2735 reason
= strjoina("unit ", u
->id
, " succeeded");
2736 emergency_action(m
, u
->success_action
, 0, u
->reboot_arg
, unit_success_action_exit_status(u
), reason
);
2740 /* And now, add the unit or depending units to various queues that will act on the new situation if
2741 * needed. These queues generally check for continuous state changes rather than events (like most of
2742 * the state propagation above), and do work deferred instead of instantly, since they typically
2743 * don't want to run during reloading, and usually involve checking combined state of multiple units
2746 if (UNIT_IS_INACTIVE_OR_FAILED(ns
)) {
2747 /* Stop unneeded units and bound-by units regardless if going down was expected or not */
2748 check_unneeded_dependencies(u
);
2749 check_bound_by_dependencies(u
);
2751 /* Maybe someone wants us to remain up? */
2752 unit_submit_to_start_when_upheld_queue(u
);
2754 /* Maybe the unit should be GC'ed now? */
2755 unit_add_to_gc_queue(u
);
2757 /* Maybe we can release some resources now? */
2758 unit_submit_to_release_resources_queue(u
);
2761 if (UNIT_IS_ACTIVE_OR_RELOADING(ns
)) {
2762 /* Start uphold units regardless if going up was expected or not */
2763 check_uphold_dependencies(u
);
2765 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2766 unit_submit_to_stop_when_unneeded_queue(u
);
2768 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens
2769 * when something BindsTo= to a Type=oneshot unit, as these units go directly from starting to
2770 * inactive, without ever entering started.) */
2771 unit_submit_to_stop_when_bound_queue(u
);
2775 int unit_watch_pidref(Unit
*u
, const PidRef
*pid
, bool exclusive
) {
2776 _cleanup_(pidref_freep
) PidRef
*pid_dup
= NULL
;
2779 /* Adds a specific PID to the set of PIDs this unit watches. */
2782 assert(pidref_is_set(pid
));
2784 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2785 * opportunity to remove any stalled references to this PID as they can be created
2786 * easily (when watching a process which is not our direct child). */
2788 manager_unwatch_pidref(u
->manager
, pid
);
2790 if (set_contains(u
->pids
, pid
)) /* early exit if already being watched */
2793 r
= pidref_dup(pid
, &pid_dup
);
2797 /* First, insert into the set of PIDs maintained by the unit */
2798 r
= set_ensure_put(&u
->pids
, &pidref_hash_ops_free
, pid_dup
);
2802 pid
= TAKE_PTR(pid_dup
); /* continue with our copy now that we have installed it properly in our set */
2804 /* Second, insert it into the simple global table, see if that works */
2805 r
= hashmap_ensure_put(&u
->manager
->watch_pids
, &pidref_hash_ops_free
, pid
, u
);
2809 /* OK, the key is already assigned to a different unit. That's fine, then add us via the second
2810 * hashmap that points to an array. */
2812 PidRef
*old_pid
= NULL
;
2813 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &old_pid
);
2815 /* Count entries in array */
2817 for (; array
&& array
[n
]; n
++)
2820 /* Allocate a new array */
2821 _cleanup_free_ Unit
**new_array
= new(Unit
*, n
+ 2);
2825 /* Append us to the end */
2826 memcpy_safe(new_array
, array
, sizeof(Unit
*) * n
);
2828 new_array
[n
+1] = NULL
;
2830 /* Make sure the hashmap is allocated */
2831 r
= hashmap_ensure_allocated(&u
->manager
->watch_pids_more
, &pidref_hash_ops_free
);
2835 /* Add or replace the old array */
2836 r
= hashmap_replace(u
->manager
->watch_pids_more
, old_pid
?: pid
, new_array
);
2840 TAKE_PTR(new_array
); /* Now part of the hash table */
2841 free(array
); /* Which means we can now delete the old version */
2845 int unit_watch_pid(Unit
*u
, pid_t pid
, bool exclusive
) {
2846 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
2850 assert(pid_is_valid(pid
));
2852 r
= pidref_set_pid(&pidref
, pid
);
2856 return unit_watch_pidref(u
, &pidref
, exclusive
);
2859 void unit_unwatch_pidref(Unit
*u
, const PidRef
*pid
) {
2861 assert(pidref_is_set(pid
));
2863 /* Remove from the set we maintain for this unit. (And destroy the returned pid eventually) */
2864 _cleanup_(pidref_freep
) PidRef
*pid1
= set_remove(u
->pids
, pid
);
2866 return; /* Early exit if this PID was never watched by us */
2868 /* First let's drop the unit from the simple hash table, if it is included there */
2869 PidRef
*pid2
= NULL
;
2870 Unit
*uu
= hashmap_get2(u
->manager
->watch_pids
, pid
, (void**) &pid2
);
2872 /* Quick validation: iff we are in the watch_pids table then the PidRef object must be the same as in our local pids set */
2873 assert((uu
== u
) == (pid1
== pid2
));
2876 /* OK, we are in the first table. Let's remove it there then, and we are done already. */
2877 assert_se(hashmap_remove_value(u
->manager
->watch_pids
, pid2
, uu
));
2879 /* We weren't in the first table, then let's consult the 2nd table that points to an array */
2880 PidRef
*pid3
= NULL
;
2881 Unit
**array
= hashmap_get2(u
->manager
->watch_pids_more
, pid
, (void**) &pid3
);
2883 /* Let's iterate through the array, dropping our own entry */
2884 size_t m
= 0, n
= 0;
2885 for (; array
&& array
[n
]; n
++)
2887 array
[m
++] = array
[n
];
2889 return; /* Not there */
2891 array
[m
] = NULL
; /* set trailing NULL marker on the new end */
2894 /* The array is now empty, remove the entire entry */
2895 assert_se(hashmap_remove_value(u
->manager
->watch_pids_more
, pid3
, array
));
2898 /* The array is not empty, but let's make sure the entry is not keyed by the PidRef
2899 * we will delete, but by the PidRef object of the Unit that is now first in the
2902 PidRef
*new_pid3
= ASSERT_PTR(set_get(array
[0]->pids
, pid
));
2903 assert_se(hashmap_replace(u
->manager
->watch_pids_more
, new_pid3
, array
) >= 0);
2908 void unit_unwatch_pid(Unit
*u
, pid_t pid
) {
2909 return unit_unwatch_pidref(u
, &PIDREF_MAKE_FROM_PID(pid
));
2912 void unit_unwatch_all_pids(Unit
*u
) {
2915 while (!set_isempty(u
->pids
))
2916 unit_unwatch_pidref(u
, set_first(u
->pids
));
2918 u
->pids
= set_free(u
->pids
);
2921 void unit_unwatch_pidref_done(Unit
*u
, PidRef
*pidref
) {
2924 if (!pidref_is_set(pidref
))
2927 unit_unwatch_pidref(u
, pidref
);
2928 pidref_done(pidref
);
2931 static void unit_tidy_watch_pids(Unit
*u
) {
2932 PidRef
*except1
, *except2
, *e
;
2936 /* Cleans dead PIDs from our list */
2938 except1
= unit_main_pid(u
);
2939 except2
= unit_control_pid(u
);
2941 SET_FOREACH(e
, u
->pids
) {
2942 if (pidref_equal(except1
, e
) || pidref_equal(except2
, e
))
2945 if (pidref_is_unwaited(e
) <= 0)
2946 unit_unwatch_pidref(u
, e
);
2950 static int on_rewatch_pids_event(sd_event_source
*s
, void *userdata
) {
2951 Unit
*u
= ASSERT_PTR(userdata
);
2955 unit_tidy_watch_pids(u
);
2956 unit_watch_all_pids(u
);
2958 /* If the PID set is empty now, then let's finish this off. */
2959 unit_synthesize_cgroup_empty_event(u
);
2964 int unit_enqueue_rewatch_pids(Unit
*u
) {
2969 if (!u
->cgroup_path
)
2972 r
= cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
);
2975 if (r
> 0) /* On unified we can use proper notifications */
2978 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2979 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2980 * involves issuing kill(pid, 0) on all processes we watch. */
2982 if (!u
->rewatch_pids_event_source
) {
2983 _cleanup_(sd_event_source_unrefp
) sd_event_source
*s
= NULL
;
2985 r
= sd_event_add_defer(u
->manager
->event
, &s
, on_rewatch_pids_event
, u
);
2987 return log_error_errno(r
, "Failed to allocate event source for tidying watched PIDs: %m");
2989 r
= sd_event_source_set_priority(s
, EVENT_PRIORITY_REWATCH_PIDS
);
2991 return log_error_errno(r
, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2993 (void) sd_event_source_set_description(s
, "tidy-watch-pids");
2995 u
->rewatch_pids_event_source
= TAKE_PTR(s
);
2998 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_ONESHOT
);
3000 return log_error_errno(r
, "Failed to enable event source for tidying watched PIDs: %m");
3005 void unit_dequeue_rewatch_pids(Unit
*u
) {
3009 if (!u
->rewatch_pids_event_source
)
3012 r
= sd_event_source_set_enabled(u
->rewatch_pids_event_source
, SD_EVENT_OFF
);
3014 log_warning_errno(r
, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
3016 u
->rewatch_pids_event_source
= sd_event_source_disable_unref(u
->rewatch_pids_event_source
);
3019 bool unit_job_is_applicable(Unit
*u
, JobType j
) {
3021 assert(j
>= 0 && j
< _JOB_TYPE_MAX
);
3025 case JOB_VERIFY_ACTIVE
:
3028 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
3029 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
3034 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
3035 * external events), hence it makes no sense to permit enqueuing such a request either. */
3036 return !u
->perpetual
;
3039 case JOB_TRY_RESTART
:
3040 return unit_can_stop(u
) && unit_can_start(u
);
3043 case JOB_TRY_RELOAD
:
3044 return unit_can_reload(u
);
3046 case JOB_RELOAD_OR_START
:
3047 return unit_can_reload(u
) && unit_can_start(u
);
3050 assert_not_reached();
3054 static Hashmap
*unit_get_dependency_hashmap_per_type(Unit
*u
, UnitDependency d
) {
3058 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3060 deps
= hashmap_get(u
->dependencies
, UNIT_DEPENDENCY_TO_PTR(d
));
3062 _cleanup_hashmap_free_ Hashmap
*h
= NULL
;
3064 h
= hashmap_new(NULL
);
3068 if (hashmap_ensure_put(&u
->dependencies
, NULL
, UNIT_DEPENDENCY_TO_PTR(d
), h
) < 0)
/* Which side(s) of a newly recorded dependency need a "properties changed" D-Bus notification. */
typedef enum NotifyDependencyFlags {
        NOTIFY_DEPENDENCY_UPDATE_FROM = 1 << 0, /* the unit the dependency originates from changed */
        NOTIFY_DEPENDENCY_UPDATE_TO   = 1 << 1, /* the unit the dependency points to changed */
} NotifyDependencyFlags;
3082 static int unit_add_dependency_impl(
3086 UnitDependencyMask mask
) {
3088 static const UnitDependency inverse_table
[_UNIT_DEPENDENCY_MAX
] = {
3089 [UNIT_REQUIRES
] = UNIT_REQUIRED_BY
,
3090 [UNIT_REQUISITE
] = UNIT_REQUISITE_OF
,
3091 [UNIT_WANTS
] = UNIT_WANTED_BY
,
3092 [UNIT_BINDS_TO
] = UNIT_BOUND_BY
,
3093 [UNIT_PART_OF
] = UNIT_CONSISTS_OF
,
3094 [UNIT_UPHOLDS
] = UNIT_UPHELD_BY
,
3095 [UNIT_REQUIRED_BY
] = UNIT_REQUIRES
,
3096 [UNIT_REQUISITE_OF
] = UNIT_REQUISITE
,
3097 [UNIT_WANTED_BY
] = UNIT_WANTS
,
3098 [UNIT_BOUND_BY
] = UNIT_BINDS_TO
,
3099 [UNIT_CONSISTS_OF
] = UNIT_PART_OF
,
3100 [UNIT_UPHELD_BY
] = UNIT_UPHOLDS
,
3101 [UNIT_CONFLICTS
] = UNIT_CONFLICTED_BY
,
3102 [UNIT_CONFLICTED_BY
] = UNIT_CONFLICTS
,
3103 [UNIT_BEFORE
] = UNIT_AFTER
,
3104 [UNIT_AFTER
] = UNIT_BEFORE
,
3105 [UNIT_ON_SUCCESS
] = UNIT_ON_SUCCESS_OF
,
3106 [UNIT_ON_SUCCESS_OF
] = UNIT_ON_SUCCESS
,
3107 [UNIT_ON_FAILURE
] = UNIT_ON_FAILURE_OF
,
3108 [UNIT_ON_FAILURE_OF
] = UNIT_ON_FAILURE
,
3109 [UNIT_TRIGGERS
] = UNIT_TRIGGERED_BY
,
3110 [UNIT_TRIGGERED_BY
] = UNIT_TRIGGERS
,
3111 [UNIT_PROPAGATES_RELOAD_TO
] = UNIT_RELOAD_PROPAGATED_FROM
,
3112 [UNIT_RELOAD_PROPAGATED_FROM
] = UNIT_PROPAGATES_RELOAD_TO
,
3113 [UNIT_PROPAGATES_STOP_TO
] = UNIT_STOP_PROPAGATED_FROM
,
3114 [UNIT_STOP_PROPAGATED_FROM
] = UNIT_PROPAGATES_STOP_TO
,
3115 [UNIT_JOINS_NAMESPACE_OF
] = UNIT_JOINS_NAMESPACE_OF
, /* symmetric! 👓 */
3116 [UNIT_REFERENCES
] = UNIT_REFERENCED_BY
,
3117 [UNIT_REFERENCED_BY
] = UNIT_REFERENCES
,
3118 [UNIT_IN_SLICE
] = UNIT_SLICE_OF
,
3119 [UNIT_SLICE_OF
] = UNIT_IN_SLICE
,
3122 Hashmap
*u_deps
, *other_deps
;
3123 UnitDependencyInfo u_info
, u_info_old
, other_info
, other_info_old
;
3124 NotifyDependencyFlags flags
= 0;
3129 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3130 assert(inverse_table
[d
] >= 0 && inverse_table
[d
] < _UNIT_DEPENDENCY_MAX
);
3131 assert(mask
> 0 && mask
< _UNIT_DEPENDENCY_MASK_FULL
);
3133 /* Ensure the following two hashmaps for each unit exist:
3134 * - the top-level dependency hashmap that maps UnitDependency → Hashmap(Unit* → UnitDependencyInfo),
3135 * - the inner hashmap, that maps Unit* → UnitDependencyInfo, for the specified dependency type. */
3136 u_deps
= unit_get_dependency_hashmap_per_type(u
, d
);
3140 other_deps
= unit_get_dependency_hashmap_per_type(other
, inverse_table
[d
]);
3144 /* Save the original dependency info. */
3145 u_info
.data
= u_info_old
.data
= hashmap_get(u_deps
, other
);
3146 other_info
.data
= other_info_old
.data
= hashmap_get(other_deps
, u
);
3148 /* Update dependency info. */
3149 u_info
.origin_mask
|= mask
;
3150 other_info
.destination_mask
|= mask
;
3152 /* Save updated dependency info. */
3153 if (u_info
.data
!= u_info_old
.data
) {
3154 r
= hashmap_replace(u_deps
, other
, u_info
.data
);
3158 flags
= NOTIFY_DEPENDENCY_UPDATE_FROM
;
3161 if (other_info
.data
!= other_info_old
.data
) {
3162 r
= hashmap_replace(other_deps
, u
, other_info
.data
);
3164 if (u_info
.data
!= u_info_old
.data
) {
3165 /* Restore the old dependency. */
3166 if (u_info_old
.data
)
3167 (void) hashmap_update(u_deps
, other
, u_info_old
.data
);
3169 hashmap_remove(u_deps
, other
);
3174 flags
|= NOTIFY_DEPENDENCY_UPDATE_TO
;
3180 int unit_add_dependency(
3185 UnitDependencyMask mask
) {
3187 UnitDependencyAtom a
;
3190 /* Helper to know whether sending a notification is necessary or not: if the dependency is already
3191 * there, no need to notify! */
3192 NotifyDependencyFlags notify_flags
;
3195 assert(d
>= 0 && d
< _UNIT_DEPENDENCY_MAX
);
3198 u
= unit_follow_merge(u
);
3199 other
= unit_follow_merge(other
);
3200 a
= unit_dependency_to_atom(d
);
3203 /* We won't allow dependencies on ourselves. We will not consider them an error however. */
3205 if (unit_should_warn_about_dependency(d
))
3206 log_unit_warning(u
, "Dependency %s=%s is dropped.",
3207 unit_dependency_to_string(d
), u
->id
);
3211 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3214 /* Note that ordering a device unit after a unit is permitted since it allows to start its job
3215 * running timeout at a specific time. */
3216 if (FLAGS_SET(a
, UNIT_ATOM_BEFORE
) && other
->type
== UNIT_DEVICE
) {
3217 log_unit_warning(u
, "Dependency Before=%s ignored (.device units cannot be delayed)", other
->id
);
3221 if (FLAGS_SET(a
, UNIT_ATOM_ON_FAILURE
) && !UNIT_VTABLE(u
)->can_fail
) {
3222 log_unit_warning(u
, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other
->id
, unit_type_to_string(u
->type
));
3226 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERS
) && !UNIT_VTABLE(u
)->can_trigger
)
3227 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3228 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(u
->type
));
3229 if (FLAGS_SET(a
, UNIT_ATOM_TRIGGERED_BY
) && !UNIT_VTABLE(other
)->can_trigger
)
3230 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3231 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other
->id
, unit_type_to_string(other
->type
));
3233 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && other
->type
!= UNIT_SLICE
)
3234 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3235 "Requested dependency Slice=%s refused (%s is not a slice unit).", other
->id
, other
->id
);
3236 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && u
->type
!= UNIT_SLICE
)
3237 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3238 "Requested dependency SliceOf=%s refused (%s is not a slice unit).", other
->id
, u
->id
);
3240 if (FLAGS_SET(a
, UNIT_ATOM_IN_SLICE
) && !UNIT_HAS_CGROUP_CONTEXT(u
))
3241 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3242 "Requested dependency Slice=%s refused (%s is not a cgroup unit).", other
->id
, u
->id
);
3244 if (FLAGS_SET(a
, UNIT_ATOM_SLICE_OF
) && !UNIT_HAS_CGROUP_CONTEXT(other
))
3245 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(EINVAL
),
3246 "Requested dependency SliceOf=%s refused (%s is not a cgroup unit).", other
->id
, other
->id
);
3248 r
= unit_add_dependency_impl(u
, d
, other
, mask
);
3253 if (add_reference
) {
3254 r
= unit_add_dependency_impl(u
, UNIT_REFERENCES
, other
, mask
);
3260 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_FROM
))
3261 unit_add_to_dbus_queue(u
);
3262 if (FLAGS_SET(notify_flags
, NOTIFY_DEPENDENCY_UPDATE_TO
))
3263 unit_add_to_dbus_queue(other
);
3265 return notify_flags
!= 0;
3268 int unit_add_two_dependencies(Unit
*u
, UnitDependency d
, UnitDependency e
, Unit
*other
, bool add_reference
, UnitDependencyMask mask
) {
3272 assert(d
>= 0 || e
>= 0);
3275 r
= unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3281 s
= unit_add_dependency(u
, e
, other
, add_reference
, mask
);
3286 return r
> 0 || s
> 0;
3289 static int resolve_template(Unit
*u
, const char *name
, char **buf
, const char **ret
) {
3297 if (!unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
)) {
3304 r
= unit_name_replace_instance(name
, u
->instance
, buf
);
3306 _cleanup_free_
char *i
= NULL
;
3308 r
= unit_name_to_prefix(u
->id
, &i
);
3312 r
= unit_name_replace_instance(name
, i
, buf
);
3321 int unit_add_dependency_by_name(Unit
*u
, UnitDependency d
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3322 _cleanup_free_
char *buf
= NULL
;
3329 r
= resolve_template(u
, name
, &buf
, &name
);
3333 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3336 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3340 return unit_add_dependency(u
, d
, other
, add_reference
, mask
);
3343 int unit_add_two_dependencies_by_name(Unit
*u
, UnitDependency d
, UnitDependency e
, const char *name
, bool add_reference
, UnitDependencyMask mask
) {
3344 _cleanup_free_
char *buf
= NULL
;
3351 r
= resolve_template(u
, name
, &buf
, &name
);
3355 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3358 r
= manager_load_unit(u
->manager
, name
, NULL
, NULL
, &other
);
3362 return unit_add_two_dependencies(u
, d
, e
, other
, add_reference
, mask
);
int set_unit_path(const char *p) {
        /* This is mostly for debug purposes: override the unit search path via the environment.
         * Returns 0 on success, negative errno on failure. */
        if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
                return -errno;

        return 0;
}
3370 char *unit_dbus_path(Unit
*u
) {
3376 return unit_dbus_path_from_name(u
->id
);
3379 char *unit_dbus_path_invocation_id(Unit
*u
) {
3382 if (sd_id128_is_null(u
->invocation_id
))
3385 return unit_dbus_path_from_name(u
->invocation_id_string
);
3388 int unit_set_invocation_id(Unit
*u
, sd_id128_t id
) {
3393 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
3395 if (sd_id128_equal(u
->invocation_id
, id
))
3398 if (!sd_id128_is_null(u
->invocation_id
))
3399 (void) hashmap_remove_value(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3401 if (sd_id128_is_null(id
)) {
3406 r
= hashmap_ensure_allocated(&u
->manager
->units_by_invocation_id
, &id128_hash_ops
);
3410 u
->invocation_id
= id
;
3411 sd_id128_to_string(id
, u
->invocation_id_string
);
3413 r
= hashmap_put(u
->manager
->units_by_invocation_id
, &u
->invocation_id
, u
);
3420 u
->invocation_id
= SD_ID128_NULL
;
3421 u
->invocation_id_string
[0] = 0;
3425 int unit_set_slice(Unit
*u
, Unit
*slice
) {
3431 /* Sets the unit slice if it has not been set before. Is extra careful, to only allow this for units
3432 * that actually have a cgroup context. Also, we don't allow to set this for slices (since the parent
3433 * slice is derived from the name). Make sure the unit we set is actually a slice. */
3435 if (!UNIT_HAS_CGROUP_CONTEXT(u
))
3438 if (u
->type
== UNIT_SLICE
)
3441 if (unit_active_state(u
) != UNIT_INACTIVE
)
3444 if (slice
->type
!= UNIT_SLICE
)
3447 if (unit_has_name(u
, SPECIAL_INIT_SCOPE
) &&
3448 !unit_has_name(slice
, SPECIAL_ROOT_SLICE
))
3451 if (UNIT_GET_SLICE(u
) == slice
)
3454 /* Disallow slice changes if @u is already bound to cgroups */
3455 if (UNIT_GET_SLICE(u
) && u
->cgroup_realized
)
3458 /* Remove any slices assigned prior; we should only have one UNIT_IN_SLICE dependency */
3459 if (UNIT_GET_SLICE(u
))
3460 unit_remove_dependencies(u
, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3462 r
= unit_add_dependency(u
, UNIT_IN_SLICE
, slice
, true, UNIT_DEPENDENCY_SLICE_PROPERTY
);
3469 int unit_set_default_slice(Unit
*u
) {
3470 const char *slice_name
;
3476 if (u
->manager
&& FLAGS_SET(u
->manager
->test_run_flags
, MANAGER_TEST_RUN_IGNORE_DEPENDENCIES
))
3479 if (UNIT_GET_SLICE(u
))
3483 _cleanup_free_
char *prefix
= NULL
, *escaped
= NULL
;
3485 /* Implicitly place all instantiated units in their
3486 * own per-template slice */
3488 r
= unit_name_to_prefix(u
->id
, &prefix
);
3492 /* The prefix is already escaped, but it might include
3493 * "-" which has a special meaning for slice units,
3494 * hence escape it here extra. */
3495 escaped
= unit_name_escape(prefix
);
3499 if (MANAGER_IS_SYSTEM(u
->manager
))
3500 slice_name
= strjoina("system-", escaped
, ".slice");
3502 slice_name
= strjoina("app-", escaped
, ".slice");
3504 } else if (unit_is_extrinsic(u
))
3505 /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
3506 * the root slice. They don't really belong in one of the subslices. */
3507 slice_name
= SPECIAL_ROOT_SLICE
;
3509 else if (MANAGER_IS_SYSTEM(u
->manager
))
3510 slice_name
= SPECIAL_SYSTEM_SLICE
;
3512 slice_name
= SPECIAL_APP_SLICE
;
3514 r
= manager_load_unit(u
->manager
, slice_name
, NULL
, NULL
, &slice
);
3518 return unit_set_slice(u
, slice
);
3521 const char *unit_slice_name(Unit
*u
) {
3525 slice
= UNIT_GET_SLICE(u
);
3532 int unit_load_related_unit(Unit
*u
, const char *type
, Unit
**_found
) {
3533 _cleanup_free_
char *t
= NULL
;
3540 r
= unit_name_change_suffix(u
->id
, type
, &t
);
3543 if (unit_has_name(u
, t
))
3546 r
= manager_load_unit(u
->manager
, t
, NULL
, NULL
, _found
);
3547 assert(r
< 0 || *_found
!= u
);
3551 static int signal_name_owner_changed(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3552 const char *new_owner
;
3553 Unit
*u
= ASSERT_PTR(userdata
);
3558 r
= sd_bus_message_read(message
, "sss", NULL
, NULL
, &new_owner
);
3560 bus_log_parse_error(r
);
3564 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3565 UNIT_VTABLE(u
)->bus_name_owner_change(u
, empty_to_null(new_owner
));
3570 static int get_name_owner_handler(sd_bus_message
*message
, void *userdata
, sd_bus_error
*error
) {
3571 const sd_bus_error
*e
;
3572 const char *new_owner
;
3573 Unit
*u
= ASSERT_PTR(userdata
);
3578 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3580 e
= sd_bus_message_get_error(message
);
3582 if (!sd_bus_error_has_name(e
, SD_BUS_ERROR_NAME_HAS_NO_OWNER
)) {
3583 r
= sd_bus_error_get_errno(e
);
3584 log_unit_error_errno(u
, r
,
3585 "Unexpected error response from GetNameOwner(): %s",
3586 bus_error_message(e
, r
));
3591 r
= sd_bus_message_read(message
, "s", &new_owner
);
3593 return bus_log_parse_error(r
);
3595 assert(!isempty(new_owner
));
3598 if (UNIT_VTABLE(u
)->bus_name_owner_change
)
3599 UNIT_VTABLE(u
)->bus_name_owner_change(u
, new_owner
);
3604 int unit_install_bus_match(Unit
*u
, sd_bus
*bus
, const char *name
) {
3605 _cleanup_(sd_bus_message_unrefp
) sd_bus_message
*m
= NULL
;
3607 usec_t timeout_usec
= 0;
3614 if (u
->match_bus_slot
|| u
->get_name_owner_slot
)
3617 /* NameOwnerChanged and GetNameOwner is used to detect when a service finished starting up. The dbus
3618 * call timeout shouldn't be earlier than that. If we couldn't get the start timeout, use the default
3619 * value defined above. */
3620 if (UNIT_VTABLE(u
)->get_timeout_start_usec
)
3621 timeout_usec
= UNIT_VTABLE(u
)->get_timeout_start_usec(u
);
3623 match
= strjoina("type='signal',"
3624 "sender='org.freedesktop.DBus',"
3625 "path='/org/freedesktop/DBus',"
3626 "interface='org.freedesktop.DBus',"
3627 "member='NameOwnerChanged',"
3628 "arg0='", name
, "'");
3630 r
= bus_add_match_full(
3635 signal_name_owner_changed
,
3642 r
= sd_bus_message_new_method_call(
3645 "org.freedesktop.DBus",
3646 "/org/freedesktop/DBus",
3647 "org.freedesktop.DBus",
3652 r
= sd_bus_message_append(m
, "s", name
);
3656 r
= sd_bus_call_async(
3658 &u
->get_name_owner_slot
,
3660 get_name_owner_handler
,
3665 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3669 log_unit_debug(u
, "Watching D-Bus name '%s'.", name
);
3673 int unit_watch_bus_name(Unit
*u
, const char *name
) {
3679 /* Watch a specific name on the bus. We only support one unit
3680 * watching each name for now. */
3682 if (u
->manager
->api_bus
) {
3683 /* If the bus is already available, install the match directly.
3684 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3685 r
= unit_install_bus_match(u
, u
->manager
->api_bus
, name
);
3687 return log_warning_errno(r
, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name
);
3690 r
= hashmap_put(u
->manager
->watch_bus
, name
, u
);
3692 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3693 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3694 return log_warning_errno(r
, "Failed to put bus name to hashmap: %m");
3700 void unit_unwatch_bus_name(Unit
*u
, const char *name
) {
3704 (void) hashmap_remove_value(u
->manager
->watch_bus
, name
, u
);
3705 u
->match_bus_slot
= sd_bus_slot_unref(u
->match_bus_slot
);
3706 u
->get_name_owner_slot
= sd_bus_slot_unref(u
->get_name_owner_slot
);
3709 int unit_add_node_dependency(Unit
*u
, const char *what
, UnitDependency dep
, UnitDependencyMask mask
) {
3710 _cleanup_free_
char *e
= NULL
;
3716 /* Adds in links to the device node that this unit is based on */
3720 if (!is_device_path(what
))
3723 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3724 if (!unit_type_supported(UNIT_DEVICE
))
3727 r
= unit_name_from_path(what
, ".device", &e
);
3731 r
= manager_load_unit(u
->manager
, e
, NULL
, NULL
, &device
);
3735 if (dep
== UNIT_REQUIRES
&& device_shall_be_bound_by(device
, u
))
3736 dep
= UNIT_BINDS_TO
;
3738 return unit_add_two_dependencies(u
, UNIT_AFTER
,
3739 MANAGER_IS_SYSTEM(u
->manager
) ? dep
: UNIT_WANTS
,
3740 device
, true, mask
);
3743 int unit_add_blockdev_dependency(Unit
*u
, const char *what
, UnitDependencyMask mask
) {
3744 _cleanup_free_
char *escaped
= NULL
, *target
= NULL
;
3752 if (!path_startswith(what
, "/dev/"))
3755 /* If we don't support devices, then also don't bother with blockdev@.target */
3756 if (!unit_type_supported(UNIT_DEVICE
))
3759 r
= unit_name_path_escape(what
, &escaped
);
3763 r
= unit_name_build("blockdev", escaped
, ".target", &target
);
3767 return unit_add_dependency_by_name(u
, UNIT_AFTER
, target
, true, mask
);
3770 int unit_coldplug(Unit
*u
) {
3775 /* Make sure we don't enter a loop, when coldplugging recursively. */
3779 u
->coldplugged
= true;
3781 STRV_FOREACH(i
, u
->deserialized_refs
)
3782 RET_GATHER(r
, bus_unit_track_add_name(u
, *i
));
3784 u
->deserialized_refs
= strv_free(u
->deserialized_refs
);
3786 if (UNIT_VTABLE(u
)->coldplug
)
3787 RET_GATHER(r
, UNIT_VTABLE(u
)->coldplug(u
));
3790 RET_GATHER(r
, job_coldplug(u
->job
));
3792 RET_GATHER(r
, job_coldplug(u
->nop_job
));
3794 unit_modify_nft_set(u
, /* add = */ true);
3798 void unit_catchup(Unit
*u
) {
3801 if (UNIT_VTABLE(u
)->catchup
)
3802 UNIT_VTABLE(u
)->catchup(u
);
3804 unit_cgroup_catchup(u
);
3807 static bool fragment_mtime_newer(const char *path
, usec_t mtime
, bool path_masked
) {
3813 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3814 * are never out-of-date. */
3815 if (PATH_STARTSWITH_SET(path
, "/proc", "/sys"))
3818 if (stat(path
, &st
) < 0)
3819 /* What, cannot access this anymore? */
3823 /* For masked files check if they are still so */
3824 return !null_or_empty(&st
);
3826 /* For non-empty files check the mtime */
3827 return timespec_load(&st
.st_mtim
) > mtime
;
3832 bool unit_need_daemon_reload(Unit
*u
) {
3833 _cleanup_strv_free_
char **dropins
= NULL
;
3838 if (u
->manager
->unit_file_state_outdated
)
3841 /* For unit files, we allow masking… */
3842 if (fragment_mtime_newer(u
->fragment_path
, u
->fragment_mtime
,
3843 u
->load_state
== UNIT_MASKED
))
3846 /* Source paths should not be masked… */
3847 if (fragment_mtime_newer(u
->source_path
, u
->source_mtime
, false))
3850 if (u
->load_state
== UNIT_LOADED
)
3851 (void) unit_find_dropin_paths(u
, &dropins
);
3852 if (!strv_equal(u
->dropin_paths
, dropins
))
3855 /* … any drop-ins that are masked are simply omitted from the list. */
3856 STRV_FOREACH(path
, u
->dropin_paths
)
3857 if (fragment_mtime_newer(*path
, u
->dropin_mtime
, false))
3863 void unit_reset_failed(Unit
*u
) {
3866 if (UNIT_VTABLE(u
)->reset_failed
)
3867 UNIT_VTABLE(u
)->reset_failed(u
);
3869 ratelimit_reset(&u
->start_ratelimit
);
3870 u
->start_limit_hit
= false;
3873 Unit
*unit_following(Unit
*u
) {
3876 if (UNIT_VTABLE(u
)->following
)
3877 return UNIT_VTABLE(u
)->following(u
);
3882 bool unit_stop_pending(Unit
*u
) {
3885 /* This call does check the current state of the unit. It's
3886 * hence useful to be called from state change calls of the
3887 * unit itself, where the state isn't updated yet. This is
3888 * different from unit_inactive_or_pending() which checks both
3889 * the current state and for a queued job. */
3891 return unit_has_job_type(u
, JOB_STOP
);
3894 bool unit_inactive_or_pending(Unit
*u
) {
3897 /* Returns true if the unit is inactive or going down */
3899 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u
)))
3902 if (unit_stop_pending(u
))
3908 bool unit_active_or_pending(Unit
*u
) {
3911 /* Returns true if the unit is active or going up */
3913 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u
)))
3917 IN_SET(u
->job
->type
, JOB_START
, JOB_RELOAD_OR_START
, JOB_RESTART
))
3923 bool unit_will_restart_default(Unit
*u
) {
3926 return unit_has_job_type(u
, JOB_START
);
3929 bool unit_will_restart(Unit
*u
) {
3932 if (!UNIT_VTABLE(u
)->will_restart
)
3935 return UNIT_VTABLE(u
)->will_restart(u
);
3938 void unit_notify_cgroup_oom(Unit
*u
, bool managed_oom
) {
3941 if (UNIT_VTABLE(u
)->notify_cgroup_oom
)
3942 UNIT_VTABLE(u
)->notify_cgroup_oom(u
, managed_oom
);
3945 static int unit_pid_set(Unit
*u
, Set
**pid_set
) {
3951 set_clear(*pid_set
); /* This updates input. */
3953 /* Exclude the main/control pids from being killed via the cgroup */
3956 FOREACH_ARGUMENT(pid
, unit_main_pid(u
), unit_control_pid(u
))
3957 if (pidref_is_set(pid
)) {
3958 r
= set_ensure_put(pid_set
, NULL
, PID_TO_PTR(pid
->pid
));
3966 static int kill_common_log(const PidRef
*pid
, int signo
, void *userdata
) {
3967 _cleanup_free_
char *comm
= NULL
;
3968 Unit
*u
= ASSERT_PTR(userdata
);
3970 (void) pidref_get_comm(pid
, &comm
);
3972 log_unit_info(u
, "Sending signal SIG%s to process " PID_FMT
" (%s) on client request.",
3973 signal_to_string(signo
), pid
->pid
, strna(comm
));
3978 static int kill_or_sigqueue(PidRef
* pidref
, int signo
, int code
, int value
) {
3979 assert(pidref_is_set(pidref
));
3980 assert(SIGNAL_VALID(signo
));
3985 log_debug("Killing " PID_FMT
" with signal SIG%s.", pidref
->pid
, signal_to_string(signo
));
3986 return pidref_kill(pidref
, signo
);
3989 log_debug("Enqueuing value %i to " PID_FMT
" on signal SIG%s.", value
, pidref
->pid
, signal_to_string(signo
));
3990 return pidref_sigqueue(pidref
, signo
, value
);
3993 assert_not_reached();
3997 static int unit_kill_one(
4004 sd_bus_error
*ret_error
) {
4011 if (!pidref_is_set(pidref
))
4014 _cleanup_free_
char *comm
= NULL
;
4015 (void) pidref_get_comm(pidref
, &comm
);
4017 r
= kill_or_sigqueue(pidref
, signo
, code
, value
);
4021 /* Report this failure both to the logs and to the client */
4023 sd_bus_error_set_errnof(
4025 "Failed to send signal SIG%s to %s process " PID_FMT
" (%s): %m",
4026 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4028 return log_unit_warning_errno(
4030 "Failed to send signal SIG%s to %s process " PID_FMT
" (%s) on client request: %m",
4031 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4034 log_unit_info(u
, "Sent signal SIG%s to %s process " PID_FMT
" (%s) on client request.",
4035 signal_to_string(signo
), type
, pidref
->pid
, strna(comm
));
4036 return 1; /* killed */
4045 sd_bus_error
*ret_error
) {
4047 PidRef
*main_pid
, *control_pid
;
4048 bool killed
= false;
4051 /* This is the common implementation for explicit user-requested killing of unit processes, shared by
4052 * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
4053 * stop a service ourselves. */
4057 assert(who
< _KILL_WHO_MAX
);
4058 assert(SIGNAL_VALID(signo
));
4059 assert(IN_SET(code
, SI_USER
, SI_QUEUE
));
4061 main_pid
= unit_main_pid(u
);
4062 control_pid
= unit_control_pid(u
);
4064 if (!UNIT_HAS_CGROUP_CONTEXT(u
) && !main_pid
&& !control_pid
)
4065 return sd_bus_error_setf(ret_error
, SD_BUS_ERROR_NOT_SUPPORTED
, "Unit type does not support process killing.");
4067 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
)) {
4069 return sd_bus_error_setf(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no main processes", unit_type_to_string(u
->type
));
4070 if (!pidref_is_set(main_pid
))
4071 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No main process to kill");
4074 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
)) {
4076 return sd_bus_error_setf(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "%s units have no control processes", unit_type_to_string(u
->type
));
4077 if (!pidref_is_set(control_pid
))
4078 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No control process to kill");
4081 if (IN_SET(who
, KILL_CONTROL
, KILL_CONTROL_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4082 r
= unit_kill_one(u
, control_pid
, "control", signo
, code
, value
, ret_error
);
4084 killed
= killed
|| r
> 0;
4087 if (IN_SET(who
, KILL_MAIN
, KILL_MAIN_FAIL
, KILL_ALL
, KILL_ALL_FAIL
)) {
4088 r
= unit_kill_one(u
, main_pid
, "main", signo
, code
, value
, ret
>= 0 ? ret_error
: NULL
);
4090 killed
= killed
|| r
> 0;
4093 /* Note: if we shall enqueue rather than kill we won't do this via the cgroup mechanism, since it
4094 * doesn't really make much sense (and given that enqueued values are a relatively expensive
4095 * resource, and we shouldn't allow us to be subjects for such allocation sprees) */
4096 if (IN_SET(who
, KILL_ALL
, KILL_ALL_FAIL
) && u
->cgroup_path
&& code
== SI_USER
) {
4097 _cleanup_set_free_ Set
*pid_set
= NULL
;
4099 /* Exclude the main/control pids from being killed via the cgroup */
4100 r
= unit_pid_set(u
, &pid_set
);
4104 r
= cg_kill_recursive(u
->cgroup_path
, signo
, 0, pid_set
, kill_common_log
, u
);
4105 if (r
< 0 && !IN_SET(r
, -ESRCH
, -ENOENT
)) {
4107 sd_bus_error_set_errnof(
4109 "Failed to send signal SIG%s to auxiliary processes: %m",
4110 signal_to_string(signo
));
4112 log_unit_warning_errno(
4114 "Failed to send signal SIG%s to auxiliary processes on client request: %m",
4115 signal_to_string(signo
));
4120 killed
= killed
|| r
>= 0;
4123 /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
4124 if (ret
>= 0 && !killed
&& IN_SET(who
, KILL_ALL_FAIL
, KILL_CONTROL_FAIL
, KILL_MAIN_FAIL
))
4125 return sd_bus_error_set_const(ret_error
, BUS_ERROR_NO_SUCH_PROCESS
, "No matching processes to kill");
4130 int unit_following_set(Unit
*u
, Set
**s
) {
4134 if (UNIT_VTABLE(u
)->following_set
)
4135 return UNIT_VTABLE(u
)->following_set(u
, s
);
4141 UnitFileState
unit_get_unit_file_state(Unit
*u
) {
4146 if (u
->unit_file_state
< 0 && u
->fragment_path
) {
4147 r
= unit_file_get_state(
4148 u
->manager
->runtime_scope
,
4151 &u
->unit_file_state
);
4153 u
->unit_file_state
= UNIT_FILE_BAD
;
4156 return u
->unit_file_state
;
4159 PresetAction
unit_get_unit_file_preset(Unit
*u
) {
4164 if (u
->unit_file_preset
< 0 && u
->fragment_path
) {
4165 _cleanup_free_
char *bn
= NULL
;
4167 r
= path_extract_filename(u
->fragment_path
, &bn
);
4169 return (u
->unit_file_preset
= r
);
4171 if (r
== O_DIRECTORY
)
4172 return (u
->unit_file_preset
= -EISDIR
);
4174 u
->unit_file_preset
= unit_file_query_preset(
4175 u
->manager
->runtime_scope
,
4181 return u
->unit_file_preset
;
4184 Unit
* unit_ref_set(UnitRef
*ref
, Unit
*source
, Unit
*target
) {
4190 unit_ref_unset(ref
);
4192 ref
->source
= source
;
4193 ref
->target
= target
;
4194 LIST_PREPEND(refs_by_target
, target
->refs_by_target
, ref
);
4198 void unit_ref_unset(UnitRef
*ref
) {
4204 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4205 * be unreferenced now. */
4206 unit_add_to_gc_queue(ref
->target
);
4208 LIST_REMOVE(refs_by_target
, ref
->target
->refs_by_target
, ref
);
4209 ref
->source
= ref
->target
= NULL
;
4212 static int user_from_unit_name(Unit
*u
, char **ret
) {
4214 static const uint8_t hash_key
[] = {
4215 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4216 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4219 _cleanup_free_
char *n
= NULL
;
4222 r
= unit_name_to_prefix(u
->id
, &n
);
4226 if (valid_user_group_name(n
, 0)) {
4231 /* If we can't use the unit name as a user name, then let's hash it and use that */
4232 if (asprintf(ret
, "_du%016" PRIx64
, siphash24(n
, strlen(n
), hash_key
)) < 0)
4238 int unit_patch_contexts(Unit
*u
) {
4245 /* Patch in the manager defaults into the exec and cgroup
4246 * contexts, _after_ the rest of the settings have been
4249 ec
= unit_get_exec_context(u
);
4251 /* This only copies in the ones that need memory */
4252 for (unsigned i
= 0; i
< _RLIMIT_MAX
; i
++)
4253 if (u
->manager
->defaults
.rlimit
[i
] && !ec
->rlimit
[i
]) {
4254 ec
->rlimit
[i
] = newdup(struct rlimit
, u
->manager
->defaults
.rlimit
[i
], 1);
4259 if (MANAGER_IS_USER(u
->manager
) &&
4260 !ec
->working_directory
) {
4262 r
= get_home_dir(&ec
->working_directory
);
4266 /* Allow user services to run, even if the
4267 * home directory is missing */
4268 ec
->working_directory_missing_ok
= true;
4271 if (ec
->private_devices
)
4272 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_MKNOD
) | (UINT64_C(1) << CAP_SYS_RAWIO
));
4274 if (ec
->protect_kernel_modules
)
4275 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYS_MODULE
);
4277 if (ec
->protect_kernel_logs
)
4278 ec
->capability_bounding_set
&= ~(UINT64_C(1) << CAP_SYSLOG
);
4280 if (ec
->protect_clock
)
4281 ec
->capability_bounding_set
&= ~((UINT64_C(1) << CAP_SYS_TIME
) | (UINT64_C(1) << CAP_WAKE_ALARM
));
4283 if (ec
->dynamic_user
) {
4285 r
= user_from_unit_name(u
, &ec
->user
);
4291 ec
->group
= strdup(ec
->user
);
4296 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4297 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4300 ec
->private_tmp
= true;
4301 ec
->remove_ipc
= true;
4302 ec
->protect_system
= PROTECT_SYSTEM_STRICT
;
4303 if (ec
->protect_home
== PROTECT_HOME_NO
)
4304 ec
->protect_home
= PROTECT_HOME_READ_ONLY
;
4306 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4308 ec
->no_new_privileges
= true;
4309 ec
->restrict_suid_sgid
= true;
4312 for (ExecDirectoryType dt
= 0; dt
< _EXEC_DIRECTORY_TYPE_MAX
; dt
++)
4313 exec_directory_sort(ec
->directories
+ dt
);
4316 cc
= unit_get_cgroup_context(u
);
4319 if (ec
->private_devices
&&
4320 cc
->device_policy
== CGROUP_DEVICE_POLICY_AUTO
)
4321 cc
->device_policy
= CGROUP_DEVICE_POLICY_CLOSED
;
4323 /* Only add these if needed, as they imply that everything else is blocked. */
4324 if (cc
->device_policy
!= CGROUP_DEVICE_POLICY_AUTO
|| cc
->device_allow
) {
4325 if (ec
->root_image
|| ec
->mount_images
) {
4327 /* When RootImage= or MountImages= is specified, the following devices are touched. */
4328 FOREACH_STRING(p
, "/dev/loop-control", "/dev/mapper/control") {
4329 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
4333 FOREACH_STRING(p
, "block-loop", "block-blkext", "block-device-mapper") {
4334 r
= cgroup_context_add_device_allow(cc
, p
, CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
|CGROUP_DEVICE_MKNOD
);
4339 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
4340 * Same for mapper and verity. */
4341 FOREACH_STRING(p
, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
4342 r
= unit_add_two_dependencies_by_name(u
, UNIT_AFTER
, UNIT_WANTS
, p
, true, UNIT_DEPENDENCY_FILE
);
4348 if (ec
->protect_clock
) {
4349 r
= cgroup_context_add_device_allow(cc
, "char-rtc", CGROUP_DEVICE_READ
);
4354 /* If there are encrypted credentials we might need to access the TPM. */
4355 if (exec_context_has_encrypted_credentials(ec
)) {
4356 r
= cgroup_context_add_device_allow(cc
, "char-tpm", CGROUP_DEVICE_READ
|CGROUP_DEVICE_WRITE
);
4366 ExecContext
*unit_get_exec_context(const Unit
*u
) {
4373 offset
= UNIT_VTABLE(u
)->exec_context_offset
;
4377 return (ExecContext
*) ((uint8_t*) u
+ offset
);
4380 KillContext
*unit_get_kill_context(Unit
*u
) {
4387 offset
= UNIT_VTABLE(u
)->kill_context_offset
;
4391 return (KillContext
*) ((uint8_t*) u
+ offset
);
4394 CGroupContext
*unit_get_cgroup_context(Unit
*u
) {
4400 offset
= UNIT_VTABLE(u
)->cgroup_context_offset
;
4404 return (CGroupContext
*) ((uint8_t*) u
+ offset
);
4407 ExecRuntime
*unit_get_exec_runtime(Unit
*u
) {
4413 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4417 return *(ExecRuntime
**) ((uint8_t*) u
+ offset
);
4420 static const char* unit_drop_in_dir(Unit
*u
, UnitWriteFlags flags
) {
4423 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4426 if (u
->transient
) /* Redirect drop-ins for transient units always into the transient directory. */
4427 return u
->manager
->lookup_paths
.transient
;
4429 if (flags
& UNIT_PERSISTENT
)
4430 return u
->manager
->lookup_paths
.persistent_control
;
4432 if (flags
& UNIT_RUNTIME
)
4433 return u
->manager
->lookup_paths
.runtime_control
;
4438 const char* unit_escape_setting(const char *s
, UnitWriteFlags flags
, char **buf
) {
4440 assert(popcount(flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
| UNIT_ESCAPE_C
)) <= 1);
4443 _cleanup_free_
char *t
= NULL
;
4445 /* Returns a string with any escaping done. If no escaping was necessary, *buf is set to NULL, and
4446 * the input pointer is returned as-is. If an allocation was needed, the return buffer pointer is
4447 * written to *buf. This means the return value always contains a properly escaped version, but *buf
4448 * only contains a pointer if an allocation was made. Callers can use this to optimize memory
4451 if (flags
& UNIT_ESCAPE_SPECIFIERS
) {
4452 t
= specifier_escape(s
);
4459 /* We either do C-escaping or shell-escaping, to additionally escape characters that we parse for
4460 * ExecStart= and friends, i.e. '$' and quotes. */
4462 if (flags
& (UNIT_ESCAPE_EXEC_SYNTAX_ENV
| UNIT_ESCAPE_EXEC_SYNTAX
)) {
4465 if (flags
& UNIT_ESCAPE_EXEC_SYNTAX_ENV
) {
4466 t2
= strreplace(s
, "$", "$$");
4469 free_and_replace(t
, t2
);
4472 t2
= shell_escape(t
?: s
, "\"");
4475 free_and_replace(t
, t2
);
4479 } else if (flags
& UNIT_ESCAPE_C
) {
4485 free_and_replace(t
, t2
);
4494 char* unit_concat_strv(char **l
, UnitWriteFlags flags
) {
4495 _cleanup_free_
char *result
= NULL
;
4498 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command
4499 * lines in a way suitable for ExecStart= stanzas. */
4501 STRV_FOREACH(i
, l
) {
4502 _cleanup_free_
char *buf
= NULL
;
4507 p
= unit_escape_setting(*i
, flags
, &buf
);
4511 a
= (n
> 0) + 1 + strlen(p
) + 1; /* separating space + " + entry + " */
4512 if (!GREEDY_REALLOC(result
, n
+ a
+ 1))
4526 if (!GREEDY_REALLOC(result
, n
+ 1))
4531 return TAKE_PTR(result
);
4534 int unit_write_setting(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *data
) {
4535 _cleanup_free_
char *p
= NULL
, *q
= NULL
, *escaped
= NULL
;
4536 const char *dir
, *wrapped
;
4543 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4546 data
= unit_escape_setting(data
, flags
, &escaped
);
4550 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4551 * previous section header is the same */
4553 if (flags
& UNIT_PRIVATE
) {
4554 if (!UNIT_VTABLE(u
)->private_section
)
4557 if (!u
->transient_file
|| u
->last_section_private
< 0)
4558 data
= strjoina("[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4559 else if (u
->last_section_private
== 0)
4560 data
= strjoina("\n[", UNIT_VTABLE(u
)->private_section
, "]\n", data
);
4562 if (!u
->transient_file
|| u
->last_section_private
< 0)
4563 data
= strjoina("[Unit]\n", data
);
4564 else if (u
->last_section_private
> 0)
4565 data
= strjoina("\n[Unit]\n", data
);
4568 if (u
->transient_file
) {
4569 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4570 * write to the transient unit file. */
4571 fputs(data
, u
->transient_file
);
4573 if (!endswith(data
, "\n"))
4574 fputc('\n', u
->transient_file
);
4576 /* Remember which section we wrote this entry to */
4577 u
->last_section_private
= !!(flags
& UNIT_PRIVATE
);
4581 dir
= unit_drop_in_dir(u
, flags
);
4585 wrapped
= strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4586 "# or an equivalent operation. Do not edit.\n",
4590 r
= drop_in_file(dir
, u
->id
, 50, name
, &p
, &q
);
4594 (void) mkdir_p_label(p
, 0755);
4596 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4597 * recreate the cache after every drop-in we write. */
4598 if (u
->manager
->unit_path_cache
) {
4599 r
= set_put_strdup(&u
->manager
->unit_path_cache
, p
);
4604 r
= write_string_file_atomic_label(q
, wrapped
);
4608 r
= strv_push(&u
->dropin_paths
, q
);
4613 strv_uniq(u
->dropin_paths
);
4615 u
->dropin_mtime
= now(CLOCK_REALTIME
);
4620 int unit_write_settingf(Unit
*u
, UnitWriteFlags flags
, const char *name
, const char *format
, ...) {
4621 _cleanup_free_
char *p
= NULL
;
4629 if (UNIT_WRITE_FLAGS_NOOP(flags
))
4632 va_start(ap
, format
);
4633 r
= vasprintf(&p
, format
, ap
);
4639 return unit_write_setting(u
, flags
, name
, p
);
4642 int unit_make_transient(Unit
*u
) {
4643 _cleanup_free_
char *path
= NULL
;
4648 if (!UNIT_VTABLE(u
)->can_transient
)
4651 (void) mkdir_p_label(u
->manager
->lookup_paths
.transient
, 0755);
4653 path
= path_join(u
->manager
->lookup_paths
.transient
, u
->id
);
4657 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4658 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4661 f
= fopen(path
, "we");
4666 safe_fclose(u
->transient_file
);
4667 u
->transient_file
= f
;
4669 free_and_replace(u
->fragment_path
, path
);
4671 u
->source_path
= mfree(u
->source_path
);
4672 u
->dropin_paths
= strv_free(u
->dropin_paths
);
4673 u
->fragment_mtime
= u
->source_mtime
= u
->dropin_mtime
= 0;
4675 u
->load_state
= UNIT_STUB
;
4677 u
->transient
= true;
4679 unit_add_to_dbus_queue(u
);
4680 unit_add_to_gc_queue(u
);
4682 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4688 static int log_kill(const PidRef
*pid
, int sig
, void *userdata
) {
4689 _cleanup_free_
char *comm
= NULL
;
4691 assert(pidref_is_set(pid
));
4693 (void) pidref_get_comm(pid
, &comm
);
4695 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4696 only, like for example systemd's own PAM stub process. */
4697 if (comm
&& comm
[0] == '(')
4698 /* Although we didn't log anything, as this callback is used in unit_kill_context we must return 1
4699 * here to let the manager know that a process was killed. */
4702 log_unit_notice(userdata
,
4703 "Killing process " PID_FMT
" (%s) with signal SIG%s.",
4706 signal_to_string(sig
));
4711 static int operation_to_signal(
4712 const KillContext
*c
,
4714 bool *ret_noteworthy
) {
4720 case KILL_TERMINATE
:
4721 case KILL_TERMINATE_AND_LOG
:
4722 *ret_noteworthy
= false;
4723 return c
->kill_signal
;
4726 *ret_noteworthy
= false;
4727 return restart_kill_signal(c
);
4730 *ret_noteworthy
= true;
4731 return c
->final_kill_signal
;
4734 *ret_noteworthy
= true;
4735 return c
->watchdog_signal
;
4738 assert_not_reached();
4742 static int unit_kill_context_one(
4744 const PidRef
*pidref
,
4749 cg_kill_log_func_t log_func
) {
4756 /* This returns > 0 if it makes sense to wait for SIGCHLD for the process, == 0 if not. */
4758 if (!pidref_is_set(pidref
))
4762 log_func(pidref
, sig
, u
);
4764 r
= pidref_kill_and_sigcont(pidref
, sig
);
4768 _cleanup_free_
char *comm
= NULL
;
4770 (void) pidref_get_comm(pidref
, &comm
);
4771 return log_unit_warning_errno(u
, r
, "Failed to kill %s process " PID_FMT
" (%s), ignoring: %m", type
, pidref
->pid
, strna(comm
));
4775 (void) pidref_kill(pidref
, SIGHUP
);
4780 int unit_kill_context(Unit
*u
, KillOperation k
) {
4781 bool wait_for_exit
= false, send_sighup
;
4782 cg_kill_log_func_t log_func
= NULL
;
4787 /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
4788 * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
4789 * which is used for user-requested killing of unit processes. */
4791 KillContext
*c
= unit_get_kill_context(u
);
4792 if (!c
|| c
->kill_mode
== KILL_NONE
)
4796 sig
= operation_to_signal(c
, k
, ¬eworthy
);
4798 log_func
= log_kill
;
4802 IN_SET(k
, KILL_TERMINATE
, KILL_TERMINATE_AND_LOG
) &&
4806 PidRef
*main_pid
= unit_main_pid_full(u
, &is_alien
);
4807 r
= unit_kill_context_one(u
, main_pid
, "main", is_alien
, sig
, send_sighup
, log_func
);
4808 wait_for_exit
= wait_for_exit
|| r
> 0;
4810 r
= unit_kill_context_one(u
, unit_control_pid(u
), "control", /* is_alien = */ false, sig
, send_sighup
, log_func
);
4811 wait_for_exit
= wait_for_exit
|| r
> 0;
4813 if (u
->cgroup_path
&&
4814 (c
->kill_mode
== KILL_CONTROL_GROUP
|| (c
->kill_mode
== KILL_MIXED
&& k
== KILL_KILL
))) {
4815 _cleanup_set_free_ Set
*pid_set
= NULL
;
4817 /* Exclude the main/control pids from being killed via the cgroup */
4818 r
= unit_pid_set(u
, &pid_set
);
4822 r
= cg_kill_recursive(
4825 CGROUP_SIGCONT
|CGROUP_IGNORE_SELF
,
4829 if (!IN_SET(r
, -EAGAIN
, -ESRCH
, -ENOENT
))
4830 log_unit_warning_errno(u
, r
, "Failed to kill control group %s, ignoring: %m", empty_to_root(u
->cgroup_path
));
4834 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4835 * we are running in a container or if this is a delegation unit, simply because cgroup
4836 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4837 * of containers it can be confused easily by left-over directories in the cgroup — which
4838 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4839 * there we get proper events. Hence rely on them. */
4841 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER
) > 0 ||
4842 (detect_container() == 0 && !unit_cgroup_delegate(u
)))
4843 wait_for_exit
= true;
4846 r
= unit_pid_set(u
, &pid_set
);
4850 (void) cg_kill_recursive(
4855 /* kill_log= */ NULL
,
4856 /* userdata= */ NULL
);
4861 return wait_for_exit
;
4864 int unit_add_mounts_for(Unit
*u
, const char *path
, UnitDependencyMask mask
, UnitMountDependencyType type
) {
4865 Hashmap
**unit_map
, **manager_map
;
4870 assert(type
>= 0 && type
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
);
4872 unit_map
= &u
->mounts_for
[type
];
4873 manager_map
= &u
->manager
->units_needing_mounts_for
[type
];
4875 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these
4876 * paths in the unit (from the path to the UnitDependencyInfo structure indicating how to the
4877 * dependency came to be). However, we build a prefix table for all possible prefixes so that new
4878 * appearing mount units can easily determine which units to make themselves a dependency of. */
4880 if (!path_is_absolute(path
))
4883 if (hashmap_contains(*unit_map
, path
)) /* Exit quickly if the path is already covered. */
4886 /* Use the canonical form of the path as the stored key. We call path_is_normalized()
4887 * only after simplification, since path_is_normalized() rejects paths with '.'.
4888 * path_is_normalized() also verifies that the path fits in PATH_MAX. */
4889 _cleanup_free_
char *p
= NULL
;
4890 r
= path_simplify_alloc(path
, &p
);
4895 if (!path_is_normalized(path
))
4898 UnitDependencyInfo di
= {
4902 r
= hashmap_ensure_put(unit_map
, &path_hash_ops
, p
, di
.data
);
4906 TAKE_PTR(p
); /* path remains a valid pointer to the string stored in the hashmap */
4908 char prefix
[strlen(path
) + 1];
4909 PATH_FOREACH_PREFIX_MORE(prefix
, path
) {
4912 x
= hashmap_get(*manager_map
, prefix
);
4914 _cleanup_free_
char *q
= NULL
;
4916 r
= hashmap_ensure_allocated(manager_map
, &path_hash_ops
);
4928 r
= hashmap_put(*manager_map
, q
, x
);
4944 int unit_setup_exec_runtime(Unit
*u
) {
4945 _cleanup_(exec_shared_runtime_unrefp
) ExecSharedRuntime
*esr
= NULL
;
4946 _cleanup_(dynamic_creds_unrefp
) DynamicCreds
*dcreds
= NULL
;
4947 _cleanup_set_free_ Set
*units
= NULL
;
4954 offset
= UNIT_VTABLE(u
)->exec_runtime_offset
;
4957 /* Check if there already is an ExecRuntime for this unit? */
4958 rt
= (ExecRuntime
**) ((uint8_t*) u
+ offset
);
4962 ec
= ASSERT_PTR(unit_get_exec_context(u
));
4964 r
= unit_get_transitive_dependency_set(u
, UNIT_ATOM_JOINS_NAMESPACE_OF
, &units
);
4968 /* Try to get it from somebody else */
4969 SET_FOREACH(other
, units
) {
4970 r
= exec_shared_runtime_acquire(u
->manager
, NULL
, other
->id
, false, &esr
);
4978 r
= exec_shared_runtime_acquire(u
->manager
, ec
, u
->id
, true, &esr
);
4983 if (ec
->dynamic_user
) {
4984 r
= dynamic_creds_make(u
->manager
, ec
->user
, ec
->group
, &dcreds
);
4989 r
= exec_runtime_make(u
, ec
, esr
, dcreds
, rt
);
4999 bool unit_type_supported(UnitType t
) {
5000 static int8_t cache
[_UNIT_TYPE_MAX
] = {}; /* -1: disabled, 1: enabled: 0: don't know */
5003 assert(t
>= 0 && t
< _UNIT_TYPE_MAX
);
5005 if (cache
[t
] == 0) {
5008 e
= strjoina("SYSTEMD_SUPPORT_", unit_type_to_string(t
));
5010 r
= getenv_bool(ascii_strupper(e
));
5011 if (r
< 0 && r
!= -ENXIO
)
5012 log_debug_errno(r
, "Failed to parse $%s, ignoring: %m", e
);
5014 cache
[t
] = r
== 0 ? -1 : 1;
5019 if (!unit_vtable
[t
]->supported
)
5022 return unit_vtable
[t
]->supported();
5025 void unit_warn_if_dir_nonempty(Unit
*u
, const char* where
) {
5031 if (!unit_log_level_test(u
, LOG_NOTICE
))
5034 r
= dir_is_empty(where
, /* ignore_hidden_or_backup= */ false);
5035 if (r
> 0 || r
== -ENOTDIR
)
5038 log_unit_warning_errno(u
, r
, "Failed to check directory %s: %m", where
);
5042 log_unit_struct(u
, LOG_NOTICE
,
5043 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5044 LOG_UNIT_INVOCATION_ID(u
),
5045 LOG_UNIT_MESSAGE(u
, "Directory %s to mount over is not empty, mounting anyway.", where
),
5049 int unit_fail_if_noncanonical(Unit
*u
, const char* where
) {
5050 _cleanup_free_
char *canonical_where
= NULL
;
5056 r
= chase(where
, NULL
, CHASE_NONEXISTENT
, &canonical_where
, NULL
);
5058 log_unit_debug_errno(u
, r
, "Failed to check %s for symlinks, ignoring: %m", where
);
5062 /* We will happily ignore a trailing slash (or any redundant slashes) */
5063 if (path_equal(where
, canonical_where
))
5066 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5067 log_unit_struct(u
, LOG_ERR
,
5068 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR
,
5069 LOG_UNIT_INVOCATION_ID(u
),
5070 LOG_UNIT_MESSAGE(u
, "Mount path %s is not canonical (contains a symlink).", where
),
5076 bool unit_is_pristine(Unit
*u
) {
5079 /* Check if the unit already exists or is already around, in a number of different ways. Note that to
5080 * cater for unit types such as slice, we are generally fine with units that are marked UNIT_LOADED
5081 * even though nothing was actually loaded, as those unit types don't require a file on disk.
5083 * Note that we don't check for drop-ins here, because we allow drop-ins for transient units
5084 * identically to non-transient units, both unit-specific and hierarchical. E.g. for a-b-c.service:
5085 * service.d/….conf, a-.service.d/….conf, a-b-.service.d/….conf, a-b-c.service.d/….conf.
5088 return IN_SET(u
->load_state
, UNIT_NOT_FOUND
, UNIT_LOADED
) &&
5089 !u
->fragment_path
&&
5095 PidRef
* unit_control_pid(Unit
*u
) {
5098 if (UNIT_VTABLE(u
)->control_pid
)
5099 return UNIT_VTABLE(u
)->control_pid(u
);
5104 PidRef
* unit_main_pid_full(Unit
*u
, bool *ret_is_alien
) {
5107 if (UNIT_VTABLE(u
)->main_pid
)
5108 return UNIT_VTABLE(u
)->main_pid(u
, ret_is_alien
);
5111 *ret_is_alien
= false;
5115 static void unit_modify_user_nft_set(Unit
*u
, bool add
, NFTSetSource source
, uint32_t element
) {
5120 if (!MANAGER_IS_SYSTEM(u
->manager
))
5124 c
= unit_get_cgroup_context(u
);
5128 if (!u
->manager
->fw_ctx
) {
5129 r
= fw_ctx_new_full(&u
->manager
->fw_ctx
, /* init_tables= */ false);
5133 assert(u
->manager
->fw_ctx
);
5136 FOREACH_ARRAY(nft_set
, c
->nft_set_context
.sets
, c
->nft_set_context
.n_sets
) {
5137 if (nft_set
->source
!= source
)
5140 r
= nft_set_element_modify_any(u
->manager
->fw_ctx
, add
, nft_set
->nfproto
, nft_set
->table
, nft_set
->set
, &element
, sizeof(element
));
5142 log_warning_errno(r
, "Failed to %s NFT set: family %s, table %s, set %s, ID %u, ignoring: %m",
5143 add
? "add" : "delete", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5145 log_debug("%s NFT set: family %s, table %s, set %s, ID %u",
5146 add
? "Added" : "Deleted", nfproto_to_string(nft_set
->nfproto
), nft_set
->table
, nft_set
->set
, element
);
5150 static void unit_unref_uid_internal(
5154 void (*_manager_unref_uid
)(Manager
*m
, uid_t uid
, bool destroy_now
)) {
5158 assert(_manager_unref_uid
);
5160 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5161 * gid_t are actually the same time, with the same validity rules.
5163 * Drops a reference to UID/GID from a unit. */
5165 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5166 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5168 if (!uid_is_valid(*ref_uid
))
5171 _manager_unref_uid(u
->manager
, *ref_uid
, destroy_now
);
5172 *ref_uid
= UID_INVALID
;
5175 static void unit_unref_uid(Unit
*u
, bool destroy_now
) {
5178 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_USER
, u
->ref_uid
);
5180 unit_unref_uid_internal(u
, &u
->ref_uid
, destroy_now
, manager_unref_uid
);
5183 static void unit_unref_gid(Unit
*u
, bool destroy_now
) {
5186 unit_modify_user_nft_set(u
, /* add = */ false, NFT_SET_SOURCE_GROUP
, u
->ref_gid
);
5188 unit_unref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, destroy_now
, manager_unref_gid
);
5191 void unit_unref_uid_gid(Unit
*u
, bool destroy_now
) {
5194 unit_unref_uid(u
, destroy_now
);
5195 unit_unref_gid(u
, destroy_now
);
5198 static int unit_ref_uid_internal(
5203 int (*_manager_ref_uid
)(Manager
*m
, uid_t uid
, bool clean_ipc
)) {
5209 assert(uid_is_valid(uid
));
5210 assert(_manager_ref_uid
);
5212 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5213 * are actually the same type, and have the same validity rules.
5215 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5216 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5219 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
5220 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
5222 if (*ref_uid
== uid
)
5225 if (uid_is_valid(*ref_uid
)) /* Already set? */
5228 r
= _manager_ref_uid(u
->manager
, uid
, clean_ipc
);
5236 static int unit_ref_uid(Unit
*u
, uid_t uid
, bool clean_ipc
) {
5237 return unit_ref_uid_internal(u
, &u
->ref_uid
, uid
, clean_ipc
, manager_ref_uid
);
5240 static int unit_ref_gid(Unit
*u
, gid_t gid
, bool clean_ipc
) {
5241 return unit_ref_uid_internal(u
, (uid_t
*) &u
->ref_gid
, (uid_t
) gid
, clean_ipc
, manager_ref_gid
);
5244 static int unit_ref_uid_gid_internal(Unit
*u
, uid_t uid
, gid_t gid
, bool clean_ipc
) {
5249 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5251 if (uid_is_valid(uid
)) {
5252 r
= unit_ref_uid(u
, uid
, clean_ipc
);
5257 if (gid_is_valid(gid
)) {
5258 q
= unit_ref_gid(u
, gid
, clean_ipc
);
5261 unit_unref_uid(u
, false);
5267 return r
> 0 || q
> 0;
5270 int unit_ref_uid_gid(Unit
*u
, uid_t uid
, gid_t gid
) {
5276 c
= unit_get_exec_context(u
);
5278 r
= unit_ref_uid_gid_internal(u
, uid
, gid
, c
? c
->remove_ipc
: false);
5280 return log_unit_warning_errno(u
, r
, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5282 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_USER
, uid
);
5283 unit_modify_user_nft_set(u
, /* add = */ true, NFT_SET_SOURCE_GROUP
, gid
);
5288 void unit_notify_user_lookup(Unit
*u
, uid_t uid
, gid_t gid
) {
5293 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5294 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5295 * objects when no service references the UID/GID anymore. */
5297 r
= unit_ref_uid_gid(u
, uid
, gid
);
5299 unit_add_to_dbus_queue(u
);
5302 int unit_acquire_invocation_id(Unit
*u
) {
5308 r
= sd_id128_randomize(&id
);
5310 return log_unit_error_errno(u
, r
, "Failed to generate invocation ID for unit: %m");
5312 r
= unit_set_invocation_id(u
, id
);
5314 return log_unit_error_errno(u
, r
, "Failed to set invocation ID for unit: %m");
5316 unit_add_to_dbus_queue(u
);
5320 int unit_set_exec_params(Unit
*u
, ExecParameters
*p
) {
5326 /* Copy parameters from manager */
5327 r
= manager_get_effective_environment(u
->manager
, &p
->environment
);
5331 p
->runtime_scope
= u
->manager
->runtime_scope
;
5333 r
= strdup_or_null(manager_get_confirm_spawn(u
->manager
), &p
->confirm_spawn
);
5337 p
->cgroup_supported
= u
->manager
->cgroup_supported
;
5338 p
->prefix
= u
->manager
->prefix
;
5339 SET_FLAG(p
->flags
, EXEC_PASS_LOG_UNIT
|EXEC_CHOWN_DIRECTORIES
, MANAGER_IS_SYSTEM(u
->manager
));
5341 /* Copy parameters from unit */
5342 p
->cgroup_path
= u
->cgroup_path
;
5343 SET_FLAG(p
->flags
, EXEC_CGROUP_DELEGATE
, unit_cgroup_delegate(u
));
5345 p
->received_credentials_directory
= u
->manager
->received_credentials_directory
;
5346 p
->received_encrypted_credentials_directory
= u
->manager
->received_encrypted_credentials_directory
;
5348 p
->shall_confirm_spawn
= u
->manager
->confirm_spawn
;
5350 p
->fallback_smack_process_label
= u
->manager
->defaults
.smack_process_label
;
5352 if (u
->manager
->restrict_fs
&& p
->bpf_restrict_fs_map_fd
< 0) {
5353 int fd
= bpf_restrict_fs_map_fd(u
);
5357 p
->bpf_restrict_fs_map_fd
= fd
;
5360 p
->user_lookup_fd
= u
->manager
->user_lookup_fds
[1];
5362 p
->cgroup_id
= u
->cgroup_id
;
5363 p
->invocation_id
= u
->invocation_id
;
5364 sd_id128_to_string(p
->invocation_id
, p
->invocation_id_string
);
5365 p
->unit_id
= strdup(u
->id
);
5372 int unit_fork_helper_process(Unit
*u
, const char *name
, PidRef
*ret
) {
5379 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5380 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5382 (void) unit_realize_cgroup(u
);
5384 r
= safe_fork(name
, FORK_REOPEN_LOG
|FORK_DEATHSIG_SIGTERM
, &pid
);
5388 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
5393 q
= pidref_set_pid(&pidref
, pid
);
5397 *ret
= TAKE_PIDREF(pidref
);
5403 (void) default_signals(SIGNALS_CRASH_HANDLER
, SIGNALS_IGNORE
);
5404 (void) ignore_signals(SIGPIPE
);
5406 if (u
->cgroup_path
) {
5407 r
= cg_attach_everywhere(u
->manager
->cgroup_supported
, u
->cgroup_path
, 0, NULL
, NULL
);
5409 log_unit_error_errno(u
, r
, "Failed to join unit cgroup %s: %m", empty_to_root(u
->cgroup_path
));
5417 int unit_fork_and_watch_rm_rf(Unit
*u
, char **paths
, PidRef
*ret_pid
) {
5418 _cleanup_(pidref_done
) PidRef pid
= PIDREF_NULL
;
5424 r
= unit_fork_helper_process(u
, "(sd-rmrf)", &pid
);
5428 int ret
= EXIT_SUCCESS
;
5430 STRV_FOREACH(i
, paths
) {
5431 r
= rm_rf(*i
, REMOVE_ROOT
|REMOVE_PHYSICAL
|REMOVE_MISSING_OK
);
5433 log_error_errno(r
, "Failed to remove '%s': %m", *i
);
5441 r
= unit_watch_pidref(u
, &pid
, /* exclusive= */ true);
5445 *ret_pid
= TAKE_PIDREF(pid
);
5449 static void unit_update_dependency_mask(Hashmap
*deps
, Unit
*other
, UnitDependencyInfo di
) {
5453 if (di
.origin_mask
== 0 && di
.destination_mask
== 0)
5454 /* No bit set anymore, let's drop the whole entry */
5455 assert_se(hashmap_remove(deps
, other
));
5457 /* Mask was reduced, let's update the entry */
5458 assert_se(hashmap_update(deps
, other
, di
.data
) == 0);
5461 void unit_remove_dependencies(Unit
*u
, UnitDependencyMask mask
) {
5465 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5470 HASHMAP_FOREACH(deps
, u
->dependencies
) {
5474 UnitDependencyInfo di
;
5479 HASHMAP_FOREACH_KEY(di
.data
, other
, deps
) {
5480 Hashmap
*other_deps
;
5482 if (FLAGS_SET(~mask
, di
.origin_mask
))
5485 di
.origin_mask
&= ~mask
;
5486 unit_update_dependency_mask(deps
, other
, di
);
5488 /* We updated the dependency from our unit to the other unit now. But most
5489 * dependencies imply a reverse dependency. Hence, let's delete that one
5490 * too. For that we go through all dependency types on the other unit and
5491 * delete all those which point to us and have the right mask set. */
5493 HASHMAP_FOREACH(other_deps
, other
->dependencies
) {
5494 UnitDependencyInfo dj
;
5496 dj
.data
= hashmap_get(other_deps
, u
);
5497 if (FLAGS_SET(~mask
, dj
.destination_mask
))
5500 dj
.destination_mask
&= ~mask
;
5501 unit_update_dependency_mask(other_deps
, u
, dj
);
5504 unit_add_to_gc_queue(other
);
5506 /* The unit 'other' may not be wanted by the unit 'u'. */
5507 unit_submit_to_stop_when_unneeded_queue(other
);
5517 static int unit_get_invocation_path(Unit
*u
, char **ret
) {
5524 if (MANAGER_IS_SYSTEM(u
->manager
))
5525 p
= strjoin("/run/systemd/units/invocation:", u
->id
);
5527 _cleanup_free_
char *user_path
= NULL
;
5528 r
= xdg_user_runtime_dir(&user_path
, "/systemd/units/invocation:");
5531 p
= strjoin(user_path
, u
->id
);
5541 static int unit_export_invocation_id(Unit
*u
) {
5542 _cleanup_free_
char *p
= NULL
;
5547 if (u
->exported_invocation_id
)
5550 if (sd_id128_is_null(u
->invocation_id
))
5553 r
= unit_get_invocation_path(u
, &p
);
5555 return log_unit_debug_errno(u
, r
, "Failed to get invocation path: %m");
5557 r
= symlink_atomic_label(u
->invocation_id_string
, p
);
5559 return log_unit_debug_errno(u
, r
, "Failed to create invocation ID symlink %s: %m", p
);
5561 u
->exported_invocation_id
= true;
5565 static int unit_export_log_level_max(Unit
*u
, const ExecContext
*c
) {
5573 if (u
->exported_log_level_max
)
5576 if (c
->log_level_max
< 0)
5579 assert(c
->log_level_max
<= 7);
5581 buf
[0] = '0' + c
->log_level_max
;
5584 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5585 r
= symlink_atomic(buf
, p
);
5587 return log_unit_debug_errno(u
, r
, "Failed to create maximum log level symlink %s: %m", p
);
5589 u
->exported_log_level_max
= true;
5593 static int unit_export_log_extra_fields(Unit
*u
, const ExecContext
*c
) {
5594 _cleanup_close_
int fd
= -EBADF
;
5595 struct iovec
*iovec
;
5602 if (u
->exported_log_extra_fields
)
5605 if (c
->n_log_extra_fields
<= 0)
5608 sizes
= newa(le64_t
, c
->n_log_extra_fields
);
5609 iovec
= newa(struct iovec
, c
->n_log_extra_fields
* 2);
5611 for (size_t i
= 0; i
< c
->n_log_extra_fields
; i
++) {
5612 sizes
[i
] = htole64(c
->log_extra_fields
[i
].iov_len
);
5614 iovec
[i
*2] = IOVEC_MAKE(sizes
+ i
, sizeof(le64_t
));
5615 iovec
[i
*2+1] = c
->log_extra_fields
[i
];
5618 p
= strjoina("/run/systemd/units/log-extra-fields:", u
->id
);
5619 pattern
= strjoina(p
, ".XXXXXX");
5621 fd
= mkostemp_safe(pattern
);
5623 return log_unit_debug_errno(u
, fd
, "Failed to create extra fields file %s: %m", p
);
5625 n
= writev(fd
, iovec
, c
->n_log_extra_fields
*2);
5627 r
= log_unit_debug_errno(u
, errno
, "Failed to write extra fields: %m");
5631 (void) fchmod(fd
, 0644);
5633 if (rename(pattern
, p
) < 0) {
5634 r
= log_unit_debug_errno(u
, errno
, "Failed to rename extra fields file: %m");
5638 u
->exported_log_extra_fields
= true;
5642 (void) unlink(pattern
);
5646 static int unit_export_log_ratelimit_interval(Unit
*u
, const ExecContext
*c
) {
5647 _cleanup_free_
char *buf
= NULL
;
5654 if (u
->exported_log_ratelimit_interval
)
5657 if (c
->log_ratelimit_interval_usec
== 0)
5660 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5662 if (asprintf(&buf
, "%" PRIu64
, c
->log_ratelimit_interval_usec
) < 0)
5665 r
= symlink_atomic(buf
, p
);
5667 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit interval symlink %s: %m", p
);
5669 u
->exported_log_ratelimit_interval
= true;
5673 static int unit_export_log_ratelimit_burst(Unit
*u
, const ExecContext
*c
) {
5674 _cleanup_free_
char *buf
= NULL
;
5681 if (u
->exported_log_ratelimit_burst
)
5684 if (c
->log_ratelimit_burst
== 0)
5687 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5689 if (asprintf(&buf
, "%u", c
->log_ratelimit_burst
) < 0)
5692 r
= symlink_atomic(buf
, p
);
5694 return log_unit_debug_errno(u
, r
, "Failed to create log rate limit burst symlink %s: %m", p
);
5696 u
->exported_log_ratelimit_burst
= true;
5700 void unit_export_state_files(Unit
*u
) {
5701 const ExecContext
*c
;
5708 if (MANAGER_IS_TEST_RUN(u
->manager
))
5711 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5712 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5713 * the IPC system itself and PID 1 also log to the journal.
5715 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5716 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5717 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5718 * namespace at least.
5720 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5721 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5724 (void) unit_export_invocation_id(u
);
5726 if (!MANAGER_IS_SYSTEM(u
->manager
))
5729 c
= unit_get_exec_context(u
);
5731 (void) unit_export_log_level_max(u
, c
);
5732 (void) unit_export_log_extra_fields(u
, c
);
5733 (void) unit_export_log_ratelimit_interval(u
, c
);
5734 (void) unit_export_log_ratelimit_burst(u
, c
);
5738 void unit_unlink_state_files(Unit
*u
) {
5746 /* Undoes the effect of unit_export_state() */
5748 if (u
->exported_invocation_id
) {
5749 _cleanup_free_
char *invocation_path
= NULL
;
5750 int r
= unit_get_invocation_path(u
, &invocation_path
);
5752 (void) unlink(invocation_path
);
5753 u
->exported_invocation_id
= false;
5757 if (!MANAGER_IS_SYSTEM(u
->manager
))
5760 if (u
->exported_log_level_max
) {
5761 p
= strjoina("/run/systemd/units/log-level-max:", u
->id
);
5764 u
->exported_log_level_max
= false;
5767 if (u
->exported_log_extra_fields
) {
5768 p
= strjoina("/run/systemd/units/extra-fields:", u
->id
);
5771 u
->exported_log_extra_fields
= false;
5774 if (u
->exported_log_ratelimit_interval
) {
5775 p
= strjoina("/run/systemd/units/log-rate-limit-interval:", u
->id
);
5778 u
->exported_log_ratelimit_interval
= false;
5781 if (u
->exported_log_ratelimit_burst
) {
5782 p
= strjoina("/run/systemd/units/log-rate-limit-burst:", u
->id
);
5785 u
->exported_log_ratelimit_burst
= false;
5789 int unit_prepare_exec(Unit
*u
) {
5794 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5795 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5796 r
= bpf_firewall_load_custom(u
);
5800 /* Prepares everything so that we can fork of a process for this unit */
5802 (void) unit_realize_cgroup(u
);
5804 if (u
->reset_accounting
) {
5805 (void) unit_reset_accounting(u
);
5806 u
->reset_accounting
= false;
5809 unit_export_state_files(u
);
5811 r
= unit_setup_exec_runtime(u
);
5818 static bool ignore_leftover_process(const char *comm
) {
5819 return comm
&& comm
[0] == '('; /* Most likely our own helper process (PAM?), ignore */
5822 int unit_log_leftover_process_start(const PidRef
*pid
, int sig
, void *userdata
) {
5823 _cleanup_free_
char *comm
= NULL
;
5825 assert(pidref_is_set(pid
));
5827 (void) pidref_get_comm(pid
, &comm
);
5829 if (ignore_leftover_process(comm
))
5832 /* During start we print a warning */
5834 log_unit_warning(userdata
,
5835 "Found left-over process " PID_FMT
" (%s) in control group while starting unit. Ignoring.\n"
5836 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5837 pid
->pid
, strna(comm
));
5842 int unit_log_leftover_process_stop(const PidRef
*pid
, int sig
, void *userdata
) {
5843 _cleanup_free_
char *comm
= NULL
;
5845 assert(pidref_is_set(pid
));
5847 (void) pidref_get_comm(pid
, &comm
);
5849 if (ignore_leftover_process(comm
))
5852 /* During stop we only print an informational message */
5854 log_unit_info(userdata
,
5855 "Unit process " PID_FMT
" (%s) remains running after unit stopped.",
5856 pid
->pid
, strna(comm
));
5861 int unit_warn_leftover_processes(Unit
*u
, cg_kill_log_func_t log_func
) {
5864 (void) unit_pick_cgroup_path(u
);
5866 if (!u
->cgroup_path
)
5869 return cg_kill_recursive(
5878 bool unit_needs_console(Unit
*u
) {
5880 UnitActiveState state
;
5884 state
= unit_active_state(u
);
5886 if (UNIT_IS_INACTIVE_OR_FAILED(state
))
5889 if (UNIT_VTABLE(u
)->needs_console
)
5890 return UNIT_VTABLE(u
)->needs_console(u
);
5892 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5893 ec
= unit_get_exec_context(u
);
5897 return exec_context_may_touch_console(ec
);
5900 int unit_pid_attachable(Unit
*u
, const PidRef
*pid
, sd_bus_error
*error
) {
5905 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5906 * and not a kernel thread either */
5908 /* First, a simple range check */
5909 if (!pidref_is_set(pid
))
5910 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process identifier is not valid.");
5912 /* Some extra safety check */
5913 if (pid
->pid
== 1 || pidref_is_self(pid
))
5914 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a manager process, refusing.", pid
->pid
);
5916 /* Don't even begin to bother with kernel threads */
5917 r
= pidref_is_kernel_thread(pid
);
5919 return sd_bus_error_setf(error
, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN
, "Process with ID " PID_FMT
" does not exist.", pid
->pid
);
5921 return sd_bus_error_set_errnof(error
, r
, "Failed to determine whether process " PID_FMT
" is a kernel thread: %m", pid
->pid
);
5923 return sd_bus_error_setf(error
, SD_BUS_ERROR_INVALID_ARGS
, "Process " PID_FMT
" is a kernel thread, refusing.", pid
->pid
);
5928 void unit_log_success(Unit
*u
) {
5931 /* Let's show message "Deactivated successfully" in debug mode (when manager is user) rather than in info mode.
5932 * This message has low information value for regular users and it might be a bit overwhelming on a system with
5933 * a lot of devices. */
5935 MANAGER_IS_USER(u
->manager
) ? LOG_DEBUG
: LOG_INFO
,
5936 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR
,
5937 LOG_UNIT_INVOCATION_ID(u
),
5938 LOG_UNIT_MESSAGE(u
, "Deactivated successfully."));
5941 void unit_log_failure(Unit
*u
, const char *result
) {
5945 log_unit_struct(u
, LOG_WARNING
,
5946 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR
,
5947 LOG_UNIT_INVOCATION_ID(u
),
5948 LOG_UNIT_MESSAGE(u
, "Failed with result '%s'.", result
),
5949 "UNIT_RESULT=%s", result
);
5952 void unit_log_skip(Unit
*u
, const char *result
) {
5956 log_unit_struct(u
, LOG_INFO
,
5957 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR
,
5958 LOG_UNIT_INVOCATION_ID(u
),
5959 LOG_UNIT_MESSAGE(u
, "Skipped due to '%s'.", result
),
5960 "UNIT_RESULT=%s", result
);
5963 void unit_log_process_exit(
5966 const char *command
,
5976 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5977 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5978 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5982 else if (code
== CLD_EXITED
)
5985 level
= LOG_WARNING
;
5987 log_unit_struct(u
, level
,
5988 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR
,
5989 LOG_UNIT_MESSAGE(u
, "%s exited, code=%s, status=%i/%s%s",
5991 sigchld_code_to_string(code
), status
,
5992 strna(code
== CLD_EXITED
5993 ? exit_status_to_string(status
, EXIT_STATUS_FULL
)
5994 : signal_to_string(status
)),
5995 success
? " (success)" : ""),
5996 "EXIT_CODE=%s", sigchld_code_to_string(code
),
5997 "EXIT_STATUS=%i", status
,
5998 "COMMAND=%s", strna(command
),
5999 LOG_UNIT_INVOCATION_ID(u
));
6002 int unit_exit_status(Unit
*u
) {
6005 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
6006 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
6007 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
6008 * service process has exited abnormally (signal/coredump). */
6010 if (!UNIT_VTABLE(u
)->exit_status
)
6013 return UNIT_VTABLE(u
)->exit_status(u
);
6016 int unit_failure_action_exit_status(Unit
*u
) {
6021 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
6023 if (u
->failure_action_exit_status
>= 0)
6024 return u
->failure_action_exit_status
;
6026 r
= unit_exit_status(u
);
6027 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6033 int unit_success_action_exit_status(Unit
*u
) {
6038 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
6040 if (u
->success_action_exit_status
>= 0)
6041 return u
->success_action_exit_status
;
6043 r
= unit_exit_status(u
);
6044 if (r
== -EBADE
) /* Exited, but not cleanly (i.e. by signal or such) */
6050 int unit_test_trigger_loaded(Unit
*u
) {
6053 /* Tests whether the unit to trigger is loaded */
6055 trigger
= UNIT_TRIGGER(u
);
6057 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6058 "Refusing to start, no unit to trigger.");
6059 if (trigger
->load_state
!= UNIT_LOADED
)
6060 return log_unit_error_errno(u
, SYNTHETIC_ERRNO(ENOENT
),
6061 "Refusing to start, unit %s to trigger not loaded.", trigger
->id
);
6066 void unit_destroy_runtime_data(Unit
*u
, const ExecContext
*context
) {
6070 /* EXEC_PRESERVE_RESTART is handled via unit_release_resources()! */
6071 if (context
->runtime_directory_preserve_mode
== EXEC_PRESERVE_NO
)
6072 exec_context_destroy_runtime_directory(context
, u
->manager
->prefix
[EXEC_DIRECTORY_RUNTIME
]);
6074 exec_context_destroy_credentials(u
);
6075 exec_context_destroy_mount_ns_dir(u
);
6078 int unit_clean(Unit
*u
, ExecCleanMask mask
) {
6079 UnitActiveState state
;
6083 /* Special return values:
6085 * -EOPNOTSUPP → cleaning not supported for this unit type
6086 * -EUNATCH → cleaning not defined for this resource type
6087 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6088 * a job queued or similar
6091 if (!UNIT_VTABLE(u
)->clean
)
6097 if (u
->load_state
!= UNIT_LOADED
)
6103 state
= unit_active_state(u
);
6104 if (state
!= UNIT_INACTIVE
)
6107 return UNIT_VTABLE(u
)->clean(u
, mask
);
6110 int unit_can_clean(Unit
*u
, ExecCleanMask
*ret
) {
6113 if (!UNIT_VTABLE(u
)->clean
||
6114 u
->load_state
!= UNIT_LOADED
) {
6119 /* When the clean() method is set, can_clean() really should be set too */
6120 assert(UNIT_VTABLE(u
)->can_clean
);
6122 return UNIT_VTABLE(u
)->can_clean(u
, ret
);
6125 bool unit_can_start_refuse_manual(Unit
*u
) {
6126 return unit_can_start(u
) && !u
->refuse_manual_start
;
6129 bool unit_can_stop_refuse_manual(Unit
*u
) {
6130 return unit_can_stop(u
) && !u
->refuse_manual_stop
;
6133 bool unit_can_isolate_refuse_manual(Unit
*u
) {
6134 return unit_can_isolate(u
) && !u
->refuse_manual_start
;
6137 void unit_next_freezer_state(Unit
*u
, FreezerAction action
, FreezerState
*ret
, FreezerState
*ret_target
) {
6139 FreezerState curr
, parent
, next
, tgt
;
6142 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_PARENT_FREEZE
,
6143 FREEZER_THAW
, FREEZER_PARENT_THAW
));
6147 /* This function determines the correct freezer state transitions for a unit
6148 * given the action being requested. It returns the next state, and also the "target",
6149 * which is either FREEZER_FROZEN or FREEZER_RUNNING, depending on what actual state we
6150 * ultimately want to achieve. */
6152 curr
= u
->freezer_state
;
6153 slice
= UNIT_GET_SLICE(u
);
6155 parent
= slice
->freezer_state
;
6157 parent
= FREEZER_RUNNING
;
6159 if (action
== FREEZER_FREEZE
) {
6160 /* We always "promote" a freeze initiated by parent into a normal freeze */
6161 if (IN_SET(curr
, FREEZER_FROZEN
, FREEZER_FROZEN_BY_PARENT
))
6162 next
= FREEZER_FROZEN
;
6164 next
= FREEZER_FREEZING
;
6165 } else if (action
== FREEZER_THAW
) {
6166 /* Thawing is the most complicated operation here, because we can't thaw a unit
6167 * if its parent is frozen. So we instead "demote" a normal freeze into a freeze
6168 * initiated by parent if the parent is frozen */
6169 if (IN_SET(curr
, FREEZER_RUNNING
, FREEZER_THAWING
, FREEZER_FREEZING_BY_PARENT
, FREEZER_FROZEN_BY_PARENT
))
6171 else if (curr
== FREEZER_FREEZING
) {
6172 if (IN_SET(parent
, FREEZER_RUNNING
, FREEZER_THAWING
))
6173 next
= FREEZER_THAWING
;
6175 next
= FREEZER_FREEZING_BY_PARENT
;
6177 assert(curr
== FREEZER_FROZEN
);
6178 if (IN_SET(parent
, FREEZER_RUNNING
, FREEZER_THAWING
))
6179 next
= FREEZER_THAWING
;
6181 next
= FREEZER_FROZEN_BY_PARENT
;
6183 } else if (action
== FREEZER_PARENT_FREEZE
) {
6184 /* We need to avoid accidentally demoting units frozen manually */
6185 if (IN_SET(curr
, FREEZER_FREEZING
, FREEZER_FROZEN
, FREEZER_FROZEN_BY_PARENT
))
6188 next
= FREEZER_FREEZING_BY_PARENT
;
6190 assert(action
== FREEZER_PARENT_THAW
);
6192 /* We don't want to thaw units from a parent if they were frozen
6193 * manually, so for such units this action is a no-op */
6194 if (IN_SET(curr
, FREEZER_RUNNING
, FREEZER_FREEZING
, FREEZER_FROZEN
))
6197 next
= FREEZER_THAWING
;
6200 tgt
= freezer_state_finish(next
);
6201 if (tgt
== FREEZER_FROZEN_BY_PARENT
)
6202 tgt
= FREEZER_FROZEN
;
6203 assert(IN_SET(tgt
, FREEZER_RUNNING
, FREEZER_FROZEN
));
6209 bool unit_can_freeze(Unit
*u
) {
6212 if (unit_has_name(u
, SPECIAL_ROOT_SLICE
) || unit_has_name(u
, SPECIAL_INIT_SCOPE
))
6215 if (UNIT_VTABLE(u
)->can_freeze
)
6216 return UNIT_VTABLE(u
)->can_freeze(u
);
6218 return UNIT_VTABLE(u
)->freezer_action
;
6221 void unit_frozen(Unit
*u
) {
6224 u
->freezer_state
= u
->freezer_state
== FREEZER_FREEZING_BY_PARENT
6225 ? FREEZER_FROZEN_BY_PARENT
6228 log_unit_debug(u
, "Unit now %s.", freezer_state_to_string(u
->freezer_state
));
6230 bus_unit_send_pending_freezer_message(u
, false);
6233 void unit_thawed(Unit
*u
) {
6236 u
->freezer_state
= FREEZER_RUNNING
;
6238 log_unit_debug(u
, "Unit thawed.");
6240 bus_unit_send_pending_freezer_message(u
, false);
6243 int unit_freezer_action(Unit
*u
, FreezerAction action
) {
6248 assert(IN_SET(action
, FREEZER_FREEZE
, FREEZER_THAW
));
6250 if (!cg_freezer_supported() || !unit_can_freeze(u
))
6256 if (u
->load_state
!= UNIT_LOADED
)
6259 s
= unit_active_state(u
);
6260 if (s
!= UNIT_ACTIVE
)
6263 if (action
== FREEZER_FREEZE
&& IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_FREEZING_BY_PARENT
))
6265 if (action
== FREEZER_THAW
&& u
->freezer_state
== FREEZER_THAWING
)
6267 if (action
== FREEZER_THAW
&& IN_SET(u
->freezer_state
, FREEZER_FREEZING_BY_PARENT
, FREEZER_FROZEN_BY_PARENT
))
6270 r
= UNIT_VTABLE(u
)->freezer_action(u
, action
);
6274 assert(IN_SET(u
->freezer_state
, FREEZER_FREEZING
, FREEZER_FREEZING_BY_PARENT
, FREEZER_THAWING
));
6278 Condition
*unit_find_failed_condition(Unit
*u
) {
6279 Condition
*failed_trigger
= NULL
;
6280 bool has_succeeded_trigger
= false;
6282 if (u
->condition_result
)
6285 LIST_FOREACH(conditions
, c
, u
->conditions
)
6287 if (c
->result
== CONDITION_SUCCEEDED
)
6288 has_succeeded_trigger
= true;
6289 else if (!failed_trigger
)
6291 } else if (c
->result
!= CONDITION_SUCCEEDED
)
6294 return failed_trigger
&& !has_succeeded_trigger
? failed_trigger
: NULL
;
6297 static const char* const collect_mode_table
[_COLLECT_MODE_MAX
] = {
6298 [COLLECT_INACTIVE
] = "inactive",
6299 [COLLECT_INACTIVE_OR_FAILED
] = "inactive-or-failed",
6302 DEFINE_STRING_TABLE_LOOKUP(collect_mode
, CollectMode
);
6304 Unit
* unit_has_dependency(const Unit
*u
, UnitDependencyAtom atom
, Unit
*other
) {
6309 /* Checks if the unit has a dependency on 'other' with the specified dependency atom. If 'other' is
6310 * NULL checks if the unit has *any* dependency of that atom. Returns 'other' if found (or if 'other'
6311 * is NULL the first entry found), or NULL if not found. */
6313 UNIT_FOREACH_DEPENDENCY(i
, u
, atom
)
6314 if (!other
|| other
== i
)
6320 int unit_get_dependency_array(const Unit
*u
, UnitDependencyAtom atom
, Unit
***ret_array
) {
6321 _cleanup_free_ Unit
**array
= NULL
;
6328 /* Gets a list of units matching a specific atom as array. This is useful when iterating through
6329 * dependencies while modifying them: the array is an "atomic snapshot" of sorts, that can be read
6330 * while the dependency table is continuously updated. */
6332 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6333 if (!GREEDY_REALLOC(array
, n
+ 1))
6339 *ret_array
= TAKE_PTR(array
);
6341 assert(n
<= INT_MAX
);
6345 int unit_get_transitive_dependency_set(Unit
*u
, UnitDependencyAtom atom
, Set
**ret
) {
6346 _cleanup_set_free_ Set
*units
= NULL
, *queue
= NULL
;
6353 /* Similar to unit_get_dependency_array(), but also search the same dependency in other units. */
6356 UNIT_FOREACH_DEPENDENCY(other
, u
, atom
) {
6357 r
= set_ensure_put(&units
, NULL
, other
);
6362 r
= set_ensure_put(&queue
, NULL
, other
);
6366 } while ((u
= set_steal_first(queue
)));
6368 *ret
= TAKE_PTR(units
);
6374 sd_event_source
**source
,
6377 sd_event_time_handler_t handler
) {
6386 if (usec
== USEC_INFINITY
)
6387 return sd_event_source_set_enabled(*source
, SD_EVENT_OFF
);
6389 r
= (relative
? sd_event_source_set_time_relative
: sd_event_source_set_time
)(*source
, usec
);
6393 return sd_event_source_set_enabled(*source
, SD_EVENT_ONESHOT
);
6396 if (usec
== USEC_INFINITY
)
6399 r
= (relative
? sd_event_add_time_relative
: sd_event_add_time
)(
6409 const char *d
= strjoina(unit_type_to_string(u
->type
), "-timer");
6410 (void) sd_event_source_set_description(*source
, d
);
6415 static int unit_get_nice(Unit
*u
) {
6418 ec
= unit_get_exec_context(u
);
6419 return ec
? ec
->nice
: 0;
6422 static uint64_t unit_get_cpu_weight(Unit
*u
) {
6425 cc
= unit_get_cgroup_context(u
);
6426 return cc
? cgroup_context_cpu_weight(cc
, manager_state(u
->manager
)) : CGROUP_WEIGHT_DEFAULT
;
6429 int unit_compare_priority(Unit
*a
, Unit
*b
) {
6432 ret
= CMP(a
->type
, b
->type
);
6436 ret
= CMP(unit_get_cpu_weight(a
), unit_get_cpu_weight(b
));
6440 ret
= CMP(unit_get_nice(a
), unit_get_nice(b
));
6444 return strcmp(a
->id
, b
->id
);
6447 const ActivationDetailsVTable
* const activation_details_vtable
[_UNIT_TYPE_MAX
] = {
6448 [UNIT_PATH
] = &activation_details_path_vtable
,
6449 [UNIT_TIMER
] = &activation_details_timer_vtable
,
6452 ActivationDetails
*activation_details_new(Unit
*trigger_unit
) {
6453 _cleanup_free_ ActivationDetails
*details
= NULL
;
6455 assert(trigger_unit
);
6456 assert(trigger_unit
->type
!= _UNIT_TYPE_INVALID
);
6457 assert(trigger_unit
->id
);
6459 details
= malloc0(activation_details_vtable
[trigger_unit
->type
]->object_size
);
6463 *details
= (ActivationDetails
) {
6465 .trigger_unit_type
= trigger_unit
->type
,
6468 details
->trigger_unit_name
= strdup(trigger_unit
->id
);
6469 if (!details
->trigger_unit_name
)
6472 if (ACTIVATION_DETAILS_VTABLE(details
)->init
)
6473 ACTIVATION_DETAILS_VTABLE(details
)->init(details
, trigger_unit
);
6475 return TAKE_PTR(details
);
6478 static ActivationDetails
*activation_details_free(ActivationDetails
*details
) {
6482 if (ACTIVATION_DETAILS_VTABLE(details
)->done
)
6483 ACTIVATION_DETAILS_VTABLE(details
)->done(details
);
6485 free(details
->trigger_unit_name
);
6487 return mfree(details
);
6490 void activation_details_serialize(ActivationDetails
*details
, FILE *f
) {
6491 if (!details
|| details
->trigger_unit_type
== _UNIT_TYPE_INVALID
)
6494 (void) serialize_item(f
, "activation-details-unit-type", unit_type_to_string(details
->trigger_unit_type
));
6495 if (details
->trigger_unit_name
)
6496 (void) serialize_item(f
, "activation-details-unit-name", details
->trigger_unit_name
);
6497 if (ACTIVATION_DETAILS_VTABLE(details
)->serialize
)
6498 ACTIVATION_DETAILS_VTABLE(details
)->serialize(details
, f
);
6501 int activation_details_deserialize(const char *key
, const char *value
, ActivationDetails
**details
) {
6511 if (!streq(key
, "activation-details-unit-type"))
6514 t
= unit_type_from_string(value
);
6518 /* The activation details vtable has defined ops only for path and timer units */
6519 if (!activation_details_vtable
[t
])
6522 *details
= malloc0(activation_details_vtable
[t
]->object_size
);
6526 **details
= (ActivationDetails
) {
6528 .trigger_unit_type
= t
,
6534 if (streq(key
, "activation-details-unit-name")) {
6535 r
= free_and_strdup(&(*details
)->trigger_unit_name
, value
);
6542 if (ACTIVATION_DETAILS_VTABLE(*details
)->deserialize
)
6543 return ACTIVATION_DETAILS_VTABLE(*details
)->deserialize(key
, value
, details
);
6548 int activation_details_append_env(ActivationDetails
*details
, char ***strv
) {
6556 if (!isempty(details
->trigger_unit_name
)) {
6557 char *s
= strjoin("TRIGGER_UNIT=", details
->trigger_unit_name
);
6561 r
= strv_consume(strv
, TAKE_PTR(s
));
6566 if (ACTIVATION_DETAILS_VTABLE(details
)->append_env
) {
6567 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_env(details
, strv
);
6572 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of variables added to the env block */
6575 int activation_details_append_pair(ActivationDetails
*details
, char ***strv
) {
6583 if (!isempty(details
->trigger_unit_name
)) {
6584 r
= strv_extend_many(strv
, "trigger_unit", details
->trigger_unit_name
);
6589 if (ACTIVATION_DETAILS_VTABLE(details
)->append_pair
) {
6590 r
= ACTIVATION_DETAILS_VTABLE(details
)->append_pair(details
, strv
);
6595 return r
+ !isempty(details
->trigger_unit_name
); /* Return the number of pairs added to the strv */
6598 DEFINE_TRIVIAL_REF_UNREF_FUNC(ActivationDetails
, activation_details
, activation_details_free
);
6600 static const char* const unit_mount_dependency_type_table
[_UNIT_MOUNT_DEPENDENCY_TYPE_MAX
] = {
6601 [UNIT_MOUNT_WANTS
] = "WantsMountsFor",
6602 [UNIT_MOUNT_REQUIRES
] = "RequiresMountsFor",
6605 DEFINE_STRING_TABLE_LOOKUP(unit_mount_dependency_type
, UnitMountDependencyType
);
6607 UnitDependency
unit_mount_dependency_type_to_dependency_type(UnitMountDependencyType t
) {
6610 case UNIT_MOUNT_WANTS
:
6613 case UNIT_MOUNT_REQUIRES
:
6614 return UNIT_REQUIRES
;
6617 assert_not_reached();