/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <sys/inotify.h>
#include <sys/reboot.h>

#include "sd-daemon.h"
#include "sd-messages.h"

#include "all-units.h"
#include "alloc-util.h"
#include "architecture.h"
#include "boot-timestamps.h"
#include "bpf-restrict-fs.h"
#include "build-path.h"
#include "bus-common-errors.h"
#include "bus-error.h"
#include "clean-ipc.h"
#include "common-signal.h"
#include "confidential-virt.h"
#include "constants.h"
#include "creds-util.h"
#include "daemon-util.h"
#include "dbus-manager.h"
#include "dbus-unit.h"
#include "dirent-util.h"
#include "dynamic-user.h"
#include "event-util.h"
#include "exec-util.h"
#include "exit-status.h"
#include "format-util.h"
#include "generator-setup.h"
#include "initrd-util.h"
#include "inotify-util.h"
#include "iovec-util.h"
#include "libaudit-util.h"
#include "locale-setup.h"
#include "manager-dump.h"
#include "manager-serialize.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "notify-recv.h"
#include "parse-util.h"
#include "path-lookup.h"
#include "path-util.h"
#include "plymouth-util.h"
#include "pretty-print.h"
#include "process-util.h"
#include "ratelimit.h"
#include "rlimit-util.h"
#include "selinux-util.h"
#include "serialize.h"
#include "signal-util.h"
#include "socket-util.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "sysctl-util.h"
#include "syslog-util.h"
#include "terminal-util.h"
#include "time-util.h"
#include "transaction.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
99 /* Make sure clients notifying us don't block */
100 #define MANAGER_SOCKET_RCVBUF_SIZE (8*U64_MB)
102 /* Initial delay and the interval for printing status messages about running jobs */
103 #define JOBS_IN_PROGRESS_WAIT_USEC (2*USEC_PER_SEC)
104 #define JOBS_IN_PROGRESS_QUIET_WAIT_USEC (25*USEC_PER_SEC)
105 #define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
106 #define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
108 /* If there are more than 1K bus messages queue across our API and direct buses, then let's not add more on top until
109 * the queue gets more empty. */
110 #define MANAGER_BUS_BUSY_THRESHOLD 1024LU
112 /* How many units and jobs to process of the bus queue before returning to the event loop. */
113 #define MANAGER_BUS_MESSAGE_BUDGET 100U
115 #define DEFAULT_TASKS_MAX ((CGroupTasksMax) { 15U, 100U }) /* 15% */
117 static int manager_dispatch_notify_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
118 static int manager_dispatch_signal_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
119 static int manager_dispatch_time_change_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
120 static int manager_dispatch_idle_pipe_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
121 static int manager_dispatch_user_lookup_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
122 static int manager_dispatch_handoff_timestamp_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
123 static int manager_dispatch_pidref_transport_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
);
124 static int manager_dispatch_jobs_in_progress(sd_event_source
*source
, usec_t usec
, void *userdata
);
125 static int manager_dispatch_run_queue(sd_event_source
*source
, void *userdata
);
126 static int manager_dispatch_sigchld(sd_event_source
*source
, void *userdata
);
127 static int manager_dispatch_timezone_change(sd_event_source
*source
, const struct inotify_event
*event
, void *userdata
);
128 static int manager_run_environment_generators(Manager
*m
);
129 static int manager_run_generators(Manager
*m
);
130 static void manager_vacuum(Manager
*m
);
132 static usec_t
manager_watch_jobs_next_time(Manager
*m
) {
135 if (MANAGER_IS_USER(m
))
136 /* Let the user manager without a timeout show status quickly, so the system manager can make
137 * use of it, if it wants to. */
138 timeout
= JOBS_IN_PROGRESS_WAIT_USEC
* 2 / 3;
139 else if (show_status_on(m
->show_status
))
140 /* When status is on, just use the usual timeout. */
141 timeout
= JOBS_IN_PROGRESS_WAIT_USEC
;
143 timeout
= JOBS_IN_PROGRESS_QUIET_WAIT_USEC
;
145 return usec_add(now(CLOCK_MONOTONIC
), timeout
);
148 static bool manager_is_confirm_spawn_disabled(Manager
*m
) {
151 if (!m
->confirm_spawn
)
154 return access("/run/systemd/confirm_spawn_disabled", F_OK
) >= 0;
157 static void manager_watch_jobs_in_progress(Manager
*m
) {
163 /* We do not want to show the cylon animation if the user
164 * needs to confirm service executions otherwise confirmation
165 * messages will be screwed by the cylon animation. */
166 if (!manager_is_confirm_spawn_disabled(m
))
169 if (m
->jobs_in_progress_event_source
)
172 next
= manager_watch_jobs_next_time(m
);
173 r
= sd_event_add_time(
175 &m
->jobs_in_progress_event_source
,
178 manager_dispatch_jobs_in_progress
, m
);
182 (void) sd_event_source_set_description(m
->jobs_in_progress_event_source
, "manager-jobs-in-progress");
185 static void manager_flip_auto_status(Manager
*m
, bool enable
, const char *reason
) {
189 if (m
->show_status
== SHOW_STATUS_AUTO
)
190 manager_set_show_status(m
, SHOW_STATUS_TEMPORARY
, reason
);
192 if (m
->show_status
== SHOW_STATUS_TEMPORARY
)
193 manager_set_show_status(m
, SHOW_STATUS_AUTO
, reason
);
197 static void manager_print_jobs_in_progress(Manager
*m
) {
199 unsigned counter
= 0, print_nr
;
200 char cylon
[6 + CYLON_BUFFER_EXTRA
+ 1];
202 uint64_t timeout
= 0;
205 assert(m
->n_running_jobs
> 0);
207 manager_flip_auto_status(m
, true, "delay");
209 print_nr
= (m
->jobs_in_progress_iteration
/ JOBS_IN_PROGRESS_PERIOD_DIVISOR
) % m
->n_running_jobs
;
211 HASHMAP_FOREACH(j
, m
->jobs
)
212 if (j
->state
== JOB_RUNNING
&& counter
++ == print_nr
)
215 /* m->n_running_jobs must be consistent with the contents of m->jobs,
216 * so the above loop must have succeeded in finding j. */
217 assert(counter
== print_nr
+ 1);
220 cylon_pos
= m
->jobs_in_progress_iteration
% 14;
222 cylon_pos
= 14 - cylon_pos
;
223 draw_cylon(cylon
, sizeof(cylon
), 6, cylon_pos
);
225 m
->jobs_in_progress_iteration
++;
227 char job_of_n
[STRLEN("( of ) ") + DECIMAL_STR_MAX(unsigned)*2] = "";
228 if (m
->n_running_jobs
> 1)
229 xsprintf(job_of_n
, "(%u of %u) ", counter
, m
->n_running_jobs
);
231 (void) job_get_timeout(j
, &timeout
);
233 /* We want to use enough information for the user to identify previous lines talking about the same
234 * unit, but keep the message as short as possible. So if 'Starting foo.service' or 'Starting
235 * foo.service - Description' were used, 'foo.service' is enough here. On the other hand, if we used
236 * 'Starting Description' before, then we shall also use 'Description' here. So we pass NULL as the
237 * second argument to unit_status_string(). */
238 const char *ident
= unit_status_string(j
->unit
, NULL
);
240 const char *time
= FORMAT_TIMESPAN(now(CLOCK_MONOTONIC
) - j
->begin_usec
, 1*USEC_PER_SEC
);
241 const char *limit
= timeout
> 0 ? FORMAT_TIMESPAN(timeout
- j
->begin_usec
, 1*USEC_PER_SEC
) : "no limit";
243 if (m
->status_unit_format
== STATUS_UNIT_FORMAT_DESCRIPTION
)
244 /* When using 'Description', we effectively don't have enough space to show the nested status
245 * without ellipsization, so let's not even try. */
246 manager_status_printf(m
, STATUS_TYPE_EPHEMERAL
, cylon
,
247 "%sA %s job is running for %s (%s / %s)",
249 job_type_to_string(j
->type
),
253 const char *status_text
= unit_status_text(j
->unit
);
255 manager_status_printf(m
, STATUS_TYPE_EPHEMERAL
, cylon
,
256 "%sJob %s/%s running (%s / %s)%s%s",
259 job_type_to_string(j
->type
),
261 status_text
? ": " : "",
262 strempty(status_text
));
265 (void) sd_notifyf(/* unset_environment= */ false,
266 "STATUS=%sUser job %s/%s running (%s / %s)...",
268 ident
, job_type_to_string(j
->type
),
270 m
->status_ready
= false;
273 static int have_ask_password(void) {
274 _cleanup_closedir_
DIR *dir
= NULL
;
276 dir
= opendir("/run/systemd/ask-password");
284 FOREACH_DIRENT_ALL(de
, dir
, return -errno
) {
285 if (!IN_SET(de
->d_type
, DT_REG
, DT_UNKNOWN
))
288 if (startswith(de
->d_name
, "ask."))
295 static int manager_dispatch_ask_password_fd(sd_event_source
*source
,
296 int fd
, uint32_t revents
, void *userdata
) {
297 Manager
*m
= ASSERT_PTR(userdata
);
301 m
->have_ask_password
= have_ask_password();
302 if (m
->have_ask_password
< 0)
303 /* Log error but continue. Negative have_ask_password is treated as unknown status. */
304 log_warning_errno(m
->have_ask_password
, "Failed to list /run/systemd/ask-password/, ignoring: %m");
309 static void manager_close_ask_password(Manager
*m
) {
312 m
->ask_password_event_source
= sd_event_source_disable_unref(m
->ask_password_event_source
);
313 m
->have_ask_password
= -EINVAL
;
316 static int manager_check_ask_password(Manager
*m
) {
321 /* We only care about passwords prompts when running in system mode (because that's the only time we
322 * manage a console) */
323 if (!MANAGER_IS_SYSTEM(m
))
326 if (!m
->ask_password_event_source
) {
327 _cleanup_close_
int inotify_fd
= inotify_init1(IN_NONBLOCK
|IN_CLOEXEC
);
329 return log_error_errno(errno
, "Failed to create inotify object: %m");
331 (void) mkdir_label("/run/systemd/ask-password", 0755);
332 r
= inotify_add_watch_and_warn(inotify_fd
, "/run/systemd/ask-password", IN_CLOSE_WRITE
|IN_DELETE
|IN_MOVED_TO
|IN_ONLYDIR
);
336 _cleanup_(sd_event_source_disable_unrefp
) sd_event_source
*event_source
= NULL
;
342 manager_dispatch_ask_password_fd
,
345 return log_error_errno(r
, "Failed to add event source for /run/systemd/ask-password/: %m");
347 r
= sd_event_source_set_io_fd_own(event_source
, true);
349 return log_error_errno(r
, "Failed to pass ownership of /run/systemd/ask-password/ inotify fd to event source: %m");
352 (void) sd_event_source_set_description(event_source
, "manager-ask-password");
354 m
->ask_password_event_source
= TAKE_PTR(event_source
);
356 /* Queries might have been added meanwhile... */
357 (void) manager_dispatch_ask_password_fd(m
->ask_password_event_source
, sd_event_source_get_io_fd(m
->ask_password_event_source
), EPOLLIN
, m
);
360 return m
->have_ask_password
;
363 static int manager_watch_idle_pipe(Manager
*m
) {
368 if (m
->idle_pipe_event_source
)
371 if (m
->idle_pipe
[2] < 0)
374 r
= sd_event_add_io(m
->event
, &m
->idle_pipe_event_source
, m
->idle_pipe
[2], EPOLLIN
, manager_dispatch_idle_pipe_fd
, m
);
376 return log_error_errno(r
, "Failed to watch idle pipe: %m");
378 (void) sd_event_source_set_description(m
->idle_pipe_event_source
, "manager-idle-pipe");
383 static void manager_close_idle_pipe(Manager
*m
) {
386 m
->idle_pipe_event_source
= sd_event_source_disable_unref(m
->idle_pipe_event_source
);
388 safe_close_pair(m
->idle_pipe
);
389 safe_close_pair(m
->idle_pipe
+ 2);
392 static int manager_setup_time_change(Manager
*m
) {
397 if (MANAGER_IS_TEST_RUN(m
))
400 m
->time_change_event_source
= sd_event_source_disable_unref(m
->time_change_event_source
);
402 r
= event_add_time_change(m
->event
, &m
->time_change_event_source
, manager_dispatch_time_change_fd
, m
);
404 return log_error_errno(r
, "Failed to create time change event source: %m");
406 /* Schedule this slightly earlier than the .timer event sources */
407 r
= sd_event_source_set_priority(m
->time_change_event_source
, EVENT_PRIORITY_TIME_CHANGE
);
409 return log_error_errno(r
, "Failed to set priority of time change event sources: %m");
411 log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");
416 static int manager_read_timezone_stat(Manager
*m
) {
422 /* Read the current stat() data of /etc/localtime so that we detect changes */
423 if (lstat(etc_localtime(), &st
) < 0) {
424 log_debug_errno(errno
, "Failed to stat /etc/localtime, ignoring: %m");
425 changed
= m
->etc_localtime_accessible
;
426 m
->etc_localtime_accessible
= false;
430 k
= timespec_load(&st
.st_mtim
);
431 changed
= !m
->etc_localtime_accessible
|| k
!= m
->etc_localtime_mtime
;
433 m
->etc_localtime_mtime
= k
;
434 m
->etc_localtime_accessible
= true;
440 static int manager_setup_timezone_change(Manager
*m
) {
441 _cleanup_(sd_event_source_unrefp
) sd_event_source
*new_event
= NULL
;
446 if (MANAGER_IS_TEST_RUN(m
))
449 /* We watch /etc/localtime for three events: change of the link count (which might mean removal from /etc even
450 * though another link might be kept), renames, and file close operations after writing. Note we don't bother
451 * with IN_DELETE_SELF, as that would just report when the inode is removed entirely, i.e. after the link count
452 * went to zero and all fds to it are closed.
454 * Note that we never follow symlinks here. This is a simplification, but should cover almost all cases
457 * Note that we create the new event source first here, before releasing the old one. This should optimize
458 * behaviour as this way sd-event can reuse the old watch in case the inode didn't change. */
460 r
= sd_event_add_inotify(m
->event
, &new_event
, etc_localtime(),
461 IN_ATTRIB
|IN_MOVE_SELF
|IN_CLOSE_WRITE
|IN_DONT_FOLLOW
, manager_dispatch_timezone_change
, m
);
463 /* If the file doesn't exist yet, subscribe to /etc instead, and wait until it is created either by
464 * O_CREATE or by rename() */
465 _cleanup_free_
char *localtime_dir
= NULL
;
467 int dir_r
= path_extract_directory(etc_localtime(), &localtime_dir
);
469 return log_error_errno(dir_r
, "Failed to extract directory from path '%s': %m", etc_localtime());
471 log_debug_errno(r
, "%s doesn't exist yet, watching %s instead.", etc_localtime(), localtime_dir
);
473 r
= sd_event_add_inotify(m
->event
, &new_event
, localtime_dir
,
474 IN_CREATE
|IN_MOVED_TO
|IN_ONLYDIR
, manager_dispatch_timezone_change
, m
);
477 return log_error_errno(r
, "Failed to create timezone change event source: %m");
479 /* Schedule this slightly earlier than the .timer event sources */
480 r
= sd_event_source_set_priority(new_event
, EVENT_PRIORITY_TIME_ZONE
);
482 return log_error_errno(r
, "Failed to set priority of timezone change event sources: %m");
484 sd_event_source_unref(m
->timezone_change_event_source
);
485 m
->timezone_change_event_source
= TAKE_PTR(new_event
);
490 static int manager_enable_special_signals(Manager
*m
) {
491 _cleanup_close_
int fd
= -EBADF
;
495 if (!MANAGER_IS_SYSTEM(m
) || MANAGER_IS_TEST_RUN(m
))
498 /* Enable that we get SIGINT on control-alt-del. In containers this will fail with EPERM (older) or
499 * EINVAL (newer), so ignore that. */
500 if (reboot(RB_DISABLE_CAD
) < 0 && !IN_SET(errno
, EPERM
, EINVAL
))
501 log_warning_errno(errno
, "Failed to enable ctrl-alt-del handling, ignoring: %m");
503 fd
= open_terminal("/dev/tty0", O_RDWR
|O_NOCTTY
|O_CLOEXEC
);
505 /* Support systems without virtual console (ENOENT) gracefully */
506 log_full_errno(fd
== -ENOENT
? LOG_DEBUG
: LOG_WARNING
, fd
, "Failed to open %s, ignoring: %m", "/dev/tty0");
508 /* Enable that we get SIGWINCH on kbrequest */
509 if (ioctl(fd
, KDSIGACCEPT
, SIGWINCH
) < 0)
510 log_warning_errno(errno
, "Failed to enable kbrequest handling, ignoring: %m");
516 static int manager_setup_signals(Manager
*m
) {
517 static const struct sigaction sa
= {
518 .sa_handler
= SIG_DFL
,
519 .sa_flags
= SA_NOCLDSTOP
|SA_RESTART
,
526 assert_se(sigaction(SIGCHLD
, &sa
, NULL
) == 0);
528 /* We make liberal use of realtime signals here. On Linux/glibc we have 30 of them, between
529 * SIGRTMIN+0 ... SIGRTMIN+30 (aka SIGRTMAX). */
531 assert_se(sigemptyset(&mask
) == 0);
532 sigset_add_many(&mask
,
533 SIGCHLD
, /* Child died */
534 SIGTERM
, /* Reexecute daemon */
535 SIGHUP
, /* Reload configuration */
536 SIGUSR1
, /* systemd: reconnect to D-Bus */
537 SIGUSR2
, /* systemd: dump status */
538 SIGINT
, /* Kernel sends us this on control-alt-del */
539 SIGWINCH
, /* Kernel sends us this on kbrequest (alt-arrowup) */
540 SIGPWR
, /* Some kernel drivers and upsd send us this on power failure */
542 SIGRTMIN
+0, /* systemd: start default.target */
543 SIGRTMIN
+1, /* systemd: isolate rescue.target */
544 SIGRTMIN
+2, /* systemd: isolate emergency.target */
545 SIGRTMIN
+3, /* systemd: start halt.target */
546 SIGRTMIN
+4, /* systemd: start poweroff.target */
547 SIGRTMIN
+5, /* systemd: start reboot.target */
548 SIGRTMIN
+6, /* systemd: start kexec.target */
549 SIGRTMIN
+7, /* systemd: start soft-reboot.target */
551 /* ... space for more special targets ... */
553 SIGRTMIN
+13, /* systemd: Immediate halt */
554 SIGRTMIN
+14, /* systemd: Immediate poweroff */
555 SIGRTMIN
+15, /* systemd: Immediate reboot */
556 SIGRTMIN
+16, /* systemd: Immediate kexec */
557 SIGRTMIN
+17, /* systemd: Immediate soft-reboot */
558 SIGRTMIN
+18, /* systemd: control command */
562 SIGRTMIN
+20, /* systemd: enable status messages */
563 SIGRTMIN
+21, /* systemd: disable status messages */
564 SIGRTMIN
+22, /* systemd: set log level to LOG_DEBUG */
565 SIGRTMIN
+23, /* systemd: set log level to LOG_INFO */
566 SIGRTMIN
+24, /* systemd: Immediate exit (--user only) */
567 SIGRTMIN
+25, /* systemd: reexecute manager */
569 SIGRTMIN
+26, /* systemd: set log target to journal-or-kmsg */
570 SIGRTMIN
+27, /* systemd: set log target to console */
571 SIGRTMIN
+28, /* systemd: set log target to kmsg */
572 SIGRTMIN
+29, /* systemd: set log target to syslog-or-kmsg (obsolete) */
574 /* ... one free signal here SIGRTMIN+30 ... */
576 assert_se(sigprocmask(SIG_SETMASK
, &mask
, NULL
) == 0);
578 m
->signal_fd
= signalfd(-1, &mask
, SFD_NONBLOCK
|SFD_CLOEXEC
);
579 if (m
->signal_fd
< 0)
582 r
= sd_event_add_io(m
->event
, &m
->signal_event_source
, m
->signal_fd
, EPOLLIN
, manager_dispatch_signal_fd
, m
);
586 (void) sd_event_source_set_description(m
->signal_event_source
, "manager-signal");
588 /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
589 * notify processing can still figure out to which process/service a message belongs, before we reap the
590 * process. Also, process this before handling cgroup notifications, so that we always collect child exit
591 * status information before detecting that there's no process in a cgroup. */
592 r
= sd_event_source_set_priority(m
->signal_event_source
, EVENT_PRIORITY_SIGNALS
);
596 /* Report to supervisor that we now process the above signals. We report this as level "2", to
597 * indicate that we support more than sysvinit's signals (of course, sysvinit never sent this
598 * message, but conceptually it makes sense to consider level "1" to be equivalent to sysvinit's
599 * signal handling). Also, by setting this to "2" people looking for this hopefully won't
600 * misunderstand this as a boolean concept. Signal level 2 shall refer to the signals PID 1
601 * understands at the time of release of systemd v256, i.e. including basic SIGRTMIN+18 handling for
602 * memory pressure and stuff. When more signals are hooked up (or more SIGRTMIN+18 multiplex
603 * operations added, this level should be increased). */
604 (void) sd_notify(/* unset_environment= */ false,
605 "X_SYSTEMD_SIGNALS_LEVEL=2");
607 return manager_enable_special_signals(m
);
/* Strip the environment variables the manager uses to communicate with its clients,
 * then sort the list alphabetically and return it.
 * NOTE(review): the mangled extraction lost most entries of the unset list; the list
 * below is reconstructed from the upstream function and MUST be verified against the
 * original file — a missing or extra name here changes which variables leak to children. */
static char** sanitize_environment(char **l) {

        /* Let's remove some environment variables that we need ourselves to communicate with our clients */
        strv_env_unset_many(
                        l,
                        "CACHE_DIRECTORY",
                        "CONFIGURATION_DIRECTORY",
                        "CREDENTIALS_DIRECTORY",
                        "EXIT_CODE",
                        "EXIT_STATUS",
                        "INVOCATION_ID",
                        "JOURNAL_STREAM",
                        "LISTEN_FDNAMES",
                        "LISTEN_FDS",
                        "LISTEN_PID",
                        "LOGS_DIRECTORY",
                        "LOG_NAMESPACE",
                        "MAINPID",
                        "MANAGERPID",
                        "MEMORY_PRESSURE_WATCH",
                        "MEMORY_PRESSURE_WRITE",
                        "MONITOR_EXIT_CODE",
                        "MONITOR_EXIT_STATUS",
                        "MONITOR_INVOCATION_ID",
                        "MONITOR_SERVICE_RESULT",
                        "MONITOR_UNIT",
                        "NOTIFY_SOCKET",
                        "PIDFILE",
                        "REMOTE_ADDR",
                        "REMOTE_PORT",
                        "RUNTIME_DIRECTORY",
                        "SERVICE_RESULT",
                        "STATE_DIRECTORY",
                        "SYSTEMD_EXEC_PID",
                        "TRIGGER_PATH",
                        "TRIGGER_TIMER_MONOTONIC_USEC",
                        "TRIGGER_TIMER_REALTIME_USEC",
                        "TRIGGER_UNIT",
                        "WATCHDOG_PID",
                        "WATCHDOG_USEC");

        /* Let's order the environment alphabetically, just to make it pretty */
        return strv_sort(l);
}
655 int manager_default_environment(Manager
*m
) {
658 m
->transient_environment
= strv_free(m
->transient_environment
);
660 if (MANAGER_IS_SYSTEM(m
)) {
661 /* The system manager always starts with a clean environment for its children. It does not
662 * import the kernel's or the parents' exported variables.
664 * The initial passed environment is untouched to keep /proc/self/environ valid; it is used
665 * for tagging the init process inside containers. */
666 char *path
= strjoin("PATH=", default_PATH());
670 if (strv_consume(&m
->transient_environment
, path
) < 0)
673 /* Import locale variables LC_*= from configuration */
674 (void) locale_setup(&m
->transient_environment
);
676 /* The user manager passes its own environment along to its children, except for $PATH and
679 m
->transient_environment
= strv_copy(environ
);
680 if (!m
->transient_environment
)
683 char *path
= strjoin("PATH=", default_user_PATH());
687 if (strv_env_replace_consume(&m
->transient_environment
, path
) < 0)
690 /* Envvars set for our 'manager' class session are private and should not be propagated
691 * to children. Also it's likely that the graphical session will set these on their own. */
692 strv_env_unset_many(m
->transient_environment
,
696 "XDG_SESSION_DESKTOP",
701 sanitize_environment(m
->transient_environment
);
705 static int manager_setup_prefix(Manager
*m
) {
711 static const struct table_entry paths_system
[_EXEC_DIRECTORY_TYPE_MAX
] = {
712 [EXEC_DIRECTORY_RUNTIME
] = { SD_PATH_SYSTEM_RUNTIME
, NULL
},
713 [EXEC_DIRECTORY_STATE
] = { SD_PATH_SYSTEM_STATE_PRIVATE
, NULL
},
714 [EXEC_DIRECTORY_CACHE
] = { SD_PATH_SYSTEM_STATE_CACHE
, NULL
},
715 [EXEC_DIRECTORY_LOGS
] = { SD_PATH_SYSTEM_STATE_LOGS
, NULL
},
716 [EXEC_DIRECTORY_CONFIGURATION
] = { SD_PATH_SYSTEM_CONFIGURATION
, NULL
},
719 static const struct table_entry paths_user
[_EXEC_DIRECTORY_TYPE_MAX
] = {
720 [EXEC_DIRECTORY_RUNTIME
] = { SD_PATH_USER_RUNTIME
, NULL
},
721 [EXEC_DIRECTORY_STATE
] = { SD_PATH_USER_STATE_PRIVATE
, NULL
},
722 [EXEC_DIRECTORY_CACHE
] = { SD_PATH_USER_STATE_CACHE
, NULL
},
723 [EXEC_DIRECTORY_LOGS
] = { SD_PATH_USER_STATE_PRIVATE
, "log" },
724 [EXEC_DIRECTORY_CONFIGURATION
] = { SD_PATH_USER_CONFIGURATION
, NULL
},
729 const struct table_entry
*p
= MANAGER_IS_SYSTEM(m
) ? paths_system
: paths_user
;
732 for (ExecDirectoryType i
= 0; i
< _EXEC_DIRECTORY_TYPE_MAX
; i
++) {
733 r
= sd_path_lookup(p
[i
].type
, p
[i
].suffix
, &m
->prefix
[i
]);
735 return log_warning_errno(r
, "Failed to lookup %s path: %m",
736 exec_directory_type_to_string(i
));
742 static void manager_free_unit_name_maps(Manager
*m
) {
743 m
->unit_id_map
= hashmap_free(m
->unit_id_map
);
744 m
->unit_name_map
= hashmap_free(m
->unit_name_map
);
745 m
->unit_path_cache
= set_free(m
->unit_path_cache
);
746 m
->unit_cache_timestamp_hash
= 0;
749 static int manager_setup_run_queue(Manager
*m
) {
753 assert(!m
->run_queue_event_source
);
755 r
= sd_event_add_defer(m
->event
, &m
->run_queue_event_source
, manager_dispatch_run_queue
, m
);
759 r
= sd_event_source_set_priority(m
->run_queue_event_source
, EVENT_PRIORITY_RUN_QUEUE
);
763 r
= sd_event_source_set_enabled(m
->run_queue_event_source
, SD_EVENT_OFF
);
767 (void) sd_event_source_set_description(m
->run_queue_event_source
, "manager-run-queue");
772 static int manager_setup_sigchld_event_source(Manager
*m
) {
776 assert(!m
->sigchld_event_source
);
778 r
= sd_event_add_defer(m
->event
, &m
->sigchld_event_source
, manager_dispatch_sigchld
, m
);
782 r
= sd_event_source_set_priority(m
->sigchld_event_source
, EVENT_PRIORITY_SIGCHLD
);
786 r
= sd_event_source_set_enabled(m
->sigchld_event_source
, SD_EVENT_OFF
);
790 (void) sd_event_source_set_description(m
->sigchld_event_source
, "manager-sigchld");
795 int manager_setup_memory_pressure_event_source(Manager
*m
) {
800 m
->memory_pressure_event_source
= sd_event_source_disable_unref(m
->memory_pressure_event_source
);
802 r
= sd_event_add_memory_pressure(m
->event
, &m
->memory_pressure_event_source
, NULL
, NULL
);
804 log_full_errno(ERRNO_IS_NOT_SUPPORTED(r
) || ERRNO_IS_PRIVILEGE(r
) || (r
== -EHOSTDOWN
) ? LOG_DEBUG
: LOG_NOTICE
, r
,
805 "Failed to establish memory pressure event source, ignoring: %m");
806 else if (m
->defaults
.memory_pressure_threshold_usec
!= USEC_INFINITY
) {
808 /* If there's a default memory pressure threshold set, also apply it to the service manager itself */
809 r
= sd_event_source_set_memory_pressure_period(
810 m
->memory_pressure_event_source
,
811 m
->defaults
.memory_pressure_threshold_usec
,
812 MEMORY_PRESSURE_DEFAULT_WINDOW_USEC
);
814 log_warning_errno(r
, "Failed to adjust memory pressure threshold, ignoring: %m");
820 static int manager_find_credentials_dirs(Manager
*m
) {
826 r
= get_credentials_dir(&e
);
829 log_debug_errno(r
, "Failed to determine credentials directory, ignoring: %m");
831 m
->received_credentials_directory
= strdup(e
);
832 if (!m
->received_credentials_directory
)
836 r
= get_encrypted_credentials_dir(&e
);
839 log_debug_errno(r
, "Failed to determine encrypted credentials directory, ignoring: %m");
841 m
->received_encrypted_credentials_directory
= strdup(e
);
842 if (!m
->received_encrypted_credentials_directory
)
849 void manager_set_switching_root(Manager
*m
, bool switching_root
) {
852 m
->switching_root
= MANAGER_IS_SYSTEM(m
) && switching_root
;
855 double manager_get_progress(Manager
*m
) {
858 if (MANAGER_IS_FINISHED(m
) || m
->n_installed_jobs
== 0)
861 return 1.0 - ((double) hashmap_size(m
->jobs
) / (double) m
->n_installed_jobs
);
864 static int compare_job_priority(const void *a
, const void *b
) {
865 const Job
*x
= a
, *y
= b
;
867 return unit_compare_priority(x
->unit
, y
->unit
);
870 usec_t
manager_default_timeout(RuntimeScope scope
) {
871 return scope
== RUNTIME_SCOPE_SYSTEM
? DEFAULT_TIMEOUT_USEC
: DEFAULT_USER_TIMEOUT_USEC
;
874 int manager_new(RuntimeScope runtime_scope
, ManagerTestRunFlags test_run_flags
, Manager
**ret
) {
875 _cleanup_(manager_freep
) Manager
*m
= NULL
;
878 assert(IN_SET(runtime_scope
, RUNTIME_SCOPE_SYSTEM
, RUNTIME_SCOPE_USER
));
886 .runtime_scope
= runtime_scope
,
887 .objective
= _MANAGER_OBJECTIVE_INVALID
,
888 .previous_objective
= _MANAGER_OBJECTIVE_INVALID
,
890 .status_unit_format
= STATUS_UNIT_FORMAT_DEFAULT
,
892 .original_log_level
= -1,
893 .original_log_target
= _LOG_TARGET_INVALID
,
895 .watchdog_overridden
[WATCHDOG_RUNTIME
] = USEC_INFINITY
,
896 .watchdog_overridden
[WATCHDOG_REBOOT
] = USEC_INFINITY
,
897 .watchdog_overridden
[WATCHDOG_KEXEC
] = USEC_INFINITY
,
898 .watchdog_overridden
[WATCHDOG_PRETIMEOUT
] = USEC_INFINITY
,
900 .show_status_overridden
= _SHOW_STATUS_INVALID
,
904 .user_lookup_fds
= EBADF_PAIR
,
905 .handoff_timestamp_fds
= EBADF_PAIR
,
906 .pidref_transport_fds
= EBADF_PAIR
,
907 .private_listen_fd
= -EBADF
,
908 .dev_autofs_fd
= -EBADF
,
909 .cgroup_inotify_fd
= -EBADF
,
910 .pin_cgroupfs_fd
= -EBADF
,
911 .idle_pipe
= { -EBADF
, -EBADF
, -EBADF
, -EBADF
},
913 /* start as id #1, so that we can leave #0 around as "null-like" value */
916 .have_ask_password
= -EINVAL
, /* we don't know */
918 .test_run_flags
= test_run_flags
,
920 .dump_ratelimit
= (const RateLimit
) { .interval
= 10 * USEC_PER_MINUTE
, .burst
= 10 },
922 .executor_fd
= -EBADF
,
925 unit_defaults_init(&m
->defaults
, runtime_scope
);
928 if (MANAGER_IS_SYSTEM(m
) && detect_container() <= 0)
929 boot_timestamps(m
->timestamps
+ MANAGER_TIMESTAMP_USERSPACE
,
930 m
->timestamps
+ MANAGER_TIMESTAMP_FIRMWARE
,
931 m
->timestamps
+ MANAGER_TIMESTAMP_LOADER
);
934 /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
935 m
->ctrl_alt_del_ratelimit
= (const RateLimit
) { .interval
= 2 * USEC_PER_SEC
, .burst
= 7 };
937 r
= manager_default_environment(m
);
941 r
= hashmap_ensure_allocated(&m
->units
, &string_hash_ops
);
945 r
= hashmap_ensure_allocated(&m
->cgroup_unit
, &path_hash_ops
);
949 r
= hashmap_ensure_allocated(&m
->watch_bus
, &string_hash_ops
);
953 r
= prioq_ensure_allocated(&m
->run_queue
, compare_job_priority
);
957 r
= manager_setup_prefix(m
);
961 r
= manager_find_credentials_dirs(m
);
965 r
= sd_event_default(&m
->event
);
969 r
= manager_setup_run_queue(m
);
973 if (FLAGS_SET(test_run_flags
, MANAGER_TEST_RUN_MINIMAL
)) {
974 m
->cgroup_root
= strdup("");
978 r
= manager_setup_signals(m
);
982 r
= manager_setup_cgroup(m
);
986 r
= manager_setup_time_change(m
);
990 r
= manager_read_timezone_stat(m
);
994 (void) manager_setup_timezone_change(m
);
996 r
= manager_setup_sigchld_event_source(m
);
1000 r
= manager_setup_memory_pressure_event_source(m
);
1005 if (MANAGER_IS_SYSTEM(m
) && bpf_restrict_fs_supported(/* initialize = */ true)) {
1006 r
= bpf_restrict_fs_setup(m
);
1008 log_warning_errno(r
, "Failed to setup LSM BPF, ignoring: %m");
1013 if (test_run_flags
== 0) {
1014 if (MANAGER_IS_SYSTEM(m
))
1015 r
= mkdir_label("/run/systemd/units", 0755);
1017 _cleanup_free_
char *units_path
= NULL
;
1018 r
= xdg_user_runtime_dir("/systemd/units", &units_path
);
1022 r
= mkdir_label(units_path
, 0755);
1024 if (r
< 0 && r
!= -EEXIST
)
1028 if (!FLAGS_SET(test_run_flags
, MANAGER_TEST_DONT_OPEN_EXECUTOR
)) {
1029 m
->executor_fd
= pin_callout_binary(SYSTEMD_EXECUTOR_BINARY_PATH
, &m
->executor_path
);
1030 if (m
->executor_fd
< 0)
1031 return log_debug_errno(m
->executor_fd
, "Failed to pin executor binary: %m");
1033 log_debug("Using systemd-executor binary from '%s'.", m
->executor_path
);
1036 /* Note that we do not set up the notify fd here. We do that after deserialization,
1037 * since they might have gotten serialized across the reexec. */
1044 static int manager_setup_notify(Manager
*m
) {
1047 if (MANAGER_IS_TEST_RUN(m
))
1050 if (m
->notify_fd
< 0) {
1051 _cleanup_close_
int fd
= -EBADF
;
1052 union sockaddr_union sa
;
1055 /* First free all secondary fields */
1056 m
->notify_socket
= mfree(m
->notify_socket
);
1057 m
->notify_event_source
= sd_event_source_disable_unref(m
->notify_event_source
);
1059 fd
= socket(AF_UNIX
, SOCK_DGRAM
|SOCK_CLOEXEC
|SOCK_NONBLOCK
, 0);
1061 return log_error_errno(errno
, "Failed to allocate notification socket: %m");
1063 (void) fd_increase_rxbuf(fd
, MANAGER_SOCKET_RCVBUF_SIZE
);
1065 m
->notify_socket
= path_join(m
->prefix
[EXEC_DIRECTORY_RUNTIME
], "systemd/notify");
1066 if (!m
->notify_socket
)
1069 r
= sockaddr_un_set_path(&sa
.un
, m
->notify_socket
);
1071 return log_error_errno(r
, "Notify socket '%s' not valid for AF_UNIX socket address, refusing.",
1075 (void) sockaddr_un_unlink(&sa
.un
);
1077 r
= mac_selinux_bind(fd
, &sa
.sa
, sa_len
);
1079 return log_error_errno(r
, "Failed to bind notify fd to '%s': %m", m
->notify_socket
);
1081 r
= setsockopt_int(fd
, SOL_SOCKET
, SO_PASSCRED
, true);
1083 return log_error_errno(r
, "Failed to enable SO_PASSCRED for notify socket: %m");
1085 // TODO: enforce SO_PASSPIDFD when our baseline of the kernel version is bumped to >= 6.5.
1086 r
= setsockopt_int(fd
, SOL_SOCKET
, SO_PASSPIDFD
, true);
1087 if (r
< 0 && r
!= -ENOPROTOOPT
)
1088 log_warning_errno(r
, "Failed to enable SO_PASSPIDFD for notify socket, ignoring: %m");
1090 m
->notify_fd
= TAKE_FD(fd
);
1092 log_debug("Using notification socket %s", m
->notify_socket
);
1095 if (!m
->notify_event_source
) {
1096 r
= sd_event_add_io(m
->event
, &m
->notify_event_source
, m
->notify_fd
, EPOLLIN
, manager_dispatch_notify_fd
, m
);
1098 return log_error_errno(r
, "Failed to allocate notify event source: %m");
1100 /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
1101 * service an exit message belongs. */
1102 r
= sd_event_source_set_priority(m
->notify_event_source
, EVENT_PRIORITY_NOTIFY
);
1104 return log_error_errno(r
, "Failed to set priority of notify event source: %m");
1106 (void) sd_event_source_set_description(m
->notify_event_source
, "manager-notify");
1112 static int manager_setup_user_lookup_fd(Manager
*m
) {
1117 /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
1118 * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
1119 * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
1120 * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
1121 * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
1122 * hence we establish this communication channel so that forked off processes can pass their UID/GID
1123 * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
1124 * datagram, along with their unit name, so that we can share one communication socket pair among all units for
1127 * You might wonder why we need a communication channel for this that is independent of the usual notification
1128 * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
1129 * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
1130 * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
1132 * Note that this function is called under two circumstances: when we first initialize (in which case we
1133 * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
1134 * (in which case the socket pair already exists but we still need to allocate the event source for it). */
1136 if (m
->user_lookup_fds
[0] < 0) {
1138 /* Free all secondary fields */
1139 safe_close_pair(m
->user_lookup_fds
);
1140 m
->user_lookup_event_source
= sd_event_source_disable_unref(m
->user_lookup_event_source
);
1142 if (socketpair(AF_UNIX
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, m
->user_lookup_fds
) < 0)
1143 return log_error_errno(errno
, "Failed to allocate user lookup socket: %m");
1145 r
= setsockopt_int(m
->user_lookup_fds
[0], SOL_SOCKET
, SO_PASSRIGHTS
, false);
1146 if (r
< 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r
))
1147 log_warning_errno(r
, "Failed to turn off SO_PASSRIGHTS on user lookup socket, ignoring: %m");
1149 (void) fd_increase_rxbuf(m
->user_lookup_fds
[0], MANAGER_SOCKET_RCVBUF_SIZE
);
1152 if (!m
->user_lookup_event_source
) {
1153 r
= sd_event_add_io(m
->event
, &m
->user_lookup_event_source
, m
->user_lookup_fds
[0], EPOLLIN
, manager_dispatch_user_lookup_fd
, m
);
1155 return log_error_errno(r
, "Failed to allocate user lookup event source: %m");
1157 /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
1159 r
= sd_event_source_set_priority(m
->user_lookup_event_source
, EVENT_PRIORITY_USER_LOOKUP
);
1161 return log_error_errno(r
, "Failed to set priority of user lookup event source: %m");
1163 (void) sd_event_source_set_description(m
->user_lookup_event_source
, "user-lookup");
1169 static int manager_setup_handoff_timestamp_fd(Manager
*m
) {
1174 /* Set up the socket pair used for passing timestamps back when the executor processes we fork
1175 * off invokes execve(), i.e. when we hand off control to our payload processes. */
1177 if (m
->handoff_timestamp_fds
[0] < 0) {
1178 m
->handoff_timestamp_event_source
= sd_event_source_disable_unref(m
->handoff_timestamp_event_source
);
1179 safe_close_pair(m
->handoff_timestamp_fds
);
1181 if (socketpair(AF_UNIX
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, m
->handoff_timestamp_fds
) < 0)
1182 return log_error_errno(errno
, "Failed to allocate handoff timestamp socket: %m");
1184 /* Make sure children never have to block */
1185 (void) fd_increase_rxbuf(m
->handoff_timestamp_fds
[0], MANAGER_SOCKET_RCVBUF_SIZE
);
1187 r
= setsockopt_int(m
->handoff_timestamp_fds
[0], SOL_SOCKET
, SO_PASSCRED
, true);
1189 return log_error_errno(r
, "Failed to enable SO_PASSCRED on handoff timestamp socket: %m");
1191 r
= setsockopt_int(m
->handoff_timestamp_fds
[0], SOL_SOCKET
, SO_PASSRIGHTS
, false);
1192 if (r
< 0 && !ERRNO_IS_NEG_NOT_SUPPORTED(r
))
1193 log_warning_errno(r
, "Failed to turn off SO_PASSRIGHTS on handoff timestamp socket, ignoring: %m");
1195 /* Mark the receiving socket as O_NONBLOCK (but leave sending side as-is) */
1196 r
= fd_nonblock(m
->handoff_timestamp_fds
[0], true);
1198 return log_error_errno(r
, "Failed to make handoff timestamp socket O_NONBLOCK: %m");
1201 if (!m
->handoff_timestamp_event_source
) {
1202 r
= sd_event_add_io(m
->event
, &m
->handoff_timestamp_event_source
, m
->handoff_timestamp_fds
[0], EPOLLIN
, manager_dispatch_handoff_timestamp_fd
, m
);
1204 return log_error_errno(r
, "Failed to allocate handoff timestamp event source: %m");
1206 r
= sd_event_source_set_priority(m
->handoff_timestamp_event_source
, EVENT_PRIORITY_HANDOFF_TIMESTAMP
);
1208 return log_error_errno(r
, "Failed to set priority of handoff timestamp event source: %m");
1210 (void) sd_event_source_set_description(m
->handoff_timestamp_event_source
, "handoff-timestamp");
1216 static int manager_setup_pidref_transport_fd(Manager
*m
) {
1221 /* Set up the socket pair used for passing parent and child pidrefs back when the executor unshares
1222 * a PID namespace and forks again when using PrivatePIDs=yes. */
1224 if (m
->pidref_transport_fds
[0] < 0) {
1225 m
->pidref_event_source
= sd_event_source_disable_unref(m
->pidref_event_source
);
1226 safe_close_pair(m
->pidref_transport_fds
);
1228 if (socketpair(AF_UNIX
, SOCK_DGRAM
|SOCK_CLOEXEC
, 0, m
->pidref_transport_fds
) < 0)
1229 return log_error_errno(errno
, "Failed to allocate pidref socket: %m");
1231 /* Make sure children never have to block */
1232 (void) fd_increase_rxbuf(m
->pidref_transport_fds
[0], MANAGER_SOCKET_RCVBUF_SIZE
);
1234 r
= setsockopt_int(m
->pidref_transport_fds
[0], SOL_SOCKET
, SO_PASSCRED
, true);
1236 return log_error_errno(r
, "Failed to enable SO_PASSCRED for pidref socket: %m");
1238 r
= setsockopt_int(m
->pidref_transport_fds
[0], SOL_SOCKET
, SO_PASSPIDFD
, true);
1239 if (ERRNO_IS_NEG_NOT_SUPPORTED(r
))
1240 log_debug_errno(r
, "SO_PASSPIDFD is not supported for pidref socket, ignoring.");
1242 log_warning_errno(r
, "Failed to enable SO_PASSPIDFD for pidref socket, ignoring: %m");
1244 /* Mark the receiving socket as O_NONBLOCK (but leave sending side as-is) */
1245 r
= fd_nonblock(m
->pidref_transport_fds
[0], true);
1247 return log_error_errno(r
, "Failed to make pidref socket O_NONBLOCK: %m");
1250 if (!m
->pidref_event_source
) {
1251 r
= sd_event_add_io(m
->event
, &m
->pidref_event_source
, m
->pidref_transport_fds
[0], EPOLLIN
, manager_dispatch_pidref_transport_fd
, m
);
1253 return log_error_errno(r
, "Failed to allocate pidref event source: %m");
1255 r
= sd_event_source_set_priority(m
->pidref_event_source
, EVENT_PRIORITY_PIDREF
);
1257 return log_error_errno(r
, "Failed to set priority of pidref event source: %m");
1259 (void) sd_event_source_set_description(m
->pidref_event_source
, "pidref");
1265 static unsigned manager_dispatch_cleanup_queue(Manager
*m
) {
1271 while ((u
= m
->cleanup_queue
)) {
1272 assert(u
->in_cleanup_queue
);
1281 static unsigned manager_dispatch_release_resources_queue(Manager
*m
) {
1287 while ((u
= LIST_POP(release_resources_queue
, m
->release_resources_queue
))) {
1288 assert(u
->in_release_resources_queue
);
1289 u
->in_release_resources_queue
= false;
1293 unit_release_resources(u
);
/* Per-sweep GC mark states; a unit's absolute marker is m->gc_marker + one of these offsets.
 * NOTE(review): enum header/terminator not visible in this extraction — _GC_OFFSET_MAX is referenced by
 * manager_dispatch_gc_unit_queue() below, so it must exist; confirm exact declaration against upstream. */
enum {
        GC_OFFSET_IN_PATH,  /* This one is on the path we were traveling */
        GC_OFFSET_UNSURE,   /* No clue */
        GC_OFFSET_GOOD,     /* We still need this unit */
        GC_OFFSET_BAD,      /* We don't need this unit anymore */
        _GC_OFFSET_MAX
};
1307 static void unit_gc_mark_good(Unit
*u
, unsigned gc_marker
) {
1310 u
->gc_marker
= gc_marker
+ GC_OFFSET_GOOD
;
1312 /* Recursively mark referenced units as GOOD as well */
1313 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_REFERENCES
)
1314 if (other
->gc_marker
== gc_marker
+ GC_OFFSET_UNSURE
)
1315 unit_gc_mark_good(other
, gc_marker
);
1318 static void unit_gc_sweep(Unit
*u
, unsigned gc_marker
) {
1324 if (IN_SET(u
->gc_marker
- gc_marker
,
1325 GC_OFFSET_GOOD
, GC_OFFSET_BAD
, GC_OFFSET_UNSURE
, GC_OFFSET_IN_PATH
))
1328 if (u
->in_cleanup_queue
)
1331 if (!unit_may_gc(u
))
1334 u
->gc_marker
= gc_marker
+ GC_OFFSET_IN_PATH
;
1338 UNIT_FOREACH_DEPENDENCY(other
, u
, UNIT_ATOM_REFERENCED_BY
) {
1339 unit_gc_sweep(other
, gc_marker
);
1341 if (other
->gc_marker
== gc_marker
+ GC_OFFSET_GOOD
)
1344 if (other
->gc_marker
!= gc_marker
+ GC_OFFSET_BAD
)
1348 LIST_FOREACH(refs_by_target
, ref
, u
->refs_by_target
) {
1349 unit_gc_sweep(ref
->source
, gc_marker
);
1351 if (ref
->source
->gc_marker
== gc_marker
+ GC_OFFSET_GOOD
)
1354 if (ref
->source
->gc_marker
!= gc_marker
+ GC_OFFSET_BAD
)
1361 /* We were unable to find anything out about this entry, so
1362 * let's investigate it later */
1363 u
->gc_marker
= gc_marker
+ GC_OFFSET_UNSURE
;
1364 unit_add_to_gc_queue(u
);
1368 /* We definitely know that this one is not useful anymore, so
1369 * let's mark it for deletion */
1370 u
->gc_marker
= gc_marker
+ GC_OFFSET_BAD
;
1371 unit_add_to_cleanup_queue(u
);
1375 unit_gc_mark_good(u
, gc_marker
);
1378 static unsigned manager_dispatch_gc_unit_queue(Manager
*m
) {
1379 unsigned n
= 0, gc_marker
;
1383 /* log_debug("Running GC..."); */
1385 m
->gc_marker
+= _GC_OFFSET_MAX
;
1386 if (m
->gc_marker
+ _GC_OFFSET_MAX
<= _GC_OFFSET_MAX
)
1389 gc_marker
= m
->gc_marker
;
1392 while ((u
= m
->gc_unit_queue
)) {
1393 assert(u
->in_gc_queue
);
1395 unit_gc_sweep(u
, gc_marker
);
1397 LIST_REMOVE(gc_queue
, m
->gc_unit_queue
, u
);
1398 u
->in_gc_queue
= false;
1402 if (IN_SET(u
->gc_marker
- gc_marker
,
1403 GC_OFFSET_BAD
, GC_OFFSET_UNSURE
)) {
1405 log_unit_debug(u
, "Collecting.");
1406 u
->gc_marker
= gc_marker
+ GC_OFFSET_BAD
;
1407 unit_add_to_cleanup_queue(u
);
1414 static unsigned manager_dispatch_gc_job_queue(Manager
*m
) {
1420 while ((j
= LIST_POP(gc_queue
, m
->gc_job_queue
))) {
1421 assert(j
->in_gc_queue
);
1422 j
->in_gc_queue
= false;
1429 log_unit_debug(j
->unit
, "Collecting job.");
1430 (void) job_finish_and_invalidate(j
, JOB_COLLECTED
, false, false);
1436 static int manager_ratelimit_requeue(sd_event_source
*s
, uint64_t usec
, void *userdata
) {
1440 assert(s
== u
->auto_start_stop_event_source
);
1442 u
->auto_start_stop_event_source
= sd_event_source_unref(u
->auto_start_stop_event_source
);
1444 /* Re-queue to all queues, if the rate limit hit we might have been throttled on any of them. */
1445 unit_submit_to_stop_when_unneeded_queue(u
);
1446 unit_submit_to_start_when_upheld_queue(u
);
1447 unit_submit_to_stop_when_bound_queue(u
);
1452 static int manager_ratelimit_check_and_queue(Unit
*u
) {
1457 if (ratelimit_below(&u
->auto_start_stop_ratelimit
))
1460 /* Already queued, no need to requeue */
1461 if (u
->auto_start_stop_event_source
)
1464 r
= sd_event_add_time(
1466 &u
->auto_start_stop_event_source
,
1468 ratelimit_end(&u
->auto_start_stop_ratelimit
),
1470 manager_ratelimit_requeue
,
1473 return log_unit_error_errno(u
, r
, "Failed to queue timer on event loop: %m");
1478 static unsigned manager_dispatch_stop_when_unneeded_queue(Manager
*m
) {
1485 while ((u
= LIST_POP(stop_when_unneeded_queue
, m
->stop_when_unneeded_queue
))) {
1486 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1488 assert(u
->in_stop_when_unneeded_queue
);
1489 u
->in_stop_when_unneeded_queue
= false;
1493 if (!unit_is_unneeded(u
))
1496 log_unit_debug(u
, "Unit is not needed anymore.");
1498 /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
1499 * service being unnecessary after a while. */
1501 r
= manager_ratelimit_check_and_queue(u
);
1504 "Unit not needed anymore, but not stopping since we tried this too often recently.%s",
1505 r
== 0 ? " Will retry later." : "");
1509 /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
1510 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_FAIL
, &error
, /* ret = */ NULL
);
1512 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1518 static unsigned manager_dispatch_start_when_upheld_queue(Manager
*m
) {
1525 while ((u
= LIST_POP(start_when_upheld_queue
, m
->start_when_upheld_queue
))) {
1526 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1527 Unit
*culprit
= NULL
;
1529 assert(u
->in_start_when_upheld_queue
);
1530 u
->in_start_when_upheld_queue
= false;
1534 if (!unit_is_upheld_by_active(u
, &culprit
))
1537 log_unit_debug(u
, "Unit is started because upheld by active unit %s.", culprit
->id
);
1539 /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
1540 * service being unnecessary after a while. */
1542 r
= manager_ratelimit_check_and_queue(u
);
1545 "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.%s",
1547 r
== 0 ? " Will retry later." : "");
1551 r
= manager_add_job(u
->manager
, JOB_START
, u
, JOB_FAIL
, &error
, /* ret = */ NULL
);
1553 log_unit_warning_errno(u
, r
, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error
, r
));
1559 static unsigned manager_dispatch_stop_when_bound_queue(Manager
*m
) {
1566 while ((u
= LIST_POP(stop_when_bound_queue
, m
->stop_when_bound_queue
))) {
1567 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
1568 Unit
*culprit
= NULL
;
1570 assert(u
->in_stop_when_bound_queue
);
1571 u
->in_stop_when_bound_queue
= false;
1575 if (!unit_is_bound_by_inactive(u
, &culprit
))
1578 log_unit_debug(u
, "Unit is stopped because bound to inactive unit %s.", culprit
->id
);
1580 /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
1581 * service being unnecessary after a while. */
1583 r
= manager_ratelimit_check_and_queue(u
);
1586 "Unit needs to be stopped because it is bound to inactive unit %s it, but not stopping since we tried this too often recently.%s",
1588 r
== 0 ? " Will retry later." : "");
1592 r
= manager_add_job(u
->manager
, JOB_STOP
, u
, JOB_REPLACE
, &error
, /* ret = */ NULL
);
1594 log_unit_warning_errno(u
, r
, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error
, r
));
1600 static unsigned manager_dispatch_stop_notify_queue(Manager
*m
) {
1605 if (m
->may_dispatch_stop_notify_queue
< 0)
1606 m
->may_dispatch_stop_notify_queue
= hashmap_isempty(m
->jobs
);
1608 if (!m
->may_dispatch_stop_notify_queue
)
1611 m
->may_dispatch_stop_notify_queue
= false;
1613 LIST_FOREACH(stop_notify_queue
, u
, m
->stop_notify_queue
) {
1614 assert(u
->in_stop_notify_queue
);
1616 assert(UNIT_VTABLE(u
)->stop_notify
);
1617 if (UNIT_VTABLE(u
)->stop_notify(u
)) {
1618 assert(!u
->in_stop_notify_queue
);
1626 static void manager_clear_jobs_and_units(Manager
*m
) {
1631 while ((u
= hashmap_first(m
->units
)))
1634 manager_dispatch_cleanup_queue(m
);
1636 assert(!m
->load_queue
);
1637 assert(prioq_isempty(m
->run_queue
));
1638 assert(!m
->dbus_unit_queue
);
1639 assert(!m
->dbus_job_queue
);
1640 assert(!m
->cleanup_queue
);
1641 assert(!m
->gc_unit_queue
);
1642 assert(!m
->gc_job_queue
);
1643 assert(!m
->cgroup_realize_queue
);
1644 assert(!m
->cgroup_empty_queue
);
1645 assert(!m
->cgroup_oom_queue
);
1646 assert(!m
->target_deps_queue
);
1647 assert(!m
->stop_when_unneeded_queue
);
1648 assert(!m
->start_when_upheld_queue
);
1649 assert(!m
->stop_when_bound_queue
);
1650 assert(!m
->release_resources_queue
);
1652 assert(hashmap_isempty(m
->jobs
));
1653 assert(hashmap_isempty(m
->units
));
1654 assert(hashmap_isempty(m
->units_by_invocation_id
));
1656 m
->n_on_console
= 0;
1657 m
->n_running_jobs
= 0;
1658 m
->n_installed_jobs
= 0;
1659 m
->n_failed_jobs
= 0;
1662 Manager
* manager_free(Manager
*m
) {
1666 manager_clear_jobs_and_units(m
);
1668 for (UnitType c
= 0; c
< _UNIT_TYPE_MAX
; c
++)
1669 if (unit_vtable
[c
]->shutdown
)
1670 unit_vtable
[c
]->shutdown(m
);
1672 /* Keep the cgroup hierarchy in place except when we know we are going down for good */
1673 manager_shutdown_cgroup(m
, /* delete= */ IN_SET(m
->objective
, MANAGER_EXIT
, MANAGER_REBOOT
, MANAGER_POWEROFF
, MANAGER_HALT
, MANAGER_KEXEC
));
1675 lookup_paths_flush_generator(&m
->lookup_paths
);
1678 manager_varlink_done(m
);
1680 exec_shared_runtime_vacuum(m
);
1681 hashmap_free(m
->exec_shared_runtime_by_id
);
1683 dynamic_user_vacuum(m
, false);
1684 hashmap_free(m
->dynamic_users
);
1686 hashmap_free(m
->units
);
1687 hashmap_free(m
->units_by_invocation_id
);
1688 hashmap_free(m
->jobs
);
1689 hashmap_free(m
->watch_pids
);
1690 hashmap_free(m
->watch_pids_more
);
1691 hashmap_free(m
->watch_bus
);
1693 prioq_free(m
->run_queue
);
1695 set_free(m
->startup_units
);
1696 set_free(m
->failed_units
);
1698 sd_event_source_unref(m
->signal_event_source
);
1699 sd_event_source_unref(m
->sigchld_event_source
);
1700 sd_event_source_unref(m
->notify_event_source
);
1701 sd_event_source_unref(m
->time_change_event_source
);
1702 sd_event_source_unref(m
->timezone_change_event_source
);
1703 sd_event_source_unref(m
->jobs_in_progress_event_source
);
1704 sd_event_source_unref(m
->run_queue_event_source
);
1705 sd_event_source_unref(m
->user_lookup_event_source
);
1706 sd_event_source_unref(m
->handoff_timestamp_event_source
);
1707 sd_event_source_unref(m
->pidref_event_source
);
1708 sd_event_source_unref(m
->memory_pressure_event_source
);
1710 safe_close(m
->signal_fd
);
1711 safe_close(m
->notify_fd
);
1712 safe_close_pair(m
->user_lookup_fds
);
1713 safe_close_pair(m
->handoff_timestamp_fds
);
1714 safe_close_pair(m
->pidref_transport_fds
);
1716 manager_close_ask_password(m
);
1718 manager_close_idle_pipe(m
);
1720 sd_event_unref(m
->event
);
1722 free(m
->notify_socket
);
1724 lookup_paths_done(&m
->lookup_paths
);
1725 strv_free(m
->transient_environment
);
1726 strv_free(m
->client_environment
);
1728 hashmap_free(m
->cgroup_unit
);
1729 manager_free_unit_name_maps(m
);
1731 free(m
->switch_root
);
1732 free(m
->switch_root_init
);
1734 sd_bus_track_unref(m
->subscribed
);
1735 strv_free(m
->subscribed_as_strv
);
1737 unit_defaults_done(&m
->defaults
);
1739 FOREACH_ARRAY(map
, m
->units_needing_mounts_for
, _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
) {
1740 assert(hashmap_isempty(*map
));
1744 hashmap_free(m
->uid_refs
);
1745 hashmap_free(m
->gid_refs
);
1747 FOREACH_ARRAY(i
, m
->prefix
, _EXEC_DIRECTORY_TYPE_MAX
)
1750 free(m
->received_credentials_directory
);
1751 free(m
->received_encrypted_credentials_directory
);
1753 free(m
->watchdog_pretimeout_governor
);
1754 free(m
->watchdog_pretimeout_governor_overridden
);
1756 fw_ctx_free(m
->fw_ctx
);
1759 bpf_restrict_fs_destroy(m
->restrict_fs
);
1762 safe_close(m
->executor_fd
);
1763 free(m
->executor_path
);
1768 static void manager_enumerate_perpetual(Manager
*m
) {
1771 if (FLAGS_SET(m
->test_run_flags
, MANAGER_TEST_RUN_MINIMAL
))
1774 /* Let's ask every type to load all units from disk/kernel that it might know */
1775 for (UnitType c
= 0; c
< _UNIT_TYPE_MAX
; c
++) {
1776 if (!unit_type_supported(c
)) {
1777 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c
));
1781 if (unit_vtable
[c
]->enumerate_perpetual
)
1782 unit_vtable
[c
]->enumerate_perpetual(m
);
1786 static void manager_enumerate(Manager
*m
) {
1789 if (FLAGS_SET(m
->test_run_flags
, MANAGER_TEST_RUN_MINIMAL
))
1792 /* Let's ask every type to load all units from disk/kernel that it might know */
1793 for (UnitType c
= 0; c
< _UNIT_TYPE_MAX
; c
++) {
1794 if (!unit_type_supported(c
)) {
1795 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c
));
1799 if (unit_vtable
[c
]->enumerate
)
1800 unit_vtable
[c
]->enumerate(m
);
1803 manager_dispatch_load_queue(m
);
1806 static void manager_coldplug(Manager
*m
) {
1813 log_debug("Invoking unit coldplug() handlers%s", glyph(GLYPH_ELLIPSIS
));
1815 /* Let's place the units back into their deserialized state */
1816 HASHMAP_FOREACH_KEY(u
, k
, m
->units
) {
1818 /* ignore aliases */
1822 r
= unit_coldplug(u
);
1824 log_warning_errno(r
, "We couldn't coldplug %s, proceeding anyway: %m", u
->id
);
1828 static void manager_catchup(Manager
*m
) {
1834 log_debug("Invoking unit catchup() handlers%s", glyph(GLYPH_ELLIPSIS
));
1836 /* Let's catch up on any state changes that happened while we were reloading/reexecing */
1837 HASHMAP_FOREACH_KEY(u
, k
, m
->units
) {
1839 /* ignore aliases */
1847 static void manager_distribute_fds(Manager
*m
, FDSet
*fds
) {
1852 HASHMAP_FOREACH(u
, m
->units
) {
1854 if (fdset_isempty(fds
))
1857 if (!UNIT_VTABLE(u
)->distribute_fds
)
1860 UNIT_VTABLE(u
)->distribute_fds(u
, fds
);
1864 static bool manager_dbus_is_running(Manager
*m
, bool deserialized
) {
1869 /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket
1870 * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit
1871 * rather than the current one. */
1873 if (MANAGER_IS_TEST_RUN(m
))
1876 u
= manager_get_unit(m
, SPECIAL_DBUS_SOCKET
);
1879 if ((deserialized
? SOCKET(u
)->deserialized_state
: SOCKET(u
)->state
) != SOCKET_RUNNING
)
1882 u
= manager_get_unit(m
, SPECIAL_DBUS_SERVICE
);
1885 if (!IN_SET((deserialized
? SERVICE(u
)->deserialized_state
: SERVICE(u
)->state
),
1889 SERVICE_RELOAD_NOTIFY
,
1890 SERVICE_REFRESH_EXTENSIONS
,
1891 SERVICE_RELOAD_SIGNAL
))
1897 static void manager_setup_bus(Manager
*m
) {
1900 if (MANAGER_IS_TEST_RUN(m
))
1903 /* Let's set up our private bus connection now, unconditionally */
1904 (void) bus_init_private(m
);
1906 /* If we are in --user mode also connect to the system bus now */
1907 if (MANAGER_IS_USER(m
))
1908 (void) bus_init_system(m
);
1910 /* Let's connect to the bus now, but only if the unit is supposed to be up */
1911 if (manager_dbus_is_running(m
, MANAGER_IS_RELOADING(m
))) {
1912 (void) bus_init_api(m
);
1914 if (MANAGER_IS_SYSTEM(m
))
1915 (void) bus_init_system(m
);
1919 static void manager_preset_all(Manager
*m
) {
1924 if (m
->first_boot
<= 0)
1927 if (!MANAGER_IS_SYSTEM(m
))
1930 if (MANAGER_IS_TEST_RUN(m
))
1933 /* If this is the first boot, and we are in the host system, then preset everything */
1934 UnitFilePresetMode mode
=
1935 ENABLE_FIRST_BOOT_FULL_PRESET
? UNIT_FILE_PRESET_FULL
: UNIT_FILE_PRESET_ENABLE_ONLY
;
1936 InstallChange
*changes
= NULL
;
1937 size_t n_changes
= 0;
1939 CLEANUP_ARRAY(changes
, n_changes
, install_changes_free
);
1941 log_info("Applying preset policy.");
1942 r
= unit_file_preset_all(RUNTIME_SCOPE_SYSTEM
, /* file_flags = */ 0,
1943 /* root_dir = */ NULL
, mode
, &changes
, &n_changes
);
1944 install_changes_dump(r
, "preset", changes
, n_changes
, /* quiet = */ false);
1946 log_full_errno(r
== -EEXIST
? LOG_NOTICE
: LOG_WARNING
, r
,
1947 "Failed to populate /etc with preset unit settings, ignoring: %m");
1949 log_info("Populated /etc with preset unit settings.");
1952 static void manager_ready(Manager
*m
) {
1955 /* After having loaded everything, do the final round of catching up with what might have changed */
1957 m
->objective
= MANAGER_OK
; /* Tell everyone we are up now */
1959 /* It might be safe to log to the journal now and connect to dbus */
1960 manager_recheck_journal(m
);
1961 manager_recheck_dbus(m
);
1963 /* Let's finally catch up with any changes that took place while we were reloading/reexecing */
1966 /* Create a file which will indicate when the manager started loading units the last time. */
1967 if (MANAGER_IS_SYSTEM(m
))
1968 (void) touch_file("/run/systemd/systemd-units-load", false,
1969 m
->timestamps
[MANAGER_TIMESTAMP_UNITS_LOAD
].realtime
?: now(CLOCK_REALTIME
),
1970 UID_INVALID
, GID_INVALID
, 0444);
1973 Manager
* manager_reloading_start(Manager
*m
) {
1975 dual_timestamp_now(m
->timestamps
+ MANAGER_TIMESTAMP_UNITS_LOAD
);
1979 void manager_reloading_stopp(Manager
**m
) {
1981 assert((*m
)->n_reloading
> 0);
1982 (*m
)->n_reloading
--;
1986 static int manager_make_runtime_dir(Manager
*m
) {
1991 _cleanup_free_
char *d
= path_join(m
->prefix
[EXEC_DIRECTORY_RUNTIME
], "systemd");
1995 r
= mkdir_label(d
, 0755);
1996 if (r
< 0 && r
!= -EEXIST
)
1997 return log_error_errno(r
, "Failed to create directory '%s/': %m", d
);
2002 int manager_startup(Manager
*m
, FILE *serialization
, FDSet
*fds
, const char *root
) {
2007 r
= manager_make_runtime_dir(m
);
2011 /* If we are running in test mode, we still want to run the generators,
2012 * but we should not touch the real generator directories. */
2013 r
= lookup_paths_init_or_warn(&m
->lookup_paths
, m
->runtime_scope
,
2014 MANAGER_IS_TEST_RUN(m
) ? LOOKUP_PATHS_TEMPORARY_GENERATED
: 0,
2019 dual_timestamp_now(m
->timestamps
+ manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_START
));
2020 r
= manager_run_environment_generators(m
);
2022 r
= manager_run_generators(m
);
2023 dual_timestamp_now(m
->timestamps
+ manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_FINISH
));
2027 manager_preset_all(m
);
2029 lookup_paths_log(&m
->lookup_paths
);
2032 /* This block is (optionally) done with the reloading counter bumped */
2033 _unused_
_cleanup_(manager_reloading_stopp
) Manager
*reloading
= NULL
;
2035 /* Make sure we don't have a left-over from a previous run */
2037 (void) rm_rf(m
->lookup_paths
.transient
, 0);
2039 /* If we will deserialize make sure that during enumeration this is already known, so we increase the
2040 * counter here already */
2042 reloading
= manager_reloading_start(m
);
2044 /* First, enumerate what we can from all config files */
2045 dual_timestamp_now(m
->timestamps
+ manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_START
));
2046 manager_enumerate_perpetual(m
);
2047 manager_enumerate(m
);
2048 dual_timestamp_now(m
->timestamps
+ manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_FINISH
));
2050 /* Second, deserialize if there is something to deserialize */
2051 if (serialization
) {
2052 r
= manager_deserialize(m
, serialization
, fds
);
2054 return log_error_errno(r
, "Deserialization failed: %m");
2057 if (m
->previous_objective
>= 0) {
2058 if (IN_SET(m
->previous_objective
, MANAGER_REEXECUTE
, MANAGER_SOFT_REBOOT
, MANAGER_SWITCH_ROOT
))
2059 log_debug("Launching as effect of a '%s' operation.",
2060 manager_objective_to_string(m
->previous_objective
));
2062 log_warning("Got unexpected previous objective '%s', ignoring.",
2063 manager_objective_to_string(m
->previous_objective
));
2066 /* If we are in a new soft-reboot iteration bump the counter now before starting units, so
2067 * that they can reliably read it. We get the previous objective from serialized state. */
2068 if (m
->previous_objective
== MANAGER_SOFT_REBOOT
)
2069 m
->soft_reboots_count
++;
2071 /* Any fds left? Find some unit which wants them. This is useful to allow container managers to pass
2072 * some file descriptors to us pre-initialized. This enables socket-based activation of entire
2074 manager_distribute_fds(m
, fds
);
2076 /* We might have deserialized the notify fd, but if we didn't then let's create it now */
2077 r
= manager_setup_notify(m
);
2079 /* No sense to continue without notifications, our children would fail anyway. */
2082 r
= manager_setup_user_lookup_fd(m
);
2084 /* This shouldn't fail, except if things are really broken. */
2087 r
= manager_setup_handoff_timestamp_fd(m
);
2089 /* This shouldn't fail, except if things are really broken. */
2092 r
= manager_setup_pidref_transport_fd(m
);
2094 /* This shouldn't fail, except if things are really broken. */
2097 /* Connect to the bus if we are good for it */
2098 manager_setup_bus(m
);
2100 r
= manager_varlink_init(m
);
2102 log_warning_errno(r
, "Failed to set up Varlink, ignoring: %m");
2104 /* Third, fire things up! */
2105 manager_coldplug(m
);
2107 /* Clean up runtime objects */
2111 /* Let's wait for the UnitNew/JobNew messages being sent, before we notify that the
2112 * reload is finished */
2113 m
->send_reloading_done
= true;
2118 manager_set_switching_root(m
, false);
2123 int manager_add_job_full(
2128 TransactionAddFlags extra_flags
,
2130 sd_bus_error
*error
,
2133 _cleanup_(transaction_abort_and_freep
) Transaction
*tr
= NULL
;
2137 assert(type
>= 0 && type
< _JOB_TYPE_MAX
);
2139 assert(mode
>= 0 && mode
< _JOB_MODE_MAX
);
2140 assert((extra_flags
& ~_TRANSACTION_FLAGS_MASK_PUBLIC
) == 0);
2142 if (mode
== JOB_ISOLATE
&& type
!= JOB_START
)
2143 return sd_bus_error_set(error
, SD_BUS_ERROR_INVALID_ARGS
, "Isolate is only valid for start.");
2145 if (mode
== JOB_ISOLATE
&& !unit
->allow_isolate
)
2146 return sd_bus_error_set(error
, BUS_ERROR_NO_ISOLATION
, "Operation refused, unit may not be isolated.");
2148 if (mode
== JOB_TRIGGERING
&& type
!= JOB_STOP
)
2149 return sd_bus_error_set(error
, SD_BUS_ERROR_INVALID_ARGS
, "--job-mode=triggering is only valid for stop.");
2151 if (mode
== JOB_RESTART_DEPENDENCIES
&& type
!= JOB_START
)
2152 return sd_bus_error_set(error
, SD_BUS_ERROR_INVALID_ARGS
, "--job-mode=restart-dependencies is only valid for start.");
2154 log_unit_debug(unit
, "Trying to enqueue job %s/%s/%s", unit
->id
, job_type_to_string(type
), job_mode_to_string(mode
));
2156 type
= job_type_collapse(type
, unit
);
2158 tr
= transaction_new(mode
== JOB_REPLACE_IRREVERSIBLY
);
2162 r
= transaction_add_job_and_dependencies(
2167 TRANSACTION_MATTERS
|
2168 (IN_SET(mode
, JOB_IGNORE_DEPENDENCIES
, JOB_IGNORE_REQUIREMENTS
) ? TRANSACTION_IGNORE_REQUIREMENTS
: 0) |
2169 (mode
== JOB_IGNORE_DEPENDENCIES
? TRANSACTION_IGNORE_ORDER
: 0) |
2170 (mode
== JOB_RESTART_DEPENDENCIES
? TRANSACTION_PROPAGATE_START_AS_RESTART
: 0) |
2176 if (mode
== JOB_ISOLATE
) {
2177 r
= transaction_add_isolate_jobs(tr
, m
);
2182 if (mode
== JOB_TRIGGERING
) {
2183 r
= transaction_add_triggering_jobs(tr
, unit
);
2188 r
= transaction_activate(tr
, m
, mode
, affected_jobs
, error
);
2192 log_unit_debug(unit
,
2193 "Enqueued job %s/%s as %u", unit
->id
,
2194 job_type_to_string(type
), (unsigned) tr
->anchor_job
->id
);
2197 *ret
= tr
->anchor_job
;
2199 tr
= transaction_free(tr
);
2203 int manager_add_job(
2208 sd_bus_error
*error
,
2211 return manager_add_job_full(m
, type
, unit
, mode
, 0, NULL
, error
, ret
);
2214 int manager_add_job_by_name(Manager
*m
, JobType type
, const char *name
, JobMode mode
, Set
*affected_jobs
, sd_bus_error
*e
, Job
**ret
) {
2215 Unit
*unit
= NULL
; /* just to appease gcc, initialization is not really necessary */
2219 assert(type
< _JOB_TYPE_MAX
);
2221 assert(mode
< _JOB_MODE_MAX
);
2223 r
= manager_load_unit(m
, name
, NULL
, NULL
, &unit
);
2228 return manager_add_job_full(m
, type
, unit
, mode
, /* extra_flags = */ 0, affected_jobs
, e
, ret
);
2231 int manager_add_job_by_name_and_warn(Manager
*m
, JobType type
, const char *name
, JobMode mode
, Set
*affected_jobs
, Job
**ret
) {
2232 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2236 assert(type
< _JOB_TYPE_MAX
);
2238 assert(mode
< _JOB_MODE_MAX
);
2240 r
= manager_add_job_by_name(m
, type
, name
, mode
, affected_jobs
, &error
, ret
);
2242 return log_warning_errno(r
, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode
), name
, bus_error_message(&error
, r
));
2247 int manager_propagate_reload(Manager
*m
, Unit
*unit
, JobMode mode
, sd_bus_error
*e
) {
2249 _cleanup_(transaction_abort_and_freep
) Transaction
*tr
= NULL
;
2253 assert(mode
< _JOB_MODE_MAX
);
2254 assert(mode
!= JOB_ISOLATE
); /* Isolate is only valid for start */
2256 tr
= transaction_new(mode
== JOB_REPLACE_IRREVERSIBLY
);
2260 /* We need an anchor job */
2261 r
= transaction_add_job_and_dependencies(tr
, JOB_NOP
, unit
, NULL
, TRANSACTION_IGNORE_REQUIREMENTS
|TRANSACTION_IGNORE_ORDER
, e
);
2265 /* Failure in adding individual dependencies is ignored, so this always succeeds. */
2266 transaction_add_propagate_reload_jobs(
2270 mode
== JOB_IGNORE_DEPENDENCIES
? TRANSACTION_IGNORE_ORDER
: 0);
2272 r
= transaction_activate(tr
, m
, mode
, NULL
, e
);
2276 tr
= transaction_free(tr
);
2280 Job
*manager_get_job(Manager
*m
, uint32_t id
) {
2283 return hashmap_get(m
->jobs
, UINT32_TO_PTR(id
));
2286 Unit
*manager_get_unit(Manager
*m
, const char *name
) {
2290 return hashmap_get(m
->units
, name
);
2293 static int manager_dispatch_target_deps_queue(Manager
*m
) {
2299 while ((u
= LIST_POP(target_deps_queue
, m
->target_deps_queue
))) {
2300 _cleanup_free_ Unit
**targets
= NULL
;
2303 assert(u
->in_target_deps_queue
);
2305 u
->in_target_deps_queue
= false;
2307 /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the
2308 * dependencies, and we can't have it that hash tables we iterate through are modified while
2309 * we are iterating through them. */
2310 n_targets
= unit_get_dependency_array(u
, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES
, &targets
);
2314 FOREACH_ARRAY(i
, targets
, n_targets
) {
2315 r
= unit_add_default_target_dependency(u
, *i
);
2324 unsigned manager_dispatch_load_queue(Manager
*m
) {
2330 /* Make sure we are not run recursively */
2331 if (m
->dispatching_load_queue
)
2334 m
->dispatching_load_queue
= true;
2336 /* Dispatches the load queue. Takes a unit from the queue and
2337 * tries to load its data until the queue is empty */
2339 while ((u
= m
->load_queue
)) {
2340 assert(u
->in_load_queue
);
2346 m
->dispatching_load_queue
= false;
2348 /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about
2349 * should be loaded and have aliases resolved */
2350 (void) manager_dispatch_target_deps_queue(m
);
2355 bool manager_unit_cache_should_retry_load(Unit
*u
) {
2358 /* Automatic reloading from disk only applies to units which were not found sometime in the past, and
2359 * the not-found stub is kept pinned in the unit graph by dependencies. For units that were
2360 * previously loaded, we don't do automatic reloading, and daemon-reload is necessary to update. */
2361 if (u
->load_state
!= UNIT_NOT_FOUND
)
2364 /* The cache has been updated since the last time we tried to load the unit. There might be new
2365 * fragment paths to read. */
2366 if (u
->manager
->unit_cache_timestamp_hash
!= u
->fragment_not_found_timestamp_hash
)
2369 /* The cache needs to be updated because there are modifications on disk. */
2370 return !lookup_paths_timestamp_hash_same(&u
->manager
->lookup_paths
, u
->manager
->unit_cache_timestamp_hash
, NULL
);
2373 int manager_load_unit_prepare(
2380 _cleanup_(unit_freep
) Unit
*cleanup_unit
= NULL
;
2381 _cleanup_free_
char *nbuf
= NULL
;
2386 assert(name
|| path
);
2388 /* This will prepare the unit for loading, but not actually load anything from disk. */
2390 if (path
&& !path_is_absolute(path
))
2391 return sd_bus_error_setf(e
, SD_BUS_ERROR_INVALID_ARGS
, "Path %s is not absolute.", path
);
2394 r
= path_extract_filename(path
, &nbuf
);
2397 if (r
== O_DIRECTORY
)
2398 return sd_bus_error_setf(e
, SD_BUS_ERROR_INVALID_ARGS
, "Path '%s' refers to directory, refusing.", path
);
2403 UnitType t
= unit_name_to_type(name
);
2405 if (t
== _UNIT_TYPE_INVALID
|| !unit_name_is_valid(name
, UNIT_NAME_PLAIN
|UNIT_NAME_INSTANCE
)) {
2406 if (unit_name_is_valid(name
, UNIT_NAME_TEMPLATE
))
2407 return sd_bus_error_setf(e
, SD_BUS_ERROR_INVALID_ARGS
, "Unit name %s is missing the instance name.", name
);
2409 return sd_bus_error_setf(e
, SD_BUS_ERROR_INVALID_ARGS
, "Unit name %s is not valid.", name
);
2412 Unit
*unit
= manager_get_unit(m
, name
);
2414 /* The time-based cache allows new units to be started without daemon-reload,
2415 * but if they are already referenced (because of dependencies or ordering)
2416 * then we have to force a load of the fragment. As an optimization, check
2417 * first if anything in the usual paths was modified since the last time
2418 * the cache was loaded. Also check if the last time an attempt to load the
2419 * unit was made was before the most recent cache refresh, so that we know
2420 * we need to try again — even if the cache is current, it might have been
2421 * updated in a different context before we had a chance to retry loading
2422 * this particular unit. */
2423 if (manager_unit_cache_should_retry_load(unit
))
2424 unit
->load_state
= UNIT_STUB
;
2427 return 0; /* The unit was already loaded */
2430 unit
= cleanup_unit
= unit_new(m
, unit_vtable
[t
]->object_size
);
2436 r
= free_and_strdup(&unit
->fragment_path
, path
);
2441 r
= unit_add_name(unit
, name
);
2445 unit_add_to_load_queue(unit
);
2446 unit_add_to_dbus_queue(unit
);
2447 unit_add_to_gc_queue(unit
);
2450 TAKE_PTR(cleanup_unit
);
2452 return 1; /* The unit was added the load queue */
2455 int manager_load_unit(
2466 /* This will load the unit config, but not actually start any services or anything. */
2468 r
= manager_load_unit_prepare(m
, name
, path
, e
, ret
);
2472 /* Unit was newly loaded */
2473 manager_dispatch_load_queue(m
);
2474 *ret
= unit_follow_merge(*ret
);
2478 int manager_load_startable_unit_or_warn(
2484 /* Load a unit, make sure it loaded fully and is not masked. */
2486 _cleanup_(sd_bus_error_free
) sd_bus_error error
= SD_BUS_ERROR_NULL
;
2490 r
= manager_load_unit(m
, name
, path
, &error
, &unit
);
2492 return log_error_errno(r
, "Failed to load %s %s: %s",
2493 name
? "unit" : "unit file", name
?: path
,
2494 bus_error_message(&error
, r
));
2496 r
= bus_unit_validate_load_state(unit
, &error
);
2498 return log_error_errno(r
, "%s", bus_error_message(&error
, r
));
2504 void manager_clear_jobs(Manager
*m
) {
2509 while ((j
= hashmap_first(m
->jobs
)))
2510 /* No need to recurse. We're cancelling all jobs. */
2511 job_finish_and_invalidate(j
, JOB_CANCELED
, false, false);
2514 void manager_unwatch_pidref(Manager
*m
, const PidRef
*pid
) {
2520 u
= manager_get_unit_by_pidref_watching(m
, pid
);
2524 unit_unwatch_pidref(u
, pid
);
2528 static int manager_dispatch_run_queue(sd_event_source
*source
, void *userdata
) {
2529 Manager
*m
= ASSERT_PTR(userdata
);
2534 while ((j
= prioq_peek(m
->run_queue
))) {
2535 assert(j
->installed
);
2536 assert(j
->in_run_queue
);
2538 (void) job_run_and_invalidate(j
);
2541 if (m
->n_running_jobs
> 0)
2542 manager_watch_jobs_in_progress(m
);
2544 if (m
->n_on_console
> 0)
2545 manager_watch_idle_pipe(m
);
2550 void manager_trigger_run_queue(Manager
*m
) {
2555 r
= sd_event_source_set_enabled(
2556 m
->run_queue_event_source
,
2557 prioq_isempty(m
->run_queue
) ? SD_EVENT_OFF
: SD_EVENT_ONESHOT
);
2559 log_warning_errno(r
, "Failed to enable job run queue event source, ignoring: %m");
2562 static unsigned manager_dispatch_dbus_queue(Manager
*m
) {
2563 unsigned n
= 0, budget
;
2569 /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly
2570 * as we can. There's no point in throttling generation of signals in that case. */
2571 if (MANAGER_IS_RELOADING(m
) || m
->send_reloading_done
|| m
->pending_reload_message
)
2572 budget
= UINT_MAX
; /* infinite budget in this case */
2574 /* Anything to do at all? */
2575 if (!m
->dbus_unit_queue
&& !m
->dbus_job_queue
)
2578 /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's
2579 * sit this cycle out, and process things in a later cycle when the queues got a bit emptier. */
2580 if (manager_bus_n_queued_write(m
) > MANAGER_BUS_BUSY_THRESHOLD
)
2583 /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't
2584 * overly full before this call we shouldn't increase it in size too wildly in one step, and we
2585 * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of
2586 * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message,
2587 * regardless how many buses/direct connections it is enqueued on, while the "threshold" is applied to
2588 * each queued instance of bus message, i.e. if the same message is enqueued to five buses/direct
2589 * connections it will be counted five times. This difference in counting ("references"
2590 * vs. "instances") is primarily a result of the fact that it's easier to implement it this way,
2591 * however it also reflects the thinking that the "threshold" should put a limit on used queue memory,
2592 * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is
2593 * currently chosen much higher than the "budget". */
2594 budget
= MANAGER_BUS_MESSAGE_BUDGET
;
2597 while (budget
!= 0 && (u
= m
->dbus_unit_queue
)) {
2599 assert(u
->in_dbus_queue
);
2601 bus_unit_send_change_signal(u
);
2604 if (budget
!= UINT_MAX
)
2608 while (budget
!= 0 && (j
= m
->dbus_job_queue
)) {
2609 assert(j
->in_dbus_queue
);
2611 bus_job_send_change_signal(j
);
2614 if (budget
!= UINT_MAX
)
2618 if (m
->send_reloading_done
) {
2619 m
->send_reloading_done
= false;
2620 bus_manager_send_reloading(m
, false);
2624 if (m
->pending_reload_message
) {
2625 bus_send_pending_reload_message(m
);
2632 static bool manager_process_barrier_fd(char * const *tags
, FDSet
*fds
) {
2634 /* nothing else must be sent when using BARRIER=1 */
2635 if (strv_contains(tags
, "BARRIER=1")) {
2636 if (strv_length(tags
) != 1)
2637 log_warning("Extra notification messages sent with BARRIER=1, ignoring everything.");
2638 else if (fdset_size(fds
) != 1)
2639 log_warning("Got incorrect number of fds with BARRIER=1, closing them.");
2641 /* Drop the message if BARRIER=1 was found */
2648 static void manager_invoke_notify_message(
2652 const struct ucred
*ucred
,
2658 assert(pidref_is_set(pidref
));
2660 assert(pidref
->pid
== ucred
->pid
);
2663 if (u
->notifygen
== m
->notifygen
) /* Already invoked on this same unit in this same iteration? */
2665 u
->notifygen
= m
->notifygen
;
2667 if (UNIT_VTABLE(u
)->notify_message
)
2668 UNIT_VTABLE(u
)->notify_message(u
, pidref
, ucred
, tags
, fds
);
2670 else if (DEBUG_LOGGING
) {
2671 _cleanup_free_
char *joined
= strv_join(tags
, ", ");
2672 char buf
[CELLESCAPE_DEFAULT_LENGTH
];
2674 log_unit_debug(u
, "Got notification message from unexpected unit type, ignoring: %s",
2675 joined
? cellescape(buf
, sizeof(buf
), joined
) : "(null)");
2679 static int manager_get_units_for_pidref(Manager
*m
, const PidRef
*pidref
, Unit
***ret_units
) {
2680 /* Determine array of every unit that is interested in the specified process */
2683 assert(pidref_is_set(pidref
));
2685 Unit
*u1
, *u2
, **array
;
2686 u1
= manager_get_unit_by_pidref_cgroup(m
, pidref
);
2687 u2
= hashmap_get(m
->watch_pids
, pidref
);
2688 array
= hashmap_get(m
->watch_pids_more
, pidref
);
2696 for (size_t j
= 0; array
[j
]; j
++)
2699 assert(n
<= INT_MAX
); /* Make sure we can reasonably return the counter as "int" */
2702 _cleanup_free_ Unit
**units
= NULL
;
2705 units
= new(Unit
*, n
+ 1);
2709 /* We return a dense array, and put the "main" unit first, i.e. unit in whose cgroup
2710 * the process currently is. Note that we do not bother with filtering duplicates
2719 for (size_t j
= 0; array
[j
]; j
++)
2720 units
[i
++] = array
[j
];
2723 units
[i
] = NULL
; /* end array in an extra NULL */
2726 *ret_units
= TAKE_PTR(units
);
2732 static int manager_dispatch_notify_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
2733 Manager
*m
= ASSERT_PTR(userdata
);
2734 _cleanup_(pidref_done
) PidRef pidref
= PIDREF_NULL
;
2736 _cleanup_(fdset_free_asyncp
) FDSet
*fds
= NULL
;
2739 assert(m
->notify_fd
== fd
);
2741 if (revents
!= EPOLLIN
) {
2742 log_warning("Got unexpected poll event for notify fd.");
2746 _cleanup_strv_free_
char **tags
= NULL
;
2747 r
= notify_recv_with_fds_strv(m
->notify_fd
, &tags
, &ucred
, &pidref
, &fds
);
2751 /* If this is any other, real error, then stop processing this socket. This of course means
2752 * we won't take notification messages anymore, but that's still better than busy looping:
2753 * being woken up over and over again, but being unable to actually read the message from the
2757 /* Possibly a barrier fd, let's see. */
2758 if (manager_process_barrier_fd(tags
, fds
)) {
2759 log_debug("Received barrier notification message from PID " PID_FMT
".", pidref
.pid
);
2763 /* Increase the generation counter used for filtering out duplicate unit invocations. */
2766 /* Notify every unit that might be interested, which might be multiple. */
2767 _cleanup_free_ Unit
**array
= NULL
;
2769 int n_array
= manager_get_units_for_pidref(m
, &pidref
, &array
);
2771 log_warning_errno(n_array
, "Failed to determine units for PID " PID_FMT
", ignoring: %m", pidref
.pid
);
2775 log_debug("Cannot find unit for notify message of PID "PID_FMT
", ignoring.", pidref
.pid
);
2777 /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle
2778 * duplicate units – making sure we only invoke each unit's handler once. */
2779 FOREACH_ARRAY(u
, array
, n_array
)
2780 manager_invoke_notify_message(m
, *u
, &pidref
, &ucred
, tags
, fds
);
2782 if (!fdset_isempty(fds
))
2783 log_warning("Got extra auxiliary fds with notification message, closing them.");
2788 static void manager_invoke_sigchld_event(
2791 const siginfo_t
*si
) {
2797 /* Already invoked the handler of this unit in this iteration? Then don't process this again */
2798 if (u
->sigchldgen
== m
->sigchldgen
)
2800 u
->sigchldgen
= m
->sigchldgen
;
2802 log_unit_debug(u
, "Child "PID_FMT
" belongs to %s.", si
->si_pid
, u
->id
);
2803 unit_unwatch_pidref(u
, &PIDREF_MAKE_FROM_PID(si
->si_pid
));
2805 if (UNIT_VTABLE(u
)->sigchld_event
)
2806 UNIT_VTABLE(u
)->sigchld_event(u
, si
->si_pid
, si
->si_code
, si
->si_status
);
2809 static int manager_dispatch_sigchld(sd_event_source
*source
, void *userdata
) {
2810 Manager
*m
= ASSERT_PTR(userdata
);
2816 /* First we call waitid() for a PID and do not reap the zombie. That way we can still access
2817 * /proc/$PID for it while it is a zombie. */
2819 if (waitid(P_ALL
, 0, &si
, WEXITED
|WNOHANG
|WNOWAIT
) < 0) {
2821 if (errno
!= ECHILD
)
2822 log_error_errno(errno
, "Failed to peek for child with waitid(), ignoring: %m");
2830 if (SIGINFO_CODE_IS_DEAD(si
.si_code
)) {
2831 _cleanup_free_
char *name
= NULL
;
2832 (void) pid_get_comm(si
.si_pid
, &name
);
2834 log_debug("Child "PID_FMT
" (%s) died (code=%s, status=%i/%s)",
2835 si
.si_pid
, strna(name
),
2836 sigchld_code_to_string(si
.si_code
),
2838 strna(si
.si_code
== CLD_EXITED
2839 ? exit_status_to_string(si
.si_status
, EXIT_STATUS_FULL
)
2840 : signal_to_string(si
.si_status
)));
2842 /* Increase the generation counter used for filtering out duplicate unit invocations */
2845 /* We look this up by a PidRef that only consists of the PID. After all we couldn't create a
2846 * pidfd here any more even if we wanted (since the process just exited). */
2847 PidRef pidref
= PIDREF_MAKE_FROM_PID(si
.si_pid
);
2849 /* And now figure out the units this belongs to, there might be multiple... */
2850 _cleanup_free_ Unit
**array
= NULL
;
2851 int n_array
= manager_get_units_for_pidref(m
, &pidref
, &array
);
2853 log_warning_errno(n_array
, "Failed to get units for process " PID_FMT
", ignoring: %m", si
.si_pid
);
2854 else if (n_array
== 0)
2855 log_debug("Got SIGCHLD for process " PID_FMT
" we weren't interested in, ignoring.", si
.si_pid
);
2857 /* We check for an OOM condition, in case we got SIGCHLD before the OOM notification.
2858 * We only do this for the cgroup the PID belonged to, which is the f */
2859 (void) unit_check_oom(array
[0]);
2861 /* We check if systemd-oomd performed a kill so that we log and notify appropriately */
2862 (void) unit_check_oomd_kill(array
[0]);
2864 /* Finally, execute them all. Note that the array might contain duplicates, but that's fine,
2865 * manager_invoke_sigchld_event() will ensure we only invoke the handlers once for each
2867 FOREACH_ARRAY(u
, array
, n_array
)
2868 manager_invoke_sigchld_event(m
, *u
, &si
);
2872 /* And now, we actually reap the zombie. */
2873 if (waitid(P_PID
, si
.si_pid
, &si
, WEXITED
) < 0) {
2874 log_error_errno(errno
, "Failed to dequeue child, ignoring: %m");
2881 /* All children processed for now, turn off event source */
2883 r
= sd_event_source_set_enabled(m
->sigchld_event_source
, SD_EVENT_OFF
);
2885 return log_error_errno(r
, "Failed to disable SIGCHLD event source: %m");
2890 static void manager_start_special(Manager
*m
, const char *name
, JobMode mode
) {
2893 if (manager_add_job_by_name_and_warn(m
, JOB_START
, name
, mode
, NULL
, &job
) < 0)
2896 const char *s
= unit_status_string(job
->unit
, NULL
);
2898 log_info("Activating special unit %s...", s
);
2900 (void) sd_notifyf(/* unset_environment= */ false,
2901 "STATUS=Activating special unit %s...", s
);
2902 m
->status_ready
= false;
2905 static void manager_handle_ctrl_alt_del(Manager
*m
) {
2908 /* If the user presses C-A-D more than 7 times within 2s, we reboot/shutdown immediately,
2909 * unless it was disabled in system.conf. */
2911 if (ratelimit_below(&m
->ctrl_alt_del_ratelimit
) || m
->cad_burst_action
== EMERGENCY_ACTION_NONE
)
2912 manager_start_special(m
, SPECIAL_CTRL_ALT_DEL_TARGET
, JOB_REPLACE_IRREVERSIBLY
);
2916 m
->cad_burst_action
,
2917 EMERGENCY_ACTION_WARN
,
2918 /* reboot_arg= */ NULL
,
2919 /* exit_status= */ -1,
2920 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
2923 static int manager_dispatch_signal_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
2924 Manager
*m
= ASSERT_PTR(userdata
);
2926 struct signalfd_siginfo sfsi
;
2929 assert(m
->signal_fd
== fd
);
2931 if (revents
!= EPOLLIN
) {
2932 log_warning("Got unexpected events from signal file descriptor.");
2936 n
= read(m
->signal_fd
, &sfsi
, sizeof(sfsi
));
2938 if (ERRNO_IS_TRANSIENT(errno
))
2941 /* We return an error here, which will kill this handler,
2942 * to avoid a busy loop on read error. */
2943 return log_error_errno(errno
, "Reading from signal fd failed: %m");
2945 if (n
!= sizeof(sfsi
)) {
2946 log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n
);
2950 log_received_signal(sfsi
.ssi_signo
== SIGCHLD
||
2951 (sfsi
.ssi_signo
== SIGTERM
&& MANAGER_IS_USER(m
))
2952 ? LOG_DEBUG
: LOG_INFO
,
2955 switch (sfsi
.ssi_signo
) {
2958 r
= sd_event_source_set_enabled(m
->sigchld_event_source
, SD_EVENT_ON
);
2960 log_warning_errno(r
, "Failed to enable SIGCHLD event source, ignoring: %m");
2965 if (MANAGER_IS_SYSTEM(m
)) {
2966 /* This is for compatibility with the original sysvinit */
2967 m
->objective
= MANAGER_REEXECUTE
;
2973 if (MANAGER_IS_SYSTEM(m
))
2974 manager_handle_ctrl_alt_del(m
);
2976 manager_start_special(m
, SPECIAL_EXIT_TARGET
, JOB_REPLACE_IRREVERSIBLY
);
2980 /* This is a nop on non-init */
2981 if (MANAGER_IS_SYSTEM(m
))
2982 manager_start_special(m
, SPECIAL_KBREQUEST_TARGET
, JOB_REPLACE
);
2987 /* This is a nop on non-init */
2988 if (MANAGER_IS_SYSTEM(m
))
2989 manager_start_special(m
, SPECIAL_SIGPWR_TARGET
, JOB_REPLACE
);
2994 if (manager_dbus_is_running(m
, false)) {
2995 log_info("Trying to reconnect to bus...");
2997 (void) bus_init_api(m
);
2999 if (MANAGER_IS_SYSTEM(m
))
3000 (void) bus_init_system(m
);
3002 manager_start_special(m
, SPECIAL_DBUS_SERVICE
, JOB_REPLACE
);
3007 _cleanup_free_
char *dump
= NULL
;
3009 r
= manager_get_dump_string(m
, /* patterns= */ NULL
, &dump
);
3011 log_warning_errno(r
, "Failed to acquire manager dump: %m");
3015 log_dump(LOG_INFO
, dump
);
3020 m
->objective
= MANAGER_RELOAD
;
3025 if (MANAGER_IS_SYSTEM(m
)) {
3026 /* Starting SIGRTMIN+0 */
3027 static const struct {
3030 } target_table
[] = {
3031 [0] = { SPECIAL_DEFAULT_TARGET
, JOB_ISOLATE
},
3032 [1] = { SPECIAL_RESCUE_TARGET
, JOB_ISOLATE
},
3033 [2] = { SPECIAL_EMERGENCY_TARGET
, JOB_ISOLATE
},
3034 [3] = { SPECIAL_HALT_TARGET
, JOB_REPLACE_IRREVERSIBLY
},
3035 [4] = { SPECIAL_POWEROFF_TARGET
, JOB_REPLACE_IRREVERSIBLY
},
3036 [5] = { SPECIAL_REBOOT_TARGET
, JOB_REPLACE_IRREVERSIBLY
},
3037 [6] = { SPECIAL_KEXEC_TARGET
, JOB_REPLACE_IRREVERSIBLY
},
3038 [7] = { SPECIAL_SOFT_REBOOT_TARGET
, JOB_REPLACE_IRREVERSIBLY
},
3041 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
3042 static const ManagerObjective objective_table
[] = {
3044 [1] = MANAGER_POWEROFF
,
3045 [2] = MANAGER_REBOOT
,
3046 [3] = MANAGER_KEXEC
,
3047 [4] = MANAGER_SOFT_REBOOT
,
3050 if ((int) sfsi
.ssi_signo
>= SIGRTMIN
+0 &&
3051 (int) sfsi
.ssi_signo
< SIGRTMIN
+(int) ELEMENTSOF(target_table
)) {
3052 int idx
= (int) sfsi
.ssi_signo
- SIGRTMIN
;
3053 manager_start_special(m
, target_table
[idx
].target
, target_table
[idx
].mode
);
3057 if ((int) sfsi
.ssi_signo
>= SIGRTMIN
+13 &&
3058 (int) sfsi
.ssi_signo
< SIGRTMIN
+13+(int) ELEMENTSOF(objective_table
)) {
3059 m
->objective
= objective_table
[sfsi
.ssi_signo
- SIGRTMIN
- 13];
3064 switch (sfsi
.ssi_signo
- SIGRTMIN
) {
3067 bool generic
= false;
3069 if (sfsi
.ssi_code
!= SI_QUEUE
)
3072 /* Override a few select commands by our own PID1-specific logic */
3074 switch (sfsi
.ssi_int
) {
3076 case _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE
..._COMMON_SIGNAL_COMMAND_LOG_LEVEL_END
:
3077 manager_override_log_level(m
, sfsi
.ssi_int
- _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE
);
3080 case COMMON_SIGNAL_COMMAND_CONSOLE
:
3081 manager_override_log_target(m
, LOG_TARGET_CONSOLE
);
3084 case COMMON_SIGNAL_COMMAND_JOURNAL
:
3085 manager_override_log_target(m
, LOG_TARGET_JOURNAL
);
3088 case COMMON_SIGNAL_COMMAND_KMSG
:
3089 manager_override_log_target(m
, LOG_TARGET_KMSG
);
3092 case COMMON_SIGNAL_COMMAND_NULL
:
3093 manager_override_log_target(m
, LOG_TARGET_NULL
);
3096 case MANAGER_SIGNAL_COMMAND_DUMP_JOBS
: {
3097 _cleanup_free_
char *dump_jobs
= NULL
;
3099 r
= manager_get_dump_jobs_string(m
, /* patterns= */ NULL
, " ", &dump_jobs
);
3101 log_warning_errno(r
, "Failed to acquire manager jobs dump: %m");
3105 log_dump(LOG_INFO
, dump_jobs
);
3115 return sigrtmin18_handler(source
, &sfsi
, NULL
);
3121 manager_override_show_status(m
, SHOW_STATUS_YES
, "signal");
3125 manager_override_show_status(m
, SHOW_STATUS_NO
, "signal");
3129 manager_override_log_level(m
, LOG_DEBUG
);
3133 manager_restore_original_log_level(m
);
3137 if (MANAGER_IS_USER(m
)) {
3138 m
->objective
= MANAGER_EXIT
;
3142 /* This is a nop on init */
3146 m
->objective
= MANAGER_REEXECUTE
;
3150 case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
3151 manager_restore_original_log_target(m
);
3155 manager_override_log_target(m
, LOG_TARGET_CONSOLE
);
3159 manager_override_log_target(m
, LOG_TARGET_KMSG
);
3163 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi
.ssi_signo
));
3170 static int manager_dispatch_time_change_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
3171 Manager
*m
= ASSERT_PTR(userdata
);
3174 log_struct(LOG_DEBUG
,
3175 LOG_MESSAGE_ID(SD_MESSAGE_TIME_CHANGE_STR
),
3176 LOG_MESSAGE("Time has been changed"));
3178 /* Restart the watch */
3179 (void) manager_setup_time_change(m
);
3181 HASHMAP_FOREACH(u
, m
->units
)
3182 if (UNIT_VTABLE(u
)->time_change
)
3183 UNIT_VTABLE(u
)->time_change(u
);
3188 static int manager_dispatch_timezone_change(
3189 sd_event_source
*source
,
3190 const struct inotify_event
*e
,
3193 Manager
*m
= ASSERT_PTR(userdata
);
3197 log_debug("inotify event for /etc/localtime");
3199 changed
= manager_read_timezone_stat(m
);
3203 /* Something changed, restart the watch, to ensure we watch the new /etc/localtime if it changed */
3204 (void) manager_setup_timezone_change(m
);
3206 /* Read the new timezone */
3209 log_debug("Timezone has been changed (now: %s).", tzname
[daylight
]);
3211 HASHMAP_FOREACH(u
, m
->units
)
3212 if (UNIT_VTABLE(u
)->timezone_change
)
3213 UNIT_VTABLE(u
)->timezone_change(u
);
3218 static int manager_dispatch_idle_pipe_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
3219 Manager
*m
= ASSERT_PTR(userdata
);
3221 assert(m
->idle_pipe
[2] == fd
);
3223 /* There's at least one Type=idle child that just gave up on us waiting for the boot process to
3224 * complete. Let's now turn off any further console output if there's at least one service that needs
3225 * console access, so that from now on our own output should not spill into that service's output
3226 * anymore. After all, we support Type=idle only to beautify console output and it generally is set
3227 * on services that want to own the console exclusively without our interference. */
3228 m
->no_console_output
= m
->n_on_console
> 0;
3230 /* Acknowledge the child's request, and let all other children know too that they shouldn't wait
3231 * any longer by closing the pipes towards them, which is what they are waiting for. */
3232 manager_close_idle_pipe(m
);
3237 static int manager_dispatch_jobs_in_progress(sd_event_source
*source
, usec_t usec
, void *userdata
) {
3238 Manager
*m
= ASSERT_PTR(userdata
);
3243 manager_print_jobs_in_progress(m
);
3245 r
= sd_event_source_set_time_relative(source
, JOBS_IN_PROGRESS_PERIOD_USEC
);
3249 return sd_event_source_set_enabled(source
, SD_EVENT_ONESHOT
);
3252 int manager_loop(Manager
*m
) {
3253 RateLimit rl
= { .interval
= 1*USEC_PER_SEC
, .burst
= 50000 };
3257 assert(m
->objective
== MANAGER_OK
); /* Ensure manager_startup() has been called */
3259 manager_check_finished(m
);
3261 /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
3262 r
= sd_event_source_set_enabled(m
->sigchld_event_source
, SD_EVENT_ON
);
3264 return log_error_errno(r
, "Failed to enable SIGCHLD event source: %m");
3266 while (m
->objective
== MANAGER_OK
) {
3268 if (!ratelimit_below(&rl
)) {
3269 /* Yay, something is going seriously wrong, pause a little */
3270 log_warning("Looping too fast. Throttling execution a little.");
3274 (void) watchdog_ping();
3276 if (manager_dispatch_load_queue(m
) > 0)
3279 if (manager_dispatch_gc_job_queue(m
) > 0)
3282 if (manager_dispatch_gc_unit_queue(m
) > 0)
3285 if (manager_dispatch_cleanup_queue(m
) > 0)
3288 if (manager_dispatch_cgroup_realize_queue(m
) > 0)
3291 if (manager_dispatch_start_when_upheld_queue(m
) > 0)
3294 if (manager_dispatch_stop_when_bound_queue(m
) > 0)
3297 if (manager_dispatch_stop_when_unneeded_queue(m
) > 0)
3300 if (manager_dispatch_release_resources_queue(m
) > 0)
3303 if (manager_dispatch_stop_notify_queue(m
) > 0)
3306 if (manager_dispatch_dbus_queue(m
) > 0)
3309 /* Sleep for watchdog runtime wait time */
3310 r
= sd_event_run(m
->event
, watchdog_runtime_wait(/* divisor= */ 2));
3312 return log_error_errno(r
, "Failed to run event loop: %m");
3315 return m
->objective
;
/* Resolve a D-Bus object path to a Unit: first try to interpret the escaped
 * path suffix as a 128-bit invocation ID, then fall back to treating it as a
 * unit name, loading the unit if necessary. On failure an error is stored in
 * 'e' and a negative errno-style code is returned. */
int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
        _cleanup_free_ char *n = NULL;
        sd_id128_t invocation_id;

        /* NOTE(review): several lines (declarations of 'r'/'u', error checks, returns)
         * appear lost in extraction throughout this function — verify upstream. */
        r = unit_name_from_dbus_path(s, &n);

        /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128-bit ID then
         * we use it as invocation ID. */
        r = sd_id128_from_string(n, &invocation_id);
        u = hashmap_get(m->units_by_invocation_id, &invocation_id);
        return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID,
                                 "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.",
                                 SD_ID128_FORMAT_VAL(invocation_id));

        /* If this didn't work, we check if this is a unit name */
        if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
                _cleanup_free_ char *nn = NULL;

                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS,
                                         "Unit name %s is neither a valid invocation ID nor unit name.", strnull(nn));

        r = manager_load_unit(m, n, NULL, e, &u);
/* Resolve a D-Bus job object path ("/org/freedesktop/systemd1/job/<id>") to a
 * Job via its numeric id. */
int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
        /* NOTE(review): local declarations ('p', 'r', 'id', 'j'), error checks and the
         * final return appear lost in extraction — verify upstream. */
        p = startswith(s, "/org/freedesktop/systemd1/job/");
        r = safe_atou(p, &id);
        j = manager_get_job(m, id);
/* Emit an audit record ("unit=<prefix@instance>") for a unit state change.
 * Only done for the system manager, and skipped while deserializing during a
 * reload. On EPERM-style failures the audit socket is closed so we stop
 * retrying. */
void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
        _cleanup_free_ char *p = NULL;

        /* NOTE(review): guard-clause bodies ('return;'), declarations of 'r'/'audit_fd'/'msg'
         * and some error checks appear lost in extraction — verify upstream. */
        if (!MANAGER_IS_SYSTEM(m))

        /* Don't generate audit events if the service was already started and we're just deserializing */
        if (MANAGER_IS_RELOADING(m))

        audit_fd = get_core_audit_fd();

        r = unit_name_to_prefix_and_instance(u->id, &p);
        log_warning_errno(r, "Failed to extract prefix and instance of unit name, ignoring: %m");

        msg = strjoina("unit=", p);
        if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
                if (ERRNO_IS_PRIVILEGE(errno)) {
                        /* We aren't allowed to send audit messages? Then let's not retry again. */
                        log_debug_errno(errno, "Failed to send audit message, closing audit socket: %m");
                        close_core_audit_fd();
                log_warning_errno(errno, "Failed to send audit message, ignoring: %m");
/* Notify plymouth (boot splash) about a unit state change, for unit types that
 * opt in via their vtable. System manager only; skipped in containers and
 * while reloading. */
void manager_send_unit_plymouth(Manager *m, Unit *u) {
        _cleanup_free_ char *message = NULL;

        /* NOTE(review): guard-clause bodies ('return;') and the declarations of 'c'/'r'
         * plus some error checks appear lost in extraction — verify upstream. */
        if (!MANAGER_IS_SYSTEM(m))

        /* Don't generate plymouth events if the service was already started and we're just deserializing */
        if (MANAGER_IS_RELOADING(m))

        if (detect_container() > 0)

        if (!UNIT_VTABLE(u)->notify_plymouth)

        /* Plymouth wire format: 'U' 0x02, length byte, NUL-terminated unit id. */
        c = asprintf(&message, "U\x02%c%s%c", (int) (strlen(u->id) + 1), u->id, '\x00');
        return (void) log_oom();

        /* We set SOCK_NONBLOCK here so that we rather drop the message then wait for plymouth */
        r = plymouth_send_raw(message, c, SOCK_NONBLOCK);
        log_full_errno(ERRNO_IS_NO_PLYMOUTH(r) ? LOG_DEBUG : LOG_WARNING, r,
                       "Failed to communicate with plymouth: %m");
/* Tell a supervising process (container manager / hypervisor / outer service
 * manager) via sd_notify that a unit became active or inactive, for unit types
 * that opt in. */
void manager_send_unit_supervisor(Manager *m, Unit *u, bool active) {

        /* Notify a "supervisor" process about our progress, i.e. a container manager, hypervisor, or
         * surrounding service manager. */

        /* NOTE(review): guard-clause bodies and the final sd_notifyf() argument
         * (presumably the unit id) appear lost in extraction — verify upstream. */
        if (MANAGER_IS_RELOADING(m))

        if (!UNIT_VTABLE(u)->notify_supervisor)

        if (in_initrd()) /* Only send these once we left the initrd */

        (void) sd_notifyf(/* unset_environment= */ false,
                          active ? "X_SYSTEMD_UNIT_ACTIVE=%s" : "X_SYSTEMD_UNIT_INACTIVE=%s",
3482 usec_t
manager_get_watchdog(Manager
*m
, WatchdogType t
) {
3485 if (MANAGER_IS_USER(m
))
3486 return USEC_INFINITY
;
3488 if (m
->watchdog_overridden
[t
] != USEC_INFINITY
)
3489 return m
->watchdog_overridden
[t
];
3491 return m
->watchdog
[t
];
/* Set the configured watchdog timeout for type 't'. The hardware is only
 * reprogrammed when no runtime override is in effect; the configured value is
 * remembered either way. No-op for user-scope managers. */
void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout) {

        /* NOTE(review): guard-clause body and closing braces appear lost in
         * extraction — verify upstream. */
        if (MANAGER_IS_USER(m))

        if (m->watchdog_overridden[t] == USEC_INFINITY) {
                if (t == WATCHDOG_RUNTIME)
                        (void) watchdog_setup(timeout);
                else if (t == WATCHDOG_PRETIMEOUT)
                        (void) watchdog_setup_pretimeout(timeout);

        m->watchdog[t] = timeout;
/* Override the watchdog timeout for type 't' at runtime. USEC_INFINITY clears
 * the override (falling back to the configured value when reprogramming the
 * hardware). No-op for user-scope managers. */
void manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout) {

        /* NOTE(review): guard-clause body and the declaration of 'usec' appear lost
         * in extraction — verify upstream. */
        if (MANAGER_IS_USER(m))

        /* An override of USEC_INFINITY means "no override": program the configured value. */
        usec = timeout == USEC_INFINITY ? m->watchdog[t] : timeout;
        if (t == WATCHDOG_RUNTIME)
                (void) watchdog_setup(usec);
        else if (t == WATCHDOG_PRETIMEOUT)
                (void) watchdog_setup_pretimeout(usec);

        m->watchdog_overridden[t] = timeout;
/* Set the configured watchdog pretimeout governor, applying it to the kernel
 * and storing a copy in the manager. Returns 0 on success, negative errno on
 * failure. */
int manager_set_watchdog_pretimeout_governor(Manager *m, const char *governor) {
        _cleanup_free_ char *p = NULL;

        /* NOTE(review): guard-clause bodies, OOM check after strdup() and the error
         * check after setup appear lost in extraction — verify upstream. */
        if (MANAGER_IS_USER(m))

        if (streq_ptr(m->watchdog_pretimeout_governor, governor))

        p = strdup(governor);

        r = watchdog_setup_pretimeout_governor(governor);

        /* free_and_replace() transfers ownership of 'p' into the manager. */
        return free_and_replace(m->watchdog_pretimeout_governor, p);
/* Override the watchdog pretimeout governor at runtime (parallel to
 * manager_set_watchdog_pretimeout_governor(), but stored in the *_overridden
 * field). Returns 0 on success, negative errno on failure. */
int manager_override_watchdog_pretimeout_governor(Manager *m, const char *governor) {
        _cleanup_free_ char *p = NULL;

        /* NOTE(review): guard-clause bodies, OOM check after strdup() and the error
         * check after setup appear lost in extraction — verify upstream. */
        if (MANAGER_IS_USER(m))

        if (streq_ptr(m->watchdog_pretimeout_governor_overridden, governor))

        p = strdup(governor);

        r = watchdog_setup_pretimeout_governor(governor);

        return free_and_replace(m->watchdog_pretimeout_governor_overridden, p);
/* Reload the manager configuration: serialize all state to a temp file, flush
 * jobs/units/generators/runtime objects, re-run generators, re-enumerate,
 * deserialize the saved state back, re-establish communication fds and
 * coldplug everything. Past the serialization point failures are logged but
 * cannot abort the reload. */
int manager_reload(Manager *m) {
        _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL;
        _cleanup_fdset_free_ FDSet *fds = NULL;
        _cleanup_fclose_ FILE *f = NULL;

        /* NOTE(review): declarations of 'r', various 'if (r < 0)' guards and the
         * final return appear lost in extraction throughout — verify upstream. */
        r = manager_open_serialization(m, &f);
        return log_error_errno(r, "Failed to create serialization file: %m");

        /* We are officially in reload mode from here on. */
        reloading = manager_reloading_start(m);

        r = manager_serialize(m, f, fds, false);

        r = finish_serialization_file(f);
        return log_error_errno(r, "Failed to finish serialization: %m");

        /* 💀 This is the point of no return, from here on there is no way back. 💀 */

        bus_manager_send_reloading(m, true);

        /* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users
         * and everything else that is worth flushing out. We'll get it all back from the serialization — if we need
         * it. */
        manager_clear_jobs_and_units(m);
        lookup_paths_flush_generator(&m->lookup_paths);
        exec_shared_runtime_vacuum(m);
        dynamic_user_vacuum(m, false);
        m->uid_refs = hashmap_free(m->uid_refs);
        m->gid_refs = hashmap_free(m->gid_refs);

        (void) manager_run_environment_generators(m);
        (void) manager_run_generators(m);

        /* We flushed out generated files, for which we don't watch mtime, so we should flush the old map. */
        manager_free_unit_name_maps(m);
        m->unit_file_state_outdated = false;

        /* First, enumerate what we can from kernel and suchlike */
        manager_enumerate_perpetual(m);
        manager_enumerate(m);

        /* Second, deserialize our stored data */
        r = manager_deserialize(m, f, fds);
        log_warning_errno(r, "Deserialization failed, proceeding anyway: %m");

        /* We don't need the serialization anymore */

        /* Re-register notify_fd as event source, and set up other sockets/communication channels we might need */
        (void) manager_setup_notify(m);
        (void) manager_setup_user_lookup_fd(m);
        (void) manager_setup_handoff_timestamp_fd(m);
        (void) manager_setup_pidref_transport_fd(m);

        /* Clean up deserialized bus track information. They're never consumed during reload (as opposed to
         * reexec) since we do not disconnect from the bus. */
        m->subscribed_as_strv = strv_free(m->subscribed_as_strv);
        m->deserialized_bus_id = SD_ID128_NULL;

        /* Third, fire things up! */
        manager_coldplug(m);

        /* Clean up runtime objects no longer referenced */

        /* Consider the reload process complete now. */
        assert(m->n_reloading > 0);

        m->send_reloading_done = true;
/* Reset the failed state of every unit known to the manager. */
void manager_reset_failed(Manager *m) {

        /* NOTE(review): the declaration of iteration variable 'u' appears lost in
         * extraction — verify upstream. */
        HASHMAP_FOREACH(u, m->units)
                unit_reset_failed(u);
/* Look up a unit by name and report whether it is inactive or on its way down. */
bool manager_unit_inactive_or_pending(Manager *m, const char *name) {

        /* Returns true if the unit is inactive or going down */

        /* NOTE(review): the declaration of 'u' and the NULL check between lookup and
         * use appear lost in extraction — verify upstream. */
        u = manager_get_unit(m, name);

        return unit_inactive_or_pending(u);
/* Log the system taint string once (system manager only); subsequent calls are
 * no-ops via m->taint_logged. */
static void log_taint_string(Manager *m) {

        /* NOTE(review): guard-clause body ('return;') and likely an empty-taint check
         * appear lost in extraction — verify upstream. */
        if (MANAGER_IS_USER(m) || m->taint_logged)

        m->taint_logged = true; /* only check for taint once */

        _cleanup_free_ char *taint = taint_string();

        log_struct(LOG_NOTICE,
                   LOG_MESSAGE("System is tainted: %s", taint),
                   LOG_ITEM("TAINT=%s", taint),
                   LOG_MESSAGE_ID(SD_MESSAGE_TAINTED_STR));
/* Compute and log startup timing (firmware/loader/kernel/initrd/userspace),
 * covering four cases: soft-reboot, bare-metal system with initrd, bare-metal
 * system without initrd, and container/--user. Finally broadcasts the figures
 * on the bus and logs the taint string. */
static void manager_notify_finished(Manager *m) {
        usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;

        /* NOTE(review): guard bodies, 'char *p = buf;' style declarations, some
         * closing braces and 'else' keywords appear lost in extraction throughout
         * this function — verify upstream. */
        if (MANAGER_IS_TEST_RUN(m))

        if (MANAGER_IS_SYSTEM(m) && m->soft_reboots_count > 0) {
                /* The soft-reboot case, where we only report data for the last reboot */
                firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
                total_usec = userspace_usec = usec_sub_unsigned(m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic,
                                                                m->timestamps[MANAGER_TIMESTAMP_SHUTDOWN_START].monotonic);

                log_struct(LOG_INFO,
                           LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR),
                           LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec),
                           LOG_MESSAGE("Soft-reboot finished in %s, counter is now at %u.",
                                       FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC),
                                       m->soft_reboots_count));
        } else if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
                char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")]
                size_t size = sizeof buf;

                /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
                 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
                 * negative values. */

                firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
                loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
                userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
                total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;

                if (firmware_usec > 0)
                        size = strpcpyf(&p, size, "%s (firmware) + ", FORMAT_TIMESPAN(firmware_usec, USEC_PER_MSEC));
                if (loader_usec > 0)
                        size = strpcpyf(&p, size, "%s (loader) + ", FORMAT_TIMESPAN(loader_usec, USEC_PER_MSEC));

                if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {

                        /* The initrd case on bare-metal */
                        kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
                        initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;

                        log_struct(LOG_INFO,
                                   LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR),
                                   LOG_ITEM("KERNEL_USEC="USEC_FMT, kernel_usec),
                                   LOG_ITEM("INITRD_USEC="USEC_FMT, initrd_usec),
                                   LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec),
                                   LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.",
                                               FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
                                               FORMAT_TIMESPAN(initrd_usec, USEC_PER_MSEC),
                                               FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
                                               FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));

                        /* The initrd-less case on bare-metal */

                        kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;

                        log_struct(LOG_INFO,
                                   LOG_MESSAGE_ID(SD_MESSAGE_STARTUP_FINISHED_STR),
                                   LOG_ITEM("KERNEL_USEC="USEC_FMT, kernel_usec),
                                   LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec),
                                   LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.",
                                               FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
                                               FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
                                               FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));

                /* The container and --user case */
                firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
                total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;

                log_struct(LOG_INFO,
                           LOG_MESSAGE_ID(SD_MESSAGE_USER_STARTUP_FINISHED_STR),
                           LOG_ITEM("USERSPACE_USEC="USEC_FMT, userspace_usec),
                           LOG_MESSAGE("Startup finished in %s.",
                                       FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));

        bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);

        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
                watchdog_report_if_missing();

        log_taint_string(m);
/* Send READY=1 (plus a status line) the first time a --user manager reaches
 * basic.target; system managers skip this entirely. */
static void manager_send_ready_on_basic_target(Manager *m) {

        /* We send READY=1 on reaching basic.target only when running in --user mode. */
        /* NOTE(review): guard-clause body, declaration of 'r', the "READY=1" sd_notify
         * argument and the 'if (r < 0)' guard appear lost in extraction — verify upstream. */
        if (!MANAGER_IS_USER(m) || m->ready_sent)

        r = sd_notify(/* unset_environment= */ false,
                      "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
        log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");

        m->ready_sent = true;
        /* Status is not final yet — manager_send_ready_on_idle() flushes it later. */
        m->status_ready = false;
/* Send READY=1 and a final STATUS= once the manager is idle; duplicate calls
 * short-circuit when both flags are already set. */
static void manager_send_ready_on_idle(Manager *m) {

        /* Skip the notification if nothing changed. */
        if (m->ready_sent && m->status_ready)

        /* Note that for user managers, we might have already sent READY=1 in manager_send_ready_user_scope().
         * But we still need to flush STATUS=. The second READY=1 will be treated as a noop so it doesn't
         * hurt to send it twice. */
        /* NOTE(review): the sd_notify() payload arguments, declaration of 'r' and the
         * 'if (r < 0)' guard appear lost in extraction — verify upstream. */
        r = sd_notify(/* unset_environment= */ false,
        log_full_errno(m->ready_sent ? LOG_DEBUG : LOG_WARNING, r,
                       "Failed to send readiness notification, ignoring: %m");

        m->ready_sent = m->status_ready = true;
/* Once basic.target is active (or reloading), fire the one-shot actions tied
 * to it: --user READY=1 notification and the taint log line. */
static void manager_check_basic_target(Manager *m) {

        /* Small shortcut */
        /* NOTE(review): guard-clause bodies ('return;') and declaration of 'u' appear
         * lost in extraction — verify upstream. */
        if (m->ready_sent && m->taint_logged)

        u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
        if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))

        /* For user managers, send out READY=1 as soon as we reach basic.target */
        manager_send_ready_on_basic_target(m);

        /* Log the taint string as soon as we reach basic.target */
        log_taint_string(m);
/* Called whenever the job queue may have drained: when no jobs remain, perform
 * the "boot finished" transition — stop the progress timer, send readiness,
 * close the idle pipe, stamp MANAGER_TIMESTAMP_FINISH and broadcast timing. */
void manager_check_finished(Manager *m) {

        /* NOTE(review): guard-clause bodies ('return;') and some closing braces
         * appear lost in extraction — verify upstream. */
        if (MANAGER_IS_RELOADING(m))

        /* Verify that we have entered the event loop already, and not left it again. */
        if (!MANAGER_IS_RUNNING(m))

        manager_check_basic_target(m);

        if (!hashmap_isempty(m->jobs)) {
                if (m->jobs_in_progress_event_source)
                        /* Ignore any failure, this is only for feedback */
                        (void) sd_event_source_set_time(m->jobs_in_progress_event_source,
                                                        manager_watch_jobs_next_time(m));

        /* The jobs hashmap tends to grow a lot during boot, and then it's not reused until shutdown. Let's
           kill the hashmap if it is relatively large. */
        if (hashmap_buckets(m->jobs) > hashmap_size(m->units) / 10)
                m->jobs = hashmap_free(m->jobs);

        manager_send_ready_on_idle(m);

        /* Notify Type=idle units that we are done now */
        manager_close_idle_pipe(m);

        if (MANAGER_IS_FINISHED(m))

        manager_flip_auto_status(m, false, "boot finished");

        /* Turn off confirm spawn now */
        m->confirm_spawn = NULL;

        /* No need to update ask password status when we're going non-interactive */
        manager_close_ask_password(m);

        /* This is no longer the first boot */
        manager_set_first_boot(m, false);

        dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_FINISH);

        manager_notify_finished(m);

        manager_invalidate_startup_units(m);
/* Tell our own supervisor (via sd_notify RELOADING=1) that we are reloading,
 * and arrange for READY=1 to be re-sent afterwards. */
void manager_send_reloading(Manager *m) {

        /* Let whoever invoked us know that we are now reloading */
        (void) notify_reloading_full(/* status = */ NULL);

        /* And ensure that we'll send READY=1 again as soon as we are ready again */
        m->ready_sent = false;
/* Return whether at least one of the given generator directories exists, so
 * callers can skip generator setup entirely when none do. */
static bool generator_path_any(char * const *paths) {

        /* Optimize by skipping the whole process by not creating output directories if no generators are found. */

        /* NOTE(review): the 'return true;' on success and the trailing 'return false;'
         * appear lost in extraction — verify upstream. */
        STRV_FOREACH(i, paths) {
                if (access(*i, F_OK) >= 0)

                if (errno != ENOENT)
                        log_warning_errno(errno, "Failed to check if generator dir '%s' exists, assuming not: %m", *i);
/* Run the environment generators in parallel and merge their stdout output
 * into m->transient_environment. Skipped in test runs unless explicitly
 * enabled. */
static int manager_run_environment_generators(Manager *m) {
        _cleanup_strv_free_ char **paths = NULL;

        /* NOTE(review): guard-clause bodies, declaration of 'r', intermediate
         * execute_directories() arguments and the final return appear lost in
         * extraction — verify upstream. */
        if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))

        paths = env_generator_binary_paths(m->runtime_scope);

        if (!generator_path_any(paths))

        char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
        /* Pipeline of stdout handlers: generate + collect into 'tmp', then consume
         * into the transient environment. */
        void *args[_STDOUT_CONSUME_MAX] = {
                [STDOUT_GENERATE] = &tmp,
                [STDOUT_COLLECT] = &tmp,
                [STDOUT_CONSUME] = &m->transient_environment,

        r = execute_directories(
                        (const char* const*) paths,
                        DEFAULT_TIMEOUT_USEC,
                        m->transient_environment,
                        EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID);
/* Build the environment block passed to unit generators: the transient
 * environment plus informational variables (scope, initrd state, soft-reboot
 * count, first-boot flag, virtualization, architecture). On success stores the
 * new strv in *ret. */
static int build_generator_environment(Manager *m, char ***ret) {
        _cleanup_strv_free_ char **nl = NULL;
        ConfidentialVirtualization cv;

        /* Generators oftentimes want to know some basic facts about the environment they run in, in order to
         * adjust generated units to that. Let's pass down some bits of information that are easy for us to
         * determine (but a bit harder for generator scripts to determine), as environment variables. */

        /* NOTE(review): declarations of 'r'/'v'/'s', OOM/error guards, some closing
         * braces and the final 'return 0;' appear lost in extraction — verify upstream. */
        nl = strv_copy(m->transient_environment);

        r = strv_env_assign(&nl, "SYSTEMD_SCOPE", runtime_scope_to_string(m->runtime_scope));

        if (MANAGER_IS_SYSTEM(m)) {
                /* Note that $SYSTEMD_IN_INITRD may be used to override the initrd detection in much of our
                 * codebase. This is hence more than purely informational. It will shortcut detection of the
                 * initrd state if generators invoke our own tools. But that's OK, as it would come to the
                 * same results (hopefully). */
                r = strv_env_assign(&nl, "SYSTEMD_IN_INITRD", one_zero(in_initrd()));

                if (m->soft_reboots_count > 0) {
                        r = strv_env_assignf(&nl, "SYSTEMD_SOFT_REBOOTS_COUNT", "%u", m->soft_reboots_count);

        if (m->first_boot >= 0) {
                r = strv_env_assign(&nl, "SYSTEMD_FIRST_BOOT", one_zero(m->first_boot));

        v = detect_virtualization();
        log_debug_errno(v, "Failed to detect virtualization, ignoring: %m");

        /* Prefix with "vm:"/"container:" to distinguish virtualization classes. */
        s = strjoina(VIRTUALIZATION_IS_VM(v) ? "vm:" :
                     VIRTUALIZATION_IS_CONTAINER(v) ? "container:" : ":",
                     virtualization_to_string(v));

        r = strv_env_assign(&nl, "SYSTEMD_VIRTUALIZATION", s);

        cv = detect_confidential_virtualization();
        log_debug_errno(cv, "Failed to detect confidential virtualization, ignoring: %m");

        r = strv_env_assign(&nl, "SYSTEMD_CONFIDENTIAL_VIRTUALIZATION", confidential_virtualization_to_string(cv));

        r = strv_env_assign(&nl, "SYSTEMD_ARCHITECTURE", architecture_to_string(uname_architecture()));

        *ret = TAKE_PTR(nl);
/* Actually run the generators from 'paths' (optionally after remounting the
 * tree read-only for sandboxing), with the three generator output directories
 * passed as argv[1..3] and a restrictive umask. */
static int manager_execute_generators(Manager *m, char * const *paths, bool remount_ro) {
        _cleanup_strv_free_ char **ge = NULL;

        /* NOTE(review): the 'if (r < 0)' guards, the 'if (remount_ro)' condition and
         * some argv/execute_directories() arguments appear lost in extraction —
         * verify upstream. */
        r = build_generator_environment(m, &ge);
        return log_error_errno(r, "Failed to build generator environment: %m");

        /* Remount most of the filesystem tree read-only. We leave /sys/ as-is, because our code
         * checks whether it is read-only to detect containerized execution environments. We leave
         * /run/ as-is too, because that's where our output goes. We also leave /proc/ and /dev/shm/
         * because they're API, and /tmp/ that safe_fork() mounted for us. */
        r = bind_remount_recursive("/", MS_RDONLY, MS_RDONLY,
                                   STRV_MAKE("/sys", "/run", "/proc", "/dev/shm", "/tmp"));
        log_warning_errno(r, "Read-only bind remount failed, ignoring: %m");

        const char *argv[] = {
                NULL, /* Leave this empty, execute_directory() will fill something in */
                m->lookup_paths.generator,
                m->lookup_paths.generator_early,
                m->lookup_paths.generator_late,

        BLOCK_WITH_UMASK(0022);
        return execute_directories(
                        (const char* const*) paths,
                        DEFAULT_TIMEOUT_USEC,
                        /* callbacks= */ NULL, /* callback_args= */ NULL,
                        EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID | EXEC_DIR_WARN_WORLD_WRITABLE);
/* Run all unit generators. System managers fork into a sandboxed mount
 * namespace (read-only tree, optional private /tmp) first, falling back to
 * unsandboxed execution when namespaces are unavailable (containers,
 * qemu-user). User managers execute generators directly. */
static int manager_run_generators(Manager *m) {
        ForkFlags flags = FORK_RESET_SIGNALS | FORK_WAIT | FORK_NEW_MOUNTNS | FORK_MOUNTNS_SLAVE;
        _cleanup_strv_free_ char **paths = NULL;

        /* NOTE(review): guard bodies, 'if (r < 0)' checks, the child-branch condition
         * after safe_fork() and several gotos/braces appear lost in extraction —
         * verify upstream. */
        if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))

        paths = generator_binary_paths(m->runtime_scope);

        if (!generator_path_any(paths))

        r = lookup_paths_mkdir_generator(&m->lookup_paths);
        log_error_errno(r, "Failed to create generator directories: %m");

        /* If we are the system manager, we fork and invoke the generators in a sanitized mount namespace. If
         * we are the user manager, let's just execute the generators directly. We might not have the
         * necessary privileges, and the system manager has already mounted /tmp/ and everything else for us. */
        if (MANAGER_IS_USER(m)) {
                r = manager_execute_generators(m, paths, /* remount_ro= */ false);

        /* On some systems /tmp/ doesn't exist, and on some other systems we cannot create it at all. Avoid
         * trying to mount a private tmpfs on it as there's no one size fits all. */
        if (is_dir("/tmp", /* follow= */ false) > 0 && !MANAGER_IS_TEST_RUN(m))
                flags |= FORK_PRIVATE_TMP;

        r = safe_fork("(sd-gens)", flags, NULL);
        /* Child: run the generators in the sandbox, then exit. */
        r = manager_execute_generators(m, paths, /* remount_ro= */ true);
        _exit(r >= 0 ? EXIT_SUCCESS : EXIT_FAILURE);

        if (!ERRNO_IS_PRIVILEGE(r) && r != -EINVAL) {
                log_error_errno(r, "Failed to fork off sandboxing environment for executing generators: %m");

        /* Failed to fork with new mount namespace? Maybe, running in a container environment with
         * seccomp or without capability.
         *
         * We also allow -EINVAL to allow running without CLONE_NEWNS.
         *
         * Also, when running on non-native userland architecture via systemd-nspawn and
         * qemu-user-static QEMU-emulator, clone() with CLONE_NEWNS fails with EINVAL, see
         * https://github.com/systemd/systemd/issues/28901. */
                      "Failed to fork off sandboxing environment for executing generators. "
                      "Falling back to execute generators without sandboxing: %m");
        r = manager_execute_generators(m, paths, /* remount_ro= */ false);

        lookup_paths_trim_generator(&m->lookup_paths);
/* Merge 'plus' into the manager's transient environment (sanitizing the
 * result). Returns 0 on success, negative errno on allocation failure. */
int manager_transient_environment_add(Manager *m, char **plus) {

        /* NOTE(review): guard-clause body, declaration of 'a' and the OOM check after
         * the merge appear lost in extraction — verify upstream. */
        if (strv_isempty(plus))

        a = strv_env_merge(m->transient_environment, plus);

        sanitize_environment(a);

        return strv_free_and_replace(m->transient_environment, a);
/* Apply deletions ('minus') and additions ('plus') to the client-supplied
 * environment, sanitizing the result. The old strv is freed only when it was
 * actually replaced. */
/* NOTE(review): the parameter list of this function was truncated by
 * extraction (presumably 'Manager *m, char **minus, char **plus') along with
 * OOM checks, intermediate assignments to 'l' and closing braces — verify
 * upstream. */
int manager_client_environment_modify(
        char **a = NULL, **b = NULL, **l;

        if (strv_isempty(minus) && strv_isempty(plus))

        l = m->client_environment;

        if (!strv_isempty(minus)) {
                a = strv_env_delete(l, 1, minus);

        if (!strv_isempty(plus)) {
                b = strv_env_merge(l, plus);

        if (m->client_environment != l)
                strv_free(m->client_environment);

        m->client_environment = sanitize_environment(l);
/* Compute the effective environment (transient merged with client-supplied)
 * and return it in *ret. */
int manager_get_effective_environment(Manager *m, char ***ret) {

        /* NOTE(review): declaration of 'l', OOM check and the assignment to *ret
         * appear lost in extraction — verify upstream. */
        l = strv_env_merge(m->transient_environment, m->client_environment);
/* Copy a UnitDefaults structure into the manager's defaults, duplicating the
 * SMACK label and rlimit array first so the function either fully succeeds or
 * leaves the manager untouched. Returns 0 on success, negative errno on
 * failure. */
int manager_set_unit_defaults(Manager *m, const UnitDefaults *defaults) {
        _cleanup_free_ char *label = NULL;
        struct rlimit *rlimit[_RLIMIT_MAX];

        /* NOTE(review): guard bodies, the strdup of the label into 'label', matching
         * '#endif' for the '#ifdef' below, 'if (r < 0)' checks and the final return
         * appear lost in extraction — verify upstream. */
        /* "/" is used as a magic value to request the compiled-in default label. */
        if (streq_ptr(defaults->smack_process_label, "/"))

        const char *l = defaults->smack_process_label;
#ifdef SMACK_DEFAULT_PROCESS_LABEL
        l = SMACK_DEFAULT_PROCESS_LABEL;

        r = rlimit_copy_all(rlimit, defaults->rlimit);

        m->defaults.std_output = defaults->std_output;
        m->defaults.std_error = defaults->std_error;

        m->defaults.restart_usec = defaults->restart_usec;
        m->defaults.timeout_start_usec = defaults->timeout_start_usec;
        m->defaults.timeout_stop_usec = defaults->timeout_stop_usec;
        m->defaults.timeout_abort_usec = defaults->timeout_abort_usec;
        m->defaults.timeout_abort_set = defaults->timeout_abort_set;
        m->defaults.device_timeout_usec = defaults->device_timeout_usec;

        m->defaults.start_limit = defaults->start_limit;

        m->defaults.memory_accounting = defaults->memory_accounting;
        m->defaults.io_accounting = defaults->io_accounting;
        m->defaults.tasks_accounting = defaults->tasks_accounting;
        m->defaults.ip_accounting = defaults->ip_accounting;

        m->defaults.tasks_max = defaults->tasks_max;
        m->defaults.timer_accuracy_usec = defaults->timer_accuracy_usec;

        m->defaults.oom_policy = defaults->oom_policy;
        m->defaults.oom_score_adjust = defaults->oom_score_adjust;
        m->defaults.oom_score_adjust_set = defaults->oom_score_adjust_set;

        m->defaults.memory_pressure_watch = defaults->memory_pressure_watch;
        m->defaults.memory_pressure_threshold_usec = defaults->memory_pressure_threshold_usec;

        /* Commit the duplicated label and rlimits, releasing the previous ones. */
        free_and_replace(m->defaults.smack_process_label, label);
        rlimit_free_all(m->defaults.rlimit);
        memcpy(m->defaults.rlimit, rlimit, sizeof(struct rlimit *) * _RLIMIT_MAX);
/* (Re)establish or tear down our bus connections depending on whether the
 * dbus service and socket units are currently running. */
void manager_recheck_dbus(Manager *m) {

        /* Connects to the bus if the dbus service and socket are running. If we are running in user mode
         * this is all it does. In system mode we'll also connect to the system bus (which will most likely
         * just reuse the connection of the API bus). That's because the system bus after all runs as service
         * of the system instance, while in the user instance we can assume it's already there. */

        if (MANAGER_IS_RELOADING(m))
                return; /* don't check while we are reloading… */

        /* NOTE(review): the 'else' branch keyword and closing braces appear lost in
         * extraction — verify upstream. */
        if (manager_dbus_is_running(m, false)) {
                (void) bus_init_api(m);

                if (MANAGER_IS_SYSTEM(m))
                        (void) bus_init_system(m);

        (void) bus_done_api(m);

        if (MANAGER_IS_SYSTEM(m))
                (void) bus_done_system(m);
/* Report whether journald is fully up: both its socket unit (in RUNNING state)
 * and its service unit (RUNNING or RELOAD). User managers assume it is. */
static bool manager_journal_is_running(Manager *m) {

        /* NOTE(review): 'return' values in the guard clauses, declaration of 'u' and
         * NULL checks between lookup and SOCKET()/SERVICE() casts appear lost in
         * extraction — verify upstream. */
        if (MANAGER_IS_TEST_RUN(m))

        /* If we are the user manager we can safely assume that the journal is up */
        if (!MANAGER_IS_SYSTEM(m))

        /* Check that the socket is not only up, but in RUNNING state */
        u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
        if (SOCKET(u)->state != SOCKET_RUNNING)

        /* Similar, check if the daemon itself is fully up, too */
        u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
        if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING))
/* Turn off the kernel's /dev/kmsg rate limiting via sysctl so early-boot and
 * shutdown logging is not dropped. */
void disable_printk_ratelimit(void) {
        /* Disable kernel's printk ratelimit.
         *
         * Logging to /dev/kmsg is most useful during early boot and shutdown, where normal logging
         * mechanisms are not available. The semantics of this sysctl are such that any kernel command-line
         * setting takes precedence. */

        /* NOTE(review): the declaration of 'r' and the 'if (r < 0)' guard appear lost
         * in extraction — verify upstream. */
        r = sysctl_write("kernel/printk_devkmsg", "on");
        log_debug_errno(r, "Failed to set sysctl kernel.printk_devkmsg=on: %m");
/* PID 1 only: enable or prohibit logging via IPC depending on whether journald
 * is fully running, to avoid deadlocking on activating it from within a log
 * call. */
void manager_recheck_journal(Manager *m) {

        /* Don't bother with this unless we are in the special situation of being PID 1 */
        /* NOTE(review): guard-clause bodies ('return;') appear lost in extraction —
         * verify upstream. */
        if (getpid_cached() != 1)

        /* Don't check this while we are reloading, things might still change */
        if (MANAGER_IS_RELOADING(m))

        /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If
         * the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we
         * might trigger an activation ourselves we can't fulfill. */
        log_set_prohibit_ipc(!manager_journal_is_running(m));
4371 static ShowStatus
manager_get_show_status(Manager
*m
) {
4374 if (MANAGER_IS_USER(m
))
4375 return _SHOW_STATUS_INVALID
;
4377 if (m
->show_status_overridden
!= _SHOW_STATUS_INVALID
)
4378 return m
->show_status_overridden
;
4380 return m
->show_status
;
4383 bool manager_get_show_status_on(Manager
*m
) {
4386 return show_status_on(manager_get_show_status(m
));
/* Create or remove the /run marker file other tools use to learn whether
 * status output is enabled. */
static void set_show_status_marker(bool b) {
        /* NOTE(review): the 'if (b)' / 'else' around these two calls appears lost in
         * extraction — only one of them should run, depending on 'b'. Verify upstream. */
        (void) touch("/run/systemd/show-status");
        (void) unlink("/run/systemd/show-status");
/* Set the configured show-status mode. The /run marker is only updated when no
 * runtime override is active. 'reason' is used for the debug log line. */
void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason) {

        assert(mode >= 0 && mode < _SHOW_STATUS_MAX);

        /* NOTE(review): guard-clause bodies, declaration of 'enabled', the 'reason'
         * format argument and closing braces appear lost in extraction — verify
         * upstream. */
        if (MANAGER_IS_USER(m))

        if (mode == m->show_status)

        if (m->show_status_overridden == _SHOW_STATUS_INVALID) {

                enabled = show_status_on(mode);
                log_debug("%s (%s) showing of status (%s).",
                          enabled ? "Enabling" : "Disabling",
                          strna(show_status_to_string(mode)),

                set_show_status_marker(enabled);

        m->show_status = mode;
/* Override the show-status mode at runtime; _SHOW_STATUS_INVALID restores the
 * configured value. Always refreshes the /run marker. */
void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason) {

        assert(mode < _SHOW_STATUS_MAX);

        /* NOTE(review): guard-clause bodies and the 'reason' format argument appear
         * lost in extraction — verify upstream. */
        if (MANAGER_IS_USER(m))

        if (mode == m->show_status_overridden)

        m->show_status_overridden = mode;

        /* When clearing the override, fall back to the configured mode for the
         * marker/logging below. */
        if (mode == _SHOW_STATUS_INVALID)
                mode = m->show_status;

        log_debug("%s (%s) showing of status (%s).",
                  m->show_status_overridden != _SHOW_STATUS_INVALID ? "Overriding" : "Restoring",
                  strna(show_status_to_string(mode)),

        set_show_status_marker(show_status_on(mode));
/* Return the TTY to use for confirm-spawn prompts, after basic sanity checks
 * (exists, is a character device). Falls back to /dev/console on problems,
 * warning only once per distinct errno. */
const char* manager_get_confirm_spawn(Manager *m) {
        static int last_errno = 0;

        /* Here's the deal: we want to test the validity of the console but don't want
         * PID1 to go through the whole console process which might block. But we also
         * want to warn the user only once if something is wrong with the console so we
         * cannot do the sanity checks after spawning our children. So here we simply do
         * really basic tests to hopefully trap common errors.
         *
         * If the console suddenly disappear at the time our children will really it
         * then they will simply fail to acquire it and a positive answer will be
         * assumed. New children will fall back to /dev/console though.
         *
         * Note: TTYs are devices that can come and go any time, and frequently aren't
         * available yet during early boot (consider a USB rs232 dongle...). If for any
         * reason the configured console is not ready, we fall back to the default
         * console. */

        if (!m->confirm_spawn || path_equal(m->confirm_spawn, "/dev/console"))
                return m->confirm_spawn;

        /* NOTE(review): the declarations of 'st'/'r', the error-branch bodies and the
         * 'fail:' style label appear lost in extraction — verify upstream. */
        if (stat(m->confirm_spawn, &st) < 0) {

        if (!S_ISCHR(st.st_mode)) {

        return m->confirm_spawn;

        if (last_errno != r)
                last_errno = log_warning_errno(r, "Failed to open %s, using default console: %m", m->confirm_spawn);

        return "/dev/console";
/* Record whether this is the machine's first boot, maintaining the
 * /run/systemd/first-boot marker accordingly. System manager only. */
void manager_set_first_boot(Manager *m, bool b) {

        /* NOTE(review): guard-clause body, the 'if (b)' / 'else' around touch/unlink
         * and the final 'm->first_boot = b;' appear lost in extraction — verify
         * upstream. */
        if (!MANAGER_IS_SYSTEM(m))

        if (m->first_boot != (int) b) {
                (void) touch("/run/systemd/first-boot");
                (void) unlink("/run/systemd/first-boot");
/* Drop a /run marker telling spawned children to stop asking confirm-spawn
 * questions. */
void manager_disable_confirm_spawn(void) {
        (void) touch("/run/systemd/confirm_spawn_disabled");
/* Decide whether a status message of the given type should be printed to the
 * console right now (system manager, console output allowed, appropriate
 * manager state, no password prompt pending). */
static bool manager_should_show_status(Manager *m, StatusType type) {

        /* NOTE(review): 'return' values in the guard clauses appear lost in
         * extraction — verify upstream. */
        if (!MANAGER_IS_SYSTEM(m))

        if (m->no_console_output)

        if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))

        /* If we cannot find out the status properly, just proceed. */
        if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)

        if (type == STATUS_TYPE_NOTICE && m->show_status != SHOW_STATUS_NO)

        return manager_get_show_status_on(m);
/* Print a formatted status line to the console, subject to the show-status
 * policy. A NULL manager means post-shutdown, where output is always allowed. */
void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {

        /* If m is NULL, assume we're after shutdown and let the messages through. */
        /* NOTE(review): guard-clause bodies, declaration of 'va_list ap' and the
         * matching va_end() appear lost in extraction — verify upstream. */
        if (m && !manager_should_show_status(m, type))

        /* XXX We should totally drop the check for ephemeral here
         * and thus effectively make 'Type=idle' pointless. */
        if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)

        va_start(ap, format);
        status_vprintf(status, SHOW_STATUS_ELLIPSIZE|(type == STATUS_TYPE_EPHEMERAL ? SHOW_STATUS_EPHEMERAL : 0), format, ap);
4550 Set
* manager_get_units_needing_mounts_for(Manager
*m
, const char *path
, UnitMountDependencyType t
) {
4553 assert(t
>= 0 && t
< _UNIT_MOUNT_DEPENDENCY_TYPE_MAX
);
4555 if (path_equal(path
, "/"))
4558 return hashmap_get(m
->units_needing_mounts_for
[t
], path
);
4561 int manager_update_failed_units(Manager
*m
, Unit
*u
, bool failed
) {
4566 assert(u
->manager
== m
);
4568 size
= set_size(m
->failed_units
);
4571 r
= set_ensure_put(&m
->failed_units
, NULL
, u
);
4575 (void) set_remove(m
->failed_units
, u
);
4577 if (set_size(m
->failed_units
) != size
)
4578 bus_manager_send_change_signal(m
);
4583 ManagerState
manager_state(Manager
*m
) {
4588 /* Is the special shutdown target active or queued? If so, we are in shutdown state */
4589 u
= manager_get_unit(m
, SPECIAL_SHUTDOWN_TARGET
);
4590 if (u
&& unit_active_or_pending(u
))
4591 return MANAGER_STOPPING
;
4593 /* Did we ever finish booting? If not then we are still starting up */
4594 if (!MANAGER_IS_FINISHED(m
)) {
4596 u
= manager_get_unit(m
, SPECIAL_BASIC_TARGET
);
4597 if (!u
|| !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u
)))
4598 return MANAGER_INITIALIZING
;
4600 return MANAGER_STARTING
;
4603 if (MANAGER_IS_SYSTEM(m
)) {
4604 /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
4605 u
= manager_get_unit(m
, SPECIAL_RESCUE_TARGET
);
4606 if (u
&& unit_active_or_pending(u
))
4607 return MANAGER_MAINTENANCE
;
4609 u
= manager_get_unit(m
, SPECIAL_EMERGENCY_TARGET
);
4610 if (u
&& unit_active_or_pending(u
))
4611 return MANAGER_MAINTENANCE
;
4614 /* Are there any failed units? If so, we are in degraded mode */
4615 if (!set_isempty(m
->failed_units
))
4616 return MANAGER_DEGRADED
;
4618 return MANAGER_RUNNING
;
4621 static void manager_unref_uid_internal(
4625 int (*_clean_ipc
)(uid_t uid
)) {
4629 assert(uid_is_valid(uid
));
4632 /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the
4633 * assumption that uid_t and gid_t are actually defined the same way, with the same validity rules.
4635 * We store a hashmap where the key is the UID/GID and the value is a 32-bit reference counter, whose
4636 * highest bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last
4637 * reference to the UID/GID is dropped. The flag is set to on, once at least one reference from a
4638 * unit where RemoveIPC= is set is added on a UID/GID. It is reset when the UID's/GID's reference
4639 * counter drops to 0 again. */
4641 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4642 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4644 if (uid
== 0) /* We don't keep track of root, and will never destroy it */
4647 c
= PTR_TO_UINT32(hashmap_get(uid_refs
, UID_TO_PTR(uid
)));
4649 n
= c
& ~DESTROY_IPC_FLAG
;
4653 if (destroy_now
&& n
== 0) {
4654 hashmap_remove(uid_refs
, UID_TO_PTR(uid
));
4656 if (c
& DESTROY_IPC_FLAG
) {
4657 log_debug("%s " UID_FMT
" is no longer referenced, cleaning up its IPC.",
4658 _clean_ipc
== clean_ipc_by_uid
? "UID" : "GID",
4660 (void) _clean_ipc(uid
);
4663 c
= n
| (c
& DESTROY_IPC_FLAG
);
4664 assert_se(hashmap_update(uid_refs
, UID_TO_PTR(uid
), UINT32_TO_PTR(c
)) >= 0);
4668 void manager_unref_uid(Manager
*m
, uid_t uid
, bool destroy_now
) {
4669 manager_unref_uid_internal(m
->uid_refs
, uid
, destroy_now
, clean_ipc_by_uid
);
4672 void manager_unref_gid(Manager
*m
, gid_t gid
, bool destroy_now
) {
4673 manager_unref_uid_internal(m
->gid_refs
, (uid_t
) gid
, destroy_now
, clean_ipc_by_gid
);
4676 static int manager_ref_uid_internal(
4685 assert(uid_is_valid(uid
));
4687 /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the
4688 * assumption that uid_t and gid_t are actually defined the same way, with the same validity
4691 assert_cc(sizeof(uid_t
) == sizeof(gid_t
));
4692 assert_cc(UID_INVALID
== (uid_t
) GID_INVALID
);
4694 if (uid
== 0) /* We don't keep track of root, and will never destroy it */
4697 r
= hashmap_ensure_allocated(uid_refs
, &trivial_hash_ops
);
4701 c
= PTR_TO_UINT32(hashmap_get(*uid_refs
, UID_TO_PTR(uid
)));
4703 n
= c
& ~DESTROY_IPC_FLAG
;
4706 if (n
& DESTROY_IPC_FLAG
) /* check for overflow */
4709 c
= n
| (c
& DESTROY_IPC_FLAG
) | (clean_ipc
? DESTROY_IPC_FLAG
: 0);
4711 return hashmap_replace(*uid_refs
, UID_TO_PTR(uid
), UINT32_TO_PTR(c
));
4714 int manager_ref_uid(Manager
*m
, uid_t uid
, bool clean_ipc
) {
4715 return manager_ref_uid_internal(&m
->uid_refs
, uid
, clean_ipc
);
4718 int manager_ref_gid(Manager
*m
, gid_t gid
, bool clean_ipc
) {
4719 return manager_ref_uid_internal(&m
->gid_refs
, (uid_t
) gid
, clean_ipc
);
4722 static void manager_vacuum_uid_refs_internal(
4724 int (*_clean_ipc
)(uid_t uid
)) {
4730 HASHMAP_FOREACH_KEY(p
, k
, uid_refs
) {
4734 uid
= PTR_TO_UID(k
);
4735 c
= PTR_TO_UINT32(p
);
4737 n
= c
& ~DESTROY_IPC_FLAG
;
4741 if (c
& DESTROY_IPC_FLAG
) {
4742 log_debug("Found unreferenced %s " UID_FMT
" after reload/reexec. Cleaning up.",
4743 _clean_ipc
== clean_ipc_by_uid
? "UID" : "GID",
4745 (void) _clean_ipc(uid
);
4748 assert_se(hashmap_remove(uid_refs
, k
) == p
);
4752 static void manager_vacuum_uid_refs(Manager
*m
) {
4753 manager_vacuum_uid_refs_internal(m
->uid_refs
, clean_ipc_by_uid
);
4756 static void manager_vacuum_gid_refs(Manager
*m
) {
4757 manager_vacuum_uid_refs_internal(m
->gid_refs
, clean_ipc_by_gid
);
4760 static void manager_vacuum(Manager
*m
) {
4763 /* Release any dynamic users no longer referenced */
4764 dynamic_user_vacuum(m
, true);
4766 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
4767 manager_vacuum_uid_refs(m
);
4768 manager_vacuum_gid_refs(m
);
4770 /* Release any runtimes no longer referenced */
4771 exec_shared_runtime_vacuum(m
);
4774 static int manager_dispatch_user_lookup_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
4778 char unit_name
[UNIT_NAME_MAX
+1];
4781 Manager
*m
= ASSERT_PTR(userdata
);
4788 /* Invoked whenever a child process succeeded resolving its user/group to use and sent us the
4789 * resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that
4790 * it can add a reference to the UID/GID so that it can destroy the UID/GID's IPC objects when the
4791 * reference counter drops to 0. */
4793 l
= recv(fd
, &buffer
, sizeof(buffer
), MSG_DONTWAIT
);
4795 if (ERRNO_IS_TRANSIENT(errno
))
4798 return log_error_errno(errno
, "Failed to read from user lookup fd: %m");
4801 if ((size_t) l
<= offsetof(struct buffer
, unit_name
)) {
4802 log_warning("Received too short user lookup message, ignoring.");
4806 if ((size_t) l
> offsetof(struct buffer
, unit_name
) + UNIT_NAME_MAX
) {
4807 log_warning("Received too long user lookup message, ignoring.");
4811 if (!uid_is_valid(buffer
.uid
) && !gid_is_valid(buffer
.gid
)) {
4812 log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
4816 n
= (size_t) l
- offsetof(struct buffer
, unit_name
);
4817 if (memchr(buffer
.unit_name
, 0, n
)) {
4818 log_warning("Received lookup message with embedded NUL character, ignoring.");
4822 buffer
.unit_name
[n
] = 0;
4823 u
= manager_get_unit(m
, buffer
.unit_name
);
4825 log_debug("Got user lookup message but unit doesn't exist, ignoring.");
4829 log_unit_debug(u
, "User lookup succeeded: uid=" UID_FMT
" gid=" GID_FMT
, buffer
.uid
, buffer
.gid
);
4831 unit_notify_user_lookup(u
, buffer
.uid
, buffer
.gid
);
4835 static int manager_dispatch_handoff_timestamp_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
4836 Manager
*m
= ASSERT_PTR(userdata
);
4838 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred
))) control
;
4839 struct msghdr msghdr
= {
4840 .msg_iov
= &IOVEC_MAKE(ts
, sizeof(ts
)),
4842 .msg_control
= &control
,
4843 .msg_controllen
= sizeof(control
),
4849 n
= recvmsg_safe(m
->handoff_timestamp_fds
[0], &msghdr
, MSG_DONTWAIT
|MSG_CMSG_CLOEXEC
);
4850 if (ERRNO_IS_NEG_TRANSIENT(n
))
4851 return 0; /* Spurious wakeup, try again */
4853 log_warning_errno(n
, "Got message with truncated control data (unexpected fds sent?), ignoring.");
4857 log_warning_errno(n
, "Got message with truncated payload data, ignoring.");
4861 return log_error_errno(n
, "Failed to receive handoff timestamp message: %m");
4863 cmsg_close_all(&msghdr
);
4865 if (n
!= sizeof(ts
)) {
4866 log_warning("Got handoff timestamp message of unexpected size %zi (expected %zu), ignoring.", n
, sizeof(ts
));
4870 struct ucred
*ucred
= CMSG_FIND_DATA(&msghdr
, SOL_SOCKET
, SCM_CREDENTIALS
, struct ucred
);
4871 if (!ucred
|| !pid_is_valid(ucred
->pid
)) {
4872 log_warning("Received handoff timestamp message without valid credentials. Ignoring.");
4876 log_debug("Got handoff timestamp event for PID " PID_FMT
".", ucred
->pid
);
4878 _cleanup_free_ Unit
**units
= NULL
;
4879 int n_units
= manager_get_units_for_pidref(m
, &PIDREF_MAKE_FROM_PID(ucred
->pid
), &units
);
4881 log_warning_errno(n_units
, "Unable to determine units for PID " PID_FMT
", ignoring: %m", ucred
->pid
);
4885 log_debug("Got handoff timestamp for process " PID_FMT
" we are not interested in, ignoring.", ucred
->pid
);
4889 dual_timestamp dt
= {
4894 FOREACH_ARRAY(u
, units
, n_units
) {
4895 if (!UNIT_VTABLE(*u
)->notify_handoff_timestamp
)
4898 UNIT_VTABLE(*u
)->notify_handoff_timestamp(*u
, ucred
, &dt
);
4904 static int manager_dispatch_pidref_transport_fd(sd_event_source
*source
, int fd
, uint32_t revents
, void *userdata
) {
4905 Manager
*m
= ASSERT_PTR(userdata
);
4906 _cleanup_(pidref_done
) PidRef child_pidref
= PIDREF_NULL
, parent_pidref
= PIDREF_NULL
;
4907 _cleanup_close_
int child_pidfd
= -EBADF
, parent_pidfd
= -EBADF
;
4908 struct ucred
*ucred
= NULL
;
4909 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred
)) + CMSG_SPACE(sizeof(int)) * 2) control
;
4910 pid_t child_pid
= 0; /* silence false-positive warning by coverity */
4911 struct msghdr msghdr
= {
4912 .msg_iov
= &IOVEC_MAKE(&child_pid
, sizeof(child_pid
)),
4914 .msg_control
= &control
,
4915 .msg_controllen
= sizeof(control
),
4917 struct cmsghdr
*cmsg
;
4924 * - Parent PID in ucreds enabled via SO_PASSCRED
4925 * - Parent PIDFD in SCM_PIDFD message enabled via SO_PASSPIDFD
4926 * - Child PIDFD in SCM_RIGHTS in message body
4927 * - Child PID in message IOV
4929 * SO_PASSPIDFD may not be supported by the kernel (it is supported since v6.5) so we fall back to
4930 * using parent PID from ucreds and accept some raciness. */
4931 n
= recvmsg_safe(m
->pidref_transport_fds
[0], &msghdr
, MSG_DONTWAIT
|MSG_CMSG_CLOEXEC
|MSG_TRUNC
);
4932 if (ERRNO_IS_NEG_TRANSIENT(n
))
4933 return 0; /* Spurious wakeup, try again */
4935 log_warning_errno(n
, "Got message with truncated control data (unexpected fds sent?), ignoring.");
4939 log_warning_errno(n
, "Got message with truncated payload data, ignoring.");
4943 return log_error_errno(n
, "Failed to receive pidref message: %m");
4945 if (n
!= sizeof(child_pid
)) {
4946 log_warning("Got pidref message of unexpected size %zi (expected %zu), ignoring.", n
, sizeof(child_pid
));
4950 CMSG_FOREACH(cmsg
, &msghdr
) {
4951 if (cmsg
->cmsg_level
!= SOL_SOCKET
)
4954 if (cmsg
->cmsg_type
== SCM_CREDENTIALS
&& cmsg
->cmsg_len
== CMSG_LEN(sizeof(struct ucred
))) {
4956 ucred
= CMSG_TYPED_DATA(cmsg
, struct ucred
);
4957 } else if (cmsg
->cmsg_type
== SCM_PIDFD
) {
4958 assert(parent_pidfd
< 0);
4959 parent_pidfd
= *CMSG_TYPED_DATA(cmsg
, int);
4960 } else if (cmsg
->cmsg_type
== SCM_RIGHTS
) {
4961 assert(child_pidfd
< 0);
4962 child_pidfd
= *CMSG_TYPED_DATA(cmsg
, int);
4966 /* Verify and set parent pidref. */
4967 if (!ucred
|| !pid_is_valid(ucred
->pid
)) {
4968 log_warning("Received pidref message without valid credentials. Ignoring.");
4972 /* Need to handle kernels without SO_PASSPIDFD where SCM_PIDFD will not be set. */
4973 if (parent_pidfd
>= 0)
4974 r
= pidref_set_pidfd_consume(&parent_pidref
, TAKE_FD(parent_pidfd
));
4976 r
= pidref_set_pid(&parent_pidref
, ucred
->pid
);
4979 log_debug_errno(r
, "PidRef child process died before message is processed. Ignoring.");
4981 log_warning_errno(r
, "Failed to pin pidref child process, ignoring message: %m");
4985 if (parent_pidref
.pid
!= ucred
->pid
) {
4986 assert(parent_pidref
.fd
>= 0);
4987 log_warning("Got SCM_PIDFD for parent process " PID_FMT
" but got SCM_CREDENTIALS for parent process " PID_FMT
". Ignoring.",
4988 parent_pidref
.pid
, ucred
->pid
);
4992 /* Verify and set child pidref. */
4993 if (!pid_is_valid(child_pid
)) {
4994 log_warning("Received pidref message without valid child PID. Ignoring.");
4998 /* Need to handle kernels without PIDFD support. */
4999 if (child_pidfd
>= 0)
5000 r
= pidref_set_pidfd_consume(&child_pidref
, TAKE_FD(child_pidfd
));
5002 r
= pidref_set_pid(&child_pidref
, child_pid
);
5005 log_debug_errno(r
, "PidRef child process died before message is processed. Ignoring.");
5007 log_warning_errno(r
, "Failed to pin pidref child process, ignoring message: %m");
5011 if (child_pidref
.pid
!= child_pid
) {
5012 assert(child_pidref
.fd
>= 0);
5013 log_warning("Got SCM_RIGHTS for child process " PID_FMT
" but PID in IOV message is " PID_FMT
". Ignoring.",
5014 child_pidref
.pid
, child_pid
);
5018 log_debug("Got pidref event with parent PID " PID_FMT
" and child PID " PID_FMT
".", parent_pidref
.pid
, child_pidref
.pid
);
5020 /* Try finding cgroup of parent process. But if parent process exited and we're not using PIDFD, this could return NULL.
5021 * Then fall back to finding cgroup of the child process. */
5022 Unit
*u
= manager_get_unit_by_pidref_cgroup(m
, &parent_pidref
);
5024 u
= manager_get_unit_by_pidref_cgroup(m
, &child_pidref
);
5026 log_debug("Got pidref for parent process " PID_FMT
" and child process " PID_FMT
" we are not interested in, ignoring.", parent_pidref
.pid
, child_pidref
.pid
);
5030 if (!UNIT_VTABLE(u
)->notify_pidref
) {
5031 log_unit_warning(u
, "Received pidref event from unexpected unit type '%s'.", unit_type_to_string(u
->type
));
5035 UNIT_VTABLE(u
)->notify_pidref(u
, &parent_pidref
, &child_pidref
);
5040 void manager_ref_console(Manager
*m
) {
5046 void manager_unref_console(Manager
*m
) {
5048 assert(m
->n_on_console
> 0);
5051 if (m
->n_on_console
== 0)
5052 m
->no_console_output
= false; /* unset no_console_output flag, since the console is definitely free now */
5055 void manager_override_log_level(Manager
*m
, int level
) {
5056 _cleanup_free_
char *s
= NULL
;
5059 if (!m
->log_level_overridden
) {
5060 m
->original_log_level
= log_get_max_level();
5061 m
->log_level_overridden
= true;
5064 (void) log_level_to_string_alloc(level
, &s
);
5065 log_info("Setting log level to %s.", strna(s
));
5067 log_set_max_level(level
);
5070 void manager_restore_original_log_level(Manager
*m
) {
5071 _cleanup_free_
char *s
= NULL
;
5074 if (!m
->log_level_overridden
)
5077 (void) log_level_to_string_alloc(m
->original_log_level
, &s
);
5078 log_info("Restoring log level to original (%s).", strna(s
));
5080 log_set_max_level(m
->original_log_level
);
5081 m
->log_level_overridden
= false;
5084 void manager_override_log_target(Manager
*m
, LogTarget target
) {
5087 if (!m
->log_target_overridden
) {
5088 m
->original_log_target
= log_get_target();
5089 m
->log_target_overridden
= true;
5092 log_info("Setting log target to %s.", log_target_to_string(target
));
5093 log_set_target(target
);
5096 void manager_restore_original_log_target(Manager
*m
) {
5099 if (!m
->log_target_overridden
)
5102 log_info("Restoring log target to original %s.", log_target_to_string(m
->original_log_target
));
5104 log_set_target(m
->original_log_target
);
5105 m
->log_target_overridden
= false;
5108 ManagerTimestamp
manager_timestamp_initrd_mangle(ManagerTimestamp s
) {
5110 s
>= MANAGER_TIMESTAMP_SECURITY_START
&&
5111 s
<= MANAGER_TIMESTAMP_UNITS_LOAD_FINISH
)
5112 return s
- MANAGER_TIMESTAMP_SECURITY_START
+ MANAGER_TIMESTAMP_INITRD_SECURITY_START
;
5116 int manager_allocate_idle_pipe(Manager
*m
) {
5121 if (m
->idle_pipe
[0] >= 0) {
5122 assert(m
->idle_pipe
[1] >= 0);
5123 assert(m
->idle_pipe
[2] >= 0);
5124 assert(m
->idle_pipe
[3] >= 0);
5128 assert(m
->idle_pipe
[1] < 0);
5129 assert(m
->idle_pipe
[2] < 0);
5130 assert(m
->idle_pipe
[3] < 0);
5132 r
= RET_NERRNO(pipe2(m
->idle_pipe
+ 0, O_NONBLOCK
|O_CLOEXEC
));
5136 r
= RET_NERRNO(pipe2(m
->idle_pipe
+ 2, O_NONBLOCK
|O_CLOEXEC
));
5138 safe_close_pair(m
->idle_pipe
+ 0);
5145 void unit_defaults_init(UnitDefaults
*defaults
, RuntimeScope scope
) {
5148 assert(scope
< _RUNTIME_SCOPE_MAX
);
5150 *defaults
= (UnitDefaults
) {
5151 .std_output
= EXEC_OUTPUT_JOURNAL
,
5152 .std_error
= EXEC_OUTPUT_INHERIT
,
5153 .restart_usec
= DEFAULT_RESTART_USEC
,
5154 .timeout_start_usec
= manager_default_timeout(scope
),
5155 .timeout_stop_usec
= manager_default_timeout(scope
),
5156 .timeout_abort_usec
= manager_default_timeout(scope
),
5157 .timeout_abort_set
= false,
5158 .device_timeout_usec
= manager_default_timeout(scope
),
5159 .start_limit
= { DEFAULT_START_LIMIT_INTERVAL
, DEFAULT_START_LIMIT_BURST
},
5161 .memory_accounting
= MEMORY_ACCOUNTING_DEFAULT
,
5162 .io_accounting
= false,
5163 .tasks_accounting
= true,
5164 .ip_accounting
= false,
5166 .tasks_max
= DEFAULT_TASKS_MAX
,
5167 .timer_accuracy_usec
= 1 * USEC_PER_MINUTE
,
5169 .memory_pressure_watch
= CGROUP_PRESSURE_WATCH_AUTO
,
5170 .memory_pressure_threshold_usec
= MEMORY_PRESSURE_DEFAULT_THRESHOLD_USEC
,
5172 .oom_policy
= OOM_STOP
,
5173 .oom_score_adjust_set
= false,
5177 void unit_defaults_done(UnitDefaults
*defaults
) {
5180 defaults
->smack_process_label
= mfree(defaults
->smack_process_label
);
5181 rlimit_free_all(defaults
->rlimit
);
5184 LogTarget
manager_get_executor_log_target(Manager
*m
) {
5187 /* If journald is not available tell sd-executor to go to kmsg, as it might be starting journald */
5188 if (!MANAGER_IS_TEST_RUN(m
) && !manager_journal_is_running(m
))
5189 return LOG_TARGET_KMSG
;
5191 return log_get_target();
5194 static const char* const manager_state_table
[_MANAGER_STATE_MAX
] = {
5195 [MANAGER_INITIALIZING
] = "initializing",
5196 [MANAGER_STARTING
] = "starting",
5197 [MANAGER_RUNNING
] = "running",
5198 [MANAGER_DEGRADED
] = "degraded",
5199 [MANAGER_MAINTENANCE
] = "maintenance",
5200 [MANAGER_STOPPING
] = "stopping",
5203 DEFINE_STRING_TABLE_LOOKUP(manager_state
, ManagerState
);
5205 static const char* const manager_objective_table
[_MANAGER_OBJECTIVE_MAX
] = {
5206 [MANAGER_OK
] = "ok",
5207 [MANAGER_EXIT
] = "exit",
5208 [MANAGER_RELOAD
] = "reload",
5209 [MANAGER_REEXECUTE
] = "reexecute",
5210 [MANAGER_REBOOT
] = "reboot",
5211 [MANAGER_SOFT_REBOOT
] = "soft-reboot",
5212 [MANAGER_POWEROFF
] = "poweroff",
5213 [MANAGER_HALT
] = "halt",
5214 [MANAGER_KEXEC
] = "kexec",
5215 [MANAGER_SWITCH_ROOT
] = "switch-root",
5218 DEFINE_STRING_TABLE_LOOKUP(manager_objective
, ManagerObjective
);
5220 static const char* const manager_timestamp_table
[_MANAGER_TIMESTAMP_MAX
] = {
5221 [MANAGER_TIMESTAMP_FIRMWARE
] = "firmware",
5222 [MANAGER_TIMESTAMP_LOADER
] = "loader",
5223 [MANAGER_TIMESTAMP_KERNEL
] = "kernel",
5224 [MANAGER_TIMESTAMP_INITRD
] = "initrd",
5225 [MANAGER_TIMESTAMP_USERSPACE
] = "userspace",
5226 [MANAGER_TIMESTAMP_FINISH
] = "finish",
5227 [MANAGER_TIMESTAMP_SECURITY_START
] = "security-start",
5228 [MANAGER_TIMESTAMP_SECURITY_FINISH
] = "security-finish",
5229 [MANAGER_TIMESTAMP_GENERATORS_START
] = "generators-start",
5230 [MANAGER_TIMESTAMP_GENERATORS_FINISH
] = "generators-finish",
5231 [MANAGER_TIMESTAMP_UNITS_LOAD_START
] = "units-load-start",
5232 [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH
] = "units-load-finish",
5233 [MANAGER_TIMESTAMP_UNITS_LOAD
] = "units-load",
5234 [MANAGER_TIMESTAMP_INITRD_SECURITY_START
] = "initrd-security-start",
5235 [MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH
] = "initrd-security-finish",
5236 [MANAGER_TIMESTAMP_INITRD_GENERATORS_START
] = "initrd-generators-start",
5237 [MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH
] = "initrd-generators-finish",
5238 [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START
] = "initrd-units-load-start",
5239 [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH
] = "initrd-units-load-finish",
5240 [MANAGER_TIMESTAMP_SHUTDOWN_START
] = "shutdown-start",
5243 DEFINE_STRING_TABLE_LOOKUP(manager_timestamp
, ManagerTimestamp
);