core: notify supervisor over targets we reach, as we reach them
src/core/manager.c
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <fcntl.h>
#include <linux/kd.h>
#include <sys/epoll.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/timerfd.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <unistd.h>

#if HAVE_AUDIT
#include <libaudit.h>
#endif

#include "sd-daemon.h"
#include "sd-messages.h"
#include "sd-path.h"

#include "all-units.h"
#include "alloc-util.h"
#include "audit-fd.h"
#include "boot-timestamps.h"
#include "build-path.h"
#include "bus-common-errors.h"
#include "bus-error.h"
#include "bus-kernel.h"
#include "bus-util.h"
#include "clean-ipc.h"
#include "clock-util.h"
#include "common-signal.h"
#include "confidential-virt.h"
#include "constants.h"
#include "core-varlink.h"
#include "creds-util.h"
#include "dbus-job.h"
#include "dbus-manager.h"
#include "dbus-unit.h"
#include "dbus.h"
#include "dirent-util.h"
#include "env-util.h"
#include "escape.h"
#include "event-util.h"
#include "exec-util.h"
#include "execute.h"
#include "exit-status.h"
#include "fd-util.h"
#include "fileio.h"
#include "generator-setup.h"
#include "hashmap.h"
#include "initrd-util.h"
#include "inotify-util.h"
#include "install.h"
#include "io-util.h"
#include "label-util.h"
#include "load-fragment.h"
#include "locale-setup.h"
#include "log.h"
#include "macro.h"
#include "manager.h"
#include "manager-dump.h"
#include "manager-serialize.h"
#include "memory-util.h"
#include "mkdir-label.h"
#include "mount-util.h"
#include "os-util.h"
#include "parse-util.h"
#include "path-lookup.h"
#include "path-util.h"
#include "plymouth-util.h"
#include "pretty-print.h"
#include "process-util.h"
#include "psi-util.h"
#include "ratelimit.h"
#include "rlimit-util.h"
#include "rm-rf.h"
#include "selinux-util.h"
#include "signal-util.h"
#include "socket-util.h"
#include "special.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "strxcpyx.h"
#include "sysctl-util.h"
#include "syslog-util.h"
#include "terminal-util.h"
#include "time-util.h"
#include "transaction.h"
#include "uid-range.h"
#include "umask-util.h"
#include "unit-name.h"
#include "user-util.h"
#include "virt.h"
#include "watchdog.h"

#define NOTIFY_RCVBUF_SIZE (8*1024*1024)
#define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)

/* Initial delay and the interval for printing status messages about running jobs */
#define JOBS_IN_PROGRESS_WAIT_USEC (2*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_QUIET_WAIT_USEC (25*USEC_PER_SEC)
#define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
#define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
/* If there are more than 1K bus messages queued across our API and direct buses, then let's not add more
 * on top until the queue empties out again. */
#define MANAGER_BUS_BUSY_THRESHOLD 1024LU

/* How many units and jobs to process of the bus queue before returning to the event loop. */
#define MANAGER_BUS_MESSAGE_BUDGET 100U

#define DEFAULT_TASKS_MAX ((CGroupTasksMax) { 15U, 100U }) /* 15% */
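
/* For illustration: CGroupTasksMax holds a value/scale pair, so { 15U, 100U } means 15/100 of the
 * kernel's maximum number of tasks. A minimal sketch of how such a fraction resolves, assuming a pids
 * limit of 32768:
 *
 *         uint64_t limit = 32768;
 *         uint64_t tasks_max = limit * 15 / 100;   // = 4915 tasks
 */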

static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
static int manager_dispatch_timezone_change(sd_event_source *source, const struct inotify_event *event, void *userdata);
static int manager_run_environment_generators(Manager *m);
static int manager_run_generators(Manager *m);
static void manager_vacuum(Manager *m);

static usec_t manager_watch_jobs_next_time(Manager *m) {
        usec_t timeout;

        if (MANAGER_IS_USER(m))
                /* Let the user manager show status without much delay, so that the system manager can
                 * make use of it, if it wants to. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC * 2 / 3;
        else if (show_status_on(m->show_status))
                /* When status is on, just use the usual timeout. */
                timeout = JOBS_IN_PROGRESS_WAIT_USEC;
        else
                timeout = JOBS_IN_PROGRESS_QUIET_WAIT_USEC;

        return usec_add(now(CLOCK_MONOTONIC), timeout);
}

static bool manager_is_confirm_spawn_disabled(Manager *m) {
        assert(m);

        if (!m->confirm_spawn)
                return true;

        return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
}

static void manager_watch_jobs_in_progress(Manager *m) {
        usec_t next;
        int r;

        assert(m);

        /* We do not want to show the cylon animation if the user needs to confirm service executions,
         * since the confirmation messages would otherwise be garbled by the animation. */
        if (!manager_is_confirm_spawn_disabled(m))
                return;

        if (m->jobs_in_progress_event_source)
                return;

        next = manager_watch_jobs_next_time(m);
        r = sd_event_add_time(
                        m->event,
                        &m->jobs_in_progress_event_source,
                        CLOCK_MONOTONIC,
                        next, 0,
                        manager_dispatch_jobs_in_progress, m);
        if (r < 0)
                return;

        (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
}

static void manager_flip_auto_status(Manager *m, bool enable, const char *reason) {
        assert(m);

        if (enable) {
                if (m->show_status == SHOW_STATUS_AUTO)
                        manager_set_show_status(m, SHOW_STATUS_TEMPORARY, reason);
        } else {
                if (m->show_status == SHOW_STATUS_TEMPORARY)
                        manager_set_show_status(m, SHOW_STATUS_AUTO, reason);
        }
}

static void manager_print_jobs_in_progress(Manager *m) {
        Job *j;
        unsigned counter = 0, print_nr;
        char cylon[6 + CYLON_BUFFER_EXTRA + 1];
        unsigned cylon_pos;
        uint64_t timeout = 0;

        assert(m);
        assert(m->n_running_jobs > 0);

        manager_flip_auto_status(m, true, "delay");

        print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;

        HASHMAP_FOREACH(j, m->jobs)
                if (j->state == JOB_RUNNING && counter++ == print_nr)
                        break;

        /* m->n_running_jobs must be consistent with the contents of m->jobs,
         * so the above loop must have succeeded in finding j. */
        assert(counter == print_nr + 1);
        assert(j);

        cylon_pos = m->jobs_in_progress_iteration % 14;
        if (cylon_pos >= 8)
                cylon_pos = 14 - cylon_pos;
        draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);

        m->jobs_in_progress_iteration++;

        char job_of_n[STRLEN("( of ) ") + DECIMAL_STR_MAX(unsigned)*2] = "";
        if (m->n_running_jobs > 1)
                xsprintf(job_of_n, "(%u of %u) ", counter, m->n_running_jobs);

        (void) job_get_timeout(j, &timeout);

        /* We want to use enough information for the user to identify previous lines talking about the same
         * unit, but keep the message as short as possible. So if 'Starting foo.service' or 'Starting
         * foo.service - Description' were used, 'foo.service' is enough here. On the other hand, if we used
         * 'Starting Description' before, then we shall also use 'Description' here. So we pass NULL as the
         * second argument to unit_status_string(). */
        const char *ident = unit_status_string(j->unit, NULL);

        const char *time = FORMAT_TIMESPAN(now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
        const char *limit = timeout > 0 ? FORMAT_TIMESPAN(timeout - j->begin_usec, 1*USEC_PER_SEC) : "no limit";

        if (m->status_unit_format == STATUS_UNIT_FORMAT_DESCRIPTION)
                /* When using 'Description', we effectively don't have enough space to show the nested status
                 * without ellipsization, so let's not even try. */
                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sA %s job is running for %s (%s / %s)",
                                      job_of_n,
                                      job_type_to_string(j->type),
                                      ident,
                                      time, limit);
        else {
                const char *status_text = unit_status_text(j->unit);

                manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                                      "%sJob %s/%s running (%s / %s)%s%s",
                                      job_of_n,
                                      ident,
                                      job_type_to_string(j->type),
                                      time, limit,
                                      status_text ? ": " : "",
                                      strempty(status_text));
        }

        (void) sd_notifyf(/* unset_environment= */ false,
                          "STATUS=%sUser job %s/%s running (%s / %s)...",
                          job_of_n,
                          ident, job_type_to_string(j->type),
                          time, limit);
        m->status_ready = false;
}

static int have_ask_password(void) {
        _cleanup_closedir_ DIR *dir = NULL;

        dir = opendir("/run/systemd/ask-password");
        if (!dir) {
                if (errno == ENOENT)
                        return false;
                else
                        return -errno;
        }

        FOREACH_DIRENT_ALL(de, dir, return -errno)
                if (startswith(de->d_name, "ask."))
                        return true;
        return false;
}
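
/* For context: the "ask." files counted above are created by the password agent protocol
 * (see https://systemd.io/PASSWORD_AGENTS/). Roughly, each pending query is an INI-style file such as:
 *
 *         [Ask]
 *         PID=4711
 *         Socket=/run/systemd/ask-password/sck.f2acc5bb0945
 *         Message=Please enter passphrase for disk foo!
 *
 * Field names follow the published spec; the concrete values here are made up for illustration. */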

static int manager_dispatch_ask_password_fd(sd_event_source *source,
                                            int fd, uint32_t revents, void *userdata) {
        Manager *m = ASSERT_PTR(userdata);

        (void) flush_fd(fd);

        m->have_ask_password = have_ask_password();
        if (m->have_ask_password < 0)
                /* Log error but continue. Negative have_ask_password is treated as unknown status. */
                log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");

        return 0;
}

static void manager_close_ask_password(Manager *m) {
        assert(m);

        m->ask_password_event_source = sd_event_source_disable_unref(m->ask_password_event_source);
        m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
        m->have_ask_password = -EINVAL;
}

static int manager_check_ask_password(Manager *m) {
        int r;

        assert(m);

        if (!m->ask_password_event_source) {
                assert(m->ask_password_inotify_fd < 0);

                (void) mkdir_p_label("/run/systemd/ask-password", 0755);

                m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->ask_password_inotify_fd < 0)
                        return log_error_errno(errno, "Failed to create inotify object: %m");

                r = inotify_add_watch_and_warn(m->ask_password_inotify_fd,
                                               "/run/systemd/ask-password",
                                               IN_CREATE|IN_DELETE|IN_MOVE);
                if (r < 0) {
                        manager_close_ask_password(m);
                        return r;
                }

                r = sd_event_add_io(m->event, &m->ask_password_event_source,
                                    m->ask_password_inotify_fd, EPOLLIN,
                                    manager_dispatch_ask_password_fd, m);
                if (r < 0) {
                        log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
                        manager_close_ask_password(m);
                        return r;
                }

                (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");

                /* Queries might have been added meanwhile... */
                manager_dispatch_ask_password_fd(m->ask_password_event_source,
                                                 m->ask_password_inotify_fd, EPOLLIN, m);
        }

        return m->have_ask_password;
}

static int manager_watch_idle_pipe(Manager *m) {
        int r;

        assert(m);

        if (m->idle_pipe_event_source)
                return 0;

        if (m->idle_pipe[2] < 0)
                return 0;

        r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to watch idle pipe: %m");

        (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");

        return 0;
}

static void manager_close_idle_pipe(Manager *m) {
        assert(m);

        m->idle_pipe_event_source = sd_event_source_disable_unref(m->idle_pipe_event_source);

        safe_close_pair(m->idle_pipe);
        safe_close_pair(m->idle_pipe + 2);
}

static int manager_setup_time_change(Manager *m) {
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        m->time_change_event_source = sd_event_source_disable_unref(m->time_change_event_source);

        r = event_add_time_change(m->event, &m->time_change_event_source, manager_dispatch_time_change_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create time change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(m->time_change_event_source, EVENT_PRIORITY_TIME_CHANGE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of time change event source: %m");

        log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");

        return 0;
}

static int manager_read_timezone_stat(Manager *m) {
        struct stat st;
        bool changed;

        assert(m);

        /* Read the current stat() data of /etc/localtime so that we detect changes */
        if (lstat("/etc/localtime", &st) < 0) {
                log_debug_errno(errno, "Failed to stat /etc/localtime, ignoring: %m");
                changed = m->etc_localtime_accessible;
                m->etc_localtime_accessible = false;
        } else {
                usec_t k;

                k = timespec_load(&st.st_mtim);
                changed = !m->etc_localtime_accessible || k != m->etc_localtime_mtime;

                m->etc_localtime_mtime = k;
                m->etc_localtime_accessible = true;
        }

        return changed;
}

static int manager_setup_timezone_change(Manager *m) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *new_event = NULL;
        int r;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        /* We watch /etc/localtime for three events: change of the link count (which might mean removal from /etc even
         * though another link might be kept), renames, and file close operations after writing. Note we don't bother
         * with IN_DELETE_SELF, as that would just report when the inode is removed entirely, i.e. after the link count
         * went to zero and all fds to it are closed.
         *
         * Note that we never follow symlinks here. This is a simplification, but should cover almost all cases
         * correctly.
         *
         * Note that we create the new event source first here, before releasing the old one. This should optimize
         * behaviour as this way sd-event can reuse the old watch in case the inode didn't change. */

        r = sd_event_add_inotify(m->event, &new_event, "/etc/localtime",
                                 IN_ATTRIB|IN_MOVE_SELF|IN_CLOSE_WRITE|IN_DONT_FOLLOW, manager_dispatch_timezone_change, m);
        if (r == -ENOENT) {
                /* If the file doesn't exist yet, subscribe to /etc instead, and wait until it is created either by
                 * O_CREAT or by rename() */

                log_debug_errno(r, "/etc/localtime doesn't exist yet, watching /etc instead.");
                r = sd_event_add_inotify(m->event, &new_event, "/etc",
                                         IN_CREATE|IN_MOVED_TO|IN_ONLYDIR, manager_dispatch_timezone_change, m);
        }
        if (r < 0)
                return log_error_errno(r, "Failed to create timezone change event source: %m");

        /* Schedule this slightly earlier than the .timer event sources */
        r = sd_event_source_set_priority(new_event, EVENT_PRIORITY_TIME_ZONE);
        if (r < 0)
                return log_error_errno(r, "Failed to set priority of timezone change event source: %m");

        sd_event_source_unref(m->timezone_change_event_source);
        m->timezone_change_event_source = TAKE_PTR(new_event);

        return 0;
}

static int enable_special_signals(Manager *m) {
        _cleanup_close_ int fd = -EBADF;

        assert(m);

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        /* Enable that we get SIGINT on control-alt-del. In containers
         * this will fail with EPERM (older) or EINVAL (newer), so
         * ignore that. */
        if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
                log_warning_errno(errno, "Failed to enable ctrl-alt-del handling: %m");

        fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
        if (fd < 0) {
                /* Support systems without virtual console. Note that open_terminal() returns a negative
                 * errno-style code on failure, hence log 'fd' here rather than 'errno'. */
                if (fd != -ENOENT)
                        log_warning_errno(fd, "Failed to open /dev/tty0: %m");
        } else {
                /* Enable that we get SIGWINCH on kbrequest */
                if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
                        log_warning_errno(errno, "Failed to enable kbrequest handling: %m");
        }

        return 0;
}

#define RTSIG_IF_AVAILABLE(signum) (signum <= SIGRTMAX ? signum : -1)

static int manager_setup_signals(Manager *m) {
        struct sigaction sa = {
                .sa_handler = SIG_DFL,
                .sa_flags = SA_NOCLDSTOP|SA_RESTART,
        };
        sigset_t mask;
        int r;

        assert(m);

        assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);

        /* We make liberal use of realtime signals here. On
         * Linux/glibc we have 30 of them (with the exception of Linux
         * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
         * (aka SIGRTMAX). */

        assert_se(sigemptyset(&mask) == 0);
        sigset_add_many(&mask,
                        SIGCHLD,     /* Child died */
                        SIGTERM,     /* Reexecute daemon */
                        SIGHUP,      /* Reload configuration */
                        SIGUSR1,     /* systemd: reconnect to D-Bus */
                        SIGUSR2,     /* systemd: dump status */
                        SIGINT,      /* Kernel sends us this on control-alt-del */
                        SIGWINCH,    /* Kernel sends us this on kbrequest (alt-arrowup) */
                        SIGPWR,      /* Some kernel drivers and upsd send us this on power failure */

                        SIGRTMIN+0,  /* systemd: start default.target */
                        SIGRTMIN+1,  /* systemd: isolate rescue.target */
                        SIGRTMIN+2,  /* systemd: isolate emergency.target */
                        SIGRTMIN+3,  /* systemd: start halt.target */
                        SIGRTMIN+4,  /* systemd: start poweroff.target */
                        SIGRTMIN+5,  /* systemd: start reboot.target */
                        SIGRTMIN+6,  /* systemd: start kexec.target */
                        SIGRTMIN+7,  /* systemd: start soft-reboot.target */

                        /* ... space for more special targets ... */

                        SIGRTMIN+13, /* systemd: Immediate halt */
                        SIGRTMIN+14, /* systemd: Immediate poweroff */
                        SIGRTMIN+15, /* systemd: Immediate reboot */
                        SIGRTMIN+16, /* systemd: Immediate kexec */
                        SIGRTMIN+17, /* systemd: Immediate soft-reboot */
                        SIGRTMIN+18, /* systemd: control command */

                        /* ... space ... */

                        SIGRTMIN+20, /* systemd: enable status messages */
                        SIGRTMIN+21, /* systemd: disable status messages */
                        SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
                        SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
                        SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
                        SIGRTMIN+25, /* systemd: reexecute manager */

                        /* Apparently Linux on hppa had fewer RT signals until v3.18,
                         * SIGRTMAX was SIGRTMIN+25, and then SIGRTMIN was lowered,
                         * see commit v3.17-7614-g1f25df2eff.
                         *
                         * We cannot unconditionally make use of those signals here,
                         * so let's use a runtime check. Since these commands are
                         * accessible by different means and only really a safety
                         * net, the missing functionality on hppa shouldn't matter. */

                        RTSIG_IF_AVAILABLE(SIGRTMIN+26), /* systemd: set log target to journal-or-kmsg */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+27), /* systemd: set log target to console */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+28), /* systemd: set log target to kmsg */
                        RTSIG_IF_AVAILABLE(SIGRTMIN+29), /* systemd: set log target to syslog-or-kmsg (obsolete) */

                        /* ... one free signal here SIGRTMIN+30 ... */
                        -1);
        assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);

        m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
        if (m->signal_fd < 0)
                return -errno;

        r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");

        /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
         * notify processing can still figure out to which process/service a message belongs, before we reap the
         * process. Also, process this before handling cgroup notifications, so that we always collect child exit
         * status information before detecting that there's no process in a cgroup. */
        r = sd_event_source_set_priority(m->signal_event_source, EVENT_PRIORITY_SIGNALS);
        if (r < 0)
                return r;

        if (MANAGER_IS_SYSTEM(m))
                return enable_special_signals(m);

        return 0;
}
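
/* For illustration: given the table above, a privileged process can drive the manager purely via signals.
 * A minimal sketch, assuming permission to signal PID 1:
 *
 *         #include <signal.h>
 *
 *         kill(1, SIGRTMIN+5);    // enqueue a start job for reboot.target
 *         kill(1, SIGRTMIN+22);   // switch the manager's log level to LOG_DEBUG
 *
 * From a shell this is roughly "kill -s RTMIN+5 1", which is also what "systemctl reboot" falls back to
 * in some recovery scenarios. */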

static char** sanitize_environment(char **l) {

        /* Let's remove some environment variables that we need ourselves to communicate with our clients */
        strv_env_unset_many(
                        l,
                        "CACHE_DIRECTORY",
                        "CONFIGURATION_DIRECTORY",
                        "CREDENTIALS_DIRECTORY",
                        "EXIT_CODE",
                        "EXIT_STATUS",
                        "INVOCATION_ID",
                        "JOURNAL_STREAM",
                        "LISTEN_FDNAMES",
                        "LISTEN_FDS",
                        "LISTEN_PID",
                        "LOGS_DIRECTORY",
                        "LOG_NAMESPACE",
                        "MAINPID",
                        "MANAGERPID",
                        "MEMORY_PRESSURE_WATCH",
                        "MEMORY_PRESSURE_WRITE",
                        "MONITOR_EXIT_CODE",
                        "MONITOR_EXIT_STATUS",
                        "MONITOR_INVOCATION_ID",
                        "MONITOR_SERVICE_RESULT",
                        "MONITOR_UNIT",
                        "NOTIFY_SOCKET",
                        "PIDFILE",
                        "REMOTE_ADDR",
                        "REMOTE_PORT",
                        "RUNTIME_DIRECTORY",
                        "SERVICE_RESULT",
                        "STATE_DIRECTORY",
                        "SYSTEMD_EXEC_PID",
                        "TRIGGER_PATH",
                        "TRIGGER_TIMER_MONOTONIC_USEC",
                        "TRIGGER_TIMER_REALTIME_USEC",
                        "TRIGGER_UNIT",
                        "WATCHDOG_PID",
                        "WATCHDOG_USEC");

        /* Let's order the environment alphabetically, just to make it pretty */
        return strv_sort(l);
}

int manager_default_environment(Manager *m) {
        int r;

        assert(m);

        m->transient_environment = strv_free(m->transient_environment);

        if (MANAGER_IS_SYSTEM(m)) {
                /* The system manager always starts with a clean environment for its children. It does not
                 * import the kernel's or the parents' exported variables.
                 *
                 * The initial passed environment is untouched to keep /proc/self/environ valid; it is used
                 * for tagging the init process inside containers. */
                m->transient_environment = strv_new("PATH=" DEFAULT_PATH);
                if (!m->transient_environment)
                        return log_oom();

                /* Import locale variables LC_*= from configuration */
                (void) locale_setup(&m->transient_environment);
        } else {
                /* The user manager passes its own environment along to its children, except for $PATH and
                 * session envs. */

                m->transient_environment = strv_copy(environ);
                if (!m->transient_environment)
                        return log_oom();

                r = strv_env_replace_strdup(&m->transient_environment, "PATH=" DEFAULT_USER_PATH);
                if (r < 0)
                        return log_oom();

                /* Envvars set for our 'manager' class session are private and should not be propagated
                 * to children. Also it's likely that the graphical session will set these on their own. */
                strv_env_unset_many(m->transient_environment,
                                    "XDG_SESSION_ID",
                                    "XDG_SESSION_CLASS",
                                    "XDG_SESSION_TYPE",
                                    "XDG_SESSION_DESKTOP",
                                    "XDG_SEAT",
                                    "XDG_VTNR");
        }

        sanitize_environment(m->transient_environment);
        return 0;
}

static int manager_setup_prefix(Manager *m) {
        struct table_entry {
                uint64_t type;
                const char *suffix;
        };

        static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
        };

        static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_STATE_PRIVATE, "log" },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
        };

        assert(m);

        const struct table_entry *p = MANAGER_IS_SYSTEM(m) ? paths_system : paths_user;
        int r;

        for (ExecDirectoryType i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
                r = sd_path_lookup(p[i].type, p[i].suffix, &m->prefix[i]);
                if (r < 0)
                        return log_warning_errno(r, "Failed to lookup %s path: %m",
                                                 exec_directory_type_to_string(i));
        }

        return 0;
}
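
/* For orientation: with the tables above, sd_path_lookup() typically resolves m->prefix[] to "/run",
 * "/var/lib", "/var/cache", "/var/log" and "/etc" for the system manager, and to paths derived from
 * $XDG_RUNTIME_DIR, $XDG_STATE_HOME etc. for the user manager. The exact values depend on the
 * environment; see sd-path(3). */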

static void manager_free_unit_name_maps(Manager *m) {
        m->unit_id_map = hashmap_free(m->unit_id_map);
        m->unit_name_map = hashmap_free(m->unit_name_map);
        m->unit_path_cache = set_free(m->unit_path_cache);
        m->unit_cache_timestamp_hash = 0;
}

static int manager_setup_run_queue(Manager *m) {
        int r;

        assert(m);
        assert(!m->run_queue_event_source);

        r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->run_queue_event_source, EVENT_PRIORITY_RUN_QUEUE);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");

        return 0;
}

static int manager_setup_sigchld_event_source(Manager *m) {
        int r;

        assert(m);
        assert(!m->sigchld_event_source);

        r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->sigchld_event_source, EVENT_PRIORITY_SIGCHLD);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");

        return 0;
}

int manager_setup_memory_pressure_event_source(Manager *m) {
        int r;

        assert(m);

        m->memory_pressure_event_source = sd_event_source_disable_unref(m->memory_pressure_event_source);

        r = sd_event_add_memory_pressure(m->event, &m->memory_pressure_event_source, NULL, NULL);
        if (r < 0)
                log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
                               "Failed to establish memory pressure event source, ignoring: %m");
        else if (m->defaults.memory_pressure_threshold_usec != USEC_INFINITY) {

                /* If there's a default memory pressure threshold set, also apply it to the service manager itself */
                r = sd_event_source_set_memory_pressure_period(
                                m->memory_pressure_event_source,
                                m->defaults.memory_pressure_threshold_usec,
                                MEMORY_PRESSURE_DEFAULT_WINDOW_USEC);
                if (r < 0)
                        log_warning_errno(r, "Failed to adjust memory pressure threshold, ignoring: %m");
        }

        return 0;
}

static int manager_find_credentials_dirs(Manager *m) {
        const char *e;
        int r;

        assert(m);

        r = get_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine credentials directory, ignoring: %m");
        } else {
                m->received_credentials_directory = strdup(e);
                if (!m->received_credentials_directory)
                        return -ENOMEM;
        }

        r = get_encrypted_credentials_dir(&e);
        if (r < 0) {
                if (r != -ENXIO)
                        log_debug_errno(r, "Failed to determine encrypted credentials directory, ignoring: %m");
        } else {
                m->received_encrypted_credentials_directory = strdup(e);
                if (!m->received_encrypted_credentials_directory)
                        return -ENOMEM;
        }

        return 0;
}

void manager_set_switching_root(Manager *m, bool switching_root) {
        assert(m);

        m->switching_root = MANAGER_IS_SYSTEM(m) && switching_root;
}

double manager_get_progress(Manager *m) {
        assert(m);

        if (MANAGER_IS_FINISHED(m) || m->n_installed_jobs == 0)
                return 1.0;

        return 1.0 - ((double) hashmap_size(m->jobs) / (double) m->n_installed_jobs);
}
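
/* Worked example: if this boot installed n_installed_jobs = 200 jobs and 50 of them are still pending in
 * m->jobs, the progress reported is 1.0 - 50/200 = 0.75, i.e. 75% done. */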

static int compare_job_priority(const void *a, const void *b) {
        const Job *x = a, *y = b;

        return unit_compare_priority(x->unit, y->unit);
}

int manager_new(RuntimeScope runtime_scope, ManagerTestRunFlags test_run_flags, Manager **ret) {
        _cleanup_(manager_freep) Manager *m = NULL;
        int r;

        assert(IN_SET(runtime_scope, RUNTIME_SCOPE_SYSTEM, RUNTIME_SCOPE_USER));
        assert(ret);

        m = new(Manager, 1);
        if (!m)
                return -ENOMEM;

        *m = (Manager) {
                .runtime_scope = runtime_scope,
                .objective = _MANAGER_OBJECTIVE_INVALID,

                .status_unit_format = STATUS_UNIT_FORMAT_DEFAULT,

                .original_log_level = -1,
                .original_log_target = _LOG_TARGET_INVALID,

                .watchdog_overridden[WATCHDOG_RUNTIME] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_REBOOT] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_KEXEC] = USEC_INFINITY,
                .watchdog_overridden[WATCHDOG_PRETIMEOUT] = USEC_INFINITY,

                .show_status_overridden = _SHOW_STATUS_INVALID,

                .notify_fd = -EBADF,
                .cgroups_agent_fd = -EBADF,
                .signal_fd = -EBADF,
                .user_lookup_fds = EBADF_PAIR,
                .private_listen_fd = -EBADF,
                .dev_autofs_fd = -EBADF,
                .cgroup_inotify_fd = -EBADF,
                .pin_cgroupfs_fd = -EBADF,
                .ask_password_inotify_fd = -EBADF,
                .idle_pipe = { -EBADF, -EBADF, -EBADF, -EBADF },

                /* start as id #1, so that we can leave #0 around as "null-like" value */
                .current_job_id = 1,

                .have_ask_password = -EINVAL, /* we don't know */
                .first_boot = -1,
                .test_run_flags = test_run_flags,

                .dump_ratelimit = (const RateLimit) { .interval = 10 * USEC_PER_MINUTE, .burst = 10 },

                .executor_fd = -EBADF,
        };

        unit_defaults_init(&m->defaults, runtime_scope);

#if ENABLE_EFI
        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
                boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
                                m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
                                m->timestamps + MANAGER_TIMESTAMP_LOADER);
#endif

        /* Prepare log fields we can use for structured logging */
        if (MANAGER_IS_SYSTEM(m)) {
                m->unit_log_field = "UNIT=";
                m->unit_log_format_string = "UNIT=%s";

                m->invocation_log_field = "INVOCATION_ID=";
                m->invocation_log_format_string = "INVOCATION_ID=%s";
        } else {
                m->unit_log_field = "USER_UNIT=";
                m->unit_log_format_string = "USER_UNIT=%s";

                m->invocation_log_field = "USER_INVOCATION_ID=";
                m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
        }

        /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
        m->ctrl_alt_del_ratelimit = (const RateLimit) { .interval = 2 * USEC_PER_SEC, .burst = 7 };

        r = manager_default_environment(m);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&m->run_queue, compare_job_priority);
        if (r < 0)
                return r;

        r = manager_setup_prefix(m);
        if (r < 0)
                return r;

        r = manager_find_credentials_dirs(m);
        if (r < 0)
                return r;

        r = sd_event_default(&m->event);
        if (r < 0)
                return r;

        r = manager_setup_run_queue(m);
        if (r < 0)
                return r;

        if (FLAGS_SET(test_run_flags, MANAGER_TEST_RUN_MINIMAL)) {
                m->cgroup_root = strdup("");
                if (!m->cgroup_root)
                        return -ENOMEM;
        } else {
                r = manager_setup_signals(m);
                if (r < 0)
                        return r;

                r = manager_setup_cgroup(m);
                if (r < 0)
                        return r;

                r = manager_setup_time_change(m);
                if (r < 0)
                        return r;

                r = manager_read_timezone_stat(m);
                if (r < 0)
                        return r;

                (void) manager_setup_timezone_change(m);

                r = manager_setup_sigchld_event_source(m);
                if (r < 0)
                        return r;

                r = manager_setup_memory_pressure_event_source(m);
                if (r < 0)
                        return r;

#if HAVE_LIBBPF
                if (MANAGER_IS_SYSTEM(m) && bpf_restrict_fs_supported(/* initialize = */ true)) {
                        r = bpf_restrict_fs_setup(m);
                        if (r < 0)
                                log_warning_errno(r, "Failed to setup LSM BPF, ignoring: %m");
                }
#endif
        }

        if (test_run_flags == 0) {
                if (MANAGER_IS_SYSTEM(m))
                        r = mkdir_label("/run/systemd/units", 0755);
                else {
                        _cleanup_free_ char *units_path = NULL;
                        r = xdg_user_runtime_dir(&units_path, "/systemd/units");
                        if (r < 0)
                                return r;
                        r = mkdir_p_label(units_path, 0755);
                }

                if (r < 0 && r != -EEXIST)
                        return r;
        }

        if (!FLAGS_SET(test_run_flags, MANAGER_TEST_DONT_OPEN_EXECUTOR)) {
                m->executor_fd = pin_callout_binary(SYSTEMD_EXECUTOR_BINARY_PATH);
                if (m->executor_fd < 0)
                        return log_debug_errno(m->executor_fd, "Failed to pin executor binary: %m");

                _cleanup_free_ char *executor_path = NULL;
                r = fd_get_path(m->executor_fd, &executor_path);
                if (r < 0)
                        return r;

                log_debug("Using systemd-executor binary from '%s'.", executor_path);
        }

        /* Note that we do not set up the notify fd here. We do that after deserialization,
         * since it might have been serialized across the reexec. */

        *ret = TAKE_PTR(m);

        return 0;
}

static int manager_setup_notify(Manager *m) {
        int r;

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        if (m->notify_fd < 0) {
                _cleanup_close_ int fd = -EBADF;
                union sockaddr_union sa;
                socklen_t sa_len;

                /* First free all secondary fields */
                m->notify_socket = mfree(m->notify_socket);
                m->notify_event_source = sd_event_source_disable_unref(m->notify_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate notification socket: %m");

                fd_increase_rxbuf(fd, NOTIFY_RCVBUF_SIZE);

                m->notify_socket = path_join(m->prefix[EXEC_DIRECTORY_RUNTIME], "systemd/notify");
                if (!m->notify_socket)
                        return log_oom();

                r = sockaddr_un_set_path(&sa.un, m->notify_socket);
                if (r < 0)
                        return log_error_errno(r, "Notify socket '%s' not valid for AF_UNIX socket address, refusing.",
                                               m->notify_socket);
                sa_len = r;

                (void) mkdir_parents_label(m->notify_socket, 0755);
                (void) sockaddr_un_unlink(&sa.un);

                r = mac_selinux_bind(fd, &sa.sa, sa_len);
                if (r < 0)
                        return log_error_errno(r, "bind(%s) failed: %m", m->notify_socket);

                r = setsockopt_int(fd, SOL_SOCKET, SO_PASSCRED, true);
                if (r < 0)
                        return log_error_errno(r, "SO_PASSCRED failed: %m");

                m->notify_fd = TAKE_FD(fd);

                log_debug("Using notification socket %s", m->notify_socket);
        }

        if (!m->notify_event_source) {
                r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate notify event source: %m");

                /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
                 * service an exit message belongs. */
                r = sd_event_source_set_priority(m->notify_event_source, EVENT_PRIORITY_NOTIFY);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of notify event source: %m");

                (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
        }

        return 0;
}
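
/* For illustration: the socket bound above is what services see as $NOTIFY_SOCKET. A minimal client is
 * just a datagram send; sd_notify() is the stable public API for it (see sd_notify(3)):
 *
 *         #include <systemd/sd-daemon.h>
 *
 *         int notify_ready(void) {
 *                 // Tell the manager we finished starting up, and publish a status string.
 *                 return sd_notify(0, "READY=1\nSTATUS=Processing requests...");
 *         }
 */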

static int manager_setup_cgroups_agent(Manager *m) {

        static const union sockaddr_union sa = {
                .un.sun_family = AF_UNIX,
                .un.sun_path = "/run/systemd/cgroups-agent",
        };
        int r;

        /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
         * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-living, and
         * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
         * overloaded systems the backlog of the D-Bus socket becomes relevant, as not more than the configured number
         * of D-Bus connections may be queued until the kernel will start dropping further incoming connections,
         * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
         * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
         * we thus won't lose messages.
         *
         * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
         * to it. The system instance hence listens on this special socket, but the user instances listen on the system
         * bus for these messages. */

        if (MANAGER_IS_TEST_RUN(m))
                return 0;

        if (!MANAGER_IS_SYSTEM(m))
                return 0;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
        if (r > 0) /* We don't need this anymore on the unified hierarchy */
                return 0;

        if (m->cgroups_agent_fd < 0) {
                _cleanup_close_ int fd = -EBADF;

                /* First free all secondary fields */
                m->cgroups_agent_event_source = sd_event_source_disable_unref(m->cgroups_agent_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");

                fd_increase_rxbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);

                (void) sockaddr_un_unlink(&sa.un);

                /* Only allow root to connect to this socket */
                WITH_UMASK(0077)
                        r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
                if (r < 0)
                        return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);

                m->cgroups_agent_fd = TAKE_FD(fd);
        }

        if (!m->cgroups_agent_event_source) {
                r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");

                /* Process cgroups notifications early. Note that when the agent notification is received we'll
                 * just enqueue the unit in the cgroup empty queue, hence pick a higher priority than that. Also
                 * see handling of cgroup inotify for the unified cgroup stuff. */
                r = sd_event_source_set_priority(m->cgroups_agent_event_source, EVENT_PRIORITY_CGROUP_AGENT);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");

                (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
        }

        return 0;
}
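
/* For illustration: on legacy hierarchies the short-lived systemd-cgroups-agent binary (spawned via the
 * kernel's release_agent mechanism) delivers the released cgroup path as a single datagram to the socket
 * bound above. A minimal sketch of that sender side, assuming the payload is just the raw cgroup path:
 *
 *         #include <string.h>
 *         #include <sys/socket.h>
 *         #include <sys/un.h>
 *         #include <unistd.h>
 *
 *         int send_agent_message(const char *cgroup_path) {
 *                 struct sockaddr_un sa = { .sun_family = AF_UNIX, .sun_path = "/run/systemd/cgroups-agent" };
 *                 int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
 *                 if (fd < 0)
 *                         return -1;
 *                 // One datagram per released cgroup; no connect() handshake needed.
 *                 ssize_t n = sendto(fd, cgroup_path, strlen(cgroup_path), 0,
 *                                    (struct sockaddr*) &sa, sizeof(sa));
 *                 close(fd);
 *                 return n < 0 ? -1 : 0;
 *         }
 */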

static int manager_setup_user_lookup_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
         * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
         * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
         * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
         * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
         * hence we establish this communication channel so that forked off processes can pass their UID/GID
         * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
         * datagram, along with their unit name, so that we can share one communication socket pair among all units for
         * this purpose.
         *
         * You might wonder why we need a communication channel for this that is independent of the usual notification
         * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
         * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
         * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
         *
         * Note that this function is called under two circumstances: when we first initialize (in which case we
         * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
         * (in which case the socket pair already exists but we still need to allocate the event source for it). */

        if (m->user_lookup_fds[0] < 0) {

                /* Free all secondary fields */
                safe_close_pair(m->user_lookup_fds);
                m->user_lookup_event_source = sd_event_source_disable_unref(m->user_lookup_event_source);

                if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
                        return log_error_errno(errno, "Failed to allocate user lookup socket: %m");

                (void) fd_increase_rxbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
        }

        if (!m->user_lookup_event_source) {
                r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate user lookup event source: %m");

                /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
                 * resolutions */
                r = sd_event_source_set_priority(m->user_lookup_event_source, EVENT_PRIORITY_USER_LOOKUP);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of user lookup event source: %m");

                (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
        }

        return 0;
}
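
/* For illustration: the datagrams exchanged over this socket pair are tiny fixed-layout records, roughly
 * a UID, a GID and the owning unit name (the authoritative layout is whatever
 * manager_dispatch_user_lookup_fd() parses). A sketch of the shape being exchanged:
 *
 *         struct user_lookup_message {
 *                 uid_t uid;
 *                 gid_t gid;
 *                 char unit_name[UNIT_NAME_MAX + 1];
 *         };
 */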

static unsigned manager_dispatch_cleanup_queue(Manager *m) {
        Unit *u;
        unsigned n = 0;

        assert(m);

        while ((u = m->cleanup_queue)) {
                assert(u->in_cleanup_queue);

                unit_free(u);
                n++;
        }

        return n;
}

static unsigned manager_dispatch_release_resources_queue(Manager *m) {
        unsigned n = 0;
        Unit *u;

        assert(m);

        while ((u = LIST_POP(release_resources_queue, m->release_resources_queue))) {
                assert(u->in_release_resources_queue);
                u->in_release_resources_queue = false;

                n++;

                unit_release_resources(u);
        }

        return n;
}

enum {
        GC_OFFSET_IN_PATH,  /* This one is on the path we were traveling */
        GC_OFFSET_UNSURE,   /* No clue */
        GC_OFFSET_GOOD,     /* We still need this unit */
        GC_OFFSET_BAD,      /* We don't need this unit anymore */
        _GC_OFFSET_MAX
};
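
/* How the markers are used, for orientation: each GC run bumps m->gc_marker by _GC_OFFSET_MAX, and a
 * unit's verdict for the current run is stored as u->gc_marker = gc_marker + offset. That way
 * "u->gc_marker - gc_marker" yields the per-run state without having to reset the field on all units
 * between runs. E.g. with gc_marker == 40, a unit marked GOOD carries 40 + GC_OFFSET_GOOD == 42, and a
 * unit whose marker is below 40 simply hasn't been visited in this run yet. */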

static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
        Unit *other;

        u->gc_marker = gc_marker + GC_OFFSET_GOOD;

        /* Recursively mark referenced units as GOOD as well */
        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCES)
                if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
                        unit_gc_mark_good(other, gc_marker);
}

static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
        Unit *other;
        bool is_bad;

        assert(u);

        if (IN_SET(u->gc_marker - gc_marker,
                   GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
                return;

        if (u->in_cleanup_queue)
                goto bad;

        if (!unit_may_gc(u))
                goto good;

        u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;

        is_bad = true;

        UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_REFERENCED_BY) {
                unit_gc_sweep(other, gc_marker);

                if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
                unit_gc_sweep(ref->source, gc_marker);

                if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        if (is_bad)
                goto bad;

        /* We were unable to find anything out about this entry, so
         * let's investigate it later */
        u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
        unit_add_to_gc_queue(u);
        return;

bad:
        /* We definitely know that this one is not useful anymore, so
         * let's mark it for deletion */
        u->gc_marker = gc_marker + GC_OFFSET_BAD;
        unit_add_to_cleanup_queue(u);
        return;

good:
        unit_gc_mark_good(u, gc_marker);
}

static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
        unsigned n = 0, gc_marker;
        Unit *u;

        assert(m);

        /* log_debug("Running GC..."); */

        m->gc_marker += _GC_OFFSET_MAX;
        if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
                m->gc_marker = 1;

        gc_marker = m->gc_marker;

        while ((u = LIST_POP(gc_queue, m->gc_unit_queue))) {
                assert(u->in_gc_queue);

                unit_gc_sweep(u, gc_marker);

                u->in_gc_queue = false;

                n++;

                if (IN_SET(u->gc_marker - gc_marker,
                           GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
                        if (u->id)
                                log_unit_debug(u, "Collecting.");
                        u->gc_marker = gc_marker + GC_OFFSET_BAD;
                        unit_add_to_cleanup_queue(u);
                }
        }

        return n;
}

static unsigned manager_dispatch_gc_job_queue(Manager *m) {
        unsigned n = 0;
        Job *j;

        assert(m);

        while ((j = LIST_POP(gc_queue, m->gc_job_queue))) {
                assert(j->in_gc_queue);
                j->in_gc_queue = false;

                n++;

                if (!job_may_gc(j))
                        continue;

                log_unit_debug(j->unit, "Collecting job.");
                (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
        }

        return n;
}

static int manager_ratelimit_requeue(sd_event_source *s, uint64_t usec, void *userdata) {
        Unit *u = userdata;

        assert(u);
        assert(s == u->auto_start_stop_event_source);

        u->auto_start_stop_event_source = sd_event_source_unref(u->auto_start_stop_event_source);

        /* Re-queue to all queues: if the rate limit was hit, we might have been throttled on any of them. */
        unit_submit_to_stop_when_unneeded_queue(u);
        unit_submit_to_start_when_upheld_queue(u);
        unit_submit_to_stop_when_bound_queue(u);

        return 0;
}

static int manager_ratelimit_check_and_queue(Unit *u) {
        int r;

        assert(u);

        if (ratelimit_below(&u->auto_start_stop_ratelimit))
                return 1;

        /* Already queued, no need to requeue */
        if (u->auto_start_stop_event_source)
                return 0;

        r = sd_event_add_time(
                        u->manager->event,
                        &u->auto_start_stop_event_source,
                        CLOCK_MONOTONIC,
                        ratelimit_end(&u->auto_start_stop_ratelimit),
                        0,
                        manager_ratelimit_requeue,
                        u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to queue timer on event loop: %m");

        return 0;
}
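
/* Return convention of manager_ratelimit_check_and_queue(), spelled out: > 0 means "below the rate limit,
 * go ahead now"; 0 means "throttled, a requeue timer is (or already was) armed, retry later"; < 0 is an
 * error from arming that timer. The dispatch loops below rely on this to decide between acting and merely
 * logging a warning. */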

static unsigned manager_dispatch_stop_when_unneeded_queue(Manager *m) {
        unsigned n = 0;
        Unit *u;
        int r;

        assert(m);

        while ((u = LIST_POP(stop_when_unneeded_queue, m->stop_when_unneeded_queue))) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                assert(u->in_stop_when_unneeded_queue);
                u->in_stop_when_unneeded_queue = false;

                n++;

                if (!unit_is_unneeded(u))
                        continue;

                log_unit_debug(u, "Unit is not needed anymore.");

                /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
                 * service being unnecessary after a while. */

                r = manager_ratelimit_check_and_queue(u);
                if (r <= 0) {
                        log_unit_warning(u,
                                         "Unit not needed anymore, but not stopping since we tried this too often recently.%s",
                                         r == 0 ? " Will retry later." : "");
                        continue;
                }

                /* Ok, nobody needs us anymore. Sniff. Then let's commit suicide */
                r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
        }

        return n;
}

static unsigned manager_dispatch_start_when_upheld_queue(Manager *m) {
        unsigned n = 0;
        Unit *u;
        int r;

        assert(m);

        while ((u = LIST_POP(start_when_upheld_queue, m->start_when_upheld_queue))) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
                Unit *culprit = NULL;

                assert(u->in_start_when_upheld_queue);
                u->in_start_when_upheld_queue = false;

                n++;

                if (!unit_is_upheld_by_active(u, &culprit))
                        continue;

                log_unit_debug(u, "Unit is started because upheld by active unit %s.", culprit->id);

                /* If starting a unit fails continuously we might enter a start loop here, hence stop acting on the
                 * uphold relation after a while. */

                r = manager_ratelimit_check_and_queue(u);
                if (r <= 0) {
                        log_unit_warning(u,
                                         "Unit needs to be started because active unit %s upholds it, but not starting since we tried this too often recently.%s",
                                         culprit->id,
                                         r == 0 ? " Will retry later." : "");
                        continue;
                }

                r = manager_add_job(u->manager, JOB_START, u, JOB_FAIL, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue start job, ignoring: %s", bus_error_message(&error, r));
        }

        return n;
}

static unsigned manager_dispatch_stop_when_bound_queue(Manager *m) {
        unsigned n = 0;
        Unit *u;
        int r;

        assert(m);

        while ((u = LIST_POP(stop_when_bound_queue, m->stop_when_bound_queue))) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
                Unit *culprit = NULL;

                assert(u->in_stop_when_bound_queue);
                u->in_stop_when_bound_queue = false;

                n++;

                if (!unit_is_bound_by_inactive(u, &culprit))
                        continue;

                log_unit_debug(u, "Unit is stopped because bound to inactive unit %s.", culprit->id);

                /* If stopping a unit fails continuously we might enter a stop loop here, hence stop acting on the
                 * binding relation after a while. */

                r = manager_ratelimit_check_and_queue(u);
                if (r <= 0) {
                        log_unit_warning(u,
                                         "Unit needs to be stopped because it is bound to inactive unit %s, but not stopping since we tried this too often recently.%s",
                                         culprit->id,
                                         r == 0 ? " Will retry later." : "");
                        continue;
                }

                r = manager_add_job(u->manager, JOB_STOP, u, JOB_REPLACE, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
        }

        return n;
}

static void manager_clear_jobs_and_units(Manager *m) {
        Unit *u;

        assert(m);

        while ((u = hashmap_first(m->units)))
                unit_free(u);

        manager_dispatch_cleanup_queue(m);

        assert(!m->load_queue);
        assert(prioq_isempty(m->run_queue));
        assert(!m->dbus_unit_queue);
        assert(!m->dbus_job_queue);
        assert(!m->cleanup_queue);
        assert(!m->gc_unit_queue);
        assert(!m->gc_job_queue);
        assert(!m->cgroup_realize_queue);
        assert(!m->cgroup_empty_queue);
        assert(!m->cgroup_oom_queue);
        assert(!m->target_deps_queue);
        assert(!m->stop_when_unneeded_queue);
        assert(!m->start_when_upheld_queue);
        assert(!m->stop_when_bound_queue);
        assert(!m->release_resources_queue);

        assert(hashmap_isempty(m->jobs));
        assert(hashmap_isempty(m->units));

        m->n_on_console = 0;
        m->n_running_jobs = 0;
        m->n_installed_jobs = 0;
        m->n_failed_jobs = 0;
}

Manager* manager_free(Manager *m) {
        if (!m)
                return NULL;

        manager_clear_jobs_and_units(m);

        for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++)
                if (unit_vtable[c]->shutdown)
                        unit_vtable[c]->shutdown(m);

        /* Keep the cgroup hierarchy in place except when we know we are going down for good */
        manager_shutdown_cgroup(m, /* delete= */ IN_SET(m->objective, MANAGER_EXIT, MANAGER_REBOOT, MANAGER_POWEROFF, MANAGER_HALT, MANAGER_KEXEC));

        lookup_paths_flush_generator(&m->lookup_paths);

        bus_done(m);
        manager_varlink_done(m);

        exec_shared_runtime_vacuum(m);
        hashmap_free(m->exec_shared_runtime_by_id);

        dynamic_user_vacuum(m, false);
        hashmap_free(m->dynamic_users);

        hashmap_free(m->units);
        hashmap_free(m->units_by_invocation_id);
        hashmap_free(m->jobs);
        hashmap_free(m->watch_pids);
        hashmap_free(m->watch_pids_more);
        hashmap_free(m->watch_bus);

        prioq_free(m->run_queue);

        set_free(m->startup_units);
        set_free(m->failed_units);

        sd_event_source_unref(m->signal_event_source);
        sd_event_source_unref(m->sigchld_event_source);
        sd_event_source_unref(m->notify_event_source);
        sd_event_source_unref(m->cgroups_agent_event_source);
        sd_event_source_unref(m->time_change_event_source);
        sd_event_source_unref(m->timezone_change_event_source);
        sd_event_source_unref(m->jobs_in_progress_event_source);
        sd_event_source_unref(m->run_queue_event_source);
        sd_event_source_unref(m->user_lookup_event_source);
        sd_event_source_unref(m->memory_pressure_event_source);

        safe_close(m->signal_fd);
        safe_close(m->notify_fd);
        safe_close(m->cgroups_agent_fd);
        safe_close_pair(m->user_lookup_fds);

        manager_close_ask_password(m);

        manager_close_idle_pipe(m);

        sd_event_unref(m->event);

        free(m->notify_socket);

        lookup_paths_done(&m->lookup_paths);
        strv_free(m->transient_environment);
        strv_free(m->client_environment);

        hashmap_free(m->cgroup_unit);
        manager_free_unit_name_maps(m);

        free(m->switch_root);
        free(m->switch_root_init);

        unit_defaults_done(&m->defaults);

        FOREACH_ARRAY(map, m->units_needing_mounts_for, _UNIT_MOUNT_DEPENDENCY_TYPE_MAX) {
                assert(hashmap_isempty(*map));
                hashmap_free(*map);
        }

        hashmap_free(m->uid_refs);
        hashmap_free(m->gid_refs);

        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                m->prefix[dt] = mfree(m->prefix[dt]);
        free(m->received_credentials_directory);
        free(m->received_encrypted_credentials_directory);

        free(m->watchdog_pretimeout_governor);
        free(m->watchdog_pretimeout_governor_overridden);

        m->fw_ctx = fw_ctx_free(m->fw_ctx);

#if BPF_FRAMEWORK
        bpf_restrict_fs_destroy(m->restrict_fs);
#endif

        safe_close(m->executor_fd);

        return mfree(m);
}

static void manager_enumerate_perpetual(Manager *m) {
        assert(m);

        if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
                return;

        /* Let's ask every type to load all units from disk/kernel that it might know */
        for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) {
                if (!unit_type_supported(c)) {
                        log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
                        continue;
                }

                if (unit_vtable[c]->enumerate_perpetual)
                        unit_vtable[c]->enumerate_perpetual(m);
        }
}

static void manager_enumerate(Manager *m) {
        assert(m);

        if (FLAGS_SET(m->test_run_flags, MANAGER_TEST_RUN_MINIMAL))
                return;

        /* Let's ask every type to load all units from disk/kernel that it might know */
        for (UnitType c = 0; c < _UNIT_TYPE_MAX; c++) {
                if (!unit_type_supported(c)) {
                        log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
                        continue;
                }

                if (unit_vtable[c]->enumerate)
                        unit_vtable[c]->enumerate(m);
        }

        manager_dispatch_load_queue(m);
}

1747 static void manager_coldplug(Manager *m) {
1748 Unit *u;
1749 char *k;
1750 int r;
1751
1752 assert(m);
1753
1754 log_debug("Invoking unit coldplug() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS));
1755
1756 /* Let's place the units back into their deserialized state */
1757 HASHMAP_FOREACH_KEY(u, k, m->units) {
1758
1759 /* ignore aliases */
1760 if (u->id != k)
1761 continue;
1762
1763 r = unit_coldplug(u);
1764 if (r < 0)
1765 log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id);
1766 }
1767 }
1768
1769 static void manager_catchup(Manager *m) {
1770 Unit *u;
1771 char *k;
1772
1773 assert(m);
1774
1775 log_debug("Invoking unit catchup() handlers%s", special_glyph(SPECIAL_GLYPH_ELLIPSIS));
1776
1777 /* Let's catch up on any state changes that happened while we were reloading/reexecing */
1778 HASHMAP_FOREACH_KEY(u, k, m->units) {
1779
1780 /* ignore aliases */
1781 if (u->id != k)
1782 continue;
1783
1784 unit_catchup(u);
1785 }
1786 }
1787
1788 static void manager_distribute_fds(Manager *m, FDSet *fds) {
1789 Unit *u;
1790
1791 assert(m);
1792
1793 HASHMAP_FOREACH(u, m->units) {
1794
1795 if (fdset_isempty(fds))
1796 break;
1797
1798 if (!UNIT_VTABLE(u)->distribute_fds)
1799 continue;
1800
1801 UNIT_VTABLE(u)->distribute_fds(u, fds);
1802 }
1803 }
1804
1805 static bool manager_dbus_is_running(Manager *m, bool deserialized) {
1806 Unit *u;
1807
1808 assert(m);
1809
1810 /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket
1811 * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit
1812 * rather than the current one. */
1813
1814 if (MANAGER_IS_TEST_RUN(m))
1815 return false;
1816
1817 u = manager_get_unit(m, SPECIAL_DBUS_SOCKET);
1818 if (!u)
1819 return false;
1820 if ((deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING)
1821 return false;
1822
1823 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
1824 if (!u)
1825 return false;
1826 if (!IN_SET((deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state),
1827 SERVICE_RUNNING,
1828 SERVICE_RELOAD,
1829 SERVICE_RELOAD_NOTIFY,
1830 SERVICE_RELOAD_SIGNAL))
1831 return false;
1832
1833 return true;
1834 }
1835
1836 static void manager_setup_bus(Manager *m) {
1837 assert(m);
1838
1839 /* Let's set up our private bus connection now, unconditionally */
1840 (void) bus_init_private(m);
1841
1842 /* If we are in --user mode also connect to the system bus now */
1843 if (MANAGER_IS_USER(m))
1844 (void) bus_init_system(m);
1845
1846 /* Let's connect to the bus now, but only if the unit is supposed to be up */
1847 if (manager_dbus_is_running(m, MANAGER_IS_RELOADING(m))) {
1848 (void) bus_init_api(m);
1849
1850 if (MANAGER_IS_SYSTEM(m))
1851 (void) bus_init_system(m);
1852 }
1853 }
1854
1855 static void manager_preset_all(Manager *m) {
1856 int r;
1857
1858 assert(m);
1859
1860 if (m->first_boot <= 0)
1861 return;
1862
1863 if (!MANAGER_IS_SYSTEM(m))
1864 return;
1865
1866 if (MANAGER_IS_TEST_RUN(m))
1867 return;
1868
1869 /* If this is the first boot, and we are in the host system, then preset everything */
1870 UnitFilePresetMode mode =
1871 ENABLE_FIRST_BOOT_FULL_PRESET ? UNIT_FILE_PRESET_FULL : UNIT_FILE_PRESET_ENABLE_ONLY;
1872
1873 r = unit_file_preset_all(RUNTIME_SCOPE_SYSTEM, 0, NULL, mode, NULL, 0);
1874 if (r < 0)
1875 log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r,
1876 "Failed to populate /etc with preset unit settings, ignoring: %m");
1877 else
1878 log_info("Populated /etc with preset unit settings.");
1879 }
1880
1881 static void manager_ready(Manager *m) {
1882 assert(m);
1883
1884 /* After having loaded everything, do the final round of catching up with what might have changed */
1885
1886 m->objective = MANAGER_OK; /* Tell everyone we are up now */
1887
1888 /* It might be safe to log to the journal now and connect to dbus */
1889 manager_recheck_journal(m);
1890 manager_recheck_dbus(m);
1891
1892 /* Let's finally catch up with any changes that took place while we were reloading/reexecing */
1893 manager_catchup(m);
1894
1895 /* Create a file which will indicate when the manager started loading units the last time. */
1896 if (MANAGER_IS_SYSTEM(m))
1897 (void) touch_file("/run/systemd/systemd-units-load", false,
1898 m->timestamps[MANAGER_TIMESTAMP_UNITS_LOAD].realtime ?: now(CLOCK_REALTIME),
1899 UID_INVALID, GID_INVALID, 0444);
1900 }
1901
1902 Manager* manager_reloading_start(Manager *m) {
1903 m->n_reloading++;
1904 dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD);
1905 return m;
1906 }
1907
1908 void manager_reloading_stopp(Manager **m) {
1909 if (*m) {
1910 assert((*m)->n_reloading > 0);
1911 (*m)->n_reloading--;
1912 }
1913 }
1914
1915 int manager_startup(Manager *m, FILE *serialization, FDSet *fds, const char *root) {
1916 int r;
1917
1918 assert(m);
1919
1920 /* If we are running in test mode, we still want to run the generators,
1921 * but we should not touch the real generator directories. */
1922 r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope,
1923 MANAGER_IS_TEST_RUN(m) ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0,
1924 root);
1925 if (r < 0)
1926 return r;
1927
1928 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_START));
1929 r = manager_run_environment_generators(m);
1930 if (r >= 0)
1931 r = manager_run_generators(m);
1932 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_GENERATORS_FINISH));
1933 if (r < 0)
1934 return r;
1935
1936 manager_preset_all(m);
1937
1938 lookup_paths_log(&m->lookup_paths);
1939
1940 {
1941 /* This block is (optionally) done with the reloading counter bumped */
1942 _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL;
1943
1944 /* Make sure we don't have a left-over from a previous run */
1945 if (!serialization)
1946 (void) rm_rf(m->lookup_paths.transient, 0);
1947
1948 /* If we will deserialize make sure that during enumeration this is already known, so we increase the
1949 * counter here already */
1950 if (serialization)
1951 reloading = manager_reloading_start(m);
1952
1953 /* First, enumerate what we can from all config files */
1954 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_START));
1955 manager_enumerate_perpetual(m);
1956 manager_enumerate(m);
1957 dual_timestamp_now(m->timestamps + manager_timestamp_initrd_mangle(MANAGER_TIMESTAMP_UNITS_LOAD_FINISH));
1958
1959 /* Second, deserialize if there is something to deserialize */
1960 if (serialization) {
1961 r = manager_deserialize(m, serialization, fds);
1962 if (r < 0)
1963 return log_error_errno(r, "Deserialization failed: %m");
1964 }
1965
1966 /* Any fds left? Find some unit which wants them. This is useful to allow container managers to pass
1967 * some file descriptors to us pre-initialized. This enables socket-based activation of entire
1968 * containers. */
1969 manager_distribute_fds(m, fds);
1970
1971 /* We might have deserialized the notify fd, but if we didn't then let's create the notification socket now */
1972 r = manager_setup_notify(m);
1973 if (r < 0)
1974 /* No sense in continuing without notifications, our children would fail anyway. */
1975 return r;
1976
1977 r = manager_setup_cgroups_agent(m);
1978 if (r < 0)
1979 /* Likewise, no sense in continuing without empty cgroup notifications. */
1980 return r;
1981
1982 r = manager_setup_user_lookup_fd(m);
1983 if (r < 0)
1984 /* This shouldn't fail, except if things are really broken. */
1985 return r;
1986
1987 /* Connect to the bus if we are good for it */
1988 manager_setup_bus(m);
1989
1990 /* Now that we are connected to all possible buses, let's deserialize who is tracking us. */
1991 r = bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed);
1992 if (r < 0)
1993 log_warning_errno(r, "Failed to deserialize tracked clients, ignoring: %m");
1994 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
1995
1996 r = manager_varlink_init(m);
1997 if (r < 0)
1998 log_warning_errno(r, "Failed to set up Varlink, ignoring: %m");
1999
2000 /* Third, fire things up! */
2001 manager_coldplug(m);
2002
2003 /* Clean up runtime objects */
2004 manager_vacuum(m);
2005
2006 if (serialization)
2007 /* Let's wait for the UnitNew/JobNew messages to be sent before we notify that the
2008 * reload is finished */
2009 m->send_reloading_done = true;
2010 }
2011
2012 manager_ready(m);
2013
2014 manager_set_switching_root(m, false);
2015
2016 return 0;
2017 }
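
/* Illustrative sketch (hypothetical helper, not upstream code): a condensed view of
 * how the entry points in this file fit together at boot; the real sequence in
 * main.c does considerably more work around these two calls: */
static int example_run_manager(Manager *m) {
        int r;

        /* Fresh boot: no serialization stream, no passed fds, no alternate root */
        r = manager_startup(m, /* serialization= */ NULL, /* fds= */ NULL, /* root= */ NULL);
        if (r < 0)
                return r;

        /* manager_loop() below runs until an objective other than MANAGER_OK is
         * set, e.g. MANAGER_RELOAD or MANAGER_REBOOT */
        return manager_loop(m);
}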
2018
2019 int manager_add_job(
2020 Manager *m,
2021 JobType type,
2022 Unit *unit,
2023 JobMode mode,
2024 Set *affected_jobs,
2025 sd_bus_error *error,
2026 Job **ret) {
2027
2028 _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL;
2029 int r;
2030
2031 assert(m);
2032 assert(type < _JOB_TYPE_MAX);
2033 assert(unit);
2034 assert(mode < _JOB_MODE_MAX);
2035
2036 if (mode == JOB_ISOLATE && type != JOB_START)
2037 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
2038
2039 if (mode == JOB_ISOLATE && !unit->allow_isolate)
2040 return sd_bus_error_set(error, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
2041
2042 if (mode == JOB_TRIGGERING && type != JOB_STOP)
2043 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=triggering is only valid for stop.");
2044
2045 if (mode == JOB_RESTART_DEPENDENCIES && type != JOB_START)
2046 return sd_bus_error_set(error, SD_BUS_ERROR_INVALID_ARGS, "--job-mode=restart-dependencies is only valid for start.");
2047
2048 log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
2049
2050 type = job_type_collapse(type, unit);
2051
2052 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
2053 if (!tr)
2054 return -ENOMEM;
2055
2056 r = transaction_add_job_and_dependencies(
2057 tr,
2058 type,
2059 unit,
2060 /* by= */ NULL,
2061 TRANSACTION_MATTERS |
2062 (IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS) ? TRANSACTION_IGNORE_REQUIREMENTS : 0) |
2063 (mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0) |
2064 (mode == JOB_RESTART_DEPENDENCIES ? TRANSACTION_PROPAGATE_START_AS_RESTART : 0),
2065 error);
2066 if (r < 0)
2067 return r;
2068
2069 if (mode == JOB_ISOLATE) {
2070 r = transaction_add_isolate_jobs(tr, m);
2071 if (r < 0)
2072 return r;
2073 }
2074
2075 if (mode == JOB_TRIGGERING) {
2076 r = transaction_add_triggering_jobs(tr, unit);
2077 if (r < 0)
2078 return r;
2079 }
2080
2081 r = transaction_activate(tr, m, mode, affected_jobs, error);
2082 if (r < 0)
2083 return r;
2084
2085 log_unit_debug(unit,
2086 "Enqueued job %s/%s as %u", unit->id,
2087 job_type_to_string(type), (unsigned) tr->anchor_job->id);
2088
2089 if (ret)
2090 *ret = tr->anchor_job;
2091
2092 tr = transaction_free(tr);
2093 return 0;
2094 }
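
/* Illustrative sketch (hypothetical helper, not upstream code): enqueueing a start
 * job for an already-loaded unit via manager_add_job(). The error handling shown is
 * an assumption made for the example: */
static int example_enqueue_start(Manager *m, Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        Job *job;
        int r;

        /* JOB_REPLACE replaces conflicting queued jobs instead of failing the request */
        r = manager_add_job(m, JOB_START, u, JOB_REPLACE, /* affected_jobs= */ NULL, &error, &job);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to enqueue start job: %s",
                                            bus_error_message(&error, r));

        log_unit_debug(u, "Start job enqueued as %u.", (unsigned) job->id);
        return 0;
}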
2095
2096 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **ret) {
2097 Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */
2098 int r;
2099
2100 assert(m);
2101 assert(type < _JOB_TYPE_MAX);
2102 assert(name);
2103 assert(mode < _JOB_MODE_MAX);
2104
2105 r = manager_load_unit(m, name, NULL, NULL, &unit);
2106 if (r < 0)
2107 return r;
2108 assert(unit);
2109
2110 return manager_add_job(m, type, unit, mode, affected_jobs, e, ret);
2111 }
2112
2113 int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, Job **ret) {
2114 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2115 int r;
2116
2117 assert(m);
2118 assert(type < _JOB_TYPE_MAX);
2119 assert(name);
2120 assert(mode < _JOB_MODE_MAX);
2121
2122 r = manager_add_job_by_name(m, type, name, mode, affected_jobs, &error, ret);
2123 if (r < 0)
2124 return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r));
2125
2126 return r;
2127 }
2128
2129 int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) {
2130 int r;
2131 _cleanup_(transaction_abort_and_freep) Transaction *tr = NULL;
2132
2133 assert(m);
2134 assert(unit);
2135 assert(mode < _JOB_MODE_MAX);
2136 assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */
2137
2138 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
2139 if (!tr)
2140 return -ENOMEM;
2141
2142 /* We need an anchor job */
2143 r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, TRANSACTION_IGNORE_REQUIREMENTS|TRANSACTION_IGNORE_ORDER, e);
2144 if (r < 0)
2145 return r;
2146
2147 /* Failure in adding individual dependencies is ignored, so this always succeeds. */
2148 transaction_add_propagate_reload_jobs(
2149 tr,
2150 unit,
2151 tr->anchor_job,
2152 mode == JOB_IGNORE_DEPENDENCIES ? TRANSACTION_IGNORE_ORDER : 0);
2153
2154 r = transaction_activate(tr, m, mode, NULL, e);
2155 if (r < 0)
2156 return r;
2157
2158 tr = transaction_free(tr);
2159 return 0;
2160 }
2161
2162 Job *manager_get_job(Manager *m, uint32_t id) {
2163 assert(m);
2164
2165 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
2166 }
2167
2168 Unit *manager_get_unit(Manager *m, const char *name) {
2169 assert(m);
2170 assert(name);
2171
2172 return hashmap_get(m->units, name);
2173 }
2174
2175 static int manager_dispatch_target_deps_queue(Manager *m) {
2176 Unit *u;
2177 int r = 0;
2178
2179 assert(m);
2180
2181 while ((u = LIST_POP(target_deps_queue, m->target_deps_queue))) {
2182 _cleanup_free_ Unit **targets = NULL;
2183 int n_targets;
2184
2185 assert(u->in_target_deps_queue);
2186
2187 u->in_target_deps_queue = false;
2188
2189 /* Take an "atomic" snapshot of dependencies here, as the call below will likely modify the
2190 * dependencies, and the hash tables we iterate through must not be modified while
2191 * we are iterating through them. */
2192 n_targets = unit_get_dependency_array(u, UNIT_ATOM_DEFAULT_TARGET_DEPENDENCIES, &targets);
2193 if (n_targets < 0)
2194 return n_targets;
2195
2196 for (int i = 0; i < n_targets; i++) {
2197 r = unit_add_default_target_dependency(u, targets[i]);
2198 if (r < 0)
2199 return r;
2200 }
2201 }
2202
2203 return r;
2204 }
2205
2206 unsigned manager_dispatch_load_queue(Manager *m) {
2207 Unit *u;
2208 unsigned n = 0;
2209
2210 assert(m);
2211
2212 /* Make sure we are not run recursively */
2213 if (m->dispatching_load_queue)
2214 return 0;
2215
2216 m->dispatching_load_queue = true;
2217
2218 /* Dispatches the load queue. Takes a unit from the queue and
2219 * tries to load its data until the queue is empty */
2220
2221 while ((u = m->load_queue)) {
2222 assert(u->in_load_queue);
2223
2224 unit_load(u);
2225 n++;
2226 }
2227
2228 m->dispatching_load_queue = false;
2229
2230 /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about
2231 * should be loaded and have aliases resolved */
2232 (void) manager_dispatch_target_deps_queue(m);
2233
2234 return n;
2235 }
2236
2237 bool manager_unit_cache_should_retry_load(Unit *u) {
2238 assert(u);
2239
2240 /* Automatic reloading from disk only applies to units which were not found sometime in the past, and
2241 * the not-found stub is kept pinned in the unit graph by dependencies. For units that were
2242 * previously loaded, we don't do automatic reloading, and daemon-reload is necessary to update. */
2243 if (u->load_state != UNIT_NOT_FOUND)
2244 return false;
2245
2246 /* The cache has been updated since the last time we tried to load the unit. There might be new
2247 * fragment paths to read. */
2248 if (u->manager->unit_cache_timestamp_hash != u->fragment_not_found_timestamp_hash)
2249 return true;
2250
2251 /* The cache needs to be updated because there are modifications on disk. */
2252 return !lookup_paths_timestamp_hash_same(&u->manager->lookup_paths, u->manager->unit_cache_timestamp_hash, NULL);
2253 }
2254
2255 int manager_load_unit_prepare(
2256 Manager *m,
2257 const char *name,
2258 const char *path,
2259 sd_bus_error *e,
2260 Unit **ret) {
2261
2262 _cleanup_(unit_freep) Unit *cleanup_unit = NULL;
2263 _cleanup_free_ char *nbuf = NULL;
2264 int r;
2265
2266 assert(m);
2267 assert(ret);
2268 assert(name || path);
2269
2270 /* This will prepare the unit for loading, but not actually load anything from disk. */
2271
2272 if (path && !path_is_absolute(path))
2273 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);
2274
2275 if (!name) {
2276 r = path_extract_filename(path, &nbuf);
2277 if (r < 0)
2278 return r;
2279 if (r == O_DIRECTORY)
2280 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path '%s' refers to directory, refusing.", path);
2281
2282 name = nbuf;
2283 }
2284
2285 UnitType t = unit_name_to_type(name);
2286
2287 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
2288 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
2289 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);
2290
2291 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
2292 }
2293
2294 Unit *unit = manager_get_unit(m, name);
2295 if (unit) {
2296 /* The time-based cache allows starting new units without a daemon-reload,
2297 * but if they are already referenced (because of dependencies or ordering)
2298 * then we have to force a load of the fragment. As an optimization, check
2299 * first if anything in the usual paths was modified since the last time
2300 * the cache was loaded. Also check if the last time an attempt to load the
2301 * unit was made was before the most recent cache refresh, so that we know
2302 * we need to try again — even if the cache is current, it might have been
2303 * updated in a different context before we had a chance to retry loading
2304 * this particular unit. */
2305 if (manager_unit_cache_should_retry_load(unit))
2306 unit->load_state = UNIT_STUB;
2307 else {
2308 *ret = unit;
2309 return 0; /* The unit was already loaded */
2310 }
2311 } else {
2312 unit = cleanup_unit = unit_new(m, unit_vtable[t]->object_size);
2313 if (!unit)
2314 return -ENOMEM;
2315 }
2316
2317 if (path) {
2318 r = free_and_strdup(&unit->fragment_path, path);
2319 if (r < 0)
2320 return r;
2321 }
2322
2323 r = unit_add_name(unit, name);
2324 if (r < 0)
2325 return r;
2326
2327 unit_add_to_load_queue(unit);
2328 unit_add_to_dbus_queue(unit);
2329 unit_add_to_gc_queue(unit);
2330
2331 *ret = unit;
2332 TAKE_PTR(cleanup_unit);
2333
2334 return 1; /* The unit was added to the load queue */
2335 }
2336
2337 int manager_load_unit(
2338 Manager *m,
2339 const char *name,
2340 const char *path,
2341 sd_bus_error *e,
2342 Unit **ret) {
2343 int r;
2344
2345 assert(m);
2346 assert(ret);
2347
2348 /* This will load the unit config, but not actually start any services or anything. */
2349
2350 r = manager_load_unit_prepare(m, name, path, e, ret);
2351 if (r <= 0)
2352 return r;
2353
2354 /* Unit was newly loaded */
2355 manager_dispatch_load_queue(m);
2356 *ret = unit_follow_merge(*ret);
2357 return 0;
2358 }
2359
2360 int manager_load_startable_unit_or_warn(
2361 Manager *m,
2362 const char *name,
2363 const char *path,
2364 Unit **ret) {
2365
2366 /* Load a unit, make sure it loaded fully and is not masked. */
2367
2368 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2369 Unit *unit;
2370 int r;
2371
2372 r = manager_load_unit(m, name, path, &error, &unit);
2373 if (r < 0)
2374 return log_error_errno(r, "Failed to load %s %s: %s",
2375 name ? "unit" : "unit file", name ?: path,
2376 bus_error_message(&error, r));
2377
2378 r = bus_unit_validate_load_state(unit, &error);
2379 if (r < 0)
2380 return log_error_errno(r, "%s", bus_error_message(&error, r));
2381
2382 *ret = unit;
2383 return 0;
2384 }
2385
2386 void manager_clear_jobs(Manager *m) {
2387 Job *j;
2388
2389 assert(m);
2390
2391 while ((j = hashmap_first(m->jobs)))
2392 /* No need to recurse. We're cancelling all jobs. */
2393 job_finish_and_invalidate(j, JOB_CANCELED, false, false);
2394 }
2395
2396 void manager_unwatch_pidref(Manager *m, const PidRef *pid) {
2397 assert(m);
2398
2399 for (;;) {
2400 Unit *u;
2401
2402 u = manager_get_unit_by_pidref_watching(m, pid);
2403 if (!u)
2404 break;
2405
2406 unit_unwatch_pidref(u, pid);
2407 }
2408 }
2409
2410 static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) {
2411 Manager *m = ASSERT_PTR(userdata);
2412 Job *j;
2413
2414 assert(source);
2415
2416 while ((j = prioq_peek(m->run_queue))) {
2417 assert(j->installed);
2418 assert(j->in_run_queue);
2419
2420 (void) job_run_and_invalidate(j);
2421 }
2422
2423 if (m->n_running_jobs > 0)
2424 manager_watch_jobs_in_progress(m);
2425
2426 if (m->n_on_console > 0)
2427 manager_watch_idle_pipe(m);
2428
2429 return 1;
2430 }
2431
2432 void manager_trigger_run_queue(Manager *m) {
2433 int r;
2434
2435 assert(m);
2436
2437 r = sd_event_source_set_enabled(
2438 m->run_queue_event_source,
2439 prioq_isempty(m->run_queue) ? SD_EVENT_OFF : SD_EVENT_ONESHOT);
2440 if (r < 0)
2441 log_warning_errno(r, "Failed to enable job run queue event source, ignoring: %m");
2442 }
2443
2444 static unsigned manager_dispatch_dbus_queue(Manager *m) {
2445 unsigned n = 0, budget;
2446 Unit *u;
2447 Job *j;
2448
2449 assert(m);
2450
2451 /* When we are reloading, let's not wait with generating signals, since we need to exit the manager as quickly
2452 * as we can. There's no point in throttling generation of signals in that case. */
2453 if (MANAGER_IS_RELOADING(m) || m->send_reloading_done || m->pending_reload_message)
2454 budget = UINT_MAX; /* infinite budget in this case */
2455 else {
2456 /* Anything to do at all? */
2457 if (!m->dbus_unit_queue && !m->dbus_job_queue)
2458 return 0;
2459
2460 /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's
2461 * sit this cycle out, and process things in a later cycle when the queues have gotten a bit emptier. */
2462 if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
2463 return 0;
2464
2465 /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't
2466 * overly full before this call we shouldn't increase it in size too wildly in one step, and we
2467 * shouldn't monopolize CPU time with generating these messages. Note the difference in counting of
2468 * this "budget" and the "threshold" above: the "budget" is decreased only once per generated message,
2469 * regardless how many buses/direct connections it is enqueued on, while the "threshold" is applied to
2470 * each queued instance of bus message, i.e. if the same message is enqueued to five buses/direct
2471 * connections it will be counted five times. This difference in counting ("references"
2472 * vs. "instances") is primarily a result of the fact that it's easier to implement it this way,
2473 * however it also reflects the thinking that the "threshold" should put a limit on used queue memory,
2474 * i.e. space, while the "budget" should put a limit on time. Also note that the "threshold" is
2475 * currently chosen much higher than the "budget". */
2476 budget = MANAGER_BUS_MESSAGE_BUDGET;
2477 }
2478
2479 while (budget != 0 && (u = m->dbus_unit_queue)) {
2480
2481 assert(u->in_dbus_queue);
2482
2483 bus_unit_send_change_signal(u);
2484 n++;
2485
2486 if (budget != UINT_MAX)
2487 budget--;
2488 }
2489
2490 while (budget != 0 && (j = m->dbus_job_queue)) {
2491 assert(j->in_dbus_queue);
2492
2493 bus_job_send_change_signal(j);
2494 n++;
2495
2496 if (budget != UINT_MAX)
2497 budget--;
2498 }
2499
2500 if (m->send_reloading_done) {
2501 m->send_reloading_done = false;
2502 bus_manager_send_reloading(m, false);
2503 n++;
2504 }
2505
2506 if (m->pending_reload_message) {
2507 bus_send_pending_reload_message(m);
2508 n++;
2509 }
2510
2511 return n;
2512 }
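
/* To make the counting difference above concrete: one unit change signal fanned out
 * to the API bus plus four direct connections costs 1 from the per-iteration budget
 * (100), but contributes 5 queued instances toward the busy threshold (1024). */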
2513
2514 static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2515 Manager *m = userdata;
2516 char buf[PATH_MAX];
2517 ssize_t n;
2518
2519 n = recv(fd, buf, sizeof(buf), 0);
2520 if (n < 0)
2521 return log_error_errno(errno, "Failed to read cgroups agent message: %m");
2522 if (n == 0) {
2523 log_error("Got zero-length cgroups agent message, ignoring.");
2524 return 0;
2525 }
2526 if ((size_t) n >= sizeof(buf)) {
2527 log_error("Got overly long cgroups agent message, ignoring.");
2528 return 0;
2529 }
2530
2531 if (memchr(buf, 0, n)) {
2532 log_error("Got cgroups agent message with embedded NUL byte, ignoring.");
2533 return 0;
2534 }
2535 buf[n] = 0;
2536
2537 manager_notify_cgroup_empty(m, buf);
2538 (void) bus_forward_agent_released(m, buf);
2539
2540 return 0;
2541 }
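
/* The datagrams handled above are sent by systemd-cgroups-agent: one NUL-free cgroup
 * path per AF_UNIX/SOCK_DGRAM message. A minimal sketch of the sending side; the
 * helper and the rendezvous socket path are assumptions for illustration: */
static int example_send_agent_message(const char *cgroup_path) {
        union sockaddr_union sa = {
                .un.sun_family = AF_UNIX,
                .un.sun_path = "/run/systemd/cgroups-agent", /* assumed path */
        };

        _cleanup_close_ int fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
        if (fd < 0)
                return -errno;

        /* One path per datagram; no trailing NUL needed, see the length checks above */
        if (sendto(fd, cgroup_path, strlen(cgroup_path), 0, &sa.sa, SOCKADDR_UN_LEN(sa.un)) < 0)
                return -errno;

        return 0;
}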
2542
2543 static bool manager_process_barrier_fd(char * const *tags, FDSet *fds) {
2544
2545 /* Nothing else may be sent along with BARRIER=1 */
2546 if (strv_contains(tags, "BARRIER=1")) {
2547 if (strv_length(tags) != 1)
2548 log_warning("Extra notification messages sent with BARRIER=1, ignoring everything.");
2549 else if (fdset_size(fds) != 1)
2550 log_warning("Got incorrect number of fds with BARRIER=1, closing them.");
2551
2552 /* Drop the message if BARRIER=1 was found */
2553 return true;
2554 }
2555
2556 return false;
2557 }
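
/* On the client side this corresponds to sd_notify_barrier() from libsystemd, which
 * sends "BARRIER=1" together with one pipe fd and blocks until the manager closes
 * it. Minimal usage sketch (hypothetical helper; the timeout is in microseconds): */
static int example_sync_with_manager(void) {
        /* Returns once the manager has processed all notification messages sent
         * before this call, or when the 5 s timeout elapses */
        return sd_notify_barrier(/* unset_environment= */ false, 5 * USEC_PER_SEC);
}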
2558
2559 static void manager_invoke_notify_message(
2560 Manager *m,
2561 Unit *u,
2562 const struct ucred *ucred,
2563 char * const *tags,
2564 FDSet *fds) {
2565
2566 assert(m);
2567 assert(u);
2568 assert(ucred);
2569 assert(tags);
2570
2571 if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */
2572 return;
2573 u->notifygen = m->notifygen;
2574
2575 if (UNIT_VTABLE(u)->notify_message)
2576 UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds);
2577
2578 else if (DEBUG_LOGGING) {
2579 _cleanup_free_ char *buf = NULL, *x = NULL, *y = NULL;
2580
2581 buf = strv_join(tags, ", ");
2582 if (buf)
2583 x = ellipsize(buf, 20, 90);
2584 if (x)
2585 y = cescape(x);
2586
2587 log_unit_debug(u, "Got notification message \"%s\", ignoring.", strnull(y));
2588 }
2589 }
2590
2591 static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2592
2593 _cleanup_fdset_free_ FDSet *fds = NULL;
2594 Manager *m = ASSERT_PTR(userdata);
2595 char buf[NOTIFY_BUFFER_MAX+1];
2596 struct iovec iovec = {
2597 .iov_base = buf,
2598 .iov_len = sizeof(buf)-1,
2599 };
2600 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) +
2601 CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)) control;
2602 struct msghdr msghdr = {
2603 .msg_iov = &iovec,
2604 .msg_iovlen = 1,
2605 .msg_control = &control,
2606 .msg_controllen = sizeof(control),
2607 };
2608
2609 struct cmsghdr *cmsg;
2610 struct ucred *ucred = NULL;
2611 _cleanup_free_ Unit **array_copy = NULL;
2612 _cleanup_strv_free_ char **tags = NULL;
2613 Unit *u1, *u2, **array;
2614 int r, *fd_array = NULL;
2615 size_t n_fds = 0;
2616 bool found = false;
2617 ssize_t n;
2618
2619 assert(m->notify_fd == fd);
2620
2621 if (revents != EPOLLIN) {
2622 log_warning("Got unexpected poll event for notify fd.");
2623 return 0;
2624 }
2625
2626 n = recvmsg_safe(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
2627 if (ERRNO_IS_NEG_TRANSIENT(n))
2628 return 0; /* Spurious wakeup, try again */
2629 if (n == -EXFULL) {
2630 log_warning("Got message with truncated control data (too many fds sent?), ignoring.");
2631 return 0;
2632 }
2633 if (n < 0)
2634 /* If this is any other, real error, then stop processing this socket. This of course means
2635 * we won't take notification messages anymore, but that's still better than busy looping:
2636 * being woken up over and over again, but being unable to actually read the message from the
2637 * socket. */
2638 return log_error_errno(n, "Failed to receive notification message: %m");
2639
2640 CMSG_FOREACH(cmsg, &msghdr)
2641 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
2642
2643 assert(!fd_array);
2644 fd_array = CMSG_TYPED_DATA(cmsg, int);
2645 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
2646
2647 } else if (cmsg->cmsg_level == SOL_SOCKET &&
2648 cmsg->cmsg_type == SCM_CREDENTIALS &&
2649 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
2650
2651 assert(!ucred);
2652 ucred = CMSG_TYPED_DATA(cmsg, struct ucred);
2653 }
2654
2655 if (n_fds > 0) {
2656 assert(fd_array);
2657
2658 r = fdset_new_array(&fds, fd_array, n_fds);
2659 if (r < 0) {
2660 close_many(fd_array, n_fds);
2661 log_oom();
2662 return 0;
2663 }
2664 }
2665
2666 if (!ucred || !pid_is_valid(ucred->pid)) {
2667 log_warning("Received notify message without valid credentials. Ignoring.");
2668 return 0;
2669 }
2670
2671 if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) {
2672 log_warning("Received notify message exceeded maximum size. Ignoring.");
2673 return 0;
2674 }
2675
2676 /* As extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes.
2677 * We permit one trailing NUL byte in the message, but don't expect it. */
2678 if (n > 1 && memchr(buf, 0, n-1)) {
2679 log_warning("Received notify message with embedded NUL bytes. Ignoring.");
2680 return 0;
2681 }
2682
2683 /* Make sure it's NUL-terminated, then parse it to obtain the tags list. */
2684 buf[n] = 0;
2685 tags = strv_split_newlines(buf);
2686 if (!tags) {
2687 log_oom();
2688 return 0;
2689 }
2690
2691 /* Possibly a barrier fd, let's see. */
2692 if (manager_process_barrier_fd(tags, fds)) {
2693 log_debug("Received barrier notification message from PID " PID_FMT ".", ucred->pid);
2694 return 0;
2695 }
2696
2697 /* Increase the generation counter used for filtering out duplicate unit invocations. */
2698 m->notifygen++;
2699
2700 /* Generate lookup key from the PID (we have no pidfd here, after all) */
2701 PidRef pidref = PIDREF_MAKE_FROM_PID(ucred->pid);
2702
2703 /* Notify every unit that might be interested, which might be multiple. */
2704 u1 = manager_get_unit_by_pidref_cgroup(m, &pidref);
2705 u2 = hashmap_get(m->watch_pids, &pidref);
2706 array = hashmap_get(m->watch_pids_more, &pidref);
2707 if (array) {
2708 size_t k = 0;
2709
2710 while (array[k])
2711 k++;
2712
2713 array_copy = newdup(Unit*, array, k+1);
2714 if (!array_copy)
2715 log_oom();
2716 }
2717 /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle
2718 * duplicate units and make sure we only invoke each unit's handler once. */
2719 if (u1) {
2720 manager_invoke_notify_message(m, u1, ucred, tags, fds);
2721 found = true;
2722 }
2723 if (u2) {
2724 manager_invoke_notify_message(m, u2, ucred, tags, fds);
2725 found = true;
2726 }
2727 if (array_copy)
2728 for (size_t i = 0; array_copy[i]; i++) {
2729 manager_invoke_notify_message(m, array_copy[i], ucred, tags, fds);
2730 found = true;
2731 }
2732
2733 if (!found)
2734 log_warning("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid);
2735
2736 if (!fdset_isempty(fds))
2737 log_warning("Got extra auxiliary fds with notification message, closing them.");
2738
2739 return 0;
2740 }
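
/* The sending side of this protocol is a service writing newline-separated tags to
 * $NOTIFY_SOCKET, typically via sd_notify(). Minimal sender sketch (hypothetical
 * helper) for a Type=notify service: */
static void example_notify_ready(void) {
        /* Tags are split exactly as strv_split_newlines() above expects; readiness
         * notification is best-effort, hence the ignored return value */
        (void) sd_notify(/* unset_environment= */ false,
                         "READY=1\n"
                         "STATUS=Processing requests...");
}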
2741
2742 static void manager_invoke_sigchld_event(
2743 Manager *m,
2744 Unit *u,
2745 const siginfo_t *si) {
2746
2747 assert(m);
2748 assert(u);
2749 assert(si);
2750
2751 /* Already invoked the handler of this unit in this iteration? Then don't process this again */
2752 if (u->sigchldgen == m->sigchldgen)
2753 return;
2754 u->sigchldgen = m->sigchldgen;
2755
2756 log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id);
2757 unit_unwatch_pid(u, si->si_pid);
2758
2759 if (UNIT_VTABLE(u)->sigchld_event)
2760 UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status);
2761 }
2762
2763 static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) {
2764 Manager *m = ASSERT_PTR(userdata);
2765 siginfo_t si = {};
2766 int r;
2767
2768 assert(source);
2769
2770 /* First we call waitid() for a PID and do not reap the zombie. That way we can still access
2771 * /proc/$PID for it while it is a zombie. */
2772
2773 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2774
2775 if (errno != ECHILD)
2776 log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m");
2777
2778 goto turn_off;
2779 }
2780
2781 if (si.si_pid <= 0)
2782 goto turn_off;
2783
2784 if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) {
2785 _cleanup_free_ Unit **array_copy = NULL;
2786 _cleanup_free_ char *name = NULL;
2787 Unit *u1, *u2, **array;
2788
2789 (void) pid_get_comm(si.si_pid, &name);
2790
2791 log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)",
2792 si.si_pid, strna(name),
2793 sigchld_code_to_string(si.si_code),
2794 si.si_status,
2795 strna(si.si_code == CLD_EXITED
2796 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2797 : signal_to_string(si.si_status)));
2798
2799 /* Increase the generation counter used for filtering out duplicate unit invocations */
2800 m->sigchldgen++;
2801
2802 /* We look this up by a PidRef that only consists of the PID. After all we couldn't create a
2803 * pidfd here any more even if we wanted to (since the process just exited). */
2804 PidRef pidref = PIDREF_MAKE_FROM_PID(si.si_pid);
2805
2806 /* And now figure out the unit this belongs to, it might be multiple... */
2807 u1 = manager_get_unit_by_pidref_cgroup(m, &pidref);
2808 u2 = hashmap_get(m->watch_pids, &pidref);
2809 array = hashmap_get(m->watch_pids_more, &pidref);
2810 if (array) {
2811 size_t n = 0;
2812
2813 /* Count how many entries the array has */
2814 while (array[n])
2815 n++;
2816
2817 /* Make a copy of the array so that we don't trip up on the array changing beneath us */
2818 array_copy = newdup(Unit*, array, n+1);
2819 if (!array_copy)
2820 log_oom();
2821 }
2822
2823 /* Finally, execute them all. Note that u1, u2 and the array might contain duplicates, but
2824 * that's fine, manager_invoke_sigchld_event() will ensure we only invoke the handlers once for
2825 * each iteration. */
2826 if (u1) {
2827 /* We check for oom condition, in case we got SIGCHLD before the oom notification.
2828 * We only do this for the cgroup the PID belonged to. */
2829 (void) unit_check_oom(u1);
2830
2831 /* We check if systemd-oomd performed a kill so that we log and notify appropriately */
2832 (void) unit_check_oomd_kill(u1);
2833
2834 manager_invoke_sigchld_event(m, u1, &si);
2835 }
2836 if (u2)
2837 manager_invoke_sigchld_event(m, u2, &si);
2838 if (array_copy)
2839 for (size_t i = 0; array_copy[i]; i++)
2840 manager_invoke_sigchld_event(m, array_copy[i], &si);
2841 }
2842
2843 /* And now, we actually reap the zombie. */
2844 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2845 log_error_errno(errno, "Failed to dequeue child, ignoring: %m");
2846 return 0;
2847 }
2848
2849 return 0;
2850
2851 turn_off:
2852 /* All children processed for now, turn off event source */
2853
2854 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
2855 if (r < 0)
2856 return log_error_errno(r, "Failed to disable SIGCHLD event source: %m");
2857
2858 return 0;
2859 }
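
/* Condensed illustration (hypothetical helper) of the two-phase reaping used above:
 * peek with WNOWAIT so /proc/$PID stays accessible while the owning unit is looked
 * up, then reap for real: */
static int example_peek_then_reap(void) {
        siginfo_t si = {};

        /* Phase 1: observe a zombie without reaping it */
        if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0)
                return -errno;
        if (si.si_pid <= 0)
                return 0; /* no exited child pending */

        /* ... inspect /proc/<pid> here while the zombie persists ... */

        /* Phase 2: actually collect it */
        if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0)
                return -errno;

        return 1;
}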
2860
2861 static void manager_start_special(Manager *m, const char *name, JobMode mode) {
2862 Job *job;
2863
2864 if (manager_add_job_by_name_and_warn(m, JOB_START, name, mode, NULL, &job) < 0)
2865 return;
2866
2867 const char *s = unit_status_string(job->unit, NULL);
2868
2869 log_info("Activating special unit %s...", s);
2870
2871 (void) sd_notifyf(/* unset_environment= */ false,
2872 "STATUS=Activating special unit %s...", s);
2873 m->status_ready = false;
2874 }
2875
2876 static void manager_handle_ctrl_alt_del(Manager *m) {
2877 /* If the user presses C-A-D more than 7 times within 2s,
2878 * we reboot/shutdown immediately,
2879 * unless it was disabled in system.conf */
2880
2881 if (ratelimit_below(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE)
2882 manager_start_special(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY);
2883 else
2884 emergency_action(m, m->cad_burst_action, EMERGENCY_ACTION_WARN, NULL, -1,
2885 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
2886 }
2887
2888 static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2889 Manager *m = ASSERT_PTR(userdata);
2890 ssize_t n;
2891 struct signalfd_siginfo sfsi;
2892 int r;
2893
2894 assert(m->signal_fd == fd);
2895
2896 if (revents != EPOLLIN) {
2897 log_warning("Got unexpected events from signal file descriptor.");
2898 return 0;
2899 }
2900
2901 n = read(m->signal_fd, &sfsi, sizeof(sfsi));
2902 if (n < 0) {
2903 if (ERRNO_IS_TRANSIENT(errno))
2904 return 0;
2905
2906 /* We return an error here, which will kill this handler,
2907 * to avoid a busy loop on read error. */
2908 return log_error_errno(errno, "Reading from signal fd failed: %m");
2909 }
2910 if (n != sizeof(sfsi)) {
2911 log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n);
2912 return 0;
2913 }
2914
2915 log_received_signal(sfsi.ssi_signo == SIGCHLD ||
2916 (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m))
2917 ? LOG_DEBUG : LOG_INFO,
2918 &sfsi);
2919
2920 switch (sfsi.ssi_signo) {
2921
2922 case SIGCHLD:
2923 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2924 if (r < 0)
2925 log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m");
2926
2927 break;
2928
2929 case SIGTERM:
2930 if (MANAGER_IS_SYSTEM(m)) {
2931 /* This is for compatibility with the original sysvinit */
2932 if (verify_run_space_and_log("Refusing to reexecute") < 0)
2933 break;
2934
2935 m->objective = MANAGER_REEXECUTE;
2936 break;
2937 }
2938
2939 _fallthrough_;
2940 case SIGINT:
2941 if (MANAGER_IS_SYSTEM(m))
2942 manager_handle_ctrl_alt_del(m);
2943 else
2944 manager_start_special(m, SPECIAL_EXIT_TARGET, JOB_REPLACE_IRREVERSIBLY);
2945 break;
2946
2947 case SIGWINCH:
2948 /* This is a nop on non-init */
2949 if (MANAGER_IS_SYSTEM(m))
2950 manager_start_special(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2951
2952 break;
2953
2954 case SIGPWR:
2955 /* This is a nop on non-init */
2956 if (MANAGER_IS_SYSTEM(m))
2957 manager_start_special(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2958
2959 break;
2960
2961 case SIGUSR1:
2962 if (manager_dbus_is_running(m, false)) {
2963 log_info("Trying to reconnect to bus...");
2964
2965 (void) bus_init_api(m);
2966
2967 if (MANAGER_IS_SYSTEM(m))
2968 (void) bus_init_system(m);
2969 } else
2970 manager_start_special(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2971
2972 break;
2973
2974 case SIGUSR2: {
2975 _cleanup_free_ char *dump = NULL;
2976
2977 r = manager_get_dump_string(m, /* patterns= */ NULL, &dump);
2978 if (r < 0) {
2979 log_warning_errno(r, "Failed to acquire manager dump: %m");
2980 break;
2981 }
2982
2983 log_dump(LOG_INFO, dump);
2984 break;
2985 }
2986
2987 case SIGHUP:
2988 if (verify_run_space_and_log("Refusing to reload") < 0)
2989 break;
2990
2991 m->objective = MANAGER_RELOAD;
2992 break;
2993
2994 default: {
2995
2996 /* Starting SIGRTMIN+0 */
2997 static const struct {
2998 const char *target;
2999 JobMode mode;
3000 } target_table[] = {
3001 [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE },
3002 [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE },
3003 [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE },
3004 [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY },
3005 [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY },
3006 [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
3007 [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY },
3008 [7] = { SPECIAL_SOFT_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
3009 };
3010
3011 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
3012 static const ManagerObjective objective_table[] = {
3013 [0] = MANAGER_HALT,
3014 [1] = MANAGER_POWEROFF,
3015 [2] = MANAGER_REBOOT,
3016 [3] = MANAGER_KEXEC,
3017 [4] = MANAGER_SOFT_REBOOT,
3018 };
3019
3020 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
3021 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
3022 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
3023 manager_start_special(m, target_table[idx].target, target_table[idx].mode);
3024 break;
3025 }
3026
3027 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
3028 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(objective_table)) {
3029 m->objective = objective_table[sfsi.ssi_signo - SIGRTMIN - 13];
3030 break;
3031 }
3032
3033 switch (sfsi.ssi_signo - SIGRTMIN) {
3034
3035 case 18: {
3036 bool generic = false;
3037
3038 if (sfsi.ssi_code != SI_QUEUE)
3039 generic = true;
3040 else {
3041 /* Override a few select commands by our own PID1-specific logic */
3042
3043 switch (sfsi.ssi_int) {
3044
3045 case _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE..._COMMON_SIGNAL_COMMAND_LOG_LEVEL_END:
3046 manager_override_log_level(m, sfsi.ssi_int - _COMMON_SIGNAL_COMMAND_LOG_LEVEL_BASE);
3047 break;
3048
3049 case COMMON_SIGNAL_COMMAND_CONSOLE:
3050 manager_override_log_target(m, LOG_TARGET_CONSOLE);
3051 break;
3052
3053 case COMMON_SIGNAL_COMMAND_JOURNAL:
3054 manager_override_log_target(m, LOG_TARGET_JOURNAL);
3055 break;
3056
3057 case COMMON_SIGNAL_COMMAND_KMSG:
3058 manager_override_log_target(m, LOG_TARGET_KMSG);
3059 break;
3060
3061 case COMMON_SIGNAL_COMMAND_NULL:
3062 manager_override_log_target(m, LOG_TARGET_NULL);
3063 break;
3064
3065 case MANAGER_SIGNAL_COMMAND_DUMP_JOBS: {
3066 _cleanup_free_ char *dump_jobs = NULL;
3067
3068 r = manager_get_dump_jobs_string(m, /* patterns= */ NULL, " ", &dump_jobs);
3069 if (r < 0) {
3070 log_warning_errno(r, "Failed to acquire manager jobs dump: %m");
3071 break;
3072 }
3073
3074 log_dump(LOG_INFO, dump_jobs);
3075 break;
3076 }
3077
3078 default:
3079 generic = true;
3080 }
3081 }
3082
3083 if (generic)
3084 return sigrtmin18_handler(source, &sfsi, NULL);
3085
3086 break;
3087 }
3088
3089 case 20:
3090 manager_override_show_status(m, SHOW_STATUS_YES, "signal");
3091 break;
3092
3093 case 21:
3094 manager_override_show_status(m, SHOW_STATUS_NO, "signal");
3095 break;
3096
3097 case 22:
3098 manager_override_log_level(m, LOG_DEBUG);
3099 break;
3100
3101 case 23:
3102 manager_restore_original_log_level(m);
3103 break;
3104
3105 case 24:
3106 if (MANAGER_IS_USER(m)) {
3107 m->objective = MANAGER_EXIT;
3108 return 0;
3109 }
3110
3111 /* This is a nop on init */
3112 break;
3113
3114 case 25:
3115 m->objective = MANAGER_REEXECUTE;
3116 break;
3117
3118 case 26:
3119 case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
3120 manager_restore_original_log_target(m);
3121 break;
3122
3123 case 27:
3124 manager_override_log_target(m, LOG_TARGET_CONSOLE);
3125 break;
3126
3127 case 28:
3128 manager_override_log_target(m, LOG_TARGET_KMSG);
3129 break;
3130
3131 default:
3132 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
3133 }
3134 }}
3135
3136 return 0;
3137 }
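
/* Given the tables above, a privileged process can drive these transitions by
 * signalling PID 1 directly, which is what systemctl falls back to when D-Bus is
 * unavailable. Hypothetical example, requesting poweroff.target via target_table[4]: */
static int example_request_poweroff(void) {
        return kill(1, SIGRTMIN + 4) < 0 ? -errno : 0;
}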
3138
3139 static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
3140 Manager *m = ASSERT_PTR(userdata);
3141 Unit *u;
3142
3143 log_struct(LOG_DEBUG,
3144 "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR,
3145 LOG_MESSAGE("Time has been changed"));
3146
3147 /* Restart the watch */
3148 (void) manager_setup_time_change(m);
3149
3150 HASHMAP_FOREACH(u, m->units)
3151 if (UNIT_VTABLE(u)->time_change)
3152 UNIT_VTABLE(u)->time_change(u);
3153
3154 return 0;
3155 }
3156
3157 static int manager_dispatch_timezone_change(
3158 sd_event_source *source,
3159 const struct inotify_event *e,
3160 void *userdata) {
3161
3162 Manager *m = ASSERT_PTR(userdata);
3163 int changed;
3164 Unit *u;
3165
3166 log_debug("inotify event for /etc/localtime");
3167
3168 changed = manager_read_timezone_stat(m);
3169 if (changed <= 0)
3170 return changed;
3171
3172 /* Something changed, restart the watch, to ensure we watch the new /etc/localtime if it changed */
3173 (void) manager_setup_timezone_change(m);
3174
3175 /* Read the new timezone */
3176 tzset();
3177
3178 log_debug("Timezone has been changed (now: %s).", tzname[daylight]);
3179
3180 HASHMAP_FOREACH(u, m->units)
3181 if (UNIT_VTABLE(u)->timezone_change)
3182 UNIT_VTABLE(u)->timezone_change(u);
3183
3184 return 0;
3185 }
3186
3187 static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
3188 Manager *m = ASSERT_PTR(userdata);
3189
3190 assert(m->idle_pipe[2] == fd);
3191
3192 /* There's at least one Type=idle child that just gave up on us waiting for the boot process to
3193 * complete. Let's now turn off any further console output if there's at least one service that needs
3194 * console access, so that from now on our own output should not spill into that service's output
3195 * anymore. After all, we support Type=idle only to beautify console output and it generally is set
3196 * on services that want to own the console exclusively without our interference. */
3197 m->no_console_output = m->n_on_console > 0;
3198
3199 /* Acknowledge the child's request, and let all other children know too that they shouldn't wait
3200 * any longer by closing the pipes towards them, which is what they are waiting for. */
3201 manager_close_idle_pipe(m);
3202
3203 return 0;
3204 }
3205
3206 static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) {
3207 Manager *m = ASSERT_PTR(userdata);
3208 int r;
3209
3210 assert(source);
3211
3212 manager_print_jobs_in_progress(m);
3213
3214 r = sd_event_source_set_time_relative(source, JOBS_IN_PROGRESS_PERIOD_USEC);
3215 if (r < 0)
3216 return r;
3217
3218 return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT);
3219 }
3220
3221 int manager_loop(Manager *m) {
3222 RateLimit rl = { .interval = 1*USEC_PER_SEC, .burst = 50000 };
3223 int r;
3224
3225 assert(m);
3226 assert(m->objective == MANAGER_OK); /* Ensure manager_startup() has been called */
3227
3228 manager_check_finished(m);
3229
3230 /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
3231 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
3232 if (r < 0)
3233 return log_error_errno(r, "Failed to enable SIGCHLD event source: %m");
3234
3235 while (m->objective == MANAGER_OK) {
3236
3237 (void) watchdog_ping();
3238
3239 if (!ratelimit_below(&rl)) {
3240 /* Yay, something is going seriously wrong, pause a little */
3241 log_warning("Looping too fast. Throttling execution a little.");
3242 sleep(1);
3243 }
3244
3245 if (manager_dispatch_load_queue(m) > 0)
3246 continue;
3247
3248 if (manager_dispatch_gc_job_queue(m) > 0)
3249 continue;
3250
3251 if (manager_dispatch_gc_unit_queue(m) > 0)
3252 continue;
3253
3254 if (manager_dispatch_cleanup_queue(m) > 0)
3255 continue;
3256
3257 if (manager_dispatch_cgroup_realize_queue(m) > 0)
3258 continue;
3259
3260 if (manager_dispatch_start_when_upheld_queue(m) > 0)
3261 continue;
3262
3263 if (manager_dispatch_stop_when_bound_queue(m) > 0)
3264 continue;
3265
3266 if (manager_dispatch_stop_when_unneeded_queue(m) > 0)
3267 continue;
3268
3269 if (manager_dispatch_release_resources_queue(m) > 0)
3270 continue;
3271
3272 if (manager_dispatch_dbus_queue(m) > 0)
3273 continue;
3274
3275 /* Sleep for watchdog runtime wait time */
3276 r = sd_event_run(m->event, watchdog_runtime_wait());
3277 if (r < 0)
3278 return log_error_errno(r, "Failed to run event loop: %m");
3279 }
3280
3281 return m->objective;
3282 }
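
/* For comparison, a bare sd-event loop without the manager's own work queues.
 * sd_event_run() dispatches at most one event per call, which is what lets
 * manager_loop() above interleave its queue processing between events
 * (hypothetical helper): */
static int example_event_loop(sd_event *e) {
        int r;

        /* Equivalent to sd_event_loop(e), but spelled out so per-iteration work
         * could be inserted before each dispatch */
        while (sd_event_get_state(e) != SD_EVENT_FINISHED) {
                r = sd_event_run(e, UINT64_MAX);
                if (r < 0)
                        return r;
        }

        return 0;
}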
3283
3284 int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
3285 _cleanup_free_ char *n = NULL;
3286 sd_id128_t invocation_id;
3287 Unit *u;
3288 int r;
3289
3290 assert(m);
3291 assert(s);
3292 assert(_u);
3293
3294 r = unit_name_from_dbus_path(s, &n);
3295 if (r < 0)
3296 return r;
3297
3298 /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128-bit ID then
3299 * we use it as invocation ID. */
3300 r = sd_id128_from_string(n, &invocation_id);
3301 if (r >= 0) {
3302 u = hashmap_get(m->units_by_invocation_id, &invocation_id);
3303 if (u) {
3304 *_u = u;
3305 return 0;
3306 }
3307
3308 return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID,
3309 "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.",
3310 SD_ID128_FORMAT_VAL(invocation_id));
3311 }
3312
3313 /* If this didn't work, we check if this is a unit name */
3314 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
3315 _cleanup_free_ char *nn = NULL;
3316
3317 nn = cescape(n);
3318 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS,
3319 "Unit name %s is neither a valid invocation ID nor unit name.", strnull(nn));
3320 }
3321
3322 r = manager_load_unit(m, n, NULL, e, &u);
3323 if (r < 0)
3324 return r;
3325
3326 *_u = u;
3327 return 0;
3328 }
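
/* Client-side counterpart (hypothetical helper): addressing a unit object by its
 * invocation ID rather than its name. The manager above resolves the plain hex form
 * of the ID when it appears as the final path component; the buffer size here is an
 * assumption: */
static void example_unit_path_by_invocation_id(sd_id128_t invocation_id, char path[static 128]) {
        snprintf(path, 128, "/org/freedesktop/systemd1/unit/" SD_ID128_FORMAT_STR,
                 SD_ID128_FORMAT_VAL(invocation_id));
}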
3329
3330 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
3331 const char *p;
3332 unsigned id;
3333 Job *j;
3334 int r;
3335
3336 assert(m);
3337 assert(s);
3338 assert(_j);
3339
3340 p = startswith(s, "/org/freedesktop/systemd1/job/");
3341 if (!p)
3342 return -EINVAL;
3343
3344 r = safe_atou(p, &id);
3345 if (r < 0)
3346 return r;
3347
3348 j = manager_get_job(m, id);
3349 if (!j)
3350 return -ENOENT;
3351
3352 *_j = j;
3353
3354 return 0;
3355 }
3356
3357 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
3358
3359 #if HAVE_AUDIT
3360 _cleanup_free_ char *p = NULL;
3361 const char *msg;
3362 int audit_fd, r;
3363
3364 assert(m);
3365 assert(u);
3366
3367 if (!MANAGER_IS_SYSTEM(m))
3368 return;
3369
3370 /* Don't generate audit events if the service was already started and we're just deserializing */
3371 if (MANAGER_IS_RELOADING(m))
3372 return;
3373
3374 audit_fd = get_audit_fd();
3375 if (audit_fd < 0)
3376 return;
3377
3378 r = unit_name_to_prefix_and_instance(u->id, &p);
3379 if (r < 0) {
3380 log_warning_errno(r, "Failed to extract prefix and instance of unit name, ignoring: %m");
3381 return;
3382 }
3383
3384 msg = strjoina("unit=", p);
3385 if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
3386 if (ERRNO_IS_PRIVILEGE(errno)) {
3387 /* We aren't allowed to send audit messages? Then let's not retry again. */
3388 log_debug_errno(errno, "Failed to send audit message, closing audit socket: %m");
3389 close_audit_fd();
3390 } else
3391 log_warning_errno(errno, "Failed to send audit message, ignoring: %m");
3392 }
3393 #endif
3394 }
3395
3396 void manager_send_unit_plymouth(Manager *m, Unit *u) {
3397 _cleanup_free_ char *message = NULL;
3398 int c, r;
3399
3400 assert(m);
3401 assert(u);
3402
3403 if (!MANAGER_IS_SYSTEM(m))
3404 return;
3405
3406 /* Don't generate plymouth events if the service was already started and we're just deserializing */
3407 if (MANAGER_IS_RELOADING(m))
3408 return;
3409
3410 if (detect_container() > 0)
3411 return;
3412
3413 if (!UNIT_VTABLE(u)->notify_plymouth)
3414 return;
3415
3416 c = asprintf(&message, "U\x02%c%s%c", (int) (strlen(u->id) + 1), u->id, '\x00');
3417 if (c < 0)
3418 return (void) log_oom();
3419
3420 /* We set SOCK_NONBLOCK here so that we rather drop the message than wait for plymouth */
3421 r = plymouth_send_raw(message, c, SOCK_NONBLOCK);
3422 if (r < 0)
3423 log_full_errno(ERRNO_IS_NO_PLYMOUTH(r) ? LOG_DEBUG : LOG_WARNING, r,
3424 "Failed to communicate with plymouth: %m");
3425 }
3426
3427 void manager_send_unit_supervisor(Manager *m, Unit *u, bool active) {
3428 assert(m);
3429 assert(u);
3430
3431 /* Notify a "supervisor" process about our progress, i.e. a container manager, hypervisor, or
3432 * surrounding service manager. */
3433
3434 if (MANAGER_IS_RELOADING(m))
3435 return;
3436
3437 if (!UNIT_VTABLE(u)->notify_supervisor)
3438 return;
3439
3440 if (in_initrd()) /* Only send these once we have left the initrd */
3441 return;
3442
3443 (void) sd_notifyf(/* unset_environment= */ false,
3444 active ? "X_SYSTEMD_UNIT_ACTIVE=%s" : "X_SYSTEMD_UNIT_INACTIVE=%s",
3445 u->id);
3446 }
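
/* A supervisor (e.g. a container manager) that sets $NOTIFY_SOCKET for the payload's
 * PID 1 receives these as plain datagram tags, for instance
 * "X_SYSTEMD_UNIT_ACTIVE=multi-user.target" once that target is reached (the unit
 * name here is an illustrative example). */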
3447
3448 usec_t manager_get_watchdog(Manager *m, WatchdogType t) {
3449 assert(m);
3450
3451 if (MANAGER_IS_USER(m))
3452 return USEC_INFINITY;
3453
3454 if (m->watchdog_overridden[t] != USEC_INFINITY)
3455 return m->watchdog_overridden[t];
3456
3457 return m->watchdog[t];
3458 }
3459
3460 void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout) {
3461
3462 assert(m);
3463
3464 if (MANAGER_IS_USER(m))
3465 return;
3466
3467 if (m->watchdog[t] == timeout)
3468 return;
3469
3470 if (m->watchdog_overridden[t] == USEC_INFINITY) {
3471 if (t == WATCHDOG_RUNTIME)
3472 (void) watchdog_setup(timeout);
3473 else if (t == WATCHDOG_PRETIMEOUT)
3474 (void) watchdog_setup_pretimeout(timeout);
3475 }
3476
3477 m->watchdog[t] = timeout;
3478 }
3479
3480 void manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout) {
3481 usec_t usec;
3482
3483 assert(m);
3484
3485 if (MANAGER_IS_USER(m))
3486 return;
3487
3488 if (m->watchdog_overridden[t] == timeout)
3489 return;
3490
3491 usec = timeout == USEC_INFINITY ? m->watchdog[t] : timeout;
3492 if (t == WATCHDOG_RUNTIME)
3493 (void) watchdog_setup(usec);
3494 else if (t == WATCHDOG_PRETIMEOUT)
3495 (void) watchdog_setup_pretimeout(usec);
3496
3497 m->watchdog_overridden[t] = timeout;
3498 }
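
/* The override layers on top of the configured value; passing USEC_INFINITY drops
 * the override and re-arms the watchdog with the original setting. Hypothetical
 * call sequence: */
static void example_watchdog_override(Manager *m) {
        /* Temporarily tighten the runtime watchdog to 10s... */
        manager_override_watchdog(m, WATCHDOG_RUNTIME, 10 * USEC_PER_SEC);

        /* ...then drop the override, restoring m->watchdog[WATCHDOG_RUNTIME] */
        manager_override_watchdog(m, WATCHDOG_RUNTIME, USEC_INFINITY);
}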
3499
3500 int manager_set_watchdog_pretimeout_governor(Manager *m, const char *governor) {
3501 _cleanup_free_ char *p = NULL;
3502 int r;
3503
3504 assert(m);
3505
3506 if (MANAGER_IS_USER(m))
3507 return 0;
3508
3509 if (streq_ptr(m->watchdog_pretimeout_governor, governor))
3510 return 0;
3511
3512 p = strdup(governor);
3513 if (!p)
3514 return -ENOMEM;
3515
3516 r = watchdog_setup_pretimeout_governor(governor);
3517 if (r < 0)
3518 return r;
3519
3520 return free_and_replace(m->watchdog_pretimeout_governor, p);
3521 }
3522
3523 int manager_override_watchdog_pretimeout_governor(Manager *m, const char *governor) {
3524 _cleanup_free_ char *p = NULL;
3525 int r;
3526
3527 assert(m);
3528
3529 if (MANAGER_IS_USER(m))
3530 return 0;
3531
3532 if (streq_ptr(m->watchdog_pretimeout_governor_overridden, governor))
3533 return 0;
3534
3535 p = strdup(governor);
3536 if (!p)
3537 return -ENOMEM;
3538
3539 r = watchdog_setup_pretimeout_governor(governor);
3540 if (r < 0)
3541 return r;
3542
3543 return free_and_replace(m->watchdog_pretimeout_governor_overridden, p);
3544 }
3545
3546 int manager_reload(Manager *m) {
3547 _unused_ _cleanup_(manager_reloading_stopp) Manager *reloading = NULL;
3548 _cleanup_fdset_free_ FDSet *fds = NULL;
3549 _cleanup_fclose_ FILE *f = NULL;
3550 int r;
3551
3552 assert(m);
3553
3554 r = manager_open_serialization(m, &f);
3555 if (r < 0)
3556 return log_error_errno(r, "Failed to create serialization file: %m");
3557
3558 fds = fdset_new();
3559 if (!fds)
3560 return log_oom();
3561
3562 /* We are officially in reload mode from here on. */
3563 reloading = manager_reloading_start(m);
3564
3565 r = manager_serialize(m, f, fds, false);
3566 if (r < 0)
3567 return r;
3568
3569 if (fseeko(f, 0, SEEK_SET) < 0)
3570 return log_error_errno(errno, "Failed to seek to beginning of serialization: %m");
3571
3572 /* 💀 This is the point of no return, from here on there is no way back. 💀 */
3573 reloading = NULL;
3574
3575 bus_manager_send_reloading(m, true);
3576
3577 /* Start by flushing out all jobs and units, all generated units, all runtime environments, all dynamic users
3578 * and everything else that is worth flushing out. We'll get it all back from the serialization — if we need
3579 * it. */
3580
3581 manager_clear_jobs_and_units(m);
3582 lookup_paths_flush_generator(&m->lookup_paths);
3583 lookup_paths_done(&m->lookup_paths);
3584 exec_shared_runtime_vacuum(m);
3585 dynamic_user_vacuum(m, false);
3586 m->uid_refs = hashmap_free(m->uid_refs);
3587 m->gid_refs = hashmap_free(m->gid_refs);
3588
3589 r = lookup_paths_init_or_warn(&m->lookup_paths, m->runtime_scope, 0, NULL);
3590 if (r < 0)
3591 return r;
3592
3593 (void) manager_run_environment_generators(m);
3594 (void) manager_run_generators(m);
3595
3596 lookup_paths_log(&m->lookup_paths);
3597
3598 /* We flushed out generated files, for which we don't watch mtime, so we should flush the old map. */
3599 manager_free_unit_name_maps(m);
3600 m->unit_file_state_outdated = false;
3601
3602 /* First, enumerate what we can from kernel and suchlike */
3603 manager_enumerate_perpetual(m);
3604 manager_enumerate(m);
3605
3606 /* Second, deserialize our stored data */
3607 r = manager_deserialize(m, f, fds);
3608 if (r < 0)
3609 log_warning_errno(r, "Deserialization failed, proceeding anyway: %m");
3610
3611 /* We don't need the serialization anymore */
3612 f = safe_fclose(f);
3613
3614 /* Re-register notify_fd as event source, and set up other sockets/communication channels we might need */
3615 (void) manager_setup_notify(m);
3616 (void) manager_setup_cgroups_agent(m);
3617 (void) manager_setup_user_lookup_fd(m);
3618
3619 /* Third, fire things up! */
3620 manager_coldplug(m);
3621
3622 /* Clean up runtime objects no longer referenced */
3623 manager_vacuum(m);
3624
3625 /* Clean up deserialized tracked clients */
3626 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
3627
3628 /* Consider the reload process complete now. */
3629 assert(m->n_reloading > 0);
3630 m->n_reloading--;
3631
3632 manager_ready(m);
3633
3634 m->send_reloading_done = true;
3635 return 0;
3636 }
3637
3638 void manager_reset_failed(Manager *m) {
3639 Unit *u;
3640
3641 assert(m);
3642
3643 HASHMAP_FOREACH(u, m->units)
3644 unit_reset_failed(u);
3645 }
3646
3647 bool manager_unit_inactive_or_pending(Manager *m, const char *name) {
3648 Unit *u;
3649
3650 assert(m);
3651 assert(name);
3652
3653 /* Returns true if the unit is inactive or going down */
3654 u = manager_get_unit(m, name);
3655 if (!u)
3656 return true;
3657
3658 return unit_inactive_or_pending(u);
3659 }
3660
3661 static void log_taint_string(Manager *m) {
3662 _cleanup_free_ char *taint = NULL;
3663
3664 assert(m);
3665
3666 if (MANAGER_IS_USER(m) || m->taint_logged)
3667 return;
3668
3669 m->taint_logged = true; /* only check for taint once */
3670
3671 taint = manager_taint_string(m);
3672 if (isempty(taint))
3673 return;
3674
3675 log_struct(LOG_NOTICE,
3676 LOG_MESSAGE("System is tainted: %s", taint),
3677 "TAINT=%s", taint,
3678 "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR);
3679 }
3680
3681 static void manager_notify_finished(Manager *m) {
3682 usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;
3683
3684 if (MANAGER_IS_TEST_RUN(m))
3685 return;
3686
3687 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
3688 char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")]
3689 = {};
3690 char *p = buf;
3691 size_t size = sizeof buf;
3692
3693 /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
3694 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
3695 * negative values. */
3696
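/* Illustrative example (hypothetical values): if the firmware timestamp is 4s and the
 * loader timestamp 1s (both counted backwards from kernel init at 0), and we finish at
 * 10s, this yields firmware = 4s - 1s = 3s, loader = 1s, total = 4s + 10s = 14s. */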
3697 firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
3698 loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3699 userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3700 total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;
3701
3702 if (firmware_usec > 0)
3703 size = strpcpyf(&p, size, "%s (firmware) + ", FORMAT_TIMESPAN(firmware_usec, USEC_PER_MSEC));
3704 if (loader_usec > 0)
3705 size = strpcpyf(&p, size, "%s (loader) + ", FORMAT_TIMESPAN(loader_usec, USEC_PER_MSEC));
3706
3707 if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {
3708
3709 /* The initrd case on bare-metal */
3710 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3711 initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;
3712
3713 log_struct(LOG_INFO,
3714 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3715 "KERNEL_USEC="USEC_FMT, kernel_usec,
3716 "INITRD_USEC="USEC_FMT, initrd_usec,
3717 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3718 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.",
3719 buf,
3720 FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
3721 FORMAT_TIMESPAN(initrd_usec, USEC_PER_MSEC),
3722 FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
3723 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
3724 } else {
3725 /* The initrd-less case on bare-metal */
3726
3727 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3728 initrd_usec = 0;
3729
3730 log_struct(LOG_INFO,
3731 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3732 "KERNEL_USEC="USEC_FMT, kernel_usec,
3733 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3734 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.",
3735 buf,
3736 FORMAT_TIMESPAN(kernel_usec, USEC_PER_MSEC),
3737 FORMAT_TIMESPAN(userspace_usec, USEC_PER_MSEC),
3738 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
3739 }
3740 } else {
3741 /* The container and --user case */
3742 firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
3743 total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3744
3745 log_struct(LOG_INFO,
3746 "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR,
3747 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3748 LOG_MESSAGE("Startup finished in %s.",
3749 FORMAT_TIMESPAN(total_usec, USEC_PER_MSEC)));
3750 }
3751
3752 bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);
3753
3754 log_taint_string(m);
3755 }
3756
3757 static void manager_send_ready_user_scope(Manager *m) {
3758 int r;
3759
3760 assert(m);
3761
3762 /* We send READY=1 on reaching basic.target only when running in --user mode. */
3763 if (!MANAGER_IS_USER(m) || m->ready_sent)
3764 return;
3765
3766 r = sd_notify(/* unset_environment= */ false,
3767 "READY=1\n"
3768 "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
3769 if (r < 0)
3770 log_warning_errno(r, "Failed to send readiness notification, ignoring: %m");
3771
3772 m->ready_sent = true;
3773 m->status_ready = false;
3774 }
3775
3776 static void manager_send_ready_system_scope(Manager *m) {
3777 int r;
3778
3779 assert(m);
3780
3781 if (!MANAGER_IS_SYSTEM(m))
3782 return;
3783
3784 /* Skip the notification if nothing changed. */
3785 if (m->ready_sent && m->status_ready)
3786 return;
3787
3788 r = sd_notify(/* unset_environment= */ false,
3789 "READY=1\n"
3790 "STATUS=Ready.");
3791 if (r < 0)
3792 log_full_errno(m->ready_sent ? LOG_DEBUG : LOG_WARNING, r,
3793 "Failed to send readiness notification, ignoring: %m");
3794
3795 m->ready_sent = m->status_ready = true;
3796 }
3797
3798 static void manager_check_basic_target(Manager *m) {
3799 Unit *u;
3800
3801 assert(m);
3802
3803 /* Small shortcut: nothing to do if readiness was already sent and the taint string logged */
3804 if (m->ready_sent && m->taint_logged)
3805 return;
3806
3807 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3808 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3809 return;
3810
3811 /* For user managers, send out READY=1 as soon as we reach basic.target */
3812 manager_send_ready_user_scope(m);
3813
3814 /* Log the taint string as soon as we reach basic.target */
3815 log_taint_string(m);
3816 }
3817
3818 void manager_check_finished(Manager *m) {
3819 assert(m);
3820
3821 if (MANAGER_IS_RELOADING(m))
3822 return;
3823
3824 /* Verify that we have entered the event loop already, and not left it again. */
3825 if (!MANAGER_IS_RUNNING(m))
3826 return;
3827
3828 manager_check_basic_target(m);
3829
3830 if (!hashmap_isempty(m->jobs)) {
3831 if (m->jobs_in_progress_event_source)
3832 /* Ignore any failure, this is only for feedback */
3833 (void) sd_event_source_set_time(m->jobs_in_progress_event_source,
3834 manager_watch_jobs_next_time(m));
3835 return;
3836 }
3837
3838 /* The jobs hashmap tends to grow a lot during boot, and then it's not reused until shutdown. Let's
3839 * kill the hashmap if it is relatively large. */
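/* E.g. with 1000 loaded units, a jobs hashmap still holding more than 100 buckets after
 * boot is considered oversized here; it will simply be reallocated when jobs are queued
 * again. */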
3840 if (hashmap_buckets(m->jobs) > hashmap_size(m->units) / 10)
3841 m->jobs = hashmap_free(m->jobs);
3842
3843 manager_send_ready_system_scope(m);
3844
3845 /* Notify Type=idle units that we are done now */
3846 manager_close_idle_pipe(m);
3847
3848 if (MANAGER_IS_FINISHED(m))
3849 return;
3850
3851 manager_flip_auto_status(m, false, "boot finished");
3852
3853 /* Turn off confirm spawn now */
3854 m->confirm_spawn = NULL;
3855
3856 /* No need to update ask password status when we're going non-interactive */
3857 manager_close_ask_password(m);
3858
3859 /* This is no longer the first boot */
3860 manager_set_first_boot(m, false);
3861
3862 dual_timestamp_now(m->timestamps + MANAGER_TIMESTAMP_FINISH);
3863
3864 manager_notify_finished(m);
3865
3866 manager_invalidate_startup_units(m);
3867 }
3868
3869 void manager_send_reloading(Manager *m) {
3870 assert(m);
3871
3872 /* Let whoever invoked us know that we are now reloading */
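/* The MONOTONIC_USEC= timestamp lets our own supervisor pair this RELOADING=1 with the
 * READY=1 we send once we are done again (cf. the Type=notify-reload protocol). */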
3873 (void) sd_notifyf(/* unset_environment= */ false,
3874 "RELOADING=1\n"
3875 "MONOTONIC_USEC=" USEC_FMT "\n", now(CLOCK_MONOTONIC));
3876
3877 /* And ensure that we'll send READY=1 again as soon as we are ready again */
3878 m->ready_sent = false;
3879 }
3880
3881 static bool generator_path_any(const char* const* paths) {
3882 bool found = false;
3883
3884 /* Optimize by skipping the whole process if no generators are found,
3885 * so that we don't create output directories needlessly. */
3886 STRV_FOREACH(path, paths)
3887 if (access(*path, F_OK) == 0)
3888 found = true;
3889 else if (errno != ENOENT)
3890 log_warning_errno(errno, "Failed to check generator directory %s: %m", *path);
3891
3892 return found;
3893 }
3894
3895 static int manager_run_environment_generators(Manager *m) {
3896 char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
3897 _cleanup_strv_free_ char **paths = NULL;
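/* gather_environment (declared in exec-util.h) provides the callbacks for
 * execute_directories(): the generate and collect phases accumulate the generators'
 * stdout in 'tmp', while the consume phase merges the result into
 * m->transient_environment. */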
3898 void* args[] = {
3899 [STDOUT_GENERATE] = &tmp,
3900 [STDOUT_COLLECT] = &tmp,
3901 [STDOUT_CONSUME] = &m->transient_environment,
3902 };
3903 int r;
3904
3905 if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))
3906 return 0;
3907
3908 paths = env_generator_binary_paths(m->runtime_scope);
3909 if (!paths)
3910 return log_oom();
3911
3912 if (!generator_path_any((const char* const*) paths))
3913 return 0;
3914
3915 WITH_UMASK(0022)
3916 r = execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC, gather_environment,
3917 args, NULL, m->transient_environment,
3918 EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID);
3919 return r;
3920 }
3921
3922 static int build_generator_environment(Manager *m, char ***ret) {
3923 _cleanup_strv_free_ char **nl = NULL;
3924 Virtualization v;
3925 ConfidentialVirtualization cv;
3926 int r;
3927
3928 assert(m);
3929 assert(ret);
3930
3931 /* Generators oftentimes want to know some basic facts about the environment they run in, in order to
3932 * adjust the generated units accordingly. Let's pass down some bits of information that are easy for us to
3933 * determine (but a bit harder for generator scripts to determine), as environment variables. */
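/* For example, a generator running on a KVM guest during first boot might see (values
 * are illustrative): SYSTEMD_SCOPE=system SYSTEMD_IN_INITRD=0 SYSTEMD_FIRST_BOOT=1
 * SYSTEMD_VIRTUALIZATION=vm:kvm SYSTEMD_ARCHITECTURE=x86-64. */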
3934
3935 nl = strv_copy(m->transient_environment);
3936 if (!nl)
3937 return -ENOMEM;
3938
3939 r = strv_env_assign(&nl, "SYSTEMD_SCOPE", runtime_scope_to_string(m->runtime_scope));
3940 if (r < 0)
3941 return r;
3942
3943 if (MANAGER_IS_SYSTEM(m)) {
3944 /* Note that $SYSTEMD_IN_INITRD may be used to override the initrd detection in much of our
3945 * codebase. This is hence more than purely informational. It will shortcut detection of the
3946 * initrd state if generators invoke our own tools. But that's OK, as it would come to the
3947 * same results (hopefully). */
3948 r = strv_env_assign(&nl, "SYSTEMD_IN_INITRD", one_zero(in_initrd()));
3949 if (r < 0)
3950 return r;
3951
3952 if (m->first_boot >= 0) {
3953 r = strv_env_assign(&nl, "SYSTEMD_FIRST_BOOT", one_zero(m->first_boot));
3954 if (r < 0)
3955 return r;
3956 }
3957 }
3958
3959 v = detect_virtualization();
3960 if (v < 0)
3961 log_debug_errno(v, "Failed to detect virtualization, ignoring: %m");
3962 else if (v > 0) {
3963 const char *s;
3964
3965 s = strjoina(VIRTUALIZATION_IS_VM(v) ? "vm:" :
3966 VIRTUALIZATION_IS_CONTAINER(v) ? "container:" : ":",
3967 virtualization_to_string(v));
3968
3969 r = strv_env_assign(&nl, "SYSTEMD_VIRTUALIZATION", s);
3970 if (r < 0)
3971 return r;
3972 }
3973
3974 cv = detect_confidential_virtualization();
3975 if (cv < 0)
3976 log_debug_errno(cv, "Failed to detect confidential virtualization, ignoring: %m");
3977 else if (cv > 0) {
3978 r = strv_env_assign(&nl, "SYSTEMD_CONFIDENTIAL_VIRTUALIZATION", confidential_virtualization_to_string(cv));
3979 if (r < 0)
3980 return r;
3981 }
3982
3983 r = strv_env_assign(&nl, "SYSTEMD_ARCHITECTURE", architecture_to_string(uname_architecture()));
3984 if (r < 0)
3985 return r;
3986
3987 *ret = TAKE_PTR(nl);
3988 return 0;
3989 }
3990
3991 static int manager_execute_generators(Manager *m, char **paths, bool remount_ro) {
3992 _cleanup_strv_free_ char **ge = NULL;
3993 const char *argv[] = {
3994 NULL, /* Leave this empty, execute_directories() will fill something in */
3995 m->lookup_paths.generator,
3996 m->lookup_paths.generator_early,
3997 m->lookup_paths.generator_late,
3998 NULL,
3999 };
4000 int r;
4001
4002 r = build_generator_environment(m, &ge);
4003 if (r < 0)
4004 return log_error_errno(r, "Failed to build generator environment: %m");
4005
4006 if (remount_ro) {
4007 /* Remount most of the filesystem tree read-only. We leave /sys/ as-is, because our code
4008 * checks whether it is read-only to detect containerized execution environments. We leave
4009 * /run/ as-is too, because that's where our output goes. We also leave /proc/ and /dev/shm/
4010 * because they're API, and /tmp/ that safe_fork() mounted for us.
4011 */
4012 r = bind_remount_recursive("/", MS_RDONLY, MS_RDONLY,
4013 STRV_MAKE("/sys", "/run", "/proc", "/dev/shm", "/tmp"));
4014 if (r < 0)
4015 log_warning_errno(r, "Read-only bind remount failed, ignoring: %m");
4016 }
4017
4018 BLOCK_WITH_UMASK(0022);
4019 return execute_directories(
4020 (const char* const*) paths,
4021 DEFAULT_TIMEOUT_USEC,
4022 /* callbacks= */ NULL, /* callback_args= */ NULL,
4023 (char**) argv,
4024 ge,
4025 EXEC_DIR_PARALLEL | EXEC_DIR_IGNORE_ERRORS | EXEC_DIR_SET_SYSTEMD_EXEC_PID);
4026 }
4027
4028 static int manager_run_generators(Manager *m) {
4029 ForkFlags flags = FORK_RESET_SIGNALS | FORK_WAIT | FORK_NEW_MOUNTNS | FORK_MOUNTNS_SLAVE;
4030 _cleanup_strv_free_ char **paths = NULL;
4031 int r;
4032
4033 assert(m);
4034
4035 if (MANAGER_IS_TEST_RUN(m) && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))
4036 return 0;
4037
4038 paths = generator_binary_paths(m->runtime_scope);
4039 if (!paths)
4040 return log_oom();
4041
4042 if (!generator_path_any((const char* const*) paths))
4043 return 0;
4044
4045 r = lookup_paths_mkdir_generator(&m->lookup_paths);
4046 if (r < 0) {
4047 log_error_errno(r, "Failed to create generator directories: %m");
4048 goto finish;
4049 }
4050
4051 /* If we are the system manager, we fork and invoke the generators in a sanitized mount namespace. If
4052 * we are the user manager, let's just execute the generators directly. We might not have the
4053 * necessary privileges, and the system manager has already mounted /tmp/ and everything else for us.
4054 */
4055 if (MANAGER_IS_USER(m)) {
4056 r = manager_execute_generators(m, paths, /* remount_ro= */ false);
4057 goto finish;
4058 }
4059
4060 /* On some systems /tmp/ doesn't exist, and on some other systems we cannot create it at all. Avoid
4061 * trying to mount a private tmpfs on it, as there's no one-size-fits-all solution. */
4062 if (is_dir("/tmp", /* follow= */ false) > 0)
4063 flags |= FORK_PRIVATE_TMP;
4064
4065 r = safe_fork("(sd-gens)", flags, NULL);
4066 if (r == 0) {
4067 r = manager_execute_generators(m, paths, /* remount_ro= */ true);
4068 _exit(r >= 0 ? EXIT_SUCCESS : EXIT_FAILURE);
4069 }
4070 if (r < 0) {
4071 if (!ERRNO_IS_PRIVILEGE(r) && r != -EINVAL) {
4072 log_error_errno(r, "Failed to fork off sandboxing environment for executing generators: %m");
4073 goto finish;
4074 }
4075
4076 /* Failed to fork with a new mount namespace? Maybe we are running in a container
4077 * environment with seccomp, or without the necessary capability.
4078 *
4079 * We also accept -EINVAL, to support running without CLONE_NEWNS.
4080 *
4081 * Also, when running on a non-native userland architecture via systemd-nspawn and
4082 * the qemu-user-static emulator, clone() with CLONE_NEWNS fails with EINVAL, see
4083 * https://github.com/systemd/systemd/issues/28901.
4084 */
4085 log_debug_errno(r,
4086 "Failed to fork off sandboxing environment for executing generators. "
4087 "Falling back to execute generators without sandboxing: %m");
4088 r = manager_execute_generators(m, paths, /* remount_ro= */ false);
4089 }
4090
4091 finish:
4092 lookup_paths_trim_generator(&m->lookup_paths);
4093 return r;
4094 }
4095
4096 int manager_transient_environment_add(Manager *m, char **plus) {
4097 char **a;
4098
4099 assert(m);
4100
4101 if (strv_isempty(plus))
4102 return 0;
4103
4104 a = strv_env_merge(m->transient_environment, plus);
4105 if (!a)
4106 return log_oom();
4107
4108 sanitize_environment(a);
4109
4110 return strv_free_and_replace(m->transient_environment, a);
4111 }
4112
4113 int manager_client_environment_modify(
4114 Manager *m,
4115 char **minus,
4116 char **plus) {
4117
4118 char **a = NULL, **b = NULL, **l;
4119
4120 assert(m);
4121
4122 if (strv_isempty(minus) && strv_isempty(plus))
4123 return 0;
4124
4125 l = m->client_environment;
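/* 'l' always points to the current version of the list: it starts out as the stored
 * environment, becomes 'a' after the deletions and 'b' after the additions. The frees
 * below release whichever intermediate lists did not end up as the final 'l'. */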
4126
4127 if (!strv_isempty(minus)) {
4128 a = strv_env_delete(l, 1, minus);
4129 if (!a)
4130 return -ENOMEM;
4131
4132 l = a;
4133 }
4134
4135 if (!strv_isempty(plus)) {
4136 b = strv_env_merge(l, plus);
4137 if (!b) {
4138 strv_free(a);
4139 return -ENOMEM;
4140 }
4141
4142 l = b;
4143 }
4144
4145 if (m->client_environment != l)
4146 strv_free(m->client_environment);
4147
4148 if (a != l)
4149 strv_free(a);
4150 if (b != l)
4151 strv_free(b);
4152
4153 m->client_environment = sanitize_environment(l);
4154 return 0;
4155 }
4156
4157 int manager_get_effective_environment(Manager *m, char ***ret) {
4158 char **l;
4159
4160 assert(m);
4161 assert(ret);
4162
4163 l = strv_env_merge(m->transient_environment, m->client_environment);
4164 if (!l)
4165 return -ENOMEM;
4166
4167 *ret = l;
4168 return 0;
4169 }
4170
4171 int manager_set_unit_defaults(Manager *m, const UnitDefaults *defaults) {
4172 _cleanup_free_ char *label = NULL;
4173 struct rlimit *rlimit[_RLIMIT_MAX];
4174 int r;
4175
4176 assert(m);
4177 assert(defaults);
4178
4179 if (streq_ptr(defaults->smack_process_label, "/"))
4180 label = NULL;
4181 else {
4182 const char *l = defaults->smack_process_label;
4183 #ifdef SMACK_DEFAULT_PROCESS_LABEL
4184 if (!l)
4185 l = SMACK_DEFAULT_PROCESS_LABEL;
4186 #endif
4187 if (l) {
4188 label = strdup(l);
4189 if (!label)
4190 return -ENOMEM;
4191 } else
4192 label = NULL;
4193 }
4194
4195 r = rlimit_copy_all(rlimit, defaults->rlimit);
4196 if (r < 0)
4197 return r;
4198
4199 m->defaults.std_output = defaults->std_output;
4200 m->defaults.std_error = defaults->std_error;
4201
4202 m->defaults.restart_usec = defaults->restart_usec;
4203 m->defaults.timeout_start_usec = defaults->timeout_start_usec;
4204 m->defaults.timeout_stop_usec = defaults->timeout_stop_usec;
4205 m->defaults.timeout_abort_usec = defaults->timeout_abort_usec;
4206 m->defaults.timeout_abort_set = defaults->timeout_abort_set;
4207 m->defaults.device_timeout_usec = defaults->device_timeout_usec;
4208
4209 m->defaults.start_limit_interval = defaults->start_limit_interval;
4210 m->defaults.start_limit_burst = defaults->start_limit_burst;
4211
4212 m->defaults.cpu_accounting = defaults->cpu_accounting;
4213 m->defaults.memory_accounting = defaults->memory_accounting;
4214 m->defaults.io_accounting = defaults->io_accounting;
4215 m->defaults.blockio_accounting = defaults->blockio_accounting;
4216 m->defaults.tasks_accounting = defaults->tasks_accounting;
4217 m->defaults.ip_accounting = defaults->ip_accounting;
4218
4219 m->defaults.tasks_max = defaults->tasks_max;
4220 m->defaults.timer_accuracy_usec = defaults->timer_accuracy_usec;
4221
4222 m->defaults.oom_policy = defaults->oom_policy;
4223 m->defaults.oom_score_adjust = defaults->oom_score_adjust;
4224 m->defaults.oom_score_adjust_set = defaults->oom_score_adjust_set;
4225
4226 m->defaults.memory_pressure_watch = defaults->memory_pressure_watch;
4227 m->defaults.memory_pressure_threshold_usec = defaults->memory_pressure_threshold_usec;
4228
4229 free_and_replace(m->defaults.smack_process_label, label);
4230 rlimit_free_all(m->defaults.rlimit);
4231 memcpy(m->defaults.rlimit, rlimit, sizeof(struct rlimit*) * _RLIMIT_MAX);
4232
4233 return 0;
4234 }
4235
4236 void manager_recheck_dbus(Manager *m) {
4237 assert(m);
4238
4239 /* Connects to the bus if the dbus service and socket are running. If we are running in user mode
4240 * this is all it does. In system mode we'll also connect to the system bus (which will most likely
4241 * just reuse the connection of the API bus). That's because the system bus after all runs as service
4242 * of the system instance, while in the user instance we can assume it's already there. */
4243
4244 if (MANAGER_IS_RELOADING(m))
4245 return; /* don't check while we are reloading… */
4246
4247 if (manager_dbus_is_running(m, false)) {
4248 (void) bus_init_api(m);
4249
4250 if (MANAGER_IS_SYSTEM(m))
4251 (void) bus_init_system(m);
4252 } else {
4253 (void) bus_done_api(m);
4254
4255 if (MANAGER_IS_SYSTEM(m))
4256 (void) bus_done_system(m);
4257 }
4258 }
4259
4260 static bool manager_journal_is_running(Manager *m) {
4261 Unit *u;
4262
4263 assert(m);
4264
4265 if (MANAGER_IS_TEST_RUN(m))
4266 return false;
4267
4268 /* If we are the user manager we can safely assume that the journal is up */
4269 if (!MANAGER_IS_SYSTEM(m))
4270 return true;
4271
4272 /* Check that the socket is not only up, but in RUNNING state */
4273 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
4274 if (!u)
4275 return false;
4276 if (SOCKET(u)->state != SOCKET_RUNNING)
4277 return false;
4278
4279 /* Similarly, check whether the daemon itself is fully up, too */
4280 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
4281 if (!u)
4282 return false;
4283 if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING))
4284 return false;
4285
4286 return true;
4287 }
4288
4289 void disable_printk_ratelimit(void) {
4290 /* Disable kernel's printk ratelimit.
4291 *
4292 * Logging to /dev/kmsg is most useful during early boot and shutdown, where normal logging
4293 * mechanisms are not available. The semantics of this sysctl are such that any kernel command-line
4294 * setting takes precedence. */
4295 int r;
4296
4297 r = sysctl_write("kernel/printk_devkmsg", "on");
4298 if (r < 0)
4299 log_debug_errno(r, "Failed to set sysctl kernel.printk_devkmsg=on: %m");
4300 }
4301
4302 void manager_recheck_journal(Manager *m) {
4303
4304 assert(m);
4305
4306 /* Don't bother with this unless we are in the special situation of being PID 1 */
4307 if (getpid_cached() != 1)
4308 return;
4309
4310 /* Don't check this while we are reloading, things might still change */
4311 if (MANAGER_IS_RELOADING(m))
4312 return;
4313
4314 /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If
4315 * the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we
4316 * might trigger an activation ourselves we can't fulfill. */
4317 log_set_prohibit_ipc(!manager_journal_is_running(m));
4318 log_open();
4319 }
4320
4321 static ShowStatus manager_get_show_status(Manager *m) {
4322 assert(m);
4323
4324 if (MANAGER_IS_USER(m))
4325 return _SHOW_STATUS_INVALID;
4326
4327 if (m->show_status_overridden != _SHOW_STATUS_INVALID)
4328 return m->show_status_overridden;
4329
4330 return m->show_status;
4331 }
4332
4333 bool manager_get_show_status_on(Manager *m) {
4334 assert(m);
4335
4336 return show_status_on(manager_get_show_status(m));
4337 }
4338
4339 static void set_show_status_marker(bool b) {
4340 if (b)
4341 (void) touch("/run/systemd/show-status");
4342 else
4343 (void) unlink("/run/systemd/show-status");
4344 }
4345
4346 void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason) {
4347 assert(m);
4348 assert(reason);
4349 assert(mode >= 0 && mode < _SHOW_STATUS_MAX);
4350
4351 if (MANAGER_IS_USER(m))
4352 return;
4353
4354 if (mode == m->show_status)
4355 return;
4356
4357 if (m->show_status_overridden == _SHOW_STATUS_INVALID) {
4358 bool enabled;
4359
4360 enabled = show_status_on(mode);
4361 log_debug("%s (%s) showing of status (%s).",
4362 enabled ? "Enabling" : "Disabling",
4363 strna(show_status_to_string(mode)),
4364 reason);
4365
4366 set_show_status_marker(enabled);
4367 }
4368
4369 m->show_status = mode;
4370 }
4371
4372 void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason) {
4373 assert(m);
4374 assert(mode < _SHOW_STATUS_MAX);
4375
4376 if (MANAGER_IS_USER(m))
4377 return;
4378
4379 if (mode == m->show_status_overridden)
4380 return;
4381
4382 m->show_status_overridden = mode;
4383
4384 if (mode == _SHOW_STATUS_INVALID)
4385 mode = m->show_status;
4386
4387 log_debug("%s (%s) showing of status (%s).",
4388 m->show_status_overridden != _SHOW_STATUS_INVALID ? "Overriding" : "Restoring",
4389 strna(show_status_to_string(mode)),
4390 reason);
4391
4392 set_show_status_marker(show_status_on(mode));
4393 }
4394
4395 const char *manager_get_confirm_spawn(Manager *m) {
4396 static int last_errno = 0;
4397 struct stat st;
4398 int r;
4399
4400 assert(m);
4401
4402 /* Here's the deal: we want to test the validity of the console but don't want
4403 * PID1 to go through the whole console process which might block. But we also
4404 * want to warn the user only once if something is wrong with the console so we
4405 * cannot do the sanity checks after spawning our children. So here we simply do
4406 * really basic tests to hopefully trap common errors.
4407 *
4408 * If the console suddenly disappears at the time our children really need it,
4409 * then they will simply fail to acquire it and a positive answer will be
4410 * assumed. New children will fall back to /dev/console though.
4411 *
4412 * Note: TTYs are devices that can come and go any time, and frequently aren't
4413 * available yet during early boot (consider a USB rs232 dongle...). If for any
4414 * reason the configured console is not ready, we fall back to the default
4415 * console. */
4416
4417 if (!m->confirm_spawn || path_equal(m->confirm_spawn, "/dev/console"))
4418 return m->confirm_spawn;
4419
4420 if (stat(m->confirm_spawn, &st) < 0) {
4421 r = -errno;
4422 goto fail;
4423 }
4424
4425 if (!S_ISCHR(st.st_mode)) {
4426 r = -ENOTTY;
4427 goto fail;
4428 }
4429
4430 last_errno = 0;
4431 return m->confirm_spawn;
4432
4433 fail:
4434 if (last_errno != r)
4435 last_errno = log_warning_errno(r, "Failed to open %s, using default console: %m", m->confirm_spawn);
4436
4437 return "/dev/console";
4438 }
4439
4440 void manager_set_first_boot(Manager *m, bool b) {
4441 assert(m);
4442
4443 if (!MANAGER_IS_SYSTEM(m))
4444 return;
4445
4446 if (m->first_boot != (int) b) {
4447 if (b)
4448 (void) touch("/run/systemd/first-boot");
4449 else
4450 (void) unlink("/run/systemd/first-boot");
4451 }
4452
4453 m->first_boot = b;
4454 }
4455
4456 void manager_disable_confirm_spawn(void) {
4457 (void) touch("/run/systemd/confirm_spawn_disabled");
4458 }
4459
4460 static bool manager_should_show_status(Manager *m, StatusType type) {
4461 assert(m);
4462
4463 if (!MANAGER_IS_SYSTEM(m))
4464 return false;
4465
4466 if (m->no_console_output)
4467 return false;
4468
4469 if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))
4470 return false;
4471
4472 /* If we cannot find out the status properly, just proceed. */
4473 if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)
4474 return false;
4475
4476 if (type == STATUS_TYPE_NOTICE && m->show_status != SHOW_STATUS_NO)
4477 return true;
4478
4479 return manager_get_show_status_on(m);
4480 }
4481
4482 void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
4483 va_list ap;
4484
4485 /* If m is NULL, assume we're after shutdown and let the messages through. */
4486
4487 if (m && !manager_should_show_status(m, type))
4488 return;
4489
4490 /* XXX We should totally drop the check for ephemeral here
4491 * and thus effectively make 'Type=idle' pointless. */
4492 if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)
4493 return;
4494
4495 va_start(ap, format);
4496 status_vprintf(status, SHOW_STATUS_ELLIPSIZE|(type == STATUS_TYPE_EPHEMERAL ? SHOW_STATUS_EPHEMERAL : 0), format, ap);
4497 va_end(ap);
4498 }
4499
4500 Set* manager_get_units_needing_mounts_for(Manager *m, const char *path, UnitMountDependencyType t) {
4501 assert(m);
4502 assert(path);
4503 assert(t >= 0 && t < _UNIT_MOUNT_DEPENDENCY_TYPE_MAX);
4504
4505 if (path_equal(path, "/"))
4506 path = "";
4507
4508 return hashmap_get(m->units_needing_mounts_for[t], path);
4509 }
4510
4511 int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
4512 unsigned size;
4513 int r;
4514
4515 assert(m);
4516 assert(u->manager == m);
4517
4518 size = set_size(m->failed_units);
4519
4520 if (failed) {
4521 r = set_ensure_put(&m->failed_units, NULL, u);
4522 if (r < 0)
4523 return log_oom();
4524 } else
4525 (void) set_remove(m->failed_units, u);
4526
4527 if (set_size(m->failed_units) != size)
4528 bus_manager_send_change_signal(m);
4529
4530 return 0;
4531 }
4532
4533 ManagerState manager_state(Manager *m) {
4534 Unit *u;
4535
4536 assert(m);
4537
4538 /* Is the special shutdown target active or queued? If so, we are in shutdown state */
4539 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
4540 if (u && unit_active_or_pending(u))
4541 return MANAGER_STOPPING;
4542
4543 /* Did we ever finish booting? If not then we are still starting up */
4544 if (!MANAGER_IS_FINISHED(m)) {
4545
4546 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
4547 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
4548 return MANAGER_INITIALIZING;
4549
4550 return MANAGER_STARTING;
4551 }
4552
4553 if (MANAGER_IS_SYSTEM(m)) {
4554 /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
4555 u = manager_get_unit(m, SPECIAL_RESCUE_TARGET);
4556 if (u && unit_active_or_pending(u))
4557 return MANAGER_MAINTENANCE;
4558
4559 u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET);
4560 if (u && unit_active_or_pending(u))
4561 return MANAGER_MAINTENANCE;
4562 }
4563
4564 /* Are there any failed units? If so, we are in degraded mode */
4565 if (!set_isempty(m->failed_units))
4566 return MANAGER_DEGRADED;
4567
4568 return MANAGER_RUNNING;
4569 }
4570
4571 static void manager_unref_uid_internal(
4572 Hashmap *uid_refs,
4573 uid_t uid,
4574 bool destroy_now,
4575 int (*_clean_ipc)(uid_t uid)) {
4576
4577 uint32_t c, n;
4578
4579 assert(uid_is_valid(uid));
4580 assert(_clean_ipc);
4581
4582 /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the
4583 * assumption that uid_t and gid_t are actually defined the same way, with the same validity rules.
4584 *
4585 * We store a hashmap where the key is the UID/GID and the value is a 32-bit reference counter, whose
4586 * highest bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last
4587 * reference to the UID/GID is dropped. The flag is set to on, once at least one reference from a
4588 * unit where RemoveIPC= is set is added on a UID/GID. It is reset when the UID's/GID's reference
4589 * counter drops to 0 again. */
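/* Illustrative example: with the highest bit used as flag, a UID referenced by two
 * units, one of which has RemoveIPC= set, is stored as 0x80000002, i.e. a counter
 * of 2 with the destroy-IPC bit set. */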
4590
4591 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4592 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4593
4594 if (uid == 0) /* We don't keep track of root, and will never destroy it */
4595 return;
4596
4597 c = PTR_TO_UINT32(hashmap_get(uid_refs, UID_TO_PTR(uid)));
4598
4599 n = c & ~DESTROY_IPC_FLAG;
4600 assert(n > 0);
4601 n--;
4602
4603 if (destroy_now && n == 0) {
4604 hashmap_remove(uid_refs, UID_TO_PTR(uid));
4605
4606 if (c & DESTROY_IPC_FLAG) {
4607 log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
4608 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
4609 uid);
4610 (void) _clean_ipc(uid);
4611 }
4612 } else {
4613 c = n | (c & DESTROY_IPC_FLAG);
4614 assert_se(hashmap_update(uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
4615 }
4616 }
4617
4618 void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
4619 manager_unref_uid_internal(m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
4620 }
4621
4622 void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
4623 manager_unref_uid_internal(m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
4624 }
4625
4626 static int manager_ref_uid_internal(
4627 Hashmap **uid_refs,
4628 uid_t uid,
4629 bool clean_ipc) {
4630
4631 uint32_t c, n;
4632 int r;
4633
4634 assert(uid_refs);
4635 assert(uid_is_valid(uid));
4636
4637 /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the
4638 * assumption that uid_t and gid_t are actually defined the same way, with the same validity
4639 * rules. */
4640
4641 assert_cc(sizeof(uid_t) == sizeof(gid_t));
4642 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
4643
4644 if (uid == 0) /* We don't keep track of root, and will never destroy it */
4645 return 0;
4646
4647 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
4648 if (r < 0)
4649 return r;
4650
4651 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
4652
4653 n = c & ~DESTROY_IPC_FLAG;
4654 n++;
4655
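/* The counter only occupies the 31 low bits; if the increment carried over into the
 * flag bit we would corrupt the RemoveIPC= marker, hence treat that as overflow. */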
4656 if (n & DESTROY_IPC_FLAG) /* check for overflow */
4657 return -EOVERFLOW;
4658
4659 c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);
4660
4661 return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
4662 }
4663
4664 int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
4665 return manager_ref_uid_internal(&m->uid_refs, uid, clean_ipc);
4666 }
4667
4668 int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
4669 return manager_ref_uid_internal(&m->gid_refs, (uid_t) gid, clean_ipc);
4670 }
4671
4672 static void manager_vacuum_uid_refs_internal(
4673 Hashmap *uid_refs,
4674 int (*_clean_ipc)(uid_t uid)) {
4675
4676 void *p, *k;
4677
4678 assert(_clean_ipc);
4679
4680 HASHMAP_FOREACH_KEY(p, k, uid_refs) {
4681 uint32_t c, n;
4682 uid_t uid;
4683
4684 uid = PTR_TO_UID(k);
4685 c = PTR_TO_UINT32(p);
4686
4687 n = c & ~DESTROY_IPC_FLAG;
4688 if (n > 0)
4689 continue;
4690
4691 if (c & DESTROY_IPC_FLAG) {
4692 log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
4693 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
4694 uid);
4695 (void) _clean_ipc(uid);
4696 }
4697
4698 assert_se(hashmap_remove(uid_refs, k) == p);
4699 }
4700 }
4701
4702 static void manager_vacuum_uid_refs(Manager *m) {
4703 manager_vacuum_uid_refs_internal(m->uid_refs, clean_ipc_by_uid);
4704 }
4705
4706 static void manager_vacuum_gid_refs(Manager *m) {
4707 manager_vacuum_uid_refs_internal(m->gid_refs, clean_ipc_by_gid);
4708 }
4709
4710 static void manager_vacuum(Manager *m) {
4711 assert(m);
4712
4713 /* Release any dynamic users no longer referenced */
4714 dynamic_user_vacuum(m, true);
4715
4716 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
4717 manager_vacuum_uid_refs(m);
4718 manager_vacuum_gid_refs(m);
4719
4720 /* Release any runtimes no longer referenced */
4721 exec_shared_runtime_vacuum(m);
4722 }
4723
4724 int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
4725 struct buffer {
4726 uid_t uid;
4727 gid_t gid;
4728 char unit_name[UNIT_NAME_MAX+1];
4729 } _packed_ buffer;
4730
4731 Manager *m = userdata;
4732 ssize_t l;
4733 size_t n;
4734 Unit *u;
4735
4736 assert_se(source);
4737 assert_se(m);
4738
4739 /* Invoked whenever a child process succeeded in resolving its user/group to use and sent us the
4740 * resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that
4741 * it can add a reference to the UID/GID so that it can destroy the UID/GID's IPC objects when the
4742 * reference counter drops to 0. */
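/* The datagram payload is the packed struct above: uid, gid, then the unit name without
 * a trailing NUL byte; e.g. for "foo.service" with uid_t/gid_t each 4 bytes wide (the
 * usual case on Linux) that's a 4 + 4 + 11 = 19 byte message. */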
4743
4744 l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
4745 if (l < 0) {
4746 if (ERRNO_IS_TRANSIENT(errno))
4747 return 0;
4748
4749 return log_error_errno(errno, "Failed to read from user lookup fd: %m");
4750 }
4751
4752 if ((size_t) l <= offsetof(struct buffer, unit_name)) {
4753 log_warning("Received too short user lookup message, ignoring.");
4754 return 0;
4755 }
4756
4757 if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
4758 log_warning("Received too long user lookup message, ignoring.");
4759 return 0;
4760 }
4761
4762 if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
4763 log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
4764 return 0;
4765 }
4766
4767 n = (size_t) l - offsetof(struct buffer, unit_name);
4768 if (memchr(buffer.unit_name, 0, n)) {
4769 log_warning("Received lookup message with embedded NUL character, ignoring.");
4770 return 0;
4771 }
4772
4773 buffer.unit_name[n] = 0;
4774 u = manager_get_unit(m, buffer.unit_name);
4775 if (!u) {
4776 log_debug("Got user lookup message but unit doesn't exist, ignoring.");
4777 return 0;
4778 }
4779
4780 log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);
4781
4782 unit_notify_user_lookup(u, buffer.uid, buffer.gid);
4783 return 0;
4784 }
4785
4786 static int short_uid_range(const char *path) {
4787 _cleanup_(uid_range_freep) UIDRange *p = NULL;
4788 int r;
4789
4790 assert(path);
4791
4792 /* Taint systemd if the UID range assigned to this environment doesn't at least cover 0…65534,
4793 * i.e. from root to nobody. */
4794
4795 r = uid_range_load_userns(&p, path);
4796 if (ERRNO_IS_NEG_NOT_SUPPORTED(r))
4797 return false;
4798 if (r < 0)
4799 return log_debug_errno(r, "Failed to load %s: %m", path);
4800
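/* uid_range_covers() takes a start and a count here, so this checks the 65535 UIDs
 * 0…65534, i.e. exactly the root-to-nobody range mentioned above. */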
4801 return !uid_range_covers(p, 0, 65535);
4802 }
4803
4804 char* manager_taint_string(const Manager *m) {
4805 /* Returns a "taint string", e.g. "local-hwclock:var-run-bad". Only things that are detected at
4806 * runtime should be tagged here. For stuff that is known during compilation, emit a warning in the
4807 * configuration phase. */
4808
4809 assert(m);
4810
4811 const char* stage[12] = {};
4812 size_t n = 0;
4813
4814 _cleanup_free_ char *usrbin = NULL;
4815 if (readlink_malloc("/bin", &usrbin) < 0 || !PATH_IN_SET(usrbin, "usr/bin", "/usr/bin"))
4816 stage[n++] = "unmerged-usr";
4817
4818 if (access("/proc/cgroups", F_OK) < 0)
4819 stage[n++] = "cgroups-missing";
4820
4821 if (cg_all_unified() == 0)
4822 stage[n++] = "cgroupsv1";
4823
4824 if (clock_is_localtime(NULL) > 0)
4825 stage[n++] = "local-hwclock";
4826
4827 if (os_release_support_ended(NULL, /* quiet= */ true, NULL) > 0)
4828 stage[n++] = "support-ended";
4829
4830 _cleanup_free_ char *destination = NULL;
4831 if (readlink_malloc("/var/run", &destination) < 0 ||
4832 !PATH_IN_SET(destination, "../run", "/run"))
4833 stage[n++] = "var-run-bad";
4834
4835 _cleanup_free_ char *overflowuid = NULL, *overflowgid = NULL;
4836 if (read_one_line_file("/proc/sys/kernel/overflowuid", &overflowuid) >= 0 &&
4837 !streq(overflowuid, "65534"))
4838 stage[n++] = "overflowuid-not-65534";
4839 if (read_one_line_file("/proc/sys/kernel/overflowgid", &overflowgid) >= 0 &&
4840 !streq(overflowgid, "65534"))
4841 stage[n++] = "overflowgid-not-65534";
4842
4843 struct utsname uts;
4844 assert_se(uname(&uts) >= 0);
4845 if (strverscmp_improved(uts.release, KERNEL_BASELINE_VERSION) < 0)
4846 stage[n++] = "old-kernel";
4847
4848 if (short_uid_range("/proc/self/uid_map") > 0)
4849 stage[n++] = "short-uid-range";
4850 if (short_uid_range("/proc/self/gid_map") > 0)
4851 stage[n++] = "short-gid-range";
4852
4853 assert(n < ELEMENTSOF(stage) - 1); /* One extra for NULL terminator */
4854
4855 return strv_join((char**) stage, ":");
4856 }
4857
4858 void manager_ref_console(Manager *m) {
4859 assert(m);
4860
4861 m->n_on_console++;
4862 }
4863
4864 void manager_unref_console(Manager *m) {
4865
4866 assert(m->n_on_console > 0);
4867 m->n_on_console--;
4868
4869 if (m->n_on_console == 0)
4870 m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
4871 }
4872
4873 void manager_override_log_level(Manager *m, int level) {
4874 _cleanup_free_ char *s = NULL;
4875 assert(m);
4876
4877 if (!m->log_level_overridden) {
4878 m->original_log_level = log_get_max_level();
4879 m->log_level_overridden = true;
4880 }
4881
4882 (void) log_level_to_string_alloc(level, &s);
4883 log_info("Setting log level to %s.", strna(s));
4884
4885 log_set_max_level(level);
4886 }
4887
4888 void manager_restore_original_log_level(Manager *m) {
4889 _cleanup_free_ char *s = NULL;
4890 assert(m);
4891
4892 if (!m->log_level_overridden)
4893 return;
4894
4895 (void) log_level_to_string_alloc(m->original_log_level, &s);
4896 log_info("Restoring log level to original (%s).", strna(s));
4897
4898 log_set_max_level(m->original_log_level);
4899 m->log_level_overridden = false;
4900 }
4901
4902 void manager_override_log_target(Manager *m, LogTarget target) {
4903 assert(m);
4904
4905 if (!m->log_target_overridden) {
4906 m->original_log_target = log_get_target();
4907 m->log_target_overridden = true;
4908 }
4909
4910 log_info("Setting log target to %s.", log_target_to_string(target));
4911 log_set_target(target);
4912 }
4913
4914 void manager_restore_original_log_target(Manager *m) {
4915 assert(m);
4916
4917 if (!m->log_target_overridden)
4918 return;
4919
4920 log_info("Restoring log target to original %s.", log_target_to_string(m->original_log_target));
4921
4922 log_set_target(m->original_log_target);
4923 m->log_target_overridden = false;
4924 }
4925
4926 ManagerTimestamp manager_timestamp_initrd_mangle(ManagerTimestamp s) {
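/* When running from the initrd, remap e.g. MANAGER_TIMESTAMP_SECURITY_START to
 * MANAGER_TIMESTAMP_INITRD_SECURITY_START, so that the initrd's measurements are
 * recorded separately (cf. the "initrd-*" entries in the table below). */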
4927 if (in_initrd() &&
4928 s >= MANAGER_TIMESTAMP_SECURITY_START &&
4929 s <= MANAGER_TIMESTAMP_UNITS_LOAD_FINISH)
4930 return s - MANAGER_TIMESTAMP_SECURITY_START + MANAGER_TIMESTAMP_INITRD_SECURITY_START;
4931 return s;
4932 }
4933
4934 int manager_allocate_idle_pipe(Manager *m) {
4935 int r;
4936
4937 assert(m);
4938
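/* Two pipe pairs for the Type=idle handshake: broadly, children wait for us to close
 * one pipe before they proceed, and may poke us through the other to hurry things up
 * (see manager_close_idle_pipe() and the executor's idle pipe handling). */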
4939 if (m->idle_pipe[0] >= 0) {
4940 assert(m->idle_pipe[1] >= 0);
4941 assert(m->idle_pipe[2] >= 0);
4942 assert(m->idle_pipe[3] >= 0);
4943 return 0;
4944 }
4945
4946 assert(m->idle_pipe[1] < 0);
4947 assert(m->idle_pipe[2] < 0);
4948 assert(m->idle_pipe[3] < 0);
4949
4950 r = RET_NERRNO(pipe2(m->idle_pipe + 0, O_NONBLOCK|O_CLOEXEC));
4951 if (r < 0)
4952 return r;
4953
4954 r = RET_NERRNO(pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC));
4955 if (r < 0) {
4956 safe_close_pair(m->idle_pipe + 0);
4957 return r;
4958 }
4959
4960 return 1;
4961 }
4962
4963 void unit_defaults_init(UnitDefaults *defaults, RuntimeScope scope) {
4964 assert(defaults);
4965 assert(scope >= 0);
4966 assert(scope < _RUNTIME_SCOPE_MAX);
4967
4968 *defaults = (UnitDefaults) {
4969 .std_output = EXEC_OUTPUT_JOURNAL,
4970 .std_error = EXEC_OUTPUT_INHERIT,
4971 .restart_usec = DEFAULT_RESTART_USEC,
4972 .timeout_start_usec = manager_default_timeout(scope),
4973 .timeout_stop_usec = manager_default_timeout(scope),
4974 .timeout_abort_usec = manager_default_timeout(scope),
4975 .timeout_abort_set = false,
4976 .device_timeout_usec = manager_default_timeout(scope),
4977 .start_limit_interval = DEFAULT_START_LIMIT_INTERVAL,
4978 .start_limit_burst = DEFAULT_START_LIMIT_BURST,
4979
4980 /* On 4.15+ with unified hierarchy, CPU accounting is essentially free as it doesn't require the CPU
4981 * controller to be enabled, so the default is to enable it unless we got told otherwise. */
4982 .cpu_accounting = cpu_accounting_is_cheap(),
4983 .memory_accounting = MEMORY_ACCOUNTING_DEFAULT,
4984 .io_accounting = false,
4985 .blockio_accounting = false,
4986 .tasks_accounting = true,
4987 .ip_accounting = false,
4988
4989 .tasks_max = DEFAULT_TASKS_MAX,
4990 .timer_accuracy_usec = 1 * USEC_PER_MINUTE,
4991
4992 .memory_pressure_watch = CGROUP_PRESSURE_WATCH_AUTO,
4993 .memory_pressure_threshold_usec = MEMORY_PRESSURE_DEFAULT_THRESHOLD_USEC,
4994
4995 .oom_policy = OOM_STOP,
4996 .oom_score_adjust_set = false,
4997 };
4998 }
4999
5000 void unit_defaults_done(UnitDefaults *defaults) {
5001 assert(defaults);
5002
5003 defaults->smack_process_label = mfree(defaults->smack_process_label);
5004 rlimit_free_all(defaults->rlimit);
5005 }
5006
5007 LogTarget manager_get_executor_log_target(Manager *m) {
5008 assert(m);
5009
5010 /* If journald is not available tell sd-executor to go to kmsg, as it might be starting journald */
5011
5012 if (manager_journal_is_running(m))
5013 return log_get_target();
5014
5015 return LOG_TARGET_KMSG;
5016 }
5017
5018 static const char *const manager_state_table[_MANAGER_STATE_MAX] = {
5019 [MANAGER_INITIALIZING] = "initializing",
5020 [MANAGER_STARTING] = "starting",
5021 [MANAGER_RUNNING] = "running",
5022 [MANAGER_DEGRADED] = "degraded",
5023 [MANAGER_MAINTENANCE] = "maintenance",
5024 [MANAGER_STOPPING] = "stopping",
5025 };
5026
5027 DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);
5028
5029 static const char *const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
5030 [MANAGER_TIMESTAMP_FIRMWARE] = "firmware",
5031 [MANAGER_TIMESTAMP_LOADER] = "loader",
5032 [MANAGER_TIMESTAMP_KERNEL] = "kernel",
5033 [MANAGER_TIMESTAMP_INITRD] = "initrd",
5034 [MANAGER_TIMESTAMP_USERSPACE] = "userspace",
5035 [MANAGER_TIMESTAMP_FINISH] = "finish",
5036 [MANAGER_TIMESTAMP_SECURITY_START] = "security-start",
5037 [MANAGER_TIMESTAMP_SECURITY_FINISH] = "security-finish",
5038 [MANAGER_TIMESTAMP_GENERATORS_START] = "generators-start",
5039 [MANAGER_TIMESTAMP_GENERATORS_FINISH] = "generators-finish",
5040 [MANAGER_TIMESTAMP_UNITS_LOAD_START] = "units-load-start",
5041 [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH] = "units-load-finish",
5042 [MANAGER_TIMESTAMP_UNITS_LOAD] = "units-load",
5043 [MANAGER_TIMESTAMP_INITRD_SECURITY_START] = "initrd-security-start",
5044 [MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH] = "initrd-security-finish",
5045 [MANAGER_TIMESTAMP_INITRD_GENERATORS_START] = "initrd-generators-start",
5046 [MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH] = "initrd-generators-finish",
5047 [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START] = "initrd-units-load-start",
5048 [MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH] = "initrd-units-load-finish",
5049 };
5050
5051 DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);
5052
5053 static const char* const oom_policy_table[_OOM_POLICY_MAX] = {
5054 [OOM_CONTINUE] = "continue",
5055 [OOM_STOP] = "stop",
5056 [OOM_KILL] = "kill",
5057 };
5058
5059 DEFINE_STRING_TABLE_LOOKUP(oom_policy, OOMPolicy);