src/core/manager.c (thirdparty/systemd.git, commit 4a043fbb11f1312798ba1e1840c6f65d16751780)
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <linux/kd.h>
24 #include <signal.h>
25 #include <stdio_ext.h>
26 #include <string.h>
27 #include <sys/epoll.h>
28 #include <sys/inotify.h>
29 #include <sys/ioctl.h>
30 #include <sys/reboot.h>
31 #include <sys/timerfd.h>
32 #include <sys/wait.h>
33 #include <unistd.h>
34
35 #if HAVE_AUDIT
36 #include <libaudit.h>
37 #endif
38
39 #include "sd-daemon.h"
40 #include "sd-messages.h"
41 #include "sd-path.h"
42
43 #include "alloc-util.h"
44 #include "audit-fd.h"
45 #include "boot-timestamps.h"
46 #include "bus-common-errors.h"
47 #include "bus-error.h"
48 #include "bus-kernel.h"
49 #include "bus-util.h"
50 #include "clean-ipc.h"
51 #include "clock-util.h"
52 #include "dbus-job.h"
53 #include "dbus-manager.h"
54 #include "dbus-unit.h"
55 #include "dbus.h"
56 #include "dirent-util.h"
57 #include "env-util.h"
58 #include "escape.h"
59 #include "exec-util.h"
60 #include "execute.h"
61 #include "exit-status.h"
62 #include "fd-util.h"
63 #include "fileio.h"
64 #include "fs-util.h"
65 #include "hashmap.h"
66 #include "io-util.h"
67 #include "label.h"
68 #include "locale-setup.h"
69 #include "log.h"
70 #include "macro.h"
71 #include "manager.h"
72 #include "missing.h"
73 #include "mkdir.h"
74 #include "parse-util.h"
75 #include "path-lookup.h"
76 #include "path-util.h"
77 #include "process-util.h"
78 #include "ratelimit.h"
79 #include "rm-rf.h"
80 #include "signal-util.h"
81 #include "special.h"
82 #include "stat-util.h"
83 #include "string-table.h"
84 #include "string-util.h"
85 #include "strv.h"
86 #include "terminal-util.h"
87 #include "time-util.h"
88 #include "transaction.h"
89 #include "umask-util.h"
90 #include "unit-name.h"
91 #include "user-util.h"
92 #include "util.h"
93 #include "virt.h"
94 #include "watchdog.h"
95
96 #define NOTIFY_RCVBUF_SIZE (8*1024*1024)
97 #define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)
98
99 /* Initial delay and the interval for printing status messages about running jobs */
100 #define JOBS_IN_PROGRESS_WAIT_USEC (5*USEC_PER_SEC)
101 #define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
102 #define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
103
104 static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
105 static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
106 static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
107 static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
108 static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
109 static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
110 static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
111 static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
112 static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
113 static int manager_run_environment_generators(Manager *m);
114 static int manager_run_generators(Manager *m);
115
116 static void manager_watch_jobs_in_progress(Manager *m) {
117 usec_t next;
118 int r;
119
120 assert(m);
121
122 /* We do not want to show the cylon animation if the user
123 * needs to confirm service executions, since otherwise the
124 * confirmation messages would be garbled by the cylon animation. */
125 if (!manager_is_confirm_spawn_disabled(m))
126 return;
127
128 if (m->jobs_in_progress_event_source)
129 return;
130
131 next = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC;
132 r = sd_event_add_time(
133 m->event,
134 &m->jobs_in_progress_event_source,
135 CLOCK_MONOTONIC,
136 next, 0,
137 manager_dispatch_jobs_in_progress, m);
138 if (r < 0)
139 return;
140
141 (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
142 }
143
144 #define CYLON_BUFFER_EXTRA (2*STRLEN(ANSI_RED) + STRLEN(ANSI_HIGHLIGHT_RED) + 2*STRLEN(ANSI_NORMAL))
145
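/* draw_cylon() renders the sweeping "cylon" light: a bright center asterisk flanked by up to two
 * dimmer ones, moving across a field of the given width. CYLON_BUFFER_EXTRA reserves space for the
 * ANSI color escape sequences emitted around those lights when colored output is enabled. */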
146 static void draw_cylon(char buffer[], size_t buflen, unsigned width, unsigned pos) {
147 char *p = buffer;
148
149 assert(buflen >= CYLON_BUFFER_EXTRA + width + 1);
150 assert(pos <= width+1); /* 0 or width+1 mean that the center light is behind the corner */
151
152 if (pos > 1) {
153 if (pos > 2)
154 p = mempset(p, ' ', pos-2);
155 if (log_get_show_color())
156 p = stpcpy(p, ANSI_RED);
157 *p++ = '*';
158 }
159
160 if (pos > 0 && pos <= width) {
161 if (log_get_show_color())
162 p = stpcpy(p, ANSI_HIGHLIGHT_RED);
163 *p++ = '*';
164 }
165
166 if (log_get_show_color())
167 p = stpcpy(p, ANSI_NORMAL);
168
169 if (pos < width) {
170 if (log_get_show_color())
171 p = stpcpy(p, ANSI_RED);
172 *p++ = '*';
173 if (pos < width-1)
174 p = mempset(p, ' ', width-1-pos);
175 if (log_get_show_color())
176 strcpy(p, ANSI_NORMAL);
177 }
178 }
179
180 void manager_flip_auto_status(Manager *m, bool enable) {
181 assert(m);
182
183 if (enable) {
184 if (m->show_status == SHOW_STATUS_AUTO)
185 manager_set_show_status(m, SHOW_STATUS_TEMPORARY);
186 } else {
187 if (m->show_status == SHOW_STATUS_TEMPORARY)
188 manager_set_show_status(m, SHOW_STATUS_AUTO);
189 }
190 }
191
192 static void manager_print_jobs_in_progress(Manager *m) {
193 _cleanup_free_ char *job_of_n = NULL;
194 Iterator i;
195 Job *j;
196 unsigned counter = 0, print_nr;
197 char cylon[6 + CYLON_BUFFER_EXTRA + 1];
198 unsigned cylon_pos;
199 char time[FORMAT_TIMESPAN_MAX], limit[FORMAT_TIMESPAN_MAX] = "no limit";
200 uint64_t x;
201
202 assert(m);
203 assert(m->n_running_jobs > 0);
204
205 manager_flip_auto_status(m, true);
206
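/* Cycle through the running jobs: advance to the next job only every
 * JOBS_IN_PROGRESS_PERIOD_DIVISOR ticks, so that with a tick period of 1/3 s each
 * job's status line stays visible for roughly one second. */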
207 print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;
208
209 HASHMAP_FOREACH(j, m->jobs, i)
210 if (j->state == JOB_RUNNING && counter++ == print_nr)
211 break;
212
213 /* m->n_running_jobs must be consistent with the contents of m->jobs,
214 * so the above loop must have succeeded in finding j. */
215 assert(counter == print_nr + 1);
216 assert(j);
217
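/* Fold the ever-increasing iteration counter into a position that bounces back and
 * forth: 0..7 on the way out, 6..1 on the way back (period 14). Positions 0 and 7
 * mean the center light sits just beyond either end of the 6-character field. */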
218 cylon_pos = m->jobs_in_progress_iteration % 14;
219 if (cylon_pos >= 8)
220 cylon_pos = 14 - cylon_pos;
221 draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);
222
223 m->jobs_in_progress_iteration++;
224
225 if (m->n_running_jobs > 1) {
226 if (asprintf(&job_of_n, "(%u of %u) ", counter, m->n_running_jobs) < 0)
227 job_of_n = NULL;
228 }
229
230 format_timespan(time, sizeof(time), now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
231 if (job_get_timeout(j, &x) > 0)
232 format_timespan(limit, sizeof(limit), x - j->begin_usec, 1*USEC_PER_SEC);
233
234 manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
235 "%sA %s job is running for %s (%s / %s)",
236 strempty(job_of_n),
237 job_type_to_string(j->type),
238 unit_description(j->unit),
239 time, limit);
240 }
241
242 static int have_ask_password(void) {
243 _cleanup_closedir_ DIR *dir;
244 struct dirent *de;
245
246 dir = opendir("/run/systemd/ask-password");
247 if (!dir) {
248 if (errno == ENOENT)
249 return false;
250 else
251 return -errno;
252 }
253
254 FOREACH_DIRENT_ALL(de, dir, return -errno) {
255 if (startswith(de->d_name, "ask."))
256 return true;
257 }
258 return false;
259 }
260
261 static int manager_dispatch_ask_password_fd(sd_event_source *source,
262 int fd, uint32_t revents, void *userdata) {
263 Manager *m = userdata;
264
265 assert(m);
266
267 (void) flush_fd(fd);
268
269 m->have_ask_password = have_ask_password();
270 if (m->have_ask_password < 0)
271 /* Log error but continue. Negative have_ask_password
272 * is treated as unknown status. */
273 log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");
274
275 return 0;
276 }
277
278 static void manager_close_ask_password(Manager *m) {
279 assert(m);
280
281 m->ask_password_event_source = sd_event_source_unref(m->ask_password_event_source);
282 m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
283 m->have_ask_password = -EINVAL;
284 }
285
286 static int manager_check_ask_password(Manager *m) {
287 int r;
288
289 assert(m);
290
291 if (!m->ask_password_event_source) {
292 assert(m->ask_password_inotify_fd < 0);
293
294 mkdir_p_label("/run/systemd/ask-password", 0755);
295
296 m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
297 if (m->ask_password_inotify_fd < 0)
298 return log_error_errno(errno, "inotify_init1() failed: %m");
299
300 if (inotify_add_watch(m->ask_password_inotify_fd, "/run/systemd/ask-password", IN_CREATE|IN_DELETE|IN_MOVE) < 0) {
301 log_error_errno(errno, "Failed to add watch on /run/systemd/ask-password: %m");
302 manager_close_ask_password(m);
303 return -errno;
304 }
305
306 r = sd_event_add_io(m->event, &m->ask_password_event_source,
307 m->ask_password_inotify_fd, EPOLLIN,
308 manager_dispatch_ask_password_fd, m);
309 if (r < 0) {
310 log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
311 manager_close_ask_password(m);
312 return r;
313 }
314
315 (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");
316
317 /* Queries might have been added meanwhile... */
318 manager_dispatch_ask_password_fd(m->ask_password_event_source,
319 m->ask_password_inotify_fd, EPOLLIN, m);
320 }
321
322 return m->have_ask_password;
323 }
324
325 static int manager_watch_idle_pipe(Manager *m) {
326 int r;
327
328 assert(m);
329
330 if (m->idle_pipe_event_source)
331 return 0;
332
333 if (m->idle_pipe[2] < 0)
334 return 0;
335
336 r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
337 if (r < 0)
338 return log_error_errno(r, "Failed to watch idle pipe: %m");
339
340 (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");
341
342 return 0;
343 }
344
345 static void manager_close_idle_pipe(Manager *m) {
346 assert(m);
347
348 m->idle_pipe_event_source = sd_event_source_unref(m->idle_pipe_event_source);
349
350 safe_close_pair(m->idle_pipe);
351 safe_close_pair(m->idle_pipe + 2);
352 }
353
354 static int manager_setup_time_change(Manager *m) {
355 int r;
356
357 /* We only care for the cancellation event, hence we set the
358 * timeout to the latest possible value. */
359 struct itimerspec its = {
360 .it_value.tv_sec = TIME_T_MAX,
361 };
362
363 assert(m);
364 assert_cc(sizeof(time_t) == sizeof(TIME_T_MAX));
365
366 if (m->test_run_flags)
367 return 0;
368
369 /* Uses TFD_TIMER_CANCEL_ON_SET to get notifications whenever
370 * CLOCK_REALTIME makes a jump relative to CLOCK_MONOTONIC */
371
372 m->time_change_fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
373 if (m->time_change_fd < 0)
374 return log_error_errno(errno, "Failed to create timerfd: %m");
375
376 if (timerfd_settime(m->time_change_fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
377 log_debug_errno(errno, "Failed to set up TFD_TIMER_CANCEL_ON_SET, ignoring: %m");
378 m->time_change_fd = safe_close(m->time_change_fd);
379 return 0;
380 }
381
382 r = sd_event_add_io(m->event, &m->time_change_event_source, m->time_change_fd, EPOLLIN, manager_dispatch_time_change_fd, m);
383 if (r < 0)
384 return log_error_errno(r, "Failed to create time change event source: %m");
385
386 (void) sd_event_source_set_description(m->time_change_event_source, "manager-time-change");
387
388 log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");
389
390 return 0;
391 }
392
393 static int enable_special_signals(Manager *m) {
394 _cleanup_close_ int fd = -1;
395
396 assert(m);
397
398 if (m->test_run_flags)
399 return 0;
400
401 /* Make sure we get SIGINT on control-alt-del. In containers
402 * this will fail with EPERM (older) or EINVAL (newer), so
403 * ignore that. */
404 if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
405 log_warning_errno(errno, "Failed to enable ctrl-alt-del handling: %m");
406
407 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
408 if (fd < 0) {
409 /* Support systems without virtual console */
410 if (fd != -ENOENT)
411 log_warning_errno(fd, "Failed to open /dev/tty0: %m");
412 } else {
413 /* Make sure we get SIGWINCH on kbrequest */
414 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
415 log_warning_errno(errno, "Failed to enable kbrequest handling: %m");
416 }
417
418 return 0;
419 }
420
421 static int manager_setup_signals(Manager *m) {
422 struct sigaction sa = {
423 .sa_handler = SIG_DFL,
424 .sa_flags = SA_NOCLDSTOP|SA_RESTART,
425 };
426 sigset_t mask;
427 int r;
428
429 assert(m);
430
431 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
432
433 /* We make liberal use of realtime signals here. On
434 * Linux/glibc we have 30 of them (with the exception of Linux
435 * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
436 * (aka SIGRTMAX). */
437
438 assert_se(sigemptyset(&mask) == 0);
439 sigset_add_many(&mask,
440 SIGCHLD, /* Child died */
441 SIGTERM, /* Reexecute daemon */
442 SIGHUP, /* Reload configuration */
443 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
444 SIGUSR2, /* systemd: dump status */
445 SIGINT, /* Kernel sends us this on control-alt-del */
446 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
447 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
448
449 SIGRTMIN+0, /* systemd: start default.target */
450 SIGRTMIN+1, /* systemd: isolate rescue.target */
451 SIGRTMIN+2, /* systemd: isolate emergency.target */
452 SIGRTMIN+3, /* systemd: start halt.target */
453 SIGRTMIN+4, /* systemd: start poweroff.target */
454 SIGRTMIN+5, /* systemd: start reboot.target */
455 SIGRTMIN+6, /* systemd: start kexec.target */
456
457 /* ... space for more special targets ... */
458
459 SIGRTMIN+13, /* systemd: Immediate halt */
460 SIGRTMIN+14, /* systemd: Immediate poweroff */
461 SIGRTMIN+15, /* systemd: Immediate reboot */
462 SIGRTMIN+16, /* systemd: Immediate kexec */
463
464 /* ... space for more immediate system state changes ... */
465
466 SIGRTMIN+20, /* systemd: enable status messages */
467 SIGRTMIN+21, /* systemd: disable status messages */
468 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
469 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
470 SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
471
472 /* ... one free signal here ... */
473
474 #if !defined(__hppa64__) && !defined(__hppa__)
475 /* Apparently Linux on hppa has fewer RT
476 * signals (SIGRTMAX is SIGRTMIN+25 there),
477 * hence let's not try to make use of them
478 * here. Since these commands are accessible
479 * by different means and only really a safety
480 * net, the missing functionality on hppa
481 * shouldn't matter. */
482
483 SIGRTMIN+26, /* systemd: set log target to journal-or-kmsg */
484 SIGRTMIN+27, /* systemd: set log target to console */
485 SIGRTMIN+28, /* systemd: set log target to kmsg */
486 SIGRTMIN+29, /* systemd: set log target to syslog-or-kmsg (obsolete) */
487
488 /* ... one free signal here SIGRTMIN+30 ... */
489 #endif
490 -1);
491 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
492
493 m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
494 if (m->signal_fd < 0)
495 return -errno;
496
497 r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
498 if (r < 0)
499 return r;
500
501 (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");
502
503 /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
504 * notify processing can still figure out to which process/service a message belongs, before we reap the
505 * process. Also, process this before handling cgroup notifications, so that we always collect child exit
506 * status information before detecting that there's no process in a cgroup. */
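/* For reference, the priorities assigned to the manager's event sources in this file are:
 * user lookup (NORMAL-11), notify (NORMAL-8), SIGCHLD (NORMAL-7), signals (NORMAL-6),
 * cgroups agent (NORMAL-4); the job run queue runs at IDLE priority. */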
507 r = sd_event_source_set_priority(m->signal_event_source, SD_EVENT_PRIORITY_NORMAL-6);
508 if (r < 0)
509 return r;
510
511 if (MANAGER_IS_SYSTEM(m))
512 return enable_special_signals(m);
513
514 return 0;
515 }
516
517 static void manager_sanitize_environment(Manager *m) {
518 assert(m);
519
520 /* Let's remove some environment variables that we need ourselves to communicate with our clients */
521 strv_env_unset_many(
522 m->environment,
523 "EXIT_CODE",
524 "EXIT_STATUS",
525 "INVOCATION_ID",
526 "JOURNAL_STREAM",
527 "LISTEN_FDNAMES",
528 "LISTEN_FDS",
529 "LISTEN_PID",
530 "MAINPID",
531 "MANAGERPID",
532 "NOTIFY_SOCKET",
533 "REMOTE_ADDR",
534 "REMOTE_PORT",
535 "SERVICE_RESULT",
536 "WATCHDOG_PID",
537 "WATCHDOG_USEC",
538 NULL);
539
540 /* Let's order the environment alphabetically, just to make it pretty */
541 strv_sort(m->environment);
542 }
543
544 static int manager_default_environment(Manager *m) {
545 assert(m);
546
547 if (MANAGER_IS_SYSTEM(m)) {
548 /* The system manager always starts with a clean
549 * environment for its children. It does not import
550 * the kernel's or the parents' exported variables.
551 *
552 * The initial passed environment is untouched to keep
553 * /proc/self/environ valid; it is used for tagging
554 * the init process inside containers. */
555 m->environment = strv_new("PATH=" DEFAULT_PATH,
556 NULL);
557
558 /* Import locale variables LC_*= from configuration */
559 locale_setup(&m->environment);
560 } else
561 /* The user manager passes its own environment
562 * along to its children. */
563 m->environment = strv_copy(environ);
564
565 if (!m->environment)
566 return -ENOMEM;
567
568 manager_sanitize_environment(m);
569
570 return 0;
571 }
572
573 static int manager_setup_prefix(Manager *m) {
574 struct table_entry {
575 uint64_t type;
576 const char *suffix;
577 };
578
579 static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
580 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
581 [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
582 [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
583 [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
584 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
585 };
586
587 static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
588 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
589 [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_CONFIGURATION, NULL },
590 [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
591 [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_CONFIGURATION, "log" },
592 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
593 };
594
595 const struct table_entry *p;
596 ExecDirectoryType i;
597 int r;
598
599 assert(m);
600
601 if (MANAGER_IS_SYSTEM(m))
602 p = paths_system;
603 else
604 p = paths_user;
605
606 for (i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
607 r = sd_path_home(p[i].type, p[i].suffix, &m->prefix[i]);
608 if (r < 0)
609 return r;
610 }
611
612 return 0;
613 }
614
615 static int manager_setup_run_queue(Manager *m) {
616 int r;
617
618 assert(m);
619 assert(!m->run_queue_event_source);
620
621 r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
622 if (r < 0)
623 return r;
624
625 r = sd_event_source_set_priority(m->run_queue_event_source, SD_EVENT_PRIORITY_IDLE);
626 if (r < 0)
627 return r;
628
629 r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
630 if (r < 0)
631 return r;
632
633 (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");
634
635 return 0;
636 }
637
638 static int manager_setup_sigchld_event_source(Manager *m) {
639 int r;
640
641 assert(m);
642 assert(!m->sigchld_event_source);
643
644 r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
645 if (r < 0)
646 return r;
647
648 r = sd_event_source_set_priority(m->sigchld_event_source, SD_EVENT_PRIORITY_NORMAL-7);
649 if (r < 0)
650 return r;
651
652 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
653 if (r < 0)
654 return r;
655
656 (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");
657
658 return 0;
659 }
660
661 int manager_new(UnitFileScope scope, unsigned test_run_flags, Manager **_m) {
662 Manager *m;
663 int r;
664
665 assert(_m);
666 assert(IN_SET(scope, UNIT_FILE_SYSTEM, UNIT_FILE_USER));
667
668 m = new0(Manager, 1);
669 if (!m)
670 return -ENOMEM;
671
672 m->unit_file_scope = scope;
673 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
674 m->default_timer_accuracy_usec = USEC_PER_MINUTE;
675 m->default_tasks_accounting = true;
676 m->default_tasks_max = UINT64_MAX;
677 m->default_timeout_start_usec = DEFAULT_TIMEOUT_USEC;
678 m->default_timeout_stop_usec = DEFAULT_TIMEOUT_USEC;
679 m->default_restart_usec = DEFAULT_RESTART_USEC;
680
681 #if ENABLE_EFI
682 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
683 boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
684 m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
685 m->timestamps + MANAGER_TIMESTAMP_LOADER);
686 #endif
687
688 /* Prepare log fields we can use for structured logging */
689 if (MANAGER_IS_SYSTEM(m)) {
690 m->unit_log_field = "UNIT=";
691 m->unit_log_format_string = "UNIT=%s";
692
693 m->invocation_log_field = "INVOCATION_ID=";
694 m->invocation_log_format_string = "INVOCATION_ID=%s";
695 } else {
696 m->unit_log_field = "USER_UNIT=";
697 m->unit_log_format_string = "USER_UNIT=%s";
698
699 m->invocation_log_field = "USER_INVOCATION_ID=";
700 m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
701 }
702
703 m->idle_pipe[0] = m->idle_pipe[1] = m->idle_pipe[2] = m->idle_pipe[3] = -1;
704
705 m->pin_cgroupfs_fd = m->notify_fd = m->cgroups_agent_fd = m->signal_fd = m->time_change_fd =
706 m->dev_autofs_fd = m->private_listen_fd = m->cgroup_inotify_fd =
707 m->ask_password_inotify_fd = -1;
708
709 m->user_lookup_fds[0] = m->user_lookup_fds[1] = -1;
710
711 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
712
713 m->have_ask_password = -EINVAL; /* we don't know */
714 m->first_boot = -1;
715
716 m->test_run_flags = test_run_flags;
717
718 /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
719 RATELIMIT_INIT(m->ctrl_alt_del_ratelimit, 2 * USEC_PER_SEC, 7);
720
721 r = manager_default_environment(m);
722 if (r < 0)
723 goto fail;
724
725 r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
726 if (r < 0)
727 goto fail;
728
729 r = hashmap_ensure_allocated(&m->jobs, NULL);
730 if (r < 0)
731 goto fail;
732
733 r = hashmap_ensure_allocated(&m->cgroup_unit, &string_hash_ops);
734 if (r < 0)
735 goto fail;
736
737 r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
738 if (r < 0)
739 goto fail;
740
741 r = sd_event_default(&m->event);
742 if (r < 0)
743 goto fail;
744
745 r = manager_setup_run_queue(m);
746 if (r < 0)
747 goto fail;
748
749 r = manager_setup_signals(m);
750 if (r < 0)
751 goto fail;
752
753 r = manager_setup_cgroup(m);
754 if (r < 0)
755 goto fail;
756
757 r = manager_setup_time_change(m);
758 if (r < 0)
759 goto fail;
760
761 r = manager_setup_sigchld_event_source(m);
762 if (r < 0)
763 goto fail;
764
765 m->udev = udev_new();
766 if (!m->udev) {
767 r = -ENOMEM;
768 goto fail;
769 }
770
771 r = manager_setup_prefix(m);
772 if (r < 0)
773 goto fail;
774
775 if (MANAGER_IS_SYSTEM(m) && test_run_flags == 0) {
776 r = mkdir_label("/run/systemd/units", 0755);
777 if (r < 0 && r != -EEXIST)
778 goto fail;
779 }
780
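/* Consider the system tainted if /usr is empty at this point, i.e. it was not pre-mounted
 * by the initrd and will only be mounted later during boot. */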
781 m->taint_usr =
782 !in_initrd() &&
783 dir_is_empty("/usr") > 0;
784
785 /* Note that we do not set up the notify fd here. We do that after deserialization,
786 * since it might have been serialized across the reexec. */
787
788 *_m = m;
789 return 0;
790
791 fail:
792 manager_free(m);
793 return r;
794 }
795
796 static int manager_setup_notify(Manager *m) {
797 int r;
798
799 if (m->test_run_flags)
800 return 0;
801
802 if (m->notify_fd < 0) {
803 _cleanup_close_ int fd = -1;
804 union sockaddr_union sa = {
805 .sa.sa_family = AF_UNIX,
806 };
807 static const int one = 1;
808
809 /* First free all secondary fields */
810 m->notify_socket = mfree(m->notify_socket);
811 m->notify_event_source = sd_event_source_unref(m->notify_event_source);
812
813 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
814 if (fd < 0)
815 return log_error_errno(errno, "Failed to allocate notification socket: %m");
816
817 fd_inc_rcvbuf(fd, NOTIFY_RCVBUF_SIZE);
818
819 m->notify_socket = strappend(m->prefix[EXEC_DIRECTORY_RUNTIME], "/systemd/notify");
820 if (!m->notify_socket)
821 return log_oom();
822
823 (void) mkdir_parents_label(m->notify_socket, 0755);
824 (void) unlink(m->notify_socket);
825
826 strncpy(sa.un.sun_path, m->notify_socket, sizeof(sa.un.sun_path)-1);
827 r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
828 if (r < 0)
829 return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
830
831 r = setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
832 if (r < 0)
833 return log_error_errno(errno, "SO_PASSCRED failed: %m");
834
835 m->notify_fd = fd;
836 fd = -1;
837
838 log_debug("Using notification socket %s", m->notify_socket);
839 }
840
841 if (!m->notify_event_source) {
842 r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
843 if (r < 0)
844 return log_error_errno(r, "Failed to allocate notify event source: %m");
845
846 /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
847 * service an exit message belongs. */
848 r = sd_event_source_set_priority(m->notify_event_source, SD_EVENT_PRIORITY_NORMAL-8);
849 if (r < 0)
850 return log_error_errno(r, "Failed to set priority of notify event source: %m");
851
852 (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
853 }
854
855 return 0;
856 }
857
858 static int manager_setup_cgroups_agent(Manager *m) {
859
860 static const union sockaddr_union sa = {
861 .un.sun_family = AF_UNIX,
862 .un.sun_path = "/run/systemd/cgroups-agent",
863 };
864 int r;
865
866 /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
867 * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-living, and
868 * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
869 * overloaded systems the backlog of the D-Bus socket becomes relevant, as not more than the configured number
870 * of D-Bus connections may be queued until the kernel will start dropping further incoming connections,
871 * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
872 * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
873 * we thus won't lose messages.
874 *
875 * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
876 * to it. The system instance hence listens on this special socket, but the user instances listen on the system
877 * bus for these messages. */
878
879 if (m->test_run_flags)
880 return 0;
881
882 if (!MANAGER_IS_SYSTEM(m))
883 return 0;
884
885 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
886 if (r < 0)
887 return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
888 if (r > 0) /* We don't need this anymore on the unified hierarchy */
889 return 0;
890
891 if (m->cgroups_agent_fd < 0) {
892 _cleanup_close_ int fd = -1;
893
894 /* First free all secondary fields */
895 m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
896
897 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
898 if (fd < 0)
899 return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");
900
901 fd_inc_rcvbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);
902
903 (void) unlink(sa.un.sun_path);
904
905 /* Only allow root to connect to this socket */
906 RUN_WITH_UMASK(0077)
907 r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
908 if (r < 0)
909 return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
910
911 m->cgroups_agent_fd = fd;
912 fd = -1;
913 }
914
915 if (!m->cgroups_agent_event_source) {
916 r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
917 if (r < 0)
918 return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");
919
920 /* Process cgroups notifications early, but after having processed service notification messages or
921 * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of notification,
922 * and we collected the metadata the notification and SIGCHLD stuff offers first. Also see handling of
923 * cgroup inotify for the unified cgroup stuff. */
924 r = sd_event_source_set_priority(m->cgroups_agent_event_source, SD_EVENT_PRIORITY_NORMAL-4);
925 if (r < 0)
926 return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");
927
928 (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
929 }
930
931 return 0;
932 }
933
934 static int manager_setup_user_lookup_fd(Manager *m) {
935 int r;
936
937 assert(m);
938
939 /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
940 * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
941 * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
942 * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
943 * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
944 * hence we establish this communication channel so that forked off processes can pass their UID/GID
945 * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
946 * datagram, along with their unit name, so that we can share one communication socket pair among all units for
947 * this purpose.
948 *
949 * You might wonder why we need a communication channel for this that is independent of the usual notification
950 * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
951 * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
952 * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
953 *
954 * Note that this function is called under two circumstances: when we first initialize (in which case we
955 * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
956 * (in which case the socket pair already exists but we still need to allocate the event source for it). */
957
958 if (m->user_lookup_fds[0] < 0) {
959
960 /* Free all secondary fields */
961 safe_close_pair(m->user_lookup_fds);
962 m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
963
964 if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
965 return log_error_errno(errno, "Failed to allocate user lookup socket: %m");
966
967 (void) fd_inc_rcvbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
968 }
969
970 if (!m->user_lookup_event_source) {
971 r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
972 if (r < 0)
973 return log_error_errno(r, "Failed to allocate user lookup event source: %m");
974
975 /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
976 * resolutions */
977 r = sd_event_source_set_priority(m->user_lookup_event_source, SD_EVENT_PRIORITY_NORMAL-11);
978 if (r < 0)
979 return log_error_errno(r, "Failed to set priority of user lookup event source: %m");
980
981 (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
982 }
983
984 return 0;
985 }
986
987 static int manager_connect_bus(Manager *m, bool reexecuting) {
988 bool try_bus_connect;
989 Unit *u = NULL;
990
991 assert(m);
992
993 if (m->test_run_flags)
994 return 0;
995
996 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
997
998 try_bus_connect =
999 (u && SERVICE(u)->deserialized_state == SERVICE_RUNNING) &&
1000 (reexecuting ||
1001 (MANAGER_IS_USER(m) && getenv("DBUS_SESSION_BUS_ADDRESS")));
1002
1003 /* Try to connect to the buses, if possible. */
1004 return bus_init(m, try_bus_connect);
1005 }
1006
1007 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
1008 Unit *u;
1009 unsigned n = 0;
1010
1011 assert(m);
1012
1013 while ((u = m->cleanup_queue)) {
1014 assert(u->in_cleanup_queue);
1015
1016 unit_free(u);
1017 n++;
1018 }
1019
1020 return n;
1021 }
1022
1023 enum {
1024 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
1025 GC_OFFSET_UNSURE, /* No clue */
1026 GC_OFFSET_GOOD, /* We still need this unit */
1027 GC_OFFSET_BAD, /* We don't need this unit anymore */
1028 _GC_OFFSET_MAX
1029 };
1030
1031 static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
1032 Unit *other;
1033 Iterator i;
1034 void *v;
1035
1036 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
1037
1038 /* Recursively mark referenced units as GOOD as well */
1039 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCES], i)
1040 if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
1041 unit_gc_mark_good(other, gc_marker);
1042 }
1043
1044 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
1045 Unit *other;
1046 bool is_bad;
1047 Iterator i;
1048 void *v;
1049
1050 assert(u);
1051
1052 if (IN_SET(u->gc_marker - gc_marker,
1053 GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
1054 return;
1055
1056 if (u->in_cleanup_queue)
1057 goto bad;
1058
1059 if (unit_check_gc(u))
1060 goto good;
1061
1062 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
1063
1064 is_bad = true;
1065
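/* Look at everything that references this unit: if any referencing unit turns out to be
 * needed (GOOD), this unit is needed too; it is only discardable (BAD) if every referencing
 * unit is itself BAD. Anything else stays UNSURE and is revisited later. */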
1066 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCED_BY], i) {
1067 unit_gc_sweep(other, gc_marker);
1068
1069 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
1070 goto good;
1071
1072 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
1073 is_bad = false;
1074 }
1075
1076 if (is_bad)
1077 goto bad;
1078
1079 /* We were unable to find anything out about this entry, so
1080 * let's investigate it later */
1081 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
1082 unit_add_to_gc_queue(u);
1083 return;
1084
1085 bad:
1086 /* We definitely know that this one is not useful anymore, so
1087 * let's mark it for deletion */
1088 u->gc_marker = gc_marker + GC_OFFSET_BAD;
1089 unit_add_to_cleanup_queue(u);
1090 return;
1091
1092 good:
1093 unit_gc_mark_good(u, gc_marker);
1094 }
1095
1096 static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
1097 unsigned n = 0, gc_marker;
1098 Unit *u;
1099
1100 assert(m);
1101
1102 /* log_debug("Running GC..."); */
1103
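/* Pick a fresh marker base for this GC run. If the unsigned counter is about to wrap
 * around, restart it at 1 so markers left over from earlier runs can never be mistaken
 * for current ones. */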
1104 m->gc_marker += _GC_OFFSET_MAX;
1105 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
1106 m->gc_marker = 1;
1107
1108 gc_marker = m->gc_marker;
1109
1110 while ((u = m->gc_unit_queue)) {
1111 assert(u->in_gc_queue);
1112
1113 unit_gc_sweep(u, gc_marker);
1114
1115 LIST_REMOVE(gc_queue, m->gc_unit_queue, u);
1116 u->in_gc_queue = false;
1117
1118 n++;
1119
1120 if (IN_SET(u->gc_marker - gc_marker,
1121 GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
1122 if (u->id)
1123 log_unit_debug(u, "Collecting.");
1124 u->gc_marker = gc_marker + GC_OFFSET_BAD;
1125 unit_add_to_cleanup_queue(u);
1126 }
1127 }
1128
1129 return n;
1130 }
1131
1132 static unsigned manager_dispatch_gc_job_queue(Manager *m) {
1133 unsigned n = 0;
1134 Job *j;
1135
1136 assert(m);
1137
1138 while ((j = m->gc_job_queue)) {
1139 assert(j->in_gc_queue);
1140
1141 LIST_REMOVE(gc_queue, m->gc_job_queue, j);
1142 j->in_gc_queue = false;
1143
1144 n++;
1145
1146 if (job_check_gc(j))
1147 continue;
1148
1149 log_unit_debug(j->unit, "Collecting job.");
1150 (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
1151 }
1152
1153 return n;
1154 }
1155
1156 static void manager_clear_jobs_and_units(Manager *m) {
1157 Unit *u;
1158
1159 assert(m);
1160
1161 while ((u = hashmap_first(m->units)))
1162 unit_free(u);
1163
1164 manager_dispatch_cleanup_queue(m);
1165
1166 assert(!m->load_queue);
1167 assert(!m->run_queue);
1168 assert(!m->dbus_unit_queue);
1169 assert(!m->dbus_job_queue);
1170 assert(!m->cleanup_queue);
1171 assert(!m->gc_unit_queue);
1172 assert(!m->gc_job_queue);
1173
1174 assert(hashmap_isempty(m->jobs));
1175 assert(hashmap_isempty(m->units));
1176
1177 m->n_on_console = 0;
1178 m->n_running_jobs = 0;
1179 }
1180
1181 Manager* manager_free(Manager *m) {
1182 UnitType c;
1183 int i;
1184 ExecDirectoryType dt;
1185
1186 if (!m)
1187 return NULL;
1188
1189 manager_clear_jobs_and_units(m);
1190
1191 for (c = 0; c < _UNIT_TYPE_MAX; c++)
1192 if (unit_vtable[c]->shutdown)
1193 unit_vtable[c]->shutdown(m);
1194
1195 /* If we reexecute ourselves, we keep the root cgroup around */
1196 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
1197
1198 lookup_paths_flush_generator(&m->lookup_paths);
1199
1200 bus_done(m);
1201
1202 dynamic_user_vacuum(m, false);
1203 hashmap_free(m->dynamic_users);
1204
1205 hashmap_free(m->units);
1206 hashmap_free(m->units_by_invocation_id);
1207 hashmap_free(m->jobs);
1208 hashmap_free(m->watch_pids);
1209 hashmap_free(m->watch_bus);
1210
1211 set_free(m->startup_units);
1212 set_free(m->failed_units);
1213
1214 sd_event_source_unref(m->signal_event_source);
1215 sd_event_source_unref(m->sigchld_event_source);
1216 sd_event_source_unref(m->notify_event_source);
1217 sd_event_source_unref(m->cgroups_agent_event_source);
1218 sd_event_source_unref(m->time_change_event_source);
1219 sd_event_source_unref(m->jobs_in_progress_event_source);
1220 sd_event_source_unref(m->run_queue_event_source);
1221 sd_event_source_unref(m->user_lookup_event_source);
1222
1223 safe_close(m->signal_fd);
1224 safe_close(m->notify_fd);
1225 safe_close(m->cgroups_agent_fd);
1226 safe_close(m->time_change_fd);
1227 safe_close_pair(m->user_lookup_fds);
1228
1229 manager_close_ask_password(m);
1230
1231 manager_close_idle_pipe(m);
1232
1233 udev_unref(m->udev);
1234 sd_event_unref(m->event);
1235
1236 free(m->notify_socket);
1237
1238 lookup_paths_free(&m->lookup_paths);
1239 strv_free(m->environment);
1240
1241 hashmap_free(m->cgroup_unit);
1242 set_free_free(m->unit_path_cache);
1243
1244 free(m->switch_root);
1245 free(m->switch_root_init);
1246
1247 for (i = 0; i < _RLIMIT_MAX; i++)
1248 m->rlimit[i] = mfree(m->rlimit[i]);
1249
1250 assert(hashmap_isempty(m->units_requiring_mounts_for));
1251 hashmap_free(m->units_requiring_mounts_for);
1252
1253 hashmap_free(m->uid_refs);
1254 hashmap_free(m->gid_refs);
1255
1256 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
1257 m->prefix[dt] = mfree(m->prefix[dt]);
1258
1259 return mfree(m);
1260 }
1261
1262 void manager_enumerate(Manager *m) {
1263 UnitType c;
1264
1265 assert(m);
1266
1267 /* Let's ask every type to load all units from disk/kernel
1268 * that it might know */
1269 for (c = 0; c < _UNIT_TYPE_MAX; c++) {
1270 if (!unit_type_supported(c)) {
1271 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
1272 continue;
1273 }
1274
1275 if (!unit_vtable[c]->enumerate)
1276 continue;
1277
1278 unit_vtable[c]->enumerate(m);
1279 }
1280
1281 manager_dispatch_load_queue(m);
1282 }
1283
1284 static void manager_coldplug(Manager *m) {
1285 Iterator i;
1286 Unit *u;
1287 char *k;
1288 int r;
1289
1290 assert(m);
1291
1292 /* Then, let's set up their initial state. */
1293 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1294
1295 /* ignore aliases */
1296 if (u->id != k)
1297 continue;
1298
1299 r = unit_coldplug(u);
1300 if (r < 0)
1301 log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id);
1302 }
1303 }
1304
1305 static void manager_build_unit_path_cache(Manager *m) {
1306 char **i;
1307 int r;
1308
1309 assert(m);
1310
1311 set_free_free(m->unit_path_cache);
1312
1313 m->unit_path_cache = set_new(&string_hash_ops);
1314 if (!m->unit_path_cache) {
1315 r = -ENOMEM;
1316 goto fail;
1317 }
1318
1319 /* This simply builds a list of files we know exist, so that
1320 * we don't always have to go to disk */
1321
1322 STRV_FOREACH(i, m->lookup_paths.search_path) {
1323 _cleanup_closedir_ DIR *d = NULL;
1324 struct dirent *de;
1325
1326 d = opendir(*i);
1327 if (!d) {
1328 if (errno != ENOENT)
1329 log_warning_errno(errno, "Failed to open directory %s, ignoring: %m", *i);
1330 continue;
1331 }
1332
1333 FOREACH_DIRENT(de, d, r = -errno; goto fail) {
1334 char *p;
1335
1336 p = strjoin(streq(*i, "/") ? "" : *i, "/", de->d_name);
1337 if (!p) {
1338 r = -ENOMEM;
1339 goto fail;
1340 }
1341
1342 r = set_consume(m->unit_path_cache, p);
1343 if (r < 0)
1344 goto fail;
1345 }
1346 }
1347
1348 return;
1349
1350 fail:
1351 log_warning_errno(r, "Failed to build unit path cache, proceeding without: %m");
1352 m->unit_path_cache = set_free_free(m->unit_path_cache);
1353 }
1354
1355 static void manager_distribute_fds(Manager *m, FDSet *fds) {
1356 Iterator i;
1357 Unit *u;
1358
1359 assert(m);
1360
1361 HASHMAP_FOREACH(u, m->units, i) {
1362
1363 if (fdset_size(fds) <= 0)
1364 break;
1365
1366 if (!UNIT_VTABLE(u)->distribute_fds)
1367 continue;
1368
1369 UNIT_VTABLE(u)->distribute_fds(u, fds);
1370 }
1371 }
1372
1373 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
1374 int r;
1375
1376 assert(m);
1377
1378 /* If we are running in test mode, we still want to run the generators,
1379 * but we should not touch the real generator directories. */
1380 r = lookup_paths_init(&m->lookup_paths, m->unit_file_scope,
1381 m->test_run_flags ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0,
1382 NULL);
1383 if (r < 0)
1384 return r;
1385
1386 r = manager_run_environment_generators(m);
1387 if (r < 0)
1388 return r;
1389
1390 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_START);
1391 r = manager_run_generators(m);
1392 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_FINISH);
1393 if (r < 0)
1394 return r;
1395
1396 /* If this is the first boot, and we are in the host system, then preset everything */
1397 if (m->first_boot > 0 &&
1398 MANAGER_IS_SYSTEM(m) &&
1399 !m->test_run_flags) {
1400
1401 r = unit_file_preset_all(UNIT_FILE_SYSTEM, 0, NULL, UNIT_FILE_PRESET_ENABLE_ONLY, NULL, 0);
1402 if (r < 0)
1403 log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r,
1404 "Failed to populate /etc with preset unit settings, ignoring: %m");
1405 else
1406 log_info("Populated /etc with preset unit settings.");
1407 }
1408
1409 lookup_paths_reduce(&m->lookup_paths);
1410 manager_build_unit_path_cache(m);
1411
1412 /* If we are going to deserialize, make sure that this is already
1413 * known during enumeration, hence we increase the counter here
1414 * already. */
1415 if (serialization)
1416 m->n_reloading++;
1417
1418 /* First, enumerate what we can from all config files */
1419 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_START);
1420 manager_enumerate(m);
1421 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_FINISH);
1422
1423 /* Second, deserialize if there is something to deserialize */
1424 if (serialization) {
1425 r = manager_deserialize(m, serialization, fds);
1426 if (r < 0)
1427 return log_error_errno(r, "Deserialization failed: %m");
1428 }
1429
1430 /* Any fds left? Find some unit which wants them. This is
1431 * useful to allow container managers to pass some file
1432 * descriptors to us pre-initialized. This enables
1433 * socket-based activation of entire containers. */
1434 manager_distribute_fds(m, fds);
1435
1436 /* We might have deserialized the notify fd, but if we didn't,
1437 * then let's create the notification socket now. */
1438 r = manager_setup_notify(m);
1439 if (r < 0)
1440 /* No sense to continue without notifications, our children would fail anyway. */
1441 return r;
1442
1443 r = manager_setup_cgroups_agent(m);
1444 if (r < 0)
1445 /* Likewise, no sense to continue without empty cgroup notifications. */
1446 return r;
1447
1448 r = manager_setup_user_lookup_fd(m);
1449 if (r < 0)
1450 /* This shouldn't fail, except if things are really broken. */
1451 return r;
1452
1453 /* Let's connect to the bus now. */
1454 (void) manager_connect_bus(m, !!serialization);
1455
1456 (void) bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed);
1457 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
1458
1459 /* Third, fire things up! */
1460 manager_coldplug(m);
1461
1462 /* Release any dynamic users no longer referenced */
1463 dynamic_user_vacuum(m, true);
1464
1465 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
1466 manager_vacuum_uid_refs(m);
1467 manager_vacuum_gid_refs(m);
1468
1469 if (serialization) {
1470 assert(m->n_reloading > 0);
1471 m->n_reloading--;
1472
1473 /* Let's wait for the UnitNew/JobNew messages being
1474 * sent, before we notify that the reload is
1475 * finished */
1476 m->send_reloading_done = true;
1477 }
1478
1479 return 0;
1480 }
1481
1482 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, sd_bus_error *e, Job **_ret) {
1483 int r;
1484 Transaction *tr;
1485
1486 assert(m);
1487 assert(type < _JOB_TYPE_MAX);
1488 assert(unit);
1489 assert(mode < _JOB_MODE_MAX);
1490
1491 if (mode == JOB_ISOLATE && type != JOB_START)
1492 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
1493
1494 if (mode == JOB_ISOLATE && !unit->allow_isolate)
1495 return sd_bus_error_setf(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1496
1497 log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
1498
1499 type = job_type_collapse(type, unit);
1500
1501 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
1502 if (!tr)
1503 return -ENOMEM;
1504
1505 r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, false,
1506 IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS),
1507 mode == JOB_IGNORE_DEPENDENCIES, e);
1508 if (r < 0)
1509 goto tr_abort;
1510
1511 if (mode == JOB_ISOLATE) {
1512 r = transaction_add_isolate_jobs(tr, m);
1513 if (r < 0)
1514 goto tr_abort;
1515 }
1516
1517 r = transaction_activate(tr, m, mode, e);
1518 if (r < 0)
1519 goto tr_abort;
1520
1521 log_unit_debug(unit,
1522 "Enqueued job %s/%s as %u", unit->id,
1523 job_type_to_string(type), (unsigned) tr->anchor_job->id);
1524
1525 if (_ret)
1526 *_ret = tr->anchor_job;
1527
1528 transaction_free(tr);
1529 return 0;
1530
1531 tr_abort:
1532 transaction_abort(tr);
1533 transaction_free(tr);
1534 return r;
1535 }
1536
1537 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, sd_bus_error *e, Job **ret) {
1538 Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */
1539 int r;
1540
1541 assert(m);
1542 assert(type < _JOB_TYPE_MAX);
1543 assert(name);
1544 assert(mode < _JOB_MODE_MAX);
1545
1546 r = manager_load_unit(m, name, NULL, NULL, &unit);
1547 if (r < 0)
1548 return r;
1549 assert(unit);
1550
1551 return manager_add_job(m, type, unit, mode, e, ret);
1552 }
1553
1554 int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Job **ret) {
1555 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1556 int r;
1557
1558 assert(m);
1559 assert(type < _JOB_TYPE_MAX);
1560 assert(name);
1561 assert(mode < _JOB_MODE_MAX);
1562
1563 r = manager_add_job_by_name(m, type, name, mode, &error, ret);
1564 if (r < 0)
1565 return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r));
1566
1567 return r;
1568 }
1569
1570 int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) {
1571 int r;
1572 Transaction *tr;
1573
1574 assert(m);
1575 assert(unit);
1576 assert(mode < _JOB_MODE_MAX);
1577 assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */
1578
1579 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
1580 if (!tr)
1581 return -ENOMEM;
1582
1583 /* We need an anchor job */
1584 r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, false, false, true, true, e);
1585 if (r < 0)
1586 goto tr_abort;
1587
1588 /* Failure in adding individual dependencies is ignored, so this always succeeds. */
1589 transaction_add_propagate_reload_jobs(tr, unit, tr->anchor_job, mode == JOB_IGNORE_DEPENDENCIES, e);
1590
1591 r = transaction_activate(tr, m, mode, e);
1592 if (r < 0)
1593 goto tr_abort;
1594
1595 transaction_free(tr);
1596 return 0;
1597
1598 tr_abort:
1599 transaction_abort(tr);
1600 transaction_free(tr);
1601 return r;
1602 }
1603
1604 Job *manager_get_job(Manager *m, uint32_t id) {
1605 assert(m);
1606
1607 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1608 }
1609
1610 Unit *manager_get_unit(Manager *m, const char *name) {
1611 assert(m);
1612 assert(name);
1613
1614 return hashmap_get(m->units, name);
1615 }
1616
1617 unsigned manager_dispatch_load_queue(Manager *m) {
1618 Unit *u;
1619 unsigned n = 0;
1620
1621 assert(m);
1622
1623 /* Make sure we are not run recursively */
1624 if (m->dispatching_load_queue)
1625 return 0;
1626
1627 m->dispatching_load_queue = true;
1628
1629 /* Dispatches the load queue. Takes a unit from the queue and
1630 * tries to load its data until the queue is empty */
1631
1632 while ((u = m->load_queue)) {
1633 assert(u->in_load_queue);
1634
1635 unit_load(u);
1636 n++;
1637 }
1638
1639 m->dispatching_load_queue = false;
1640 return n;
1641 }
1642
1643 int manager_load_unit_prepare(
1644 Manager *m,
1645 const char *name,
1646 const char *path,
1647 sd_bus_error *e,
1648 Unit **_ret) {
1649
1650 Unit *ret;
1651 UnitType t;
1652 int r;
1653
1654 assert(m);
1655 assert(name || path);
1656 assert(_ret);
1657
1658 /* This will prepare the unit for loading, but not actually
1659 * load anything from disk. */
1660
1661 if (path && !is_path(path))
1662 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);
1663
1664 if (!name)
1665 name = basename(path);
1666
1667 t = unit_name_to_type(name);
1668
1669 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
1670 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
1671 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);
1672
1673 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
1674 }
1675
1676 ret = manager_get_unit(m, name);
1677 if (ret) {
1678 *_ret = ret;
1679 return 1;
1680 }
1681
1682 ret = unit_new(m, unit_vtable[t]->object_size);
1683 if (!ret)
1684 return -ENOMEM;
1685
1686 if (path) {
1687 ret->fragment_path = strdup(path);
1688 if (!ret->fragment_path) {
1689 unit_free(ret);
1690 return -ENOMEM;
1691 }
1692 }
1693
1694 r = unit_add_name(ret, name);
1695 if (r < 0) {
1696 unit_free(ret);
1697 return r;
1698 }
1699
1700 unit_add_to_load_queue(ret);
1701 unit_add_to_dbus_queue(ret);
1702 unit_add_to_gc_queue(ret);
1703
1704 *_ret = ret;
1705
1706 return 0;
1707 }
1708
1709 int manager_load_unit(
1710 Manager *m,
1711 const char *name,
1712 const char *path,
1713 sd_bus_error *e,
1714 Unit **_ret) {
1715
1716 int r;
1717
1718 assert(m);
1719 assert(_ret);
1720
1721 /* This will load the service information files, but not actually
1722 * start any services or anything. */
1723
1724 r = manager_load_unit_prepare(m, name, path, e, _ret);
1725 if (r != 0)
1726 return r;
1727
1728 manager_dispatch_load_queue(m);
1729
1730 *_ret = unit_follow_merge(*_ret);
1731
1732 return 0;
1733 }
1734
1735 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1736 Iterator i;
1737 Job *j;
1738
1739 assert(s);
1740 assert(f);
1741
1742 HASHMAP_FOREACH(j, s->jobs, i)
1743 job_dump(j, f, prefix);
1744 }
1745
1746 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1747 Iterator i;
1748 Unit *u;
1749 const char *t;
1750
1751 assert(s);
1752 assert(f);
1753
1754 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1755 if (u->id == t)
1756 unit_dump(u, f, prefix);
1757 }
1758
1759 void manager_dump(Manager *m, FILE *f, const char *prefix) {
1760 ManagerTimestamp q;
1761
1762 assert(m);
1763 assert(f);
1764
1765 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
1766 char buf[FORMAT_TIMESTAMP_MAX];
1767
1768 if (dual_timestamp_is_set(m->timestamps + q))
1769 fprintf(f, "%sTimestamp %s: %s\n",
1770 strempty(prefix),
1771 manager_timestamp_to_string(q),
1772 format_timestamp(buf, sizeof(buf), m->timestamps[q].realtime));
1773 }
1774
1775 manager_dump_units(m, f, prefix);
1776 manager_dump_jobs(m, f, prefix);
1777 }
1778
1779 int manager_get_dump_string(Manager *m, char **ret) {
1780 _cleanup_free_ char *dump = NULL;
1781 _cleanup_fclose_ FILE *f = NULL;
1782 size_t size;
1783 int r;
1784
1785 assert(m);
1786 assert(ret);
1787
1788 f = open_memstream(&dump, &size);
1789 if (!f)
1790 return -errno;
1791
1792 (void) __fsetlocking(f, FSETLOCKING_BYCALLER);
1793
1794 manager_dump(m, f, NULL);
1795
1796 r = fflush_and_check(f);
1797 if (r < 0)
1798 return r;
1799
1800 f = safe_fclose(f);
1801
1802 *ret = dump;
1803 dump = NULL;
1804
1805 return 0;
1806 }
1807
1808 void manager_clear_jobs(Manager *m) {
1809 Job *j;
1810
1811 assert(m);
1812
1813 while ((j = hashmap_first(m->jobs)))
1814 /* No need to recurse. We're cancelling all jobs. */
1815 job_finish_and_invalidate(j, JOB_CANCELED, false, false);
1816 }
1817
1818 static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) {
1819 Manager *m = userdata;
1820 Job *j;
1821
1822 assert(source);
1823 assert(m);
1824
1825 while ((j = m->run_queue)) {
1826 assert(j->installed);
1827 assert(j->in_run_queue);
1828
1829 job_run_and_invalidate(j);
1830 }
1831
1832 if (m->n_running_jobs > 0)
1833 manager_watch_jobs_in_progress(m);
1834
1835 if (m->n_on_console > 0)
1836 manager_watch_idle_pipe(m);
1837
1838 return 1;
1839 }
1840
1841 static unsigned manager_dispatch_dbus_queue(Manager *m) {
1842 Job *j;
1843 Unit *u;
1844 unsigned n = 0;
1845
1846 assert(m);
1847
1848 if (m->dispatching_dbus_queue)
1849 return 0;
1850
1851 m->dispatching_dbus_queue = true;
1852
1853 while ((u = m->dbus_unit_queue)) {
1854 assert(u->in_dbus_queue);
1855
1856 bus_unit_send_change_signal(u);
1857 n++;
1858 }
1859
1860 while ((j = m->dbus_job_queue)) {
1861 assert(j->in_dbus_queue);
1862
1863 bus_job_send_change_signal(j);
1864 n++;
1865 }
1866
1867 m->dispatching_dbus_queue = false;
1868
1869 if (m->send_reloading_done) {
1870 m->send_reloading_done = false;
1871
1872 bus_manager_send_reloading(m, false);
1873 }
1874
1875 if (m->queued_message)
1876 bus_send_queued_message(m);
1877
1878 return n;
1879 }
1880
1881 static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
1882 Manager *m = userdata;
1883 char buf[PATH_MAX+1];
1884 ssize_t n;
1885
1886 n = recv(fd, buf, sizeof(buf), 0);
1887 if (n < 0)
1888 return log_error_errno(errno, "Failed to read cgroups agent message: %m");
1889 if (n == 0) {
1890 log_error("Got zero-length cgroups agent message, ignoring.");
1891 return 0;
1892 }
1893 if ((size_t) n >= sizeof(buf)) {
1894 log_error("Got overly long cgroups agent message, ignoring.");
1895 return 0;
1896 }
1897
1898 if (memchr(buf, 0, n)) {
1899 log_error("Got cgroups agent message with embedded NUL byte, ignoring.");
1900 return 0;
1901 }
1902 buf[n] = 0;
1903
1904 manager_notify_cgroup_empty(m, buf);
1905 (void) bus_forward_agent_released(m, buf);
1906
1907 return 0;
1908 }
1909
1910 static void manager_invoke_notify_message(
1911 Manager *m,
1912 Unit *u,
1913 const struct ucred *ucred,
1914 const char *buf,
1915 FDSet *fds) {
1916
1917 assert(m);
1918 assert(u);
1919 assert(ucred);
1920 assert(buf);
1921
1922 if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */
1923 return;
1924 u->notifygen = m->notifygen;
1925
1926 if (UNIT_VTABLE(u)->notify_message) {
1927 _cleanup_strv_free_ char **tags = NULL;
1928
1929 tags = strv_split(buf, NEWLINE);
1930 if (!tags) {
1931 log_oom();
1932 return;
1933 }
1934
1935 UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds);
1936
1937 } else if (DEBUG_LOGGING) {
1938 _cleanup_free_ char *x = NULL, *y = NULL;
1939
1940 x = ellipsize(buf, 20, 90);
1941 if (x)
1942 y = cescape(x);
1943
1944 log_unit_debug(u, "Got notification message \"%s\", ignoring.", strnull(y));
1945 }
1946 }
1947
1948 static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
1949
1950 _cleanup_fdset_free_ FDSet *fds = NULL;
1951 Manager *m = userdata;
1952 char buf[NOTIFY_BUFFER_MAX+1];
1953 struct iovec iovec = {
1954 .iov_base = buf,
1955 .iov_len = sizeof(buf)-1,
1956 };
1957 union {
1958 struct cmsghdr cmsghdr;
1959 uint8_t buf[CMSG_SPACE(sizeof(struct ucred)) +
1960 CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)];
1961 } control = {};
1962 struct msghdr msghdr = {
1963 .msg_iov = &iovec,
1964 .msg_iovlen = 1,
1965 .msg_control = &control,
1966 .msg_controllen = sizeof(control),
1967 };
1968
1969 struct cmsghdr *cmsg;
1970 struct ucred *ucred = NULL;
1971 _cleanup_free_ Unit **array_copy = NULL;
1972 Unit *u1, *u2, **array;
1973 int r, *fd_array = NULL;
1974 unsigned n_fds = 0;
1975 bool found = false;
1976 ssize_t n;
1977
1978 assert(m);
1979 assert(m->notify_fd == fd);
1980
1981 if (revents != EPOLLIN) {
1982 log_warning("Got unexpected poll event for notify fd.");
1983 return 0;
1984 }
1985
1986 n = recvmsg(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
1987 if (n < 0) {
1988 if (IN_SET(errno, EAGAIN, EINTR))
1989 return 0; /* Spurious wakeup, try again */
1990
1991 /* If this is any other, real error, then let's stop processing this socket. This of course means we
1992 * won't take notification messages anymore, but that's still better than busy looping around this:
1993 * being woken up over and over again but being unable to actually read the message off the socket. */
1994 return log_error_errno(errno, "Failed to receive notification message: %m");
1995 }
1996
1997 CMSG_FOREACH(cmsg, &msghdr) {
1998 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1999
2000 fd_array = (int*) CMSG_DATA(cmsg);
2001 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
2002
2003 } else if (cmsg->cmsg_level == SOL_SOCKET &&
2004 cmsg->cmsg_type == SCM_CREDENTIALS &&
2005 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
2006
2007 ucred = (struct ucred*) CMSG_DATA(cmsg);
2008 }
2009 }
2010
2011 if (n_fds > 0) {
2012 assert(fd_array);
2013
2014 r = fdset_new_array(&fds, fd_array, n_fds);
2015 if (r < 0) {
2016 close_many(fd_array, n_fds);
2017 log_oom();
2018 return 0;
2019 }
2020 }
2021
2022 if (!ucred || !pid_is_valid(ucred->pid)) {
2023 log_warning("Received notify message without valid credentials. Ignoring.");
2024 return 0;
2025 }
2026
2027 if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) {
2028 log_warning("Received notify message exceeded maximum size. Ignoring.");
2029 return 0;
2030 }
2031
2032 /* As an extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes. We permit one
2033 * trailing NUL byte in the message, but don't expect it. */
2034 if (n > 1 && memchr(buf, 0, n-1)) {
2035 log_warning("Received notify message with embedded NUL bytes. Ignoring.");
2036 return 0;
2037 }
2038
2039 /* Make sure it's NUL-terminated. */
2040 buf[n] = 0;
2041
2042 /* Increase the generation counter used for filtering out duplicate unit invocations. */
2043 m->notifygen++;
2044
2045 /* Notify every unit that might be interested, which might be multiple. */
2046 u1 = manager_get_unit_by_pid_cgroup(m, ucred->pid);
2047 u2 = hashmap_get(m->watch_pids, PID_TO_PTR(ucred->pid));
2048 array = hashmap_get(m->watch_pids, PID_TO_PTR(-ucred->pid));
2049 if (array) {
2050 size_t k = 0;
2051
2052 while (array[k])
2053 k++;
2054
2055 array_copy = newdup(Unit*, array, k+1);
2056 if (!array_copy)
2057 log_oom();
2058 }
2059 /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle duplicate units
2060 * and make sure we only invoke each unit's handler once. */
2061 if (u1) {
2062 manager_invoke_notify_message(m, u1, ucred, buf, fds);
2063 found = true;
2064 }
2065 if (u2) {
2066 manager_invoke_notify_message(m, u2, ucred, buf, fds);
2067 found = true;
2068 }
2069 if (array_copy)
2070 for (size_t i = 0; array_copy[i]; i++) {
2071 manager_invoke_notify_message(m, array_copy[i], ucred, buf, fds);
2072 found = true;
2073 }
2074
2075 if (!found)
2076 log_warning("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid);
2077
2078 if (fdset_size(fds) > 0)
2079 log_warning("Got extra auxiliary fds with notification message, closing them.");
2080
2081 return 0;
2082 }
2083
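/*
 * For reference, the datagrams parsed above are what a service sends to $NOTIFY_SOCKET, usually via
 * sd_notify() from libsystemd rather than by hand. A minimal client-side sketch, illustrative only
 * and assuming the unit's NotifyAccess= setting permits the sending process:
 *
 *     #include <systemd/sd-daemon.h>
 *
 *     int main(void) {
 *             // Newline-separated tags, exactly what strv_split(buf, NEWLINE) above splits up
 *             (void) sd_notify(0, "READY=1\n"
 *                                 "STATUS=Processing requests");
 *             return 0;
 *     }
 */
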
2084 static void manager_invoke_sigchld_event(
2085 Manager *m,
2086 Unit *u,
2087 const siginfo_t *si) {
2088
2089 assert(m);
2090 assert(u);
2091 assert(si);
2092
2093 /* Already invoked the handler of this unit in this iteration? Then don't process this again */
2094 if (u->sigchldgen == m->sigchldgen)
2095 return;
2096 u->sigchldgen = m->sigchldgen;
2097
2098 log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id);
2099 unit_unwatch_pid(u, si->si_pid);
2100
2101 if (UNIT_VTABLE(u)->sigchld_event)
2102 UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status);
2103 }
2104
2105 static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) {
2106 Manager *m = userdata;
2107 siginfo_t si = {};
2108 int r;
2109
2110 assert(source);
2111 assert(m);
2112
2113 /* First we call waitid() for a PID and do not reap the zombie. That way we can still access /proc/$PID for it
2114 * while it is a zombie. */
2115
2116 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2117
2118 if (errno == ECHILD)
2119 goto turn_off;
2120
2121 log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m");
2122 return 0;
2123 }
2124
2125 if (si.si_pid <= 0)
2126 goto turn_off;
2127
2128 if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) {
2129 _cleanup_free_ Unit **array_copy = NULL;
2130 _cleanup_free_ char *name = NULL;
2131 Unit *u1, *u2, **array;
2132
2133 (void) get_process_comm(si.si_pid, &name);
2134
2135 log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)",
2136 si.si_pid, strna(name),
2137 sigchld_code_to_string(si.si_code),
2138 si.si_status,
2139 strna(si.si_code == CLD_EXITED
2140 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2141 : signal_to_string(si.si_status)));
2142
2143 /* Increase the generation counter used for filtering out duplicate unit invocations */
2144 m->sigchldgen++;
2145
2146 /* And now figure out the unit this belongs to, it might be multiple... */
2147 u1 = manager_get_unit_by_pid_cgroup(m, si.si_pid);
2148 u2 = hashmap_get(m->watch_pids, PID_TO_PTR(si.si_pid));
2149 array = hashmap_get(m->watch_pids, PID_TO_PTR(-si.si_pid));
2150 if (array) {
2151 size_t n = 0;
2152
2153 /* Count how many entries the array has */
2154 while (array[n])
2155 n++;
2156
2157 /* Make a copy of the array so that we don't trip up on the array changing beneath us */
2158 array_copy = newdup(Unit*, array, n+1);
2159 if (!array_copy)
2160 log_oom();
2161 }
2162
2163 /* Finally, execute them all. Note that u1, u2 and the array might contain duplicates, but
2164 * that's fine, manager_invoke_sigchld_event() will ensure we only invoke the handlers once for
2165 * each iteration. */
2166 if (u1)
2167 manager_invoke_sigchld_event(m, u1, &si);
2168 if (u2)
2169 manager_invoke_sigchld_event(m, u2, &si);
2170 if (array_copy)
2171 for (size_t i = 0; array_copy[i]; i++)
2172 manager_invoke_sigchld_event(m, array_copy[i], &si);
2173 }
2174
2175 /* And now, we actually reap the zombie. */
2176 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2177 log_error_errno(errno, "Failed to dequeue child, ignoring: %m");
2178 return 0;
2179 }
2180
2181 return 0;
2182
2183 turn_off:
2184 /* All children processed for now, turn off event source */
2185
2186 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
2187 if (r < 0)
2188 return log_error_errno(r, "Failed to disable SIGCHLD event source: %m");
2189
2190 return 0;
2191 }
2192
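/*
 * The peek-then-reap pattern used above, shown in isolation (illustrative sketch only): the first
 * waitid() call passes WNOWAIT so the child stays a zombie and /proc/$PID remains accessible, the
 * second call actually reaps it.
 *
 *     #include <sys/wait.h>
 *
 *     static void reap_one(void) {
 *             siginfo_t si = {};
 *
 *             // Peek: leaves the child waitable
 *             if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0 || si.si_pid <= 0)
 *                     return;
 *
 *             // ... inspect /proc/<si.si_pid> here while the process is still a zombie ...
 *
 *             // Reap: now the zombie is gone
 *             (void) waitid(P_PID, si.si_pid, &si, WEXITED);
 *     }
 */
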
2193 static void manager_start_target(Manager *m, const char *name, JobMode mode) {
2194 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2195 int r;
2196
2197 log_debug("Activating special unit %s", name);
2198
2199 r = manager_add_job_by_name(m, JOB_START, name, mode, &error, NULL);
2200 if (r < 0)
2201 log_error("Failed to enqueue %s job: %s", name, bus_error_message(&error, r));
2202 }
2203
2204 static void manager_handle_ctrl_alt_del(Manager *m) {
2205 /* If the user presses C-A-D more than
2206 * 7 times within 2s, we reboot/shutdown immediately,
2207 * unless it was disabled in system.conf */
2208
2209 if (ratelimit_test(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE)
2210 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY);
2211 else
2212 emergency_action(m, m->cad_burst_action, NULL,
2213 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
2214 }
2215
2216 static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2217 Manager *m = userdata;
2218 ssize_t n;
2219 struct signalfd_siginfo sfsi;
2220 int r;
2221
2222 assert(m);
2223 assert(m->signal_fd == fd);
2224
2225 if (revents != EPOLLIN) {
2226 log_warning("Got unexpected events from signal file descriptor.");
2227 return 0;
2228 }
2229
2230 n = read(m->signal_fd, &sfsi, sizeof(sfsi));
2231 if (n != sizeof(sfsi)) {
2232 if (n >= 0) {
2233 log_warning("Truncated read from signal fd (%zu bytes), ignoring!", n);
2234 return 0;
2235 }
2236
2237 if (IN_SET(errno, EINTR, EAGAIN))
2238 return 0;
2239
2240 /* We return an error here, which will kill this handler,
2241 * to avoid a busy loop on read error. */
2242 return log_error_errno(errno, "Reading from signal fd failed: %m");
2243 }
2244
2245 log_received_signal(sfsi.ssi_signo == SIGCHLD ||
2246 (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m))
2247 ? LOG_DEBUG : LOG_INFO,
2248 &sfsi);
2249
2250 switch (sfsi.ssi_signo) {
2251
2252 case SIGCHLD:
2253 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2254 if (r < 0)
2255 log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m");
2256
2257 break;
2258
2259 case SIGTERM:
2260 if (MANAGER_IS_SYSTEM(m)) {
2261 /* This is for compatibility with the
2262 * original sysvinit */
2263 r = verify_run_space_and_log("Refusing to reexecute");
2264 if (r >= 0)
2265 m->exit_code = MANAGER_REEXECUTE;
2266 break;
2267 }
2268
2269 _fallthrough_;
2270 case SIGINT:
2271 if (MANAGER_IS_SYSTEM(m))
2272 manager_handle_ctrl_alt_del(m);
2273 else
2274 manager_start_target(m, SPECIAL_EXIT_TARGET,
2275 JOB_REPLACE_IRREVERSIBLY);
2276 break;
2277
2278 case SIGWINCH:
2279 if (MANAGER_IS_SYSTEM(m))
2280 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2281
2282 /* This is a nop on non-init */
2283 break;
2284
2285 case SIGPWR:
2286 if (MANAGER_IS_SYSTEM(m))
2287 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2288
2289 /* This is a nop on non-init */
2290 break;
2291
2292 case SIGUSR1: {
2293 Unit *u;
2294
2295 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
2296
2297 if (!u || UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
2298 log_info("Trying to reconnect to bus...");
2299 bus_init(m, true);
2300 }
2301
2302 if (!u || !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u))) {
2303 log_info("Loading D-Bus service...");
2304 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2305 }
2306
2307 break;
2308 }
2309
2310 case SIGUSR2: {
2311 _cleanup_free_ char *dump = NULL;
2312
2313 r = manager_get_dump_string(m, &dump);
2314 if (r < 0) {
2315 log_warning_errno(r, "Failed to acquire manager dump: %m");
2316 break;
2317 }
2318
2319 log_dump(LOG_INFO, dump);
2320 break;
2321 }
2322
2323 case SIGHUP:
2324 r = verify_run_space_and_log("Refusing to reload");
2325 if (r >= 0)
2326 m->exit_code = MANAGER_RELOAD;
2327 break;
2328
2329 default: {
2330
2331 /* Starting SIGRTMIN+0 */
2332 static const struct {
2333 const char *target;
2334 JobMode mode;
2335 } target_table[] = {
2336 [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE },
2337 [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE },
2338 [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE },
2339 [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY },
2340 [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY },
2341 [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
2342 [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY },
2343 };
2344
2345 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2346 static const ManagerExitCode code_table[] = {
2347 [0] = MANAGER_HALT,
2348 [1] = MANAGER_POWEROFF,
2349 [2] = MANAGER_REBOOT,
2350 [3] = MANAGER_KEXEC,
2351 };
2352
2353 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2354 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2355 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
2356 manager_start_target(m, target_table[idx].target,
2357 target_table[idx].mode);
2358 break;
2359 }
2360
2361 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2362 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2363 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2364 break;
2365 }
2366
2367 switch (sfsi.ssi_signo - SIGRTMIN) {
2368
2369 case 20:
2370 manager_set_show_status(m, SHOW_STATUS_YES);
2371 break;
2372
2373 case 21:
2374 manager_set_show_status(m, SHOW_STATUS_NO);
2375 break;
2376
2377 case 22:
2378 log_set_max_level(LOG_DEBUG);
2379 log_info("Setting log level to debug.");
2380 break;
2381
2382 case 23:
2383 log_set_max_level(LOG_INFO);
2384 log_info("Setting log level to info.");
2385 break;
2386
2387 case 24:
2388 if (MANAGER_IS_USER(m)) {
2389 m->exit_code = MANAGER_EXIT;
2390 return 0;
2391 }
2392
2393 /* This is a nop on init */
2394 break;
2395
2396 case 26:
2397 case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
2398 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
2399 log_notice("Setting log target to journal-or-kmsg.");
2400 break;
2401
2402 case 27:
2403 log_set_target(LOG_TARGET_CONSOLE);
2404 log_notice("Setting log target to console.");
2405 break;
2406
2407 case 28:
2408 log_set_target(LOG_TARGET_KMSG);
2409 log_notice("Setting log target to kmsg.");
2410 break;
2411
2412 default:
2413 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
2414 }
2415 }}
2416
2417 return 0;
2418 }
2419
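/*
 * The SIGRTMIN+n handling above is driven simply by delivering the corresponding real-time signal to
 * the manager process. Illustrative sketch only (sending signals to PID 1 requires privileges);
 * SIGRTMIN+22 maps to the "switch log level to debug" case in the switch above:
 *
 *     #include <signal.h>
 *
 *     int main(void) {
 *             return kill(1, SIGRTMIN + 22) < 0 ? 1 : 0;
 *     }
 */
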
2420 static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2421 Manager *m = userdata;
2422 Iterator i;
2423 Unit *u;
2424
2425 assert(m);
2426 assert(m->time_change_fd == fd);
2427
2428 log_struct(LOG_DEBUG,
2429 "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR,
2430 LOG_MESSAGE("Time has been changed"),
2431 NULL);
2432
2433 /* Restart the watch */
2434 m->time_change_event_source = sd_event_source_unref(m->time_change_event_source);
2435 m->time_change_fd = safe_close(m->time_change_fd);
2436
2437 manager_setup_time_change(m);
2438
2439 HASHMAP_FOREACH(u, m->units, i)
2440 if (UNIT_VTABLE(u)->time_change)
2441 UNIT_VTABLE(u)->time_change(u);
2442
2443 return 0;
2444 }
2445
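/*
 * A descriptor that wakes up whenever the system clock jumps can be obtained with a CLOCK_REALTIME
 * timerfd armed far in the future with TFD_TIMER_CANCEL_ON_SET: any clock change then makes read()
 * fail with ECANCELED. Illustrative sketch only of that mechanism, not a copy of
 * manager_setup_time_change():
 *
 *     #include <sys/timerfd.h>
 *     #include <unistd.h>
 *
 *     static int open_time_change_fd(void) {
 *             // Arbitrary far-future absolute expiry; only the CANCEL_ON_SET behaviour matters here
 *             struct itimerspec its = { .it_value.tv_sec = (time_t) 0x7fffffff };
 *             int fd;
 *
 *             fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
 *             if (fd < 0)
 *                     return -1;
 *
 *             if (timerfd_settime(fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
 *                     close(fd);
 *                     return -1;
 *             }
 *
 *             return fd; // polls readable and read() fails with ECANCELED once the clock changes
 *     }
 */
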
2446 static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2447 Manager *m = userdata;
2448
2449 assert(m);
2450 assert(m->idle_pipe[2] == fd);
2451
2452 /* There's at least one Type=idle child that just gave up on us waiting for the boot process to complete. Let's
2453 * now turn off any further console output if there's at least one service that needs console access, so that
2454 * from now on our own output should not spill into that service's output anymore. After all, we support
2455 * Type=idle only to beautify console output and it generally is set on services that want to own the console
2456 * exclusively without our interference. */
2457 m->no_console_output = m->n_on_console > 0;
2458
2459 /* Acknowledge the child's request, and let all other children know too that they shouldn't wait any longer
2460 * by closing the pipes towards them, which is what they are waiting for. */
2461 manager_close_idle_pipe(m);
2462
2463 return 0;
2464 }
2465
2466 static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) {
2467 Manager *m = userdata;
2468 int r;
2469 uint64_t next;
2470
2471 assert(m);
2472 assert(source);
2473
2474 manager_print_jobs_in_progress(m);
2475
2476 next = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_PERIOD_USEC;
2477 r = sd_event_source_set_time(source, next);
2478 if (r < 0)
2479 return r;
2480
2481 return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT);
2482 }
2483
2484 int manager_loop(Manager *m) {
2485 int r;
2486
2487 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2488
2489 assert(m);
2490 m->exit_code = MANAGER_OK;
2491
2492 /* Release the path cache */
2493 m->unit_path_cache = set_free_free(m->unit_path_cache);
2494
2495 manager_check_finished(m);
2496
2497 /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
2498 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2499 if (r < 0)
2500 return log_error_errno(r, "Failed to enable SIGCHLD event source: %m");
2501
2502 while (m->exit_code == MANAGER_OK) {
2503 usec_t wait_usec;
2504
2505 if (m->runtime_watchdog > 0 && m->runtime_watchdog != USEC_INFINITY && MANAGER_IS_SYSTEM(m))
2506 watchdog_ping();
2507
2508 if (!ratelimit_test(&rl)) {
2509 /* Yay, something is going seriously wrong, pause a little */
2510 log_warning("Looping too fast. Throttling execution a little.");
2511 sleep(1);
2512 }
2513
2514 if (manager_dispatch_load_queue(m) > 0)
2515 continue;
2516
2517 if (manager_dispatch_gc_job_queue(m) > 0)
2518 continue;
2519
2520 if (manager_dispatch_gc_unit_queue(m) > 0)
2521 continue;
2522
2523 if (manager_dispatch_cleanup_queue(m) > 0)
2524 continue;
2525
2526 if (manager_dispatch_cgroup_realize_queue(m) > 0)
2527 continue;
2528
2529 if (manager_dispatch_dbus_queue(m) > 0)
2530 continue;
2531
2532 /* Sleep for half the watchdog time */
2533 if (m->runtime_watchdog > 0 && m->runtime_watchdog != USEC_INFINITY && MANAGER_IS_SYSTEM(m)) {
2534 wait_usec = m->runtime_watchdog / 2;
2535 if (wait_usec <= 0)
2536 wait_usec = 1;
2537 } else
2538 wait_usec = USEC_INFINITY;
2539
2540 r = sd_event_run(m->event, wait_usec);
2541 if (r < 0)
2542 return log_error_errno(r, "Failed to run event loop: %m");
2543 }
2544
2545 return m->exit_code;
2546 }
2547
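/*
 * Note that manager_loop() drives sd_event manually with sd_event_run() so that it can interleave
 * its own work queues between event-loop iterations. A plain consumer of the same library with no
 * such queues would normally just hand control to sd_event_loop(). Illustrative sketch only:
 *
 *     #include <systemd/sd-event.h>
 *
 *     int main(void) {
 *             sd_event *e = NULL;
 *
 *             if (sd_event_default(&e) < 0)
 *                     return 1;
 *
 *             // ... add IO/timer/signal event sources here ...
 *
 *             (void) sd_event_loop(e); // runs until sd_event_exit() is called
 *             sd_event_unref(e);
 *             return 0;
 *     }
 */
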
2548 int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
2549 _cleanup_free_ char *n = NULL;
2550 sd_id128_t invocation_id;
2551 Unit *u;
2552 int r;
2553
2554 assert(m);
2555 assert(s);
2556 assert(_u);
2557
2558 r = unit_name_from_dbus_path(s, &n);
2559 if (r < 0)
2560 return r;
2561
2562 /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128bit ID then we use it
2563 * as invocation ID. */
2564 r = sd_id128_from_string(n, &invocation_id);
2565 if (r >= 0) {
2566 u = hashmap_get(m->units_by_invocation_id, &invocation_id);
2567 if (u) {
2568 *_u = u;
2569 return 0;
2570 }
2571
2572 return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID, "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.", SD_ID128_FORMAT_VAL(invocation_id));
2573 }
2574
2575 /* If this didn't work, we check if this is a unit name */
2576 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
2577 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is neither a valid invocation ID nor unit name.", n);
2578
2579 r = manager_load_unit(m, n, NULL, e, &u);
2580 if (r < 0)
2581 return r;
2582
2583 *_u = u;
2584 return 0;
2585 }
2586
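/*
 * For reference, both path forms accepted above look roughly like the following (the object path
 * suffix uses the usual D-Bus bus-label escaping, where bytes outside [A-Za-z0-9] become "_xx" in
 * hex; the 128bit ID below is a hypothetical example):
 *
 *     /org/freedesktop/systemd1/unit/dbus_2eservice                     -> unit "dbus.service"
 *     /org/freedesktop/systemd1/unit/d49e2450b0b247e4bd9392c8173b29d4   -> looked up by invocation ID
 */
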
2587 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2588 const char *p;
2589 unsigned id;
2590 Job *j;
2591 int r;
2592
2593 assert(m);
2594 assert(s);
2595 assert(_j);
2596
2597 p = startswith(s, "/org/freedesktop/systemd1/job/");
2598 if (!p)
2599 return -EINVAL;
2600
2601 r = safe_atou(p, &id);
2602 if (r < 0)
2603 return r;
2604
2605 j = manager_get_job(m, id);
2606 if (!j)
2607 return -ENOENT;
2608
2609 *_j = j;
2610
2611 return 0;
2612 }
2613
2614 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2615
2616 #if HAVE_AUDIT
2617 _cleanup_free_ char *p = NULL;
2618 const char *msg;
2619 int audit_fd, r;
2620
2621 if (!MANAGER_IS_SYSTEM(m))
2622 return;
2623
2624 audit_fd = get_audit_fd();
2625 if (audit_fd < 0)
2626 return;
2627
2628 /* Don't generate audit events if the service was already
2629 * started and we're just deserializing */
2630 if (MANAGER_IS_RELOADING(m))
2631 return;
2632
2633 if (u->type != UNIT_SERVICE)
2634 return;
2635
2636 r = unit_name_to_prefix_and_instance(u->id, &p);
2637 if (r < 0) {
2638 log_error_errno(r, "Failed to extract prefix and instance of unit name: %m");
2639 return;
2640 }
2641
2642 msg = strjoina("unit=", p);
2643 if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
2644 if (errno == EPERM)
2645 /* We aren't allowed to send audit messages?
2646 * Then let's not retry again. */
2647 close_audit_fd();
2648 else
2649 log_warning_errno(errno, "Failed to send audit message: %m");
2650 }
2651 #endif
2652
2653 }
2654
2655 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2656 static const union sockaddr_union sa = PLYMOUTH_SOCKET;
2657 _cleanup_free_ char *message = NULL;
2658 _cleanup_close_ int fd = -1;
2659 int n = 0;
2660
2661 /* Don't generate plymouth events if the service was already
2662 * started and we're just deserializing */
2663 if (MANAGER_IS_RELOADING(m))
2664 return;
2665
2666 if (!MANAGER_IS_SYSTEM(m))
2667 return;
2668
2669 if (detect_container() > 0)
2670 return;
2671
2672 if (!IN_SET(u->type, UNIT_SERVICE, UNIT_MOUNT, UNIT_SWAP))
2673 return;
2674
2675 /* We set SOCK_NONBLOCK here so that we rather drop the
2676 * message than wait for Plymouth */
2677 fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
2678 if (fd < 0) {
2679 log_error_errno(errno, "socket() failed: %m");
2680 return;
2681 }
2682
2683 if (connect(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un)) < 0) {
2684
2685 if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
2686 log_error_errno(errno, "connect() failed: %m");
2687 return;
2688 }
2689
2690 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
2691 log_oom();
2692 return;
2693 }
2694
2695 errno = 0;
2696 if (write(fd, message, n + 1) != n + 1)
2697 if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
2698 log_error_errno(errno, "Failed to write Plymouth message: %m");
2699 }
2700
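/*
 * For reference, the message assembled above consists of the two literal bytes 'U' and 0x02 from the
 * format string, one byte holding strlen(u->id)+1, the unit name itself and a trailing NUL. For a
 * hypothetical unit "foo.service" (11 characters) the n+1 bytes written are:
 *
 *     'U' 0x02 0x0c 'f' 'o' 'o' '.' 's' 'e' 'r' 'v' 'i' 'c' 'e' 0x00
 */
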
2701 int manager_open_serialization(Manager *m, FILE **_f) {
2702 int fd;
2703 FILE *f;
2704
2705 assert(_f);
2706
2707 fd = open_serialization_fd("systemd-state");
2708 if (fd < 0)
2709 return fd;
2710
2711 f = fdopen(fd, "w+");
2712 if (!f) {
2713 safe_close(fd);
2714 return -errno;
2715 }
2716
2717 *_f = f;
2718 return 0;
2719 }
2720
2721 int manager_serialize(Manager *m, FILE *f, FDSet *fds, bool switching_root) {
2722 ManagerTimestamp q;
2723 const char *t;
2724 Iterator i;
2725 Unit *u;
2726 int r;
2727
2728 assert(m);
2729 assert(f);
2730 assert(fds);
2731
2732 m->n_reloading++;
2733
2734 fprintf(f, "current-job-id=%"PRIu32"\n", m->current_job_id);
2735 fprintf(f, "n-installed-jobs=%u\n", m->n_installed_jobs);
2736 fprintf(f, "n-failed-jobs=%u\n", m->n_failed_jobs);
2737 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2738 fprintf(f, "ready-sent=%s\n", yes_no(m->ready_sent));
2739 fprintf(f, "taint-logged=%s\n", yes_no(m->taint_logged));
2740 fprintf(f, "service-watchdogs=%s\n", yes_no(m->service_watchdogs));
2741
2742 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
2743 /* The userspace and finish timestamps only apply to the host system, hence only serialize them there */
2744 if (in_initrd() && IN_SET(q, MANAGER_TIMESTAMP_USERSPACE, MANAGER_TIMESTAMP_FINISH))
2745 continue;
2746
2747 t = manager_timestamp_to_string(q);
2748 {
2749 char field[strlen(t) + STRLEN("-timestamp") + 1];
2750 strcpy(stpcpy(field, t), "-timestamp");
2751 dual_timestamp_serialize(f, field, m->timestamps + q);
2752 }
2753 }
2754
2755 if (!switching_root)
2756 (void) serialize_environment(f, m->environment);
2757
2758 if (m->notify_fd >= 0) {
2759 int copy;
2760
2761 copy = fdset_put_dup(fds, m->notify_fd);
2762 if (copy < 0)
2763 return copy;
2764
2765 fprintf(f, "notify-fd=%i\n", copy);
2766 fprintf(f, "notify-socket=%s\n", m->notify_socket);
2767 }
2768
2769 if (m->cgroups_agent_fd >= 0) {
2770 int copy;
2771
2772 copy = fdset_put_dup(fds, m->cgroups_agent_fd);
2773 if (copy < 0)
2774 return copy;
2775
2776 fprintf(f, "cgroups-agent-fd=%i\n", copy);
2777 }
2778
2779 if (m->user_lookup_fds[0] >= 0) {
2780 int copy0, copy1;
2781
2782 copy0 = fdset_put_dup(fds, m->user_lookup_fds[0]);
2783 if (copy0 < 0)
2784 return copy0;
2785
2786 copy1 = fdset_put_dup(fds, m->user_lookup_fds[1]);
2787 if (copy1 < 0)
2788 return copy1;
2789
2790 fprintf(f, "user-lookup=%i %i\n", copy0, copy1);
2791 }
2792
2793 bus_track_serialize(m->subscribed, f, "subscribed");
2794
2795 r = dynamic_user_serialize(m, f, fds);
2796 if (r < 0)
2797 return r;
2798
2799 manager_serialize_uid_refs(m, f);
2800 manager_serialize_gid_refs(m, f);
2801
2802 (void) fputc('\n', f);
2803
2804 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2805 if (u->id != t)
2806 continue;
2807
2808 /* Start marker */
2809 fputs(u->id, f);
2810 fputc('\n', f);
2811
2812 r = unit_serialize(u, f, fds, !switching_root);
2813 if (r < 0) {
2814 m->n_reloading--;
2815 return r;
2816 }
2817 }
2818
2819 assert(m->n_reloading > 0);
2820 m->n_reloading--;
2821
2822 if (ferror(f))
2823 return -EIO;
2824
2825 r = bus_fdset_add_all(m, fds);
2826 if (r < 0)
2827 return r;
2828
2829 return 0;
2830 }
2831
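/*
 * For reference, the stream produced above is line-oriented: manager-level "key=value" entries
 * first, then an empty line, then one section per unit, each introduced by the unit name and filled
 * in by unit_serialize(). A shortened example with hypothetical values:
 *
 *     current-job-id=123
 *     n-installed-jobs=456
 *     taint-usr=no
 *     notify-fd=3
 *     notify-socket=/run/systemd/notify
 *
 *     dbus.service
 *     ... fields written by unit_serialize() ...
 */
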
2832 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2833 int r = 0;
2834
2835 assert(m);
2836 assert(f);
2837
2838 log_debug("Deserializing state...");
2839
2840 m->n_reloading++;
2841
2842 for (;;) {
2843 char line[LINE_MAX];
2844 const char *val, *l;
2845
2846 if (!fgets(line, sizeof(line), f)) {
2847 if (feof(f))
2848 r = 0;
2849 else
2850 r = -errno;
2851
2852 goto finish;
2853 }
2854
2855 char_array_0(line);
2856 l = strstrip(line);
2857
2858 if (l[0] == 0)
2859 break;
2860
2861 if ((val = startswith(l, "current-job-id="))) {
2862 uint32_t id;
2863
2864 if (safe_atou32(val, &id) < 0)
2865 log_notice("Failed to parse current job id value %s", val);
2866 else
2867 m->current_job_id = MAX(m->current_job_id, id);
2868
2869 } else if ((val = startswith(l, "n-installed-jobs="))) {
2870 uint32_t n;
2871
2872 if (safe_atou32(val, &n) < 0)
2873 log_notice("Failed to parse installed jobs counter %s", val);
2874 else
2875 m->n_installed_jobs += n;
2876
2877 } else if ((val = startswith(l, "n-failed-jobs="))) {
2878 uint32_t n;
2879
2880 if (safe_atou32(val, &n) < 0)
2881 log_notice("Failed to parse failed jobs counter %s", val);
2882 else
2883 m->n_failed_jobs += n;
2884
2885 } else if ((val = startswith(l, "taint-usr="))) {
2886 int b;
2887
2888 b = parse_boolean(val);
2889 if (b < 0)
2890 log_notice("Failed to parse taint /usr flag %s", val);
2891 else
2892 m->taint_usr = m->taint_usr || b;
2893
2894 } else if ((val = startswith(l, "ready-sent="))) {
2895 int b;
2896
2897 b = parse_boolean(val);
2898 if (b < 0)
2899 log_notice("Failed to parse ready-sent flag %s", val);
2900 else
2901 m->ready_sent = m->ready_sent || b;
2902
2903 } else if ((val = startswith(l, "taint-logged="))) {
2904 int b;
2905
2906 b = parse_boolean(val);
2907 if (b < 0)
2908 log_notice("Failed to parse taint-logged flag %s", val);
2909 else
2910 m->taint_logged = m->taint_logged || b;
2911
2912 } else if ((val = startswith(l, "service-watchdogs="))) {
2913 int b;
2914
2915 b = parse_boolean(val);
2916 if (b < 0)
2917 log_notice("Failed to parse service-watchdogs flag %s", val);
2918 else
2919 m->service_watchdogs = b;
2920
2921 } else if (startswith(l, "env=")) {
2922 r = deserialize_environment(&m->environment, l);
2923 if (r == -ENOMEM)
2924 goto finish;
2925 if (r < 0)
2926 log_notice_errno(r, "Failed to parse environment entry: \"%s\": %m", l);
2927
2928 } else if ((val = startswith(l, "notify-fd="))) {
2929 int fd;
2930
2931 if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
2932 log_notice("Failed to parse notify fd: \"%s\"", val);
2933 else {
2934 m->notify_event_source = sd_event_source_unref(m->notify_event_source);
2935 safe_close(m->notify_fd);
2936 m->notify_fd = fdset_remove(fds, fd);
2937 }
2938
2939 } else if ((val = startswith(l, "notify-socket="))) {
2940 char *n;
2941
2942 n = strdup(val);
2943 if (!n) {
2944 r = -ENOMEM;
2945 goto finish;
2946 }
2947
2948 free(m->notify_socket);
2949 m->notify_socket = n;
2950
2951 } else if ((val = startswith(l, "cgroups-agent-fd="))) {
2952 int fd;
2953
2954 if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
2955 log_notice("Failed to parse cgroups agent fd: %s", val);
2956 else {
2957 m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
2958 safe_close(m->cgroups_agent_fd);
2959 m->cgroups_agent_fd = fdset_remove(fds, fd);
2960 }
2961
2962 } else if ((val = startswith(l, "user-lookup="))) {
2963 int fd0, fd1;
2964
2965 if (sscanf(val, "%i %i", &fd0, &fd1) != 2 || fd0 < 0 || fd1 < 0 || fd0 == fd1 || !fdset_contains(fds, fd0) || !fdset_contains(fds, fd1))
2966 log_notice("Failed to parse user lookup fd: %s", val);
2967 else {
2968 m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
2969 safe_close_pair(m->user_lookup_fds);
2970 m->user_lookup_fds[0] = fdset_remove(fds, fd0);
2971 m->user_lookup_fds[1] = fdset_remove(fds, fd1);
2972 }
2973
2974 } else if ((val = startswith(l, "dynamic-user=")))
2975 dynamic_user_deserialize_one(m, val, fds);
2976 else if ((val = startswith(l, "destroy-ipc-uid=")))
2977 manager_deserialize_uid_refs_one(m, val);
2978 else if ((val = startswith(l, "destroy-ipc-gid=")))
2979 manager_deserialize_gid_refs_one(m, val);
2980 else if ((val = startswith(l, "subscribed="))) {
2981
2982 if (strv_extend(&m->deserialized_subscribed, val) < 0)
2983 log_oom();
2984 } else {
2985 ManagerTimestamp q;
2986
2987 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
2988 val = startswith(l, manager_timestamp_to_string(q));
2989 if (!val)
2990 continue;
2991
2992 val = startswith(val, "-timestamp=");
2993 if (val)
2994 break;
2995 }
2996
2997 if (q < _MANAGER_TIMESTAMP_MAX) /* found it */
2998 dual_timestamp_deserialize(val, m->timestamps + q);
2999 else if (!startswith(l, "kdbus-fd=")) /* ignore kdbus */
3000 log_notice("Unknown serialization item '%s'", l);
3001 }
3002 }
3003
3004 for (;;) {
3005 Unit *u;
3006 char name[UNIT_NAME_MAX+2];
3007 const char* unit_name;
3008
3009 /* Start marker */
3010 if (!fgets(name, sizeof(name), f)) {
3011 if (feof(f))
3012 r = 0;
3013 else
3014 r = -errno;
3015
3016 goto finish;
3017 }
3018
3019 char_array_0(name);
3020 unit_name = strstrip(name);
3021
3022 r = manager_load_unit(m, unit_name, NULL, NULL, &u);
3023 if (r < 0) {
3024 log_notice_errno(r, "Failed to load unit \"%s\", skipping deserialization: %m", unit_name);
3025 if (r == -ENOMEM)
3026 goto finish;
3027 unit_deserialize_skip(f);
3028 continue;
3029 }
3030
3031 r = unit_deserialize(u, f, fds);
3032 if (r < 0) {
3033 log_notice_errno(r, "Failed to deserialize unit \"%s\": %m", unit_name);
3034 if (r == -ENOMEM)
3035 goto finish;
3036 }
3037 }
3038
3039 finish:
3040 if (ferror(f))
3041 r = -EIO;
3042
3043 assert(m->n_reloading > 0);
3044 m->n_reloading--;
3045
3046 return r;
3047 }
3048
3049 int manager_reload(Manager *m) {
3050 int r, q;
3051 _cleanup_fclose_ FILE *f = NULL;
3052 _cleanup_fdset_free_ FDSet *fds = NULL;
3053
3054 assert(m);
3055
3056 r = manager_open_serialization(m, &f);
3057 if (r < 0)
3058 return r;
3059
3060 m->n_reloading++;
3061 bus_manager_send_reloading(m, true);
3062
3063 fds = fdset_new();
3064 if (!fds) {
3065 m->n_reloading--;
3066 return -ENOMEM;
3067 }
3068
3069 r = manager_serialize(m, f, fds, false);
3070 if (r < 0) {
3071 m->n_reloading--;
3072 return r;
3073 }
3074
3075 if (fseeko(f, 0, SEEK_SET) < 0) {
3076 m->n_reloading--;
3077 return -errno;
3078 }
3079
3080 /* From here on there is no way back. */
3081 manager_clear_jobs_and_units(m);
3082 lookup_paths_flush_generator(&m->lookup_paths);
3083 lookup_paths_free(&m->lookup_paths);
3084 dynamic_user_vacuum(m, false);
3085 m->uid_refs = hashmap_free(m->uid_refs);
3086 m->gid_refs = hashmap_free(m->gid_refs);
3087
3088 q = lookup_paths_init(&m->lookup_paths, m->unit_file_scope, 0, NULL);
3089 if (q < 0 && r >= 0)
3090 r = q;
3091
3092 q = manager_run_environment_generators(m);
3093 if (q < 0 && r >= 0)
3094 r = q;
3095
3096 /* Find new unit paths */
3097 q = manager_run_generators(m);
3098 if (q < 0 && r >= 0)
3099 r = q;
3100
3101 lookup_paths_reduce(&m->lookup_paths);
3102 manager_build_unit_path_cache(m);
3103
3104 /* First, enumerate what we can from all config files */
3105 manager_enumerate(m);
3106
3107 /* Second, deserialize our stored data */
3108 q = manager_deserialize(m, f, fds);
3109 if (q < 0) {
3110 log_error_errno(q, "Deserialization failed: %m");
3111
3112 if (r >= 0)
3113 r = q;
3114 }
3115
3116 fclose(f);
3117 f = NULL;
3118
3119 /* Re-register notify_fd as event source */
3120 q = manager_setup_notify(m);
3121 if (q < 0 && r >= 0)
3122 r = q;
3123
3124 q = manager_setup_cgroups_agent(m);
3125 if (q < 0 && r >= 0)
3126 r = q;
3127
3128 q = manager_setup_user_lookup_fd(m);
3129 if (q < 0 && r >= 0)
3130 r = q;
3131
3132 /* Third, fire things up! */
3133 manager_coldplug(m);
3134
3135 /* Release any dynamic users no longer referenced */
3136 dynamic_user_vacuum(m, true);
3137
3138 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
3139 manager_vacuum_uid_refs(m);
3140 manager_vacuum_gid_refs(m);
3141
3142 /* It might be safe to log to the journal now. */
3143 manager_recheck_journal(m);
3144
3145 /* Sync current state of bus names with our set of listening units */
3146 if (m->api_bus)
3147 manager_sync_bus_names(m, m->api_bus);
3148
3149 assert(m->n_reloading > 0);
3150 m->n_reloading--;
3151
3152 m->send_reloading_done = true;
3153
3154 return r;
3155 }
3156
3157 void manager_reset_failed(Manager *m) {
3158 Unit *u;
3159 Iterator i;
3160
3161 assert(m);
3162
3163 HASHMAP_FOREACH(u, m->units, i)
3164 unit_reset_failed(u);
3165 }
3166
3167 bool manager_unit_inactive_or_pending(Manager *m, const char *name) {
3168 Unit *u;
3169
3170 assert(m);
3171 assert(name);
3172
3173 /* Returns true if the unit is inactive or going down */
3174 u = manager_get_unit(m, name);
3175 if (!u)
3176 return true;
3177
3178 return unit_inactive_or_pending(u);
3179 }
3180
3181 static void log_taint_string(Manager *m) {
3182 _cleanup_free_ char *taint = NULL;
3183
3184 assert(m);
3185
3186 if (MANAGER_IS_USER(m) || m->taint_logged)
3187 return;
3188
3189 m->taint_logged = true; /* only check for taint once */
3190
3191 taint = manager_taint_string(m);
3192 if (isempty(taint))
3193 return;
3194
3195 log_struct(LOG_NOTICE,
3196 LOG_MESSAGE("System is tainted: %s", taint),
3197 "TAINT=%s", taint,
3198 "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR,
3199 NULL);
3200 }
3201
3202 static void manager_notify_finished(Manager *m) {
3203 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
3204 usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;
3205
3206 if (m->test_run_flags)
3207 return;
3208
3209 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
3210
3211 /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
3212 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
3213 * negative values. */
3214
3215 firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
3216 loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3217 userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3218 total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;
3219
3220 if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {
3221
3222 /* The initrd case on bare-metal */
3223 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3224 initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;
3225
3226 log_struct(LOG_INFO,
3227 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3228 "KERNEL_USEC="USEC_FMT, kernel_usec,
3229 "INITRD_USEC="USEC_FMT, initrd_usec,
3230 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3231 LOG_MESSAGE("Startup finished in %s (kernel) + %s (initrd) + %s (userspace) = %s.",
3232 format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
3233 format_timespan(initrd, sizeof(initrd), initrd_usec, USEC_PER_MSEC),
3234 format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
3235 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3236 NULL);
3237 } else {
3238 /* The initrd-less case on bare-metal */
3239
3240 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3241 initrd_usec = 0;
3242
3243 log_struct(LOG_INFO,
3244 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3245 "KERNEL_USEC="USEC_FMT, kernel_usec,
3246 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3247 LOG_MESSAGE("Startup finished in %s (kernel) + %s (userspace) = %s.",
3248 format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
3249 format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
3250 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3251 NULL);
3252 }
3253 } else {
3254 /* The container and --user case */
3255 firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
3256 total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3257
3258 log_struct(LOG_INFO,
3259 "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR,
3260 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3261 LOG_MESSAGE("Startup finished in %s.",
3262 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3263 NULL);
3264 }
3265
3266 bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);
3267
3268 sd_notifyf(false,
3269 m->ready_sent ? "STATUS=Startup finished in %s."
3270 : "READY=1\n"
3271 "STATUS=Startup finished in %s.",
3272 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC));
3273 m->ready_sent = true;
3274
3275 log_taint_string(m);
3276 }
3277
3278 static void manager_send_ready(Manager *m) {
3279 assert(m);
3280
3281 /* We send READY=1 on reaching basic.target only when running in --user mode. */
3282 if (!MANAGER_IS_USER(m) || m->ready_sent)
3283 return;
3284
3285 m->ready_sent = true;
3286
3287 sd_notifyf(false,
3288 "READY=1\n"
3289 "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
3290 }
3291
3292 static void manager_check_basic_target(Manager *m) {
3293 Unit *u;
3294
3295 assert(m);
3296
3297 /* Small shortcut */
3298 if (m->ready_sent && m->taint_logged)
3299 return;
3300
3301 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3302 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3303 return;
3304
3305 /* For user managers, send out READY=1 as soon as we reach basic.target */
3306 manager_send_ready(m);
3307
3308 /* Log the taint string as soon as we reach basic.target */
3309 log_taint_string(m);
3310 }
3311
3312 void manager_check_finished(Manager *m) {
3313 assert(m);
3314
3315 if (MANAGER_IS_RELOADING(m))
3316 return;
3317
3318 /* Verify that we have entered the event loop already, and not left it again. */
3319 if (!MANAGER_IS_RUNNING(m))
3320 return;
3321
3322 manager_check_basic_target(m);
3323
3324 if (hashmap_size(m->jobs) > 0) {
3325 if (m->jobs_in_progress_event_source)
3326 /* Ignore any failure, this is only for feedback */
3327 (void) sd_event_source_set_time(m->jobs_in_progress_event_source, now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC);
3328
3329 return;
3330 }
3331
3332 manager_flip_auto_status(m, false);
3333
3334 /* Notify Type=idle units that we are done now */
3335 manager_close_idle_pipe(m);
3336
3337 /* Turn off confirm spawn now */
3338 m->confirm_spawn = NULL;
3339
3340 /* No need to update ask password status when we're going non-interactive */
3341 manager_close_ask_password(m);
3342
3343 /* This is no longer the first boot */
3344 manager_set_first_boot(m, false);
3345
3346 if (MANAGER_IS_FINISHED(m))
3347 return;
3348
3349 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_FINISH);
3350
3351 manager_notify_finished(m);
3352
3353 manager_invalidate_startup_units(m);
3354 }
3355
3356 static bool generator_path_any(const char* const* paths) {
3357 char **path;
3358 bool found = false;
3359
3360 /* Optimize by skipping the whole process: don't even create the output directories
3361 * if no generators are found. */
3362 STRV_FOREACH(path, (char**) paths)
3363 if (access(*path, F_OK) == 0)
3364 found = true;
3365 else if (errno != ENOENT)
3366 log_warning_errno(errno, "Failed to open generator directory %s: %m", *path);
3367
3368 return found;
3369 }
3370
3371 static const char* system_env_generator_binary_paths[] = {
3372 "/run/systemd/system-environment-generators",
3373 "/etc/systemd/system-environment-generators",
3374 "/usr/local/lib/systemd/system-environment-generators",
3375 SYSTEM_ENV_GENERATOR_PATH,
3376 NULL
3377 };
3378
3379 static const char* user_env_generator_binary_paths[] = {
3380 "/run/systemd/user-environment-generators",
3381 "/etc/systemd/user-environment-generators",
3382 "/usr/local/lib/systemd/user-environment-generators",
3383 USER_ENV_GENERATOR_PATH,
3384 NULL
3385 };
3386
3387 static int manager_run_environment_generators(Manager *m) {
3388 char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
3389 const char **paths;
3390 void* args[] = {&tmp, &tmp, &m->environment};
3391
3392 if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))
3393 return 0;
3394
3395 paths = MANAGER_IS_SYSTEM(m) ? system_env_generator_binary_paths : user_env_generator_binary_paths;
3396
3397 if (!generator_path_any(paths))
3398 return 0;
3399
3400 return execute_directories(paths, DEFAULT_TIMEOUT_USEC, gather_environment, args, NULL);
3401 }
3402
3403 static int manager_run_generators(Manager *m) {
3404 _cleanup_strv_free_ char **paths = NULL;
3405 const char *argv[5];
3406 int r;
3407
3408 assert(m);
3409
3410 if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))
3411 return 0;
3412
3413 paths = generator_binary_paths(m->unit_file_scope);
3414 if (!paths)
3415 return log_oom();
3416
3417 if (!generator_path_any((const char* const*) paths))
3418 return 0;
3419
3420 r = lookup_paths_mkdir_generator(&m->lookup_paths);
3421 if (r < 0)
3422 goto finish;
3423
3424 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3425 argv[1] = m->lookup_paths.generator;
3426 argv[2] = m->lookup_paths.generator_early;
3427 argv[3] = m->lookup_paths.generator_late;
3428 argv[4] = NULL;
3429
3430 RUN_WITH_UMASK(0022)
3431 execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC,
3432 NULL, NULL, (char**) argv);
3433
3434 finish:
3435 lookup_paths_trim_generator(&m->lookup_paths);
3436 return r;
3437 }
3438
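/*
 * Generators found in the paths above are executed with three arguments: the normal, early and late
 * output directories (argv[1]..argv[3], as set up right above). A minimal, purely illustrative
 * generator that drops a hypothetical unit into the normal directory could look like this:
 *
 *     #include <stdio.h>
 *
 *     int main(int argc, char *argv[]) {
 *             char path[4096];
 *             FILE *f;
 *
 *             if (argc < 2)
 *                     return 1;
 *
 *             snprintf(path, sizeof(path), "%s/example-generated.service", argv[1]);
 *
 *             f = fopen(path, "we");
 *             if (!f)
 *                     return 1;
 *
 *             fputs("[Unit]\n"
 *                   "Description=Hypothetical generated unit\n\n"
 *                   "[Service]\n"
 *                   "ExecStart=/bin/true\n", f);
 *
 *             return fclose(f) == 0 ? 0 : 1;
 *     }
 */
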
3439 int manager_environment_add(Manager *m, char **minus, char **plus) {
3440 char **a = NULL, **b = NULL, **l;
3441 assert(m);
3442
3443 l = m->environment;
3444
3445 if (!strv_isempty(minus)) {
3446 a = strv_env_delete(l, 1, minus);
3447 if (!a)
3448 return -ENOMEM;
3449
3450 l = a;
3451 }
3452
3453 if (!strv_isempty(plus)) {
3454 b = strv_env_merge(2, l, plus);
3455 if (!b) {
3456 strv_free(a);
3457 return -ENOMEM;
3458 }
3459
3460 l = b;
3461 }
3462
3463 if (m->environment != l)
3464 strv_free(m->environment);
3465 if (a != l)
3466 strv_free(a);
3467 if (b != l)
3468 strv_free(b);
3469
3470 m->environment = l;
3471 manager_sanitize_environment(m);
3472
3473 return 0;
3474 }
3475
3476 int manager_set_default_rlimits(Manager *m, struct rlimit **default_rlimit) {
3477 int i;
3478
3479 assert(m);
3480
3481 for (i = 0; i < _RLIMIT_MAX; i++) {
3482 m->rlimit[i] = mfree(m->rlimit[i]);
3483
3484 if (!default_rlimit[i])
3485 continue;
3486
3487 m->rlimit[i] = newdup(struct rlimit, default_rlimit[i], 1);
3488 if (!m->rlimit[i])
3489 return log_oom();
3490 }
3491
3492 return 0;
3493 }
3494
3495 static bool manager_journal_is_running(Manager *m) {
3496 Unit *u;
3497
3498 assert(m);
3499
3500 /* If we are the user manager we can safely assume that the journal is up */
3501 if (!MANAGER_IS_SYSTEM(m))
3502 return true;
3503
3504 /* Check that the socket is not only up, but in RUNNING state */
3505 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
3506 if (!u)
3507 return false;
3508 if (SOCKET(u)->state != SOCKET_RUNNING)
3509 return false;
3510
3511 /* Similarly, check if the daemon itself is fully up, too */
3512 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
3513 if (!u)
3514 return false;
3515 if (SERVICE(u)->state != SERVICE_RUNNING)
3516 return false;
3517
3518 return true;
3519 }
3520
3521 void manager_recheck_journal(Manager *m) {
3522
3523 assert(m);
3524
3525 /* Don't bother with this unless we are in the special situation of being PID 1 */
3526 if (getpid_cached() != 1)
3527 return;
3528
3529 if (manager_journal_is_running(m)) {
3530
3531 /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. */
3532 log_set_prohibit_ipc(false);
3533 log_open();
3534 } else {
3535
3536 /* If the journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we
3537 * might trigger an activation that we ourselves can't fulfill. */
3538 log_set_prohibit_ipc(true);
3539 log_close_journal();
3540 }
3541 }
3542
3543 void manager_set_show_status(Manager *m, ShowStatus mode) {
3544 assert(m);
3545 assert(IN_SET(mode, SHOW_STATUS_AUTO, SHOW_STATUS_NO, SHOW_STATUS_YES, SHOW_STATUS_TEMPORARY));
3546
3547 if (!MANAGER_IS_SYSTEM(m))
3548 return;
3549
3550 if (m->show_status != mode)
3551 log_debug("%s showing of status.",
3552 mode == SHOW_STATUS_NO ? "Disabling" : "Enabling");
3553 m->show_status = mode;
3554
3555 if (mode > 0)
3556 (void) touch("/run/systemd/show-status");
3557 else
3558 (void) unlink("/run/systemd/show-status");
3559 }
3560
3561 static bool manager_get_show_status(Manager *m, StatusType type) {
3562 assert(m);
3563
3564 if (!MANAGER_IS_SYSTEM(m))
3565 return false;
3566
3567 if (m->no_console_output)
3568 return false;
3569
3570 if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))
3571 return false;
3572
3573 /* If we cannot find out the status properly, just proceed. */
3574 if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)
3575 return false;
3576
3577 return m->show_status > 0;
3578 }
3579
3580 const char *manager_get_confirm_spawn(Manager *m) {
3581 static int last_errno = 0;
3582 const char *vc = m->confirm_spawn;
3583 struct stat st;
3584 int r;
3585
3586 /* Here's the deal: we want to test the validity of the console but don't want
3587 * PID1 to go through the whole console process which might block. But we also
3588 * want to warn the user only once if something is wrong with the console so we
3589 * cannot do the sanity checks after spawning our children. So here we simply do
3590 * really basic tests to hopefully trap common errors.
3591 *
3592 * If the console suddenly disappears at the time our children really need it,
3593 * then they will simply fail to acquire it and a positive answer will be
3594 * assumed. New children will fall back to /dev/console though.
3595 *
3596 * Note: TTYs are devices that can come and go any time, and frequently aren't
3597 * available yet during early boot (consider a USB rs232 dongle...). If for any
3598 * reason the configured console is not ready, we fall back to the default
3599 * console. */
3600
3601 if (!vc || path_equal(vc, "/dev/console"))
3602 return vc;
3603
3604 r = stat(vc, &st);
3605 if (r < 0)
3606 goto fail;
3607
3608 if (!S_ISCHR(st.st_mode)) {
3609 errno = ENOTTY;
3610 goto fail;
3611 }
3612
3613 last_errno = 0;
3614 return vc;
3615 fail:
3616 if (last_errno != errno) {
3617 last_errno = errno;
3618 log_warning_errno(errno, "Failed to open %s: %m, using default console", vc);
3619 }
3620 return "/dev/console";
3621 }
3622
3623 void manager_set_first_boot(Manager *m, bool b) {
3624 assert(m);
3625
3626 if (!MANAGER_IS_SYSTEM(m))
3627 return;
3628
3629 if (m->first_boot != (int) b) {
3630 if (b)
3631 (void) touch("/run/systemd/first-boot");
3632 else
3633 (void) unlink("/run/systemd/first-boot");
3634 }
3635
3636 m->first_boot = b;
3637 }
3638
3639 void manager_disable_confirm_spawn(void) {
3640 (void) touch("/run/systemd/confirm_spawn_disabled");
3641 }
3642
3643 bool manager_is_confirm_spawn_disabled(Manager *m) {
3644 if (!m->confirm_spawn)
3645 return true;
3646
3647 return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
3648 }
3649
3650 void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
3651 va_list ap;
3652
3653 /* If m is NULL, assume we're after shutdown and let the messages through. */
3654
3655 if (m && !manager_get_show_status(m, type))
3656 return;
3657
3658 /* XXX We should totally drop the check for ephemeral here
3659 * and thus effectively make 'Type=idle' pointless. */
3660 if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)
3661 return;
3662
3663 va_start(ap, format);
3664 status_vprintf(status, true, type == STATUS_TYPE_EPHEMERAL, format, ap);
3665 va_end(ap);
3666 }
3667
3668 Set *manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
3669 char p[strlen(path)+1];
3670
3671 assert(m);
3672 assert(path);
3673
3674 strcpy(p, path);
3675 path_kill_slashes(p);
3676
3677 return hashmap_get(m->units_requiring_mounts_for, streq(p, "/") ? "" : p);
3678 }
3679
3680 void manager_set_exec_params(Manager *m, ExecParameters *p) {
3681 assert(m);
3682 assert(p);
3683
3684 p->environment = m->environment;
3685 p->confirm_spawn = manager_get_confirm_spawn(m);
3686 p->cgroup_supported = m->cgroup_supported;
3687 p->prefix = m->prefix;
3688
3689 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(m));
3690 }
3691
3692 int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
3693 unsigned size;
3694 int r;
3695
3696 assert(m);
3697 assert(u->manager == m);
3698
3699 size = set_size(m->failed_units);
3700
3701 if (failed) {
3702 r = set_ensure_allocated(&m->failed_units, NULL);
3703 if (r < 0)
3704 return log_oom();
3705
3706 if (set_put(m->failed_units, u) < 0)
3707 return log_oom();
3708 } else
3709 (void) set_remove(m->failed_units, u);
3710
3711 if (set_size(m->failed_units) != size)
3712 bus_manager_send_change_signal(m);
3713
3714 return 0;
3715 }
3716
3717 ManagerState manager_state(Manager *m) {
3718 Unit *u;
3719
3720 assert(m);
3721
3722 /* Did we ever finish booting? If not then we are still starting up */
3723 if (!MANAGER_IS_FINISHED(m)) {
3724
3725 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3726 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3727 return MANAGER_INITIALIZING;
3728
3729 return MANAGER_STARTING;
3730 }
3731
3732 /* Is the special shutdown target active or queued? If so, we are in shutdown state */
3733 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
3734 if (u && unit_active_or_pending(u))
3735 return MANAGER_STOPPING;
3736
3737 if (MANAGER_IS_SYSTEM(m)) {
3738 /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
3739 u = manager_get_unit(m, SPECIAL_RESCUE_TARGET);
3740 if (u && unit_active_or_pending(u))
3741 return MANAGER_MAINTENANCE;
3742
3743 u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET);
3744 if (u && unit_active_or_pending(u))
3745 return MANAGER_MAINTENANCE;
3746 }
3747
3748 /* Are there any failed units? If so, we are in degraded mode */
3749 if (set_size(m->failed_units) > 0)
3750 return MANAGER_DEGRADED;
3751
3752 return MANAGER_RUNNING;
3753 }
3754
3755 #define DESTROY_IPC_FLAG (UINT32_C(1) << 31)
3756
3757 static void manager_unref_uid_internal(
3758 Manager *m,
3759 Hashmap **uid_refs,
3760 uid_t uid,
3761 bool destroy_now,
3762 int (*_clean_ipc)(uid_t uid)) {
3763
3764 uint32_t c, n;
3765
3766 assert(m);
3767 assert(uid_refs);
3768 assert(uid_is_valid(uid));
3769 assert(_clean_ipc);
3770
3771 /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the assumption
3772 * that uid_t and gid_t are actually defined the same way, with the same validity rules.
3773 *
3774 * We store a hashmap where the UID/GID is the key and the value is a 32bit reference counter, whose highest
3775 * bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last reference to the UID/GID
3776 * is dropped. The flag is turned on once at least one reference from a unit where RemoveIPC= is set is added
3777 * on a UID/GID. It is reset when the UID's/GID's reference counter drops to 0 again. */
3778
3779 assert_cc(sizeof(uid_t) == sizeof(gid_t));
3780 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
3781
3782 if (uid == 0) /* We don't keep track of root, and will never destroy it */
3783 return;
3784
3785 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
3786
3787 n = c & ~DESTROY_IPC_FLAG;
3788 assert(n > 0);
3789 n--;
3790
3791 if (destroy_now && n == 0) {
3792 hashmap_remove(*uid_refs, UID_TO_PTR(uid));
3793
3794 if (c & DESTROY_IPC_FLAG) {
3795 log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
3796 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
3797 uid);
3798 (void) _clean_ipc(uid);
3799 }
3800 } else {
3801 c = n | (c & DESTROY_IPC_FLAG);
3802 assert_se(hashmap_update(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
3803 }
3804 }
3805
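/*
 * To illustrate the packing used here: with DESTROY_IPC_FLAG being bit 31, a stored value of
 * 0x80000003 means "3 references, and clean up this UID's/GID's IPC objects once the count drops to
 * 0", while 0x00000003 means "3 references, leave IPC alone". manager_ref_uid_internal() below ORs
 * the flag in, and the unref/vacuum paths mask it out with "c & ~DESTROY_IPC_FLAG" to recover the
 * plain counter.
 */
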
3806 void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
3807 manager_unref_uid_internal(m, &m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
3808 }
3809
3810 void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
3811 manager_unref_uid_internal(m, &m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
3812 }
3813
3814 static int manager_ref_uid_internal(
3815 Manager *m,
3816 Hashmap **uid_refs,
3817 uid_t uid,
3818 bool clean_ipc) {
3819
3820 uint32_t c, n;
3821 int r;
3822
3823 assert(m);
3824 assert(uid_refs);
3825 assert(uid_is_valid(uid));
3826
3827 /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the assumption
3828 * that uid_t and gid_t are actually defined the same way, with the same validity rules. */
3829
3830 assert_cc(sizeof(uid_t) == sizeof(gid_t));
3831 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
3832
3833 if (uid == 0) /* We don't keep track of root, and will never destroy it */
3834 return 0;
3835
3836 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
3837 if (r < 0)
3838 return r;
3839
3840 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
3841
3842 n = c & ~DESTROY_IPC_FLAG;
3843 n++;
3844
3845 if (n & DESTROY_IPC_FLAG) /* check for overflow */
3846 return -EOVERFLOW;
3847
3848 c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);
3849
3850 return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
3851 }
3852
3853 int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
3854 return manager_ref_uid_internal(m, &m->uid_refs, uid, clean_ipc);
3855 }
3856
3857 int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
3858 return manager_ref_uid_internal(m, &m->gid_refs, (uid_t) gid, clean_ipc);
3859 }
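
/* A minimal usage sketch (illustrative only; r, uid and remove_ipc are placeholders, and the references are
 * normally taken by the unit code after a successful user lookup, see manager_dispatch_user_lookup_fd()
 * below):
 *
 *     r = manager_ref_uid(m, uid, remove_ipc);     take a reference, remove_ipc reflects RemoveIPC=
 *     if (r < 0)
 *             return r;
 *     ...
 *     manager_unref_uid(m, uid, true);             drop it again; with destroy_now=true the IPC objects
 *                                                  are cleaned up right away once the count reaches zero
 */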
3860
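/* Drops all entries from the UID/GID reference tables whose counter has reached zero, cleaning up their IPC
 * objects where that was requested. As the log message below indicates, this is run after a reload/reexec,
 * i.e. once the reference counters have been rebuilt (see the serialization comment further down). */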
3861 static void manager_vacuum_uid_refs_internal(
3862 Manager *m,
3863 Hashmap **uid_refs,
3864 int (*_clean_ipc)(uid_t uid)) {
3865
3866 Iterator i;
3867 void *p, *k;
3868
3869 assert(m);
3870 assert(uid_refs);
3871 assert(_clean_ipc);
3872
3873 HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
3874 uint32_t c, n;
3875 uid_t uid;
3876
3877 uid = PTR_TO_UID(k);
3878 c = PTR_TO_UINT32(p);
3879
3880 n = c & ~DESTROY_IPC_FLAG;
3881 if (n > 0)
3882 continue;
3883
3884 if (c & DESTROY_IPC_FLAG) {
3885 log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
3886 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
3887 uid);
3888 (void) _clean_ipc(uid);
3889 }
3890
3891 assert_se(hashmap_remove(*uid_refs, k) == p);
3892 }
3893 }
3894
3895 void manager_vacuum_uid_refs(Manager *m) {
3896 manager_vacuum_uid_refs_internal(m, &m->uid_refs, clean_ipc_by_uid);
3897 }
3898
3899 void manager_vacuum_gid_refs(Manager *m) {
3900 manager_vacuum_uid_refs_internal(m, &m->gid_refs, clean_ipc_by_gid);
3901 }
3902
3903 static void manager_serialize_uid_refs_internal(
3904 Manager *m,
3905 FILE *f,
3906 Hashmap **uid_refs,
3907 const char *field_name) {
3908
3909 Iterator i;
3910 void *p, *k;
3911
3912 assert(m);
3913 assert(f);
3914 assert(uid_refs);
3915 assert(field_name);
3916
3917 /* Serialize the UID reference table. Or actually, just its IPC destruction flags, as the actual reference
3918 * counters are better rebuilt after a reload/reexec. */
3919
3920 HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
3921 uint32_t c;
3922 uid_t uid;
3923
3924 uid = PTR_TO_UID(k);
3925 c = PTR_TO_UINT32(p);
3926
3927 if (!(c & DESTROY_IPC_FLAG))
3928 continue;
3929
3930 fprintf(f, "%s=" UID_FMT "\n", field_name, uid);
3931 }
3932 }
3933
3934 void manager_serialize_uid_refs(Manager *m, FILE *f) {
3935 manager_serialize_uid_refs_internal(m, f, &m->uid_refs, "destroy-ipc-uid");
3936 }
3937
3938 void manager_serialize_gid_refs(Manager *m, FILE *f) {
3939 manager_serialize_uid_refs_internal(m, f, &m->gid_refs, "destroy-ipc-gid");
3940 }
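
/* The serialized form is one line per flagged UID/GID written to the manager's serialization stream, e.g.
 * (the numeric values here are just an example):
 *
 *     destroy-ipc-uid=1000
 *     destroy-ipc-gid=1000
 *
 * These lines are parsed again by the deserialization helpers below. */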
3941
3942 static void manager_deserialize_uid_refs_one_internal(
3943 Manager *m,
3944 Hashmap** uid_refs,
3945 const char *value) {
3946
3947 uid_t uid;
3948 uint32_t c;
3949 int r;
3950
3951 assert(m);
3952 assert(uid_refs);
3953 assert(value);
3954
3955 r = parse_uid(value, &uid);
3956 if (r < 0 || uid == 0) {
3957 log_debug("Unable to parse UID reference serialization");
3958 return;
3959 }
3960
3961 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
3962 if (r < 0) {
3963 log_oom();
3964 return;
3965 }
3966
3967 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
3968 if (c & DESTROY_IPC_FLAG)
3969 return;
3970
3971 c |= DESTROY_IPC_FLAG;
3972
3973 r = hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
3974 if (r < 0) {
3975 log_debug("Failed to add UID reference entry");
3976 return;
3977 }
3978 }
3979
3980 void manager_deserialize_uid_refs_one(Manager *m, const char *value) {
3981 manager_deserialize_uid_refs_one_internal(m, &m->uid_refs, value);
3982 }
3983
3984 void manager_deserialize_gid_refs_one(Manager *m, const char *value) {
3985 manager_deserialize_uid_refs_one_internal(m, &m->gid_refs, value);
3986 }
3987
3988 int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
3989 struct buffer {
3990 uid_t uid;
3991 gid_t gid;
3992 char unit_name[UNIT_NAME_MAX+1];
3993 } _packed_ buffer;
3994
3995 Manager *m = userdata;
3996 ssize_t l;
3997 size_t n;
3998 Unit *u;
3999
4000 assert_se(source);
4001 assert_se(m);
4002
4003 /* Invoked whenever a child process has succeeded in resolving the user/group to use and has sent us the
4004 * resulting UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that it can add
4005 * a reference to the UID/GID, allowing it to destroy the UID/GID's IPC objects once the reference counter drops to 0. */
4006
4007 l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
4008 if (l < 0) {
4009 if (IN_SET(errno, EINTR, EAGAIN))
4010 return 0;
4011
4012 return log_error_errno(errno, "Failed to read from user lookup fd: %m");
4013 }
4014
4015 if ((size_t) l <= offsetof(struct buffer, unit_name)) {
4016 log_warning("Received too short user lookup message, ignoring.");
4017 return 0;
4018 }
4019
4020 if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
4021 log_warning("Received too long user lookup message, ignoring.");
4022 return 0;
4023 }
4024
4025 if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
4026 log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
4027 return 0;
4028 }
4029
4030 n = (size_t) l - offsetof(struct buffer, unit_name);
4031 if (memchr(buffer.unit_name, 0, n)) {
4032 log_warning("Received user lookup message with embedded NUL character, ignoring.");
4033 return 0;
4034 }
4035
4036 buffer.unit_name[n] = 0;
4037 u = manager_get_unit(m, buffer.unit_name);
4038 if (!u) {
4039 log_debug("Got user lookup message but unit doesn't exist, ignoring.");
4040 return 0;
4041 }
4042
4043 log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);
4044
4045 unit_notify_user_lookup(u, buffer.uid, buffer.gid);
4046 return 0;
4047 }
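
/* For reference, a matching sender for the datagram layout parsed above could look roughly like this
 * (illustrative sketch only; the actual sender is the child process mentioned in the comment above, and
 * fd, uid, gid and unit_name are placeholders). Note that the unit name is sent without a trailing NUL
 * and must not contain embedded NUL bytes:
 *
 *     struct buffer msg = { .uid = uid, .gid = gid };
 *     size_t k = strlen(unit_name);
 *     assert(k > 0 && k <= UNIT_NAME_MAX);
 *     memcpy(msg.unit_name, unit_name, k);
 *     (void) send(fd, &msg, offsetof(struct buffer, unit_name) + k, 0);
 */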
4048
4049 char *manager_taint_string(Manager *m) {
4050 _cleanup_free_ char *destination = NULL, *overflowuid = NULL, *overflowgid = NULL;
4051 char *buf, *e;
4052 int r;
4053
4054 /* Returns a "taint string", e.g. "local-hwclock:var-run-bad".
4055 * Only things that are detected at runtime should be tagged
4056 * here. For stuff that is set during compilation, emit a warning
4057 * in the configuration phase. */
4058
4059 assert(m);
4060
4061 buf = new(char, sizeof("split-usr:"
4062 "cgroups-missing:"
4063 "local-hwclock:"
4064 "var-run-bad:"
4065 "overflowuid-not-65534:"
4066 "overflowgid-not-65534:"));
4067 if (!buf)
4068 return NULL;
4069
4070 e = buf;
4071 buf[0] = 0;
4072
4073 if (m->taint_usr)
4074 e = stpcpy(e, "split-usr:");
4075
4076 if (access("/proc/cgroups", F_OK) < 0)
4077 e = stpcpy(e, "cgroups-missing:");
4078
4079 if (clock_is_localtime(NULL) > 0)
4080 e = stpcpy(e, "local-hwclock:");
4081
4082 r = readlink_malloc("/var/run", &destination);
4083 if (r < 0 || !PATH_IN_SET(destination, "../run", "/run"))
4084 e = stpcpy(e, "var-run-bad:");
4085
4086 r = read_one_line_file("/proc/sys/kernel/overflowuid", &overflowuid);
4087 if (r >= 0 && !streq(overflowuid, "65534"))
4088 e = stpcpy(e, "overflowuid-not-65534:");
4089
4090 r = read_one_line_file("/proc/sys/kernel/overflowgid", &overflowgid);
4091 if (r >= 0 && !streq(overflowgid, "65534"))
4092 e = stpcpy(e, "overflowgid-not-65534:");
4093
4094 /* remove the last ':' */
4095 if (e != buf)
4096 e[-1] = 0;
4097
4098 return buf;
4099 }
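
/* The returned taint string is heap-allocated and owned by the caller, e.g. (illustrative only):
 *
 *     _cleanup_free_ char *taint = manager_taint_string(m);
 *     if (!isempty(taint))
 *             log_notice("System is tainted: %s", taint);
 */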
4100
4101 void manager_ref_console(Manager *m) {
4102 assert(m);
4103
4104 m->n_on_console++;
4105 }
4106
4107 void manager_unref_console(Manager *m) {
4108 assert(m);
4109 assert(m->n_on_console > 0);
4110 m->n_on_console--;
4111
4112 if (m->n_on_console == 0)
4113 m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
4114 }
4115
4116 static const char *const manager_state_table[_MANAGER_STATE_MAX] = {
4117 [MANAGER_INITIALIZING] = "initializing",
4118 [MANAGER_STARTING] = "starting",
4119 [MANAGER_RUNNING] = "running",
4120 [MANAGER_DEGRADED] = "degraded",
4121 [MANAGER_MAINTENANCE] = "maintenance",
4122 [MANAGER_STOPPING] = "stopping",
4123 };
4124
4125 DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);
4126
4127 static const char *const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
4128 [MANAGER_TIMESTAMP_FIRMWARE] = "firmware",
4129 [MANAGER_TIMESTAMP_LOADER] = "loader",
4130 [MANAGER_TIMESTAMP_KERNEL] = "kernel",
4131 [MANAGER_TIMESTAMP_INITRD] = "initrd",
4132 [MANAGER_TIMESTAMP_USERSPACE] = "userspace",
4133 [MANAGER_TIMESTAMP_FINISH] = "finish",
4134 [MANAGER_TIMESTAMP_SECURITY_START] = "security-start",
4135 [MANAGER_TIMESTAMP_SECURITY_FINISH] = "security-finish",
4136 [MANAGER_TIMESTAMP_GENERATORS_START] = "generators-start",
4137 [MANAGER_TIMESTAMP_GENERATORS_FINISH] = "generators-finish",
4138 [MANAGER_TIMESTAMP_UNITS_LOAD_START] = "units-load-start",
4139 [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH] = "units-load-finish",
4140 };
4141
4142 DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);