src/core/manager.c (thirdparty/systemd.git blob at commit "macro: introduce new TAKE_FD() macro", mirrored on git.ipfire.org)
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2010 Lennart Poettering
6
7 systemd is free software; you can redistribute it and/or modify it
8 under the terms of the GNU Lesser General Public License as published by
9 the Free Software Foundation; either version 2.1 of the License, or
10 (at your option) any later version.
11
12 systemd is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with systemd; If not, see <http://www.gnu.org/licenses/>.
19 ***/
20
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <linux/kd.h>
24 #include <signal.h>
25 #include <stdio_ext.h>
26 #include <string.h>
27 #include <sys/epoll.h>
28 #include <sys/inotify.h>
29 #include <sys/ioctl.h>
30 #include <sys/reboot.h>
31 #include <sys/timerfd.h>
32 #include <sys/wait.h>
33 #include <unistd.h>
34
35 #if HAVE_AUDIT
36 #include <libaudit.h>
37 #endif
38
39 #include "sd-daemon.h"
40 #include "sd-messages.h"
41 #include "sd-path.h"
42
43 #include "alloc-util.h"
44 #include "audit-fd.h"
45 #include "boot-timestamps.h"
46 #include "bus-common-errors.h"
47 #include "bus-error.h"
48 #include "bus-kernel.h"
49 #include "bus-util.h"
50 #include "clean-ipc.h"
51 #include "clock-util.h"
52 #include "dbus-job.h"
53 #include "dbus-manager.h"
54 #include "dbus-unit.h"
55 #include "dbus.h"
56 #include "dirent-util.h"
57 #include "env-util.h"
58 #include "escape.h"
59 #include "exec-util.h"
60 #include "execute.h"
61 #include "exit-status.h"
62 #include "fd-util.h"
63 #include "fileio.h"
64 #include "fs-util.h"
65 #include "hashmap.h"
66 #include "io-util.h"
67 #include "label.h"
68 #include "locale-setup.h"
69 #include "log.h"
70 #include "macro.h"
71 #include "manager.h"
72 #include "missing.h"
73 #include "mkdir.h"
74 #include "parse-util.h"
75 #include "path-lookup.h"
76 #include "path-util.h"
77 #include "process-util.h"
78 #include "ratelimit.h"
79 #include "rm-rf.h"
80 #include "signal-util.h"
81 #include "special.h"
82 #include "stat-util.h"
83 #include "string-table.h"
84 #include "string-util.h"
85 #include "strv.h"
86 #include "strxcpyx.h"
87 #include "terminal-util.h"
88 #include "time-util.h"
89 #include "transaction.h"
90 #include "umask-util.h"
91 #include "unit-name.h"
92 #include "user-util.h"
93 #include "util.h"
94 #include "virt.h"
95 #include "watchdog.h"
96
97 #define NOTIFY_RCVBUF_SIZE (8*1024*1024)
98 #define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)
99
100 /* Initial delay and the interval for printing status messages about running jobs */
101 #define JOBS_IN_PROGRESS_WAIT_USEC (5*USEC_PER_SEC)
102 #define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
103 #define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
104
105 /* If there are more than 1K bus messages queued across our API and direct busses, then let's not add more on top
106  * until the queue has emptied out a bit. */
107 #define MANAGER_BUS_BUSY_THRESHOLD 1024LU
108
109 /* How many units and jobs to process of the bus queue before returning to the event loop. */
110 #define MANAGER_BUS_MESSAGE_BUDGET 100U
111
112 static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
113 static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
114 static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
115 static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
116 static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
117 static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
118 static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
119 static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
120 static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
121 static int manager_run_environment_generators(Manager *m);
122 static int manager_run_generators(Manager *m);
123
124 static void manager_watch_jobs_in_progress(Manager *m) {
125 usec_t next;
126 int r;
127
128 assert(m);
129
130 /* We do not want to show the cylon animation if the user
131 * needs to confirm service executions; otherwise the
132 * confirmation messages would be garbled by the cylon animation. */
133 if (!manager_is_confirm_spawn_disabled(m))
134 return;
135
136 if (m->jobs_in_progress_event_source)
137 return;
138
139 next = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC;
140 r = sd_event_add_time(
141 m->event,
142 &m->jobs_in_progress_event_source,
143 CLOCK_MONOTONIC,
144 next, 0,
145 manager_dispatch_jobs_in_progress, m);
146 if (r < 0)
147 return;
148
149 (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
150 }
151
152 #define CYLON_BUFFER_EXTRA (2*STRLEN(ANSI_RED) + STRLEN(ANSI_HIGHLIGHT_RED) + 2*STRLEN(ANSI_NORMAL))
153
154 static void draw_cylon(char buffer[], size_t buflen, unsigned width, unsigned pos) {
155 char *p = buffer;
156
157 assert(buflen >= CYLON_BUFFER_EXTRA + width + 1);
158 assert(pos <= width+1); /* 0 or width+1 mean that the center light is behind the corner */
159
160 if (pos > 1) {
161 if (pos > 2)
162 p = mempset(p, ' ', pos-2);
163 if (log_get_show_color())
164 p = stpcpy(p, ANSI_RED);
165 *p++ = '*';
166 }
167
168 if (pos > 0 && pos <= width) {
169 if (log_get_show_color())
170 p = stpcpy(p, ANSI_HIGHLIGHT_RED);
171 *p++ = '*';
172 }
173
174 /* Reset the color (or, with colors disabled, just NUL-terminate what has been written so far) */
175 p = stpcpy(p, log_get_show_color() ? ANSI_NORMAL : "");
176
177 if (pos < width) {
178 if (log_get_show_color())
179 p = stpcpy(p, ANSI_RED);
180 *p++ = '*';
181 if (pos < width-1)
182 p = mempset(p, ' ', width-1-pos);
183 /* Reset the color, or just NUL-terminate when colors are disabled */
184 strcpy(p, log_get_show_color() ? ANSI_NORMAL : "");
185 }
186 }
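/* Worked example, derived from the function above: with width == 6 and colors disabled, the frames produced
 * for pos == 0..7 are
 *
 *     "*     "  "**    "  "***   "  " ***  "  "  *** "  "   ***"  "    **"  "     *"
 *
 * i.e. a bright center light flanked by dim lights (when colors are enabled) sweeping back and forth, with the
 * center light disappearing "behind the corner" at the extreme positions. */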
187
188 void manager_flip_auto_status(Manager *m, bool enable) {
189 assert(m);
190
191 if (enable) {
192 if (m->show_status == SHOW_STATUS_AUTO)
193 manager_set_show_status(m, SHOW_STATUS_TEMPORARY);
194 } else {
195 if (m->show_status == SHOW_STATUS_TEMPORARY)
196 manager_set_show_status(m, SHOW_STATUS_AUTO);
197 }
198 }
199
200 static void manager_print_jobs_in_progress(Manager *m) {
201 _cleanup_free_ char *job_of_n = NULL;
202 Iterator i;
203 Job *j;
204 unsigned counter = 0, print_nr;
205 char cylon[6 + CYLON_BUFFER_EXTRA + 1];
206 unsigned cylon_pos;
207 char time[FORMAT_TIMESPAN_MAX], limit[FORMAT_TIMESPAN_MAX] = "no limit";
208 uint64_t x;
209
210 assert(m);
211 assert(m->n_running_jobs > 0);
212
213 manager_flip_auto_status(m, true);
214
215 print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;
216
217 HASHMAP_FOREACH(j, m->jobs, i)
218 if (j->state == JOB_RUNNING && counter++ == print_nr)
219 break;
220
221 /* m->n_running_jobs must be consistent with the contents of m->jobs,
222 * so the above loop must have succeeded in finding j. */
223 assert(counter == print_nr + 1);
224 assert(j);
225
226 cylon_pos = m->jobs_in_progress_iteration % 14;
227 if (cylon_pos >= 8)
228 cylon_pos = 14 - cylon_pos;
229 draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);
230
231 m->jobs_in_progress_iteration++;
232
233 if (m->n_running_jobs > 1) {
234 if (asprintf(&job_of_n, "(%u of %u) ", counter, m->n_running_jobs) < 0)
235 job_of_n = NULL;
236 }
237
238 format_timespan(time, sizeof(time), now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
239 if (job_get_timeout(j, &x) > 0)
240 format_timespan(limit, sizeof(limit), x - j->begin_usec, 1*USEC_PER_SEC);
241
242 manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
243 "%sA %s job is running for %s (%s / %s)",
244 strempty(job_of_n),
245 job_type_to_string(j->type),
246 unit_description(j->unit),
247 time, limit);
248 }
249
250 static int have_ask_password(void) {
251 _cleanup_closedir_ DIR *dir;
252 struct dirent *de;
253
254 dir = opendir("/run/systemd/ask-password");
255 if (!dir) {
256 if (errno == ENOENT)
257 return false;
258 else
259 return -errno;
260 }
261
262 FOREACH_DIRENT_ALL(de, dir, return -errno) {
263 if (startswith(de->d_name, "ask."))
264 return true;
265 }
266 return false;
267 }
268
269 static int manager_dispatch_ask_password_fd(sd_event_source *source,
270 int fd, uint32_t revents, void *userdata) {
271 Manager *m = userdata;
272
273 assert(m);
274
275 (void) flush_fd(fd);
276
277 m->have_ask_password = have_ask_password();
278 if (m->have_ask_password < 0)
279 /* Log error but continue. Negative have_ask_password
280 * is treated as unknown status. */
281 log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");
282
283 return 0;
284 }
285
286 static void manager_close_ask_password(Manager *m) {
287 assert(m);
288
289 m->ask_password_event_source = sd_event_source_unref(m->ask_password_event_source);
290 m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
291 m->have_ask_password = -EINVAL;
292 }
293
294 static int manager_check_ask_password(Manager *m) {
295 int r;
296
297 assert(m);
298
299 if (!m->ask_password_event_source) {
300 assert(m->ask_password_inotify_fd < 0);
301
302 mkdir_p_label("/run/systemd/ask-password", 0755);
303
304 m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
305 if (m->ask_password_inotify_fd < 0)
306 return log_error_errno(errno, "inotify_init1() failed: %m");
307
308 if (inotify_add_watch(m->ask_password_inotify_fd, "/run/systemd/ask-password", IN_CREATE|IN_DELETE|IN_MOVE) < 0) {
309 log_error_errno(errno, "Failed to add watch on /run/systemd/ask-password: %m");
310 manager_close_ask_password(m);
311 return -errno;
312 }
313
314 r = sd_event_add_io(m->event, &m->ask_password_event_source,
315 m->ask_password_inotify_fd, EPOLLIN,
316 manager_dispatch_ask_password_fd, m);
317 if (r < 0) {
318 log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
319 manager_close_ask_password(m);
320 return r;
321 }
322
323 (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");
324
325 /* Queries might have been added meanwhile... */
326 manager_dispatch_ask_password_fd(m->ask_password_event_source,
327 m->ask_password_inotify_fd, EPOLLIN, m);
328 }
329
330 return m->have_ask_password;
331 }
332
333 static int manager_watch_idle_pipe(Manager *m) {
334 int r;
335
336 assert(m);
337
338 if (m->idle_pipe_event_source)
339 return 0;
340
341 if (m->idle_pipe[2] < 0)
342 return 0;
343
344 r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
345 if (r < 0)
346 return log_error_errno(r, "Failed to watch idle pipe: %m");
347
348 (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");
349
350 return 0;
351 }
352
353 static void manager_close_idle_pipe(Manager *m) {
354 assert(m);
355
356 m->idle_pipe_event_source = sd_event_source_unref(m->idle_pipe_event_source);
357
358 safe_close_pair(m->idle_pipe);
359 safe_close_pair(m->idle_pipe + 2);
360 }
361
362 static int manager_setup_time_change(Manager *m) {
363 int r;
364
365 /* We only care for the cancellation event, hence we set the
366 * timeout to the latest possible value. */
367 struct itimerspec its = {
368 .it_value.tv_sec = TIME_T_MAX,
369 };
370
371 assert(m);
372 assert_cc(sizeof(time_t) == sizeof(TIME_T_MAX));
373
374 if (m->test_run_flags)
375 return 0;
376
377 /* Uses TFD_TIMER_CANCEL_ON_SET to get notifications whenever
378 * CLOCK_REALTIME makes a jump relative to CLOCK_MONOTONIC */
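/* (Illustrative note: with TFD_TIMER_CANCEL_ON_SET, a discontinuous change of CLOCK_REALTIME makes read() on
 * the timerfd fail with ECANCELED. That error wakes up the event source added below, which is how
 * manager_dispatch_time_change_fd() learns that the system clock was changed.) */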
379
380 m->time_change_fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
381 if (m->time_change_fd < 0)
382 return log_error_errno(errno, "Failed to create timerfd: %m");
383
384 if (timerfd_settime(m->time_change_fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
385 log_debug_errno(errno, "Failed to set up TFD_TIMER_CANCEL_ON_SET, ignoring: %m");
386 m->time_change_fd = safe_close(m->time_change_fd);
387 return 0;
388 }
389
390 r = sd_event_add_io(m->event, &m->time_change_event_source, m->time_change_fd, EPOLLIN, manager_dispatch_time_change_fd, m);
391 if (r < 0)
392 return log_error_errno(r, "Failed to create time change event source: %m");
393
394 (void) sd_event_source_set_description(m->time_change_event_source, "manager-time-change");
395
396 log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");
397
398 return 0;
399 }
400
401 static int enable_special_signals(Manager *m) {
402 _cleanup_close_ int fd = -1;
403
404 assert(m);
405
406 if (m->test_run_flags)
407 return 0;
408
409 /* Enable that we get SIGINT on control-alt-del. In containers
410 * this will fail with EPERM (older) or EINVAL (newer), so
411 * ignore that. */
412 if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
413 log_warning_errno(errno, "Failed to enable ctrl-alt-del handling: %m");
414
415 fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
416 if (fd < 0) {
417 /* Support systems without virtual console */
418 if (fd != -ENOENT)
419 log_warning_errno(fd, "Failed to open /dev/tty0: %m");
420 } else {
421 /* Enable that we get SIGWINCH on kbrequest */
422 if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
423 log_warning_errno(errno, "Failed to enable kbrequest handling: %m");
424 }
425
426 return 0;
427 }
428
429 #define RTSIG_IF_AVAILABLE(signum) (signum <= SIGRTMAX ? signum : -1)
430
431 static int manager_setup_signals(Manager *m) {
432 struct sigaction sa = {
433 .sa_handler = SIG_DFL,
434 .sa_flags = SA_NOCLDSTOP|SA_RESTART,
435 };
436 sigset_t mask;
437 int r;
438
439 assert(m);
440
441 assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
442
443 /* We make liberal use of realtime signals here. On
444 * Linux/glibc we have 30 of them (with the exception of Linux
445 * on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
446 * (aka SIGRTMAX). */
447
448 assert_se(sigemptyset(&mask) == 0);
449 sigset_add_many(&mask,
450 SIGCHLD, /* Child died */
451 SIGTERM, /* Reexecute daemon */
452 SIGHUP, /* Reload configuration */
453 SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
454 SIGUSR2, /* systemd: dump status */
455 SIGINT, /* Kernel sends us this on control-alt-del */
456 SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
457 SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
458
459 SIGRTMIN+0, /* systemd: start default.target */
460 SIGRTMIN+1, /* systemd: isolate rescue.target */
461 SIGRTMIN+2, /* systemd: isolate emergency.target */
462 SIGRTMIN+3, /* systemd: start halt.target */
463 SIGRTMIN+4, /* systemd: start poweroff.target */
464 SIGRTMIN+5, /* systemd: start reboot.target */
465 SIGRTMIN+6, /* systemd: start kexec.target */
466
467 /* ... space for more special targets ... */
468
469 SIGRTMIN+13, /* systemd: Immediate halt */
470 SIGRTMIN+14, /* systemd: Immediate poweroff */
471 SIGRTMIN+15, /* systemd: Immediate reboot */
472 SIGRTMIN+16, /* systemd: Immediate kexec */
473
474 /* ... space for more immediate system state changes ... */
475
476 SIGRTMIN+20, /* systemd: enable status messages */
477 SIGRTMIN+21, /* systemd: disable status messages */
478 SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
479 SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
480 SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
481
482 /* .. one free signal here ... */
483
484 /* Apparently Linux on hppa had fewer RT signals until v3.18,
485 * SIGRTMAX was SIGRTMIN+25, and then SIGRTMIN was lowered,
486 * see commit v3.17-7614-g1f25df2eff.
487 *
488 * We cannot unconditionally make use of those signals here,
489 * so let's use a runtime check. Since these commands are
490 * accessible by different means and only really a safety
491 * net, the missing functionality on hppa shouldn't matter.
492 */
493
494 RTSIG_IF_AVAILABLE(SIGRTMIN+26), /* systemd: set log target to journal-or-kmsg */
495 RTSIG_IF_AVAILABLE(SIGRTMIN+27), /* systemd: set log target to console */
496 RTSIG_IF_AVAILABLE(SIGRTMIN+28), /* systemd: set log target to kmsg */
497 RTSIG_IF_AVAILABLE(SIGRTMIN+29), /* systemd: set log target to syslog-or-kmsg (obsolete) */
498
499 /* ... one free signal here SIGRTMIN+30 ... */
500 -1);
501 assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
502
503 m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
504 if (m->signal_fd < 0)
505 return -errno;
506
507 r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
508 if (r < 0)
509 return r;
510
511 (void) sd_event_source_set_description(m->signal_event_source, "manager-signal");
512
513 /* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
514 * notify processing can still figure out to which process/service a message belongs, before we reap the
515 * process. Also, process this before handling cgroup notifications, so that we always collect child exit
516 * status information before detecting that there's no process in a cgroup. */
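/* (For orientation, the relative dispatch order of the event sources set up in this file, from earliest to
 * latest: user-lookup fd (NORMAL-11), notify fd (NORMAL-8), SIGCHLD (NORMAL-7), signal fd (NORMAL-6),
 * cgroups agent fd (NORMAL-4), run queue (IDLE); lower priority values are dispatched first.) */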
517 r = sd_event_source_set_priority(m->signal_event_source, SD_EVENT_PRIORITY_NORMAL-6);
518 if (r < 0)
519 return r;
520
521 if (MANAGER_IS_SYSTEM(m))
522 return enable_special_signals(m);
523
524 return 0;
525 }
526
527 static void manager_sanitize_environment(Manager *m) {
528 assert(m);
529
530 /* Let's remove some environment variables that we need ourselves to communicate with our clients */
531 strv_env_unset_many(
532 m->environment,
533 "EXIT_CODE",
534 "EXIT_STATUS",
535 "INVOCATION_ID",
536 "JOURNAL_STREAM",
537 "LISTEN_FDNAMES",
538 "LISTEN_FDS",
539 "LISTEN_PID",
540 "MAINPID",
541 "MANAGERPID",
542 "NOTIFY_SOCKET",
543 "REMOTE_ADDR",
544 "REMOTE_PORT",
545 "SERVICE_RESULT",
546 "WATCHDOG_PID",
547 "WATCHDOG_USEC",
548 NULL);
549
550 /* Let's order the environment alphabetically, just to make it pretty */
551 strv_sort(m->environment);
552 }
553
554 static int manager_default_environment(Manager *m) {
555 assert(m);
556
557 if (MANAGER_IS_SYSTEM(m)) {
558 /* The system manager always starts with a clean
559 * environment for its children. It does not import
560 * the kernel's or the parents' exported variables.
561 *
562 * The initial passed environment is untouched to keep
563 * /proc/self/environ valid; it is used for tagging
564 * the init process inside containers. */
565 m->environment = strv_new("PATH=" DEFAULT_PATH,
566 NULL);
567
568 /* Import locale variables LC_*= from configuration */
569 locale_setup(&m->environment);
570 } else
571 /* The user manager passes its own environment
572 * along to its children. */
573 m->environment = strv_copy(environ);
574
575 if (!m->environment)
576 return -ENOMEM;
577
578 manager_sanitize_environment(m);
579
580 return 0;
581 }
582
583 static int manager_setup_prefix(Manager *m) {
584 struct table_entry {
585 uint64_t type;
586 const char *suffix;
587 };
588
589 static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
590 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
591 [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
592 [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
593 [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
594 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
595 };
596
597 static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
598 [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
599 [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_CONFIGURATION, NULL },
600 [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
601 [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_CONFIGURATION, "log" },
602 [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
603 };
604
605 const struct table_entry *p;
606 ExecDirectoryType i;
607 int r;
608
609 assert(m);
610
611 if (MANAGER_IS_SYSTEM(m))
612 p = paths_system;
613 else
614 p = paths_user;
615
616 for (i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
617 r = sd_path_home(p[i].type, p[i].suffix, &m->prefix[i]);
618 if (r < 0)
619 return r;
620 }
621
622 return 0;
623 }
624
625 static int manager_setup_run_queue(Manager *m) {
626 int r;
627
628 assert(m);
629 assert(!m->run_queue_event_source);
630
631 r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
632 if (r < 0)
633 return r;
634
635 r = sd_event_source_set_priority(m->run_queue_event_source, SD_EVENT_PRIORITY_IDLE);
636 if (r < 0)
637 return r;
638
639 r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
640 if (r < 0)
641 return r;
642
643 (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");
644
645 return 0;
646 }
647
648 static int manager_setup_sigchld_event_source(Manager *m) {
649 int r;
650
651 assert(m);
652 assert(!m->sigchld_event_source);
653
654 r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
655 if (r < 0)
656 return r;
657
658 r = sd_event_source_set_priority(m->sigchld_event_source, SD_EVENT_PRIORITY_NORMAL-7);
659 if (r < 0)
660 return r;
661
662 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
663 if (r < 0)
664 return r;
665
666 (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");
667
668 return 0;
669 }
670
671 int manager_new(UnitFileScope scope, unsigned test_run_flags, Manager **_m) {
672 _cleanup_(manager_freep) Manager *m = NULL;
673 int r;
674
675 assert(_m);
676 assert(IN_SET(scope, UNIT_FILE_SYSTEM, UNIT_FILE_USER));
677
678 m = new0(Manager, 1);
679 if (!m)
680 return -ENOMEM;
681
682 m->unit_file_scope = scope;
683 m->exit_code = _MANAGER_EXIT_CODE_INVALID;
684 m->default_timer_accuracy_usec = USEC_PER_MINUTE;
685 m->default_memory_accounting = MEMORY_ACCOUNTING_DEFAULT;
686 m->default_tasks_accounting = true;
687 m->default_tasks_max = UINT64_MAX;
688 m->default_timeout_start_usec = DEFAULT_TIMEOUT_USEC;
689 m->default_timeout_stop_usec = DEFAULT_TIMEOUT_USEC;
690 m->default_restart_usec = DEFAULT_RESTART_USEC;
691
692 #if ENABLE_EFI
693 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
694 boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
695 m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
696 m->timestamps + MANAGER_TIMESTAMP_LOADER);
697 #endif
698
699 /* Prepare log fields we can use for structured logging */
700 if (MANAGER_IS_SYSTEM(m)) {
701 m->unit_log_field = "UNIT=";
702 m->unit_log_format_string = "UNIT=%s";
703
704 m->invocation_log_field = "INVOCATION_ID=";
705 m->invocation_log_format_string = "INVOCATION_ID=%s";
706 } else {
707 m->unit_log_field = "USER_UNIT=";
708 m->unit_log_format_string = "USER_UNIT=%s";
709
710 m->invocation_log_field = "USER_INVOCATION_ID=";
711 m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
712 }
713
714 m->idle_pipe[0] = m->idle_pipe[1] = m->idle_pipe[2] = m->idle_pipe[3] = -1;
715
716 m->pin_cgroupfs_fd = m->notify_fd = m->cgroups_agent_fd = m->signal_fd = m->time_change_fd =
717 m->dev_autofs_fd = m->private_listen_fd = m->cgroup_inotify_fd =
718 m->ask_password_inotify_fd = -1;
719
720 m->user_lookup_fds[0] = m->user_lookup_fds[1] = -1;
721
722 m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */
723
724 m->have_ask_password = -EINVAL; /* we don't know */
725 m->first_boot = -1;
726
727 m->test_run_flags = test_run_flags;
728
729 /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
730 RATELIMIT_INIT(m->ctrl_alt_del_ratelimit, 2 * USEC_PER_SEC, 7);
731
732 r = manager_default_environment(m);
733 if (r < 0)
734 return r;
735
736 r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
737 if (r < 0)
738 return r;
739
740 r = hashmap_ensure_allocated(&m->jobs, NULL);
741 if (r < 0)
742 return r;
743
744 r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
745 if (r < 0)
746 return r;
747
748 r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
749 if (r < 0)
750 return r;
751
752 r = manager_setup_prefix(m);
753 if (r < 0)
754 return r;
755
756 m->udev = udev_new();
757 if (!m->udev)
758 return -ENOMEM;
759
760 r = sd_event_default(&m->event);
761 if (r < 0)
762 return r;
763
764 r = manager_setup_run_queue(m);
765 if (r < 0)
766 return r;
767
768 if (test_run_flags == MANAGER_TEST_RUN_MINIMAL) {
769 m->cgroup_root = strdup("");
770 if (!m->cgroup_root)
771 return -ENOMEM;
772 } else {
773 r = manager_setup_signals(m);
774 if (r < 0)
775 return r;
776
777 r = manager_setup_cgroup(m);
778 if (r < 0)
779 return r;
780
781 r = manager_setup_time_change(m);
782 if (r < 0)
783 return r;
784
785 r = manager_setup_sigchld_event_source(m);
786 if (r < 0)
787 return r;
788 }
789
790 if (MANAGER_IS_SYSTEM(m) && test_run_flags == 0) {
791 r = mkdir_label("/run/systemd/units", 0755);
792 if (r < 0 && r != -EEXIST)
793 return r;
794 }
795
796 m->taint_usr =
797 !in_initrd() &&
798 dir_is_empty("/usr") > 0;
799
800 /* Note that we do not set up the notify fd here. We do that after deserialization,
801 * since they might have gotten serialized across the reexec. */
802
803 *_m = m;
804 m = NULL;
805 return 0;
806 }
807
808 static int manager_setup_notify(Manager *m) {
809 int r;
810
811 if (m->test_run_flags)
812 return 0;
813
814 if (m->notify_fd < 0) {
815 _cleanup_close_ int fd = -1;
816 union sockaddr_union sa = {
817 .sa.sa_family = AF_UNIX,
818 };
819 static const int one = 1;
820
821 /* First free all secondary fields */
822 m->notify_socket = mfree(m->notify_socket);
823 m->notify_event_source = sd_event_source_unref(m->notify_event_source);
824
825 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
826 if (fd < 0)
827 return log_error_errno(errno, "Failed to allocate notification socket: %m");
828
829 fd_inc_rcvbuf(fd, NOTIFY_RCVBUF_SIZE);
830
831 m->notify_socket = strappend(m->prefix[EXEC_DIRECTORY_RUNTIME], "/systemd/notify");
832 if (!m->notify_socket)
833 return log_oom();
834
835 (void) mkdir_parents_label(m->notify_socket, 0755);
836 (void) unlink(m->notify_socket);
837
838 strncpy(sa.un.sun_path, m->notify_socket, sizeof(sa.un.sun_path)-1);
839 r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
840 if (r < 0)
841 return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
842
843 r = setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
844 if (r < 0)
845 return log_error_errno(errno, "SO_PASSCRED failed: %m");
846
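/* TAKE_FD() transfers ownership of the descriptor out of the _cleanup_close_ guard above: it evaluates to the
 * current value of "fd" and resets the variable to -1, so the automatic close on scope exit becomes a no-op
 * for the fd we keep. A minimal sketch of the idea (not necessarily the exact upstream definition):
 *
 *     #define TAKE_FD(fd) ({ int _tmp_ = (fd); (fd) = -1; _tmp_; })
 */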
847 m->notify_fd = TAKE_FD(fd);
848
849 log_debug("Using notification socket %s", m->notify_socket);
850 }
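/* (Illustrative note: the datagrams arriving on this socket are the sd_notify(3) messages sent by services,
 * i.e. newline-separated "KEY=VALUE" assignments such as "READY=1", "STATUS=...", "MAINPID=..." or
 * "WATCHDOG=1". manager_dispatch_notify_fd() attributes each datagram to a unit via the sender credentials
 * (SCM_CREDENTIALS), which is why SO_PASSCRED is enabled above.) */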
851
852 if (!m->notify_event_source) {
853 r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
854 if (r < 0)
855 return log_error_errno(r, "Failed to allocate notify event source: %m");
856
857 /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
858 * service an exit message belongs. */
859 r = sd_event_source_set_priority(m->notify_event_source, SD_EVENT_PRIORITY_NORMAL-8);
860 if (r < 0)
861 return log_error_errno(r, "Failed to set priority of notify event source: %m");
862
863 (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
864 }
865
866 return 0;
867 }
868
869 static int manager_setup_cgroups_agent(Manager *m) {
870
871 static const union sockaddr_union sa = {
872 .un.sun_family = AF_UNIX,
873 .un.sun_path = "/run/systemd/cgroups-agent",
874 };
875 int r;
876
877 /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
878 * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-living, and
879 * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
880 * overloaded systems the backlog of the D-Bus socket becomes relevant, as not more than the configured number
881 * of D-Bus connections may be queued until the kernel will start dropping further incoming connections,
882 * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
883 * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
884 * we thus won't lose messages.
885 *
886 * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
887 * to it. The system instance hence listens on this special socket, but the user instances listen on the system
888 * bus for these messages. */
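/* (Illustrative note, based on how this socket is consumed by manager_dispatch_cgroups_agent_fd(): each
 * datagram is expected to be a plain cgroup path, e.g. "/system.slice/foo.service", sent by the agent when
 * the kernel reports that cgroup as having run empty.) */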
889
890 if (m->test_run_flags)
891 return 0;
892
893 if (!MANAGER_IS_SYSTEM(m))
894 return 0;
895
896 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
897 if (r < 0)
898 return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
899 if (r > 0) /* We don't need this anymore on the unified hierarchy */
900 return 0;
901
902 if (m->cgroups_agent_fd < 0) {
903 _cleanup_close_ int fd = -1;
904
905 /* First free all secondary fields */
906 m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
907
908 fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
909 if (fd < 0)
910 return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");
911
912 fd_inc_rcvbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);
913
914 (void) unlink(sa.un.sun_path);
915
916 /* Only allow root to connect to this socket */
917 RUN_WITH_UMASK(0077)
918 r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
919 if (r < 0)
920 return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
921
922 m->cgroups_agent_fd = fd;
923 fd = -1;
924 }
925
926 if (!m->cgroups_agent_event_source) {
927 r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
928 if (r < 0)
929 return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");
930
931 /* Process cgroups notifications early, but after having processed service notification messages or
932 * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of notification,
933 * and we have already collected the metadata the notification and SIGCHLD stuff offers. Also see handling of
934 * cgroup inotify for the unified cgroup stuff. */
935 r = sd_event_source_set_priority(m->cgroups_agent_event_source, SD_EVENT_PRIORITY_NORMAL-4);
936 if (r < 0)
937 return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");
938
939 (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
940 }
941
942 return 0;
943 }
944
945 static int manager_setup_user_lookup_fd(Manager *m) {
946 int r;
947
948 assert(m);
949
950 /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
951 * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
952 * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
953 * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
954 * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
955 * hence we establish this communication channel so that forked off processes can pass their UID/GID
956 * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
957 * datagram, along with their unit name, so that we can share one communication socket pair among all units for
958 * this purpose.
959 *
960 * You might wonder why we need a communication channel for this that is independent of the usual notification
961 * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
962 * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
963 * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
964 *
965 * Note that this function is called under two circumstances: when we first initialize (in which case we
966 * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
967 * (in which case the socket pair already exists but we still need to allocate the event source for it). */
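/* (Illustrative note: the datagram layout used here is simply a uid_t, followed by a gid_t, followed by the
 * unit name as a plain string; the forked-off child writes that triple right before exec(), and
 * manager_dispatch_user_lookup_fd() parses the same layout on the receiving side.) */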
968
969 if (m->user_lookup_fds[0] < 0) {
970
971 /* Free all secondary fields */
972 safe_close_pair(m->user_lookup_fds);
973 m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
974
975 if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
976 return log_error_errno(errno, "Failed to allocate user lookup socket: %m");
977
978 (void) fd_inc_rcvbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
979 }
980
981 if (!m->user_lookup_event_source) {
982 r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
983 if (r < 0)
984 return log_error_errno(r, "Failed to allocate user lookup event source: %m");
985
986 /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
987 * resolutions */
988 r = sd_event_source_set_priority(m->user_lookup_event_source, SD_EVENT_PRIORITY_NORMAL-11);
989 if (r < 0)
990 return log_error_errno(r, "Failed to set priority of user lookup event source: %m");
991
992 (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
993 }
994
995 return 0;
996 }
997
998 static unsigned manager_dispatch_cleanup_queue(Manager *m) {
999 Unit *u;
1000 unsigned n = 0;
1001
1002 assert(m);
1003
1004 while ((u = m->cleanup_queue)) {
1005 assert(u->in_cleanup_queue);
1006
1007 unit_free(u);
1008 n++;
1009 }
1010
1011 return n;
1012 }
1013
1014 enum {
1015 GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
1016 GC_OFFSET_UNSURE, /* No clue */
1017 GC_OFFSET_GOOD, /* We still need this unit */
1018 GC_OFFSET_BAD, /* We don't need this unit anymore */
1019 _GC_OFFSET_MAX
1020 };
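/* The GC pass below works with per-run "markers": every run advances m->gc_marker by _GC_OFFSET_MAX, and a
 * unit's verdict in the current run is stored as u->gc_marker = gc_marker + GC_OFFSET_xyz. Hence checking
 * u->gc_marker - gc_marker against the offsets above tells whether a unit was already visited in this run,
 * while markers left over from earlier runs can never be mistaken for current ones. */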
1021
1022 static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
1023 Unit *other;
1024 Iterator i;
1025 void *v;
1026
1027 u->gc_marker = gc_marker + GC_OFFSET_GOOD;
1028
1029 /* Recursively mark referenced units as GOOD as well */
1030 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCES], i)
1031 if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
1032 unit_gc_mark_good(other, gc_marker);
1033 }
1034
1035 static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
1036 Unit *other;
1037 bool is_bad;
1038 Iterator i;
1039 void *v;
1040
1041 assert(u);
1042
1043 if (IN_SET(u->gc_marker - gc_marker,
1044 GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
1045 return;
1046
1047 if (u->in_cleanup_queue)
1048 goto bad;
1049
1050 if (!unit_may_gc(u))
1051 goto good;
1052
1053 u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;
1054
1055 is_bad = true;
1056
1057 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCED_BY], i) {
1058 unit_gc_sweep(other, gc_marker);
1059
1060 if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
1061 goto good;
1062
1063 if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
1064 is_bad = false;
1065 }
1066
1067 if (u->refs_by_target) {
1068 const UnitRef *ref;
1069
1070 LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
1071 unit_gc_sweep(ref->source, gc_marker);
1072
1073 if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
1074 goto good;
1075
1076 if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
1077 is_bad = false;
1078 }
1079 }
1080
1081 if (is_bad)
1082 goto bad;
1083
1084 /* We were unable to find anything out about this entry, so
1085 * let's investigate it later */
1086 u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
1087 unit_add_to_gc_queue(u);
1088 return;
1089
1090 bad:
1091 /* We definitely know that this one is not useful anymore, so
1092 * let's mark it for deletion */
1093 u->gc_marker = gc_marker + GC_OFFSET_BAD;
1094 unit_add_to_cleanup_queue(u);
1095 return;
1096
1097 good:
1098 unit_gc_mark_good(u, gc_marker);
1099 }
1100
1101 static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
1102 unsigned n = 0, gc_marker;
1103 Unit *u;
1104
1105 assert(m);
1106
1107 /* log_debug("Running GC..."); */
1108
1109 m->gc_marker += _GC_OFFSET_MAX;
1110 if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
1111 m->gc_marker = 1;
1112
1113 gc_marker = m->gc_marker;
1114
1115 while ((u = m->gc_unit_queue)) {
1116 assert(u->in_gc_queue);
1117
1118 unit_gc_sweep(u, gc_marker);
1119
1120 LIST_REMOVE(gc_queue, m->gc_unit_queue, u);
1121 u->in_gc_queue = false;
1122
1123 n++;
1124
1125 if (IN_SET(u->gc_marker - gc_marker,
1126 GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
1127 if (u->id)
1128 log_unit_debug(u, "Collecting.");
1129 u->gc_marker = gc_marker + GC_OFFSET_BAD;
1130 unit_add_to_cleanup_queue(u);
1131 }
1132 }
1133
1134 return n;
1135 }
1136
1137 static unsigned manager_dispatch_gc_job_queue(Manager *m) {
1138 unsigned n = 0;
1139 Job *j;
1140
1141 assert(m);
1142
1143 while ((j = m->gc_job_queue)) {
1144 assert(j->in_gc_queue);
1145
1146 LIST_REMOVE(gc_queue, m->gc_job_queue, j);
1147 j->in_gc_queue = false;
1148
1149 n++;
1150
1151 if (!job_may_gc(j))
1152 continue;
1153
1154 log_unit_debug(j->unit, "Collecting job.");
1155 (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
1156 }
1157
1158 return n;
1159 }
1160
1161 static void manager_clear_jobs_and_units(Manager *m) {
1162 Unit *u;
1163
1164 assert(m);
1165
1166 while ((u = hashmap_first(m->units)))
1167 unit_free(u);
1168
1169 manager_dispatch_cleanup_queue(m);
1170
1171 assert(!m->load_queue);
1172 assert(!m->run_queue);
1173 assert(!m->dbus_unit_queue);
1174 assert(!m->dbus_job_queue);
1175 assert(!m->cleanup_queue);
1176 assert(!m->gc_unit_queue);
1177 assert(!m->gc_job_queue);
1178
1179 assert(hashmap_isempty(m->jobs));
1180 assert(hashmap_isempty(m->units));
1181
1182 m->n_on_console = 0;
1183 m->n_running_jobs = 0;
1184 }
1185
1186 Manager* manager_free(Manager *m) {
1187 UnitType c;
1188 int i;
1189 ExecDirectoryType dt;
1190
1191 if (!m)
1192 return NULL;
1193
1194 manager_clear_jobs_and_units(m);
1195
1196 for (c = 0; c < _UNIT_TYPE_MAX; c++)
1197 if (unit_vtable[c]->shutdown)
1198 unit_vtable[c]->shutdown(m);
1199
1200 /* If we reexecute ourselves, we keep the root cgroup around */
1201 manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);
1202
1203 lookup_paths_flush_generator(&m->lookup_paths);
1204
1205 bus_done(m);
1206
1207 exec_runtime_vacuum(m);
1208 hashmap_free(m->exec_runtime_by_id);
1209
1210 dynamic_user_vacuum(m, false);
1211 hashmap_free(m->dynamic_users);
1212
1213 hashmap_free(m->units);
1214 hashmap_free(m->units_by_invocation_id);
1215 hashmap_free(m->jobs);
1216 hashmap_free(m->watch_pids);
1217 hashmap_free(m->watch_bus);
1218
1219 set_free(m->startup_units);
1220 set_free(m->failed_units);
1221
1222 sd_event_source_unref(m->signal_event_source);
1223 sd_event_source_unref(m->sigchld_event_source);
1224 sd_event_source_unref(m->notify_event_source);
1225 sd_event_source_unref(m->cgroups_agent_event_source);
1226 sd_event_source_unref(m->time_change_event_source);
1227 sd_event_source_unref(m->jobs_in_progress_event_source);
1228 sd_event_source_unref(m->run_queue_event_source);
1229 sd_event_source_unref(m->user_lookup_event_source);
1230 sd_event_source_unref(m->sync_bus_names_event_source);
1231
1232 safe_close(m->signal_fd);
1233 safe_close(m->notify_fd);
1234 safe_close(m->cgroups_agent_fd);
1235 safe_close(m->time_change_fd);
1236 safe_close_pair(m->user_lookup_fds);
1237
1238 manager_close_ask_password(m);
1239
1240 manager_close_idle_pipe(m);
1241
1242 udev_unref(m->udev);
1243 sd_event_unref(m->event);
1244
1245 free(m->notify_socket);
1246
1247 lookup_paths_free(&m->lookup_paths);
1248 strv_free(m->environment);
1249
1250 hashmap_free(m->cgroup_unit);
1251 set_free_free(m->unit_path_cache);
1252
1253 free(m->switch_root);
1254 free(m->switch_root_init);
1255
1256 for (i = 0; i < _RLIMIT_MAX; i++)
1257 m->rlimit[i] = mfree(m->rlimit[i]);
1258
1259 assert(hashmap_isempty(m->units_requiring_mounts_for));
1260 hashmap_free(m->units_requiring_mounts_for);
1261
1262 hashmap_free(m->uid_refs);
1263 hashmap_free(m->gid_refs);
1264
1265 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
1266 m->prefix[dt] = mfree(m->prefix[dt]);
1267
1268 return mfree(m);
1269 }
1270
1271 void manager_enumerate(Manager *m) {
1272 UnitType c;
1273
1274 assert(m);
1275
1276 /* Let's ask every type to load all units from disk/kernel
1277 * that it might know */
1278 for (c = 0; c < _UNIT_TYPE_MAX; c++) {
1279 if (!unit_type_supported(c)) {
1280 log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
1281 continue;
1282 }
1283
1284 if (!unit_vtable[c]->enumerate)
1285 continue;
1286
1287 unit_vtable[c]->enumerate(m);
1288 }
1289
1290 manager_dispatch_load_queue(m);
1291 }
1292
1293 static void manager_coldplug(Manager *m) {
1294 Iterator i;
1295 Unit *u;
1296 char *k;
1297 int r;
1298
1299 assert(m);
1300
1301 /* Then, let's set up their initial state. */
1302 HASHMAP_FOREACH_KEY(u, k, m->units, i) {
1303
1304 /* ignore aliases */
1305 if (u->id != k)
1306 continue;
1307
1308 r = unit_coldplug(u);
1309 if (r < 0)
1310 log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id);
1311 }
1312 }
1313
1314 static void manager_build_unit_path_cache(Manager *m) {
1315 char **i;
1316 int r;
1317
1318 assert(m);
1319
1320 set_free_free(m->unit_path_cache);
1321
1322 m->unit_path_cache = set_new(&path_hash_ops);
1323 if (!m->unit_path_cache) {
1324 r = -ENOMEM;
1325 goto fail;
1326 }
1327
1328 /* This simply builds a list of files we know exist, so that
1329 * we don't always have to go to disk */
1330
1331 STRV_FOREACH(i, m->lookup_paths.search_path) {
1332 _cleanup_closedir_ DIR *d = NULL;
1333 struct dirent *de;
1334
1335 d = opendir(*i);
1336 if (!d) {
1337 if (errno != ENOENT)
1338 log_warning_errno(errno, "Failed to open directory %s, ignoring: %m", *i);
1339 continue;
1340 }
1341
1342 FOREACH_DIRENT(de, d, r = -errno; goto fail) {
1343 char *p;
1344
1345 p = strjoin(streq(*i, "/") ? "" : *i, "/", de->d_name);
1346 if (!p) {
1347 r = -ENOMEM;
1348 goto fail;
1349 }
1350
1351 r = set_consume(m->unit_path_cache, p);
1352 if (r < 0)
1353 goto fail;
1354 }
1355 }
1356
1357 return;
1358
1359 fail:
1360 log_warning_errno(r, "Failed to build unit path cache, proceeding without: %m");
1361 m->unit_path_cache = set_free_free(m->unit_path_cache);
1362 }
1363
1364 static void manager_distribute_fds(Manager *m, FDSet *fds) {
1365 Iterator i;
1366 Unit *u;
1367
1368 assert(m);
1369
1370 HASHMAP_FOREACH(u, m->units, i) {
1371
1372 if (fdset_size(fds) <= 0)
1373 break;
1374
1375 if (!UNIT_VTABLE(u)->distribute_fds)
1376 continue;
1377
1378 UNIT_VTABLE(u)->distribute_fds(u, fds);
1379 }
1380 }
1381
1382 static bool manager_dbus_is_running(Manager *m, bool deserialized) {
1383 Unit *u;
1384
1385 assert(m);
1386
1387 /* This checks whether the dbus instance we are supposed to expose our APIs on is up. We check both the socket
1388 * and the service unit. If the 'deserialized' parameter is true we'll check the deserialized state of the unit
1389 * rather than the current one. */
1390
1391 if (m->test_run_flags != 0)
1392 return false;
1393
1394 /* If we are in the user instance, and the env var is already set for us, then this means D-Bus is run
1395 * somewhere outside of our own logic. Let's use it */
1396 if (MANAGER_IS_USER(m) && getenv("DBUS_SESSION_BUS_ADDRESS"))
1397 return true;
1398
1399 u = manager_get_unit(m, SPECIAL_DBUS_SOCKET);
1400 if (!u)
1401 return false;
1402 if ((deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING)
1403 return false;
1404
1405 u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
1406 if (!u)
1407 return false;
1408 if (!IN_SET((deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state), SERVICE_RUNNING, SERVICE_RELOAD))
1409 return false;
1410
1411 return true;
1412 }
1413
1414 int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
1415 int r;
1416
1417 assert(m);
1418
1419 /* If we are running in test mode, we still want to run the generators,
1420 * but we should not touch the real generator directories. */
1421 r = lookup_paths_init(&m->lookup_paths, m->unit_file_scope,
1422 m->test_run_flags ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0,
1423 NULL);
1424 if (r < 0)
1425 return r;
1426
1427 r = manager_run_environment_generators(m);
1428 if (r < 0)
1429 return r;
1430
1431 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_START);
1432 r = manager_run_generators(m);
1433 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_FINISH);
1434 if (r < 0)
1435 return r;
1436
1437 /* If this is the first boot, and we are in the host system, then preset everything */
1438 if (m->first_boot > 0 &&
1439 MANAGER_IS_SYSTEM(m) &&
1440 !m->test_run_flags) {
1441
1442 r = unit_file_preset_all(UNIT_FILE_SYSTEM, 0, NULL, UNIT_FILE_PRESET_ENABLE_ONLY, NULL, 0);
1443 if (r < 0)
1444 log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r,
1445 "Failed to populate /etc with preset unit settings, ignoring: %m");
1446 else
1447 log_info("Populated /etc with preset unit settings.");
1448 }
1449
1450 lookup_paths_reduce(&m->lookup_paths);
1451 manager_build_unit_path_cache(m);
1452
1453 /* If we will deserialize make sure that during enumeration
1454 * this is already known, so we increase the counter here
1455 * already */
1456 if (serialization)
1457 m->n_reloading++;
1458
1459 /* First, enumerate what we can from all config files */
1460 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_START);
1461 manager_enumerate(m);
1462 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_FINISH);
1463
1464 /* Second, deserialize if there is something to deserialize */
1465 if (serialization) {
1466 r = manager_deserialize(m, serialization, fds);
1467 if (r < 0)
1468 return log_error_errno(r, "Deserialization failed: %m");
1469 }
1470
1471 /* Any fds left? Find some unit which wants them. This is
1472 * useful to allow container managers to pass some file
1473 * descriptors to us pre-initialized. This enables
1474 * socket-based activation of entire containers. */
1475 manager_distribute_fds(m, fds);
1476
1477 /* We might have deserialized the notify fd, but if we didn't
1478 * then let's create the bus now */
1479 r = manager_setup_notify(m);
1480 if (r < 0)
1481 /* No sense to continue without notifications, our children would fail anyway. */
1482 return r;
1483
1484 r = manager_setup_cgroups_agent(m);
1485 if (r < 0)
1486 /* Likewise, no sense to continue without empty cgroup notifications. */
1487 return r;
1488
1489 r = manager_setup_user_lookup_fd(m);
1490 if (r < 0)
1491 /* This shouldn't fail, except if things are really broken. */
1492 return r;
1493
1494 /* Let's set up our private bus connection now, unconditionally */
1495 (void) bus_init_private(m);
1496
1497 /* If we are in --user mode also connect to the system bus now */
1498 if (MANAGER_IS_USER(m))
1499 (void) bus_init_system(m);
1500
1501 /* Let's connect to the bus now, but only if the unit is supposed to be up */
1502 if (manager_dbus_is_running(m, !!serialization)) {
1503 (void) bus_init_api(m);
1504
1505 if (MANAGER_IS_SYSTEM(m))
1506 (void) bus_init_system(m);
1507 }
1508
1509 /* Now that we are connected to all possible busses, let's deserialize who is tracking us. */
1510 (void) bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed);
1511 m->deserialized_subscribed = strv_free(m->deserialized_subscribed);
1512
1513 /* Third, fire things up! */
1514 manager_coldplug(m);
1515
1516 /* Release any dynamic users no longer referenced */
1517 dynamic_user_vacuum(m, true);
1518
1519 exec_runtime_vacuum(m);
1520
1521 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
1522 manager_vacuum_uid_refs(m);
1523 manager_vacuum_gid_refs(m);
1524
1525 if (serialization) {
1526 assert(m->n_reloading > 0);
1527 m->n_reloading--;
1528
1529 /* Let's wait for the UnitNew/JobNew messages to be
1530 * sent before we notify that the reload is
1531 * finished */
1532 m->send_reloading_done = true;
1533 }
1534
1535 return 0;
1536 }
1537
1538 int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, sd_bus_error *e, Job **_ret) {
1539 int r;
1540 Transaction *tr;
1541
1542 assert(m);
1543 assert(type < _JOB_TYPE_MAX);
1544 assert(unit);
1545 assert(mode < _JOB_MODE_MAX);
1546
1547 if (mode == JOB_ISOLATE && type != JOB_START)
1548 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");
1549
1550 if (mode == JOB_ISOLATE && !unit->allow_isolate)
1551 return sd_bus_error_setf(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");
1552
1553 log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
1554
1555 type = job_type_collapse(type, unit);
1556
1557 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
1558 if (!tr)
1559 return -ENOMEM;
1560
1561 r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, false,
1562 IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS),
1563 mode == JOB_IGNORE_DEPENDENCIES, e);
1564 if (r < 0)
1565 goto tr_abort;
1566
1567 if (mode == JOB_ISOLATE) {
1568 r = transaction_add_isolate_jobs(tr, m);
1569 if (r < 0)
1570 goto tr_abort;
1571 }
1572
1573 r = transaction_activate(tr, m, mode, e);
1574 if (r < 0)
1575 goto tr_abort;
1576
1577 log_unit_debug(unit,
1578 "Enqueued job %s/%s as %u", unit->id,
1579 job_type_to_string(type), (unsigned) tr->anchor_job->id);
1580
1581 if (_ret)
1582 *_ret = tr->anchor_job;
1583
1584 transaction_free(tr);
1585 return 0;
1586
1587 tr_abort:
1588 transaction_abort(tr);
1589 transaction_free(tr);
1590 return r;
1591 }
1592
1593 int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, sd_bus_error *e, Job **ret) {
1594 Unit *unit = NULL; /* just to appease gcc, initialization is not really necessary */
1595 int r;
1596
1597 assert(m);
1598 assert(type < _JOB_TYPE_MAX);
1599 assert(name);
1600 assert(mode < _JOB_MODE_MAX);
1601
1602 r = manager_load_unit(m, name, NULL, NULL, &unit);
1603 if (r < 0)
1604 return r;
1605 assert(unit);
1606
1607 return manager_add_job(m, type, unit, mode, e, ret);
1608 }
1609
1610 int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Job **ret) {
1611 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
1612 int r;
1613
1614 assert(m);
1615 assert(type < _JOB_TYPE_MAX);
1616 assert(name);
1617 assert(mode < _JOB_MODE_MAX);
1618
1619 r = manager_add_job_by_name(m, type, name, mode, &error, ret);
1620 if (r < 0)
1621 return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r));
1622
1623 return r;
1624 }
1625
1626 int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) {
1627 int r;
1628 Transaction *tr;
1629
1630 assert(m);
1631 assert(unit);
1632 assert(mode < _JOB_MODE_MAX);
1633 assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */
1634
1635 tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
1636 if (!tr)
1637 return -ENOMEM;
1638
1639 /* We need an anchor job */
1640 r = transaction_add_job_and_dependencies(tr, JOB_NOP, unit, NULL, false, false, true, true, e);
1641 if (r < 0)
1642 goto tr_abort;
1643
1644 /* Failure in adding individual dependencies is ignored, so this always succeeds. */
1645 transaction_add_propagate_reload_jobs(tr, unit, tr->anchor_job, mode == JOB_IGNORE_DEPENDENCIES, e);
1646
1647 r = transaction_activate(tr, m, mode, e);
1648 if (r < 0)
1649 goto tr_abort;
1650
1651 transaction_free(tr);
1652 return 0;
1653
1654 tr_abort:
1655 transaction_abort(tr);
1656 transaction_free(tr);
1657 return r;
1658 }
1659
1660 Job *manager_get_job(Manager *m, uint32_t id) {
1661 assert(m);
1662
1663 return hashmap_get(m->jobs, UINT32_TO_PTR(id));
1664 }
1665
1666 Unit *manager_get_unit(Manager *m, const char *name) {
1667 assert(m);
1668 assert(name);
1669
1670 return hashmap_get(m->units, name);
1671 }
1672
1673 unsigned manager_dispatch_load_queue(Manager *m) {
1674 Unit *u;
1675 unsigned n = 0;
1676
1677 assert(m);
1678
1679 /* Make sure we are not run recursively */
1680 if (m->dispatching_load_queue)
1681 return 0;
1682
1683 m->dispatching_load_queue = true;
1684
1685 /* Dispatches the load queue. Takes a unit from the queue and
1686 * tries to load its data until the queue is empty */
1687
1688 while ((u = m->load_queue)) {
1689 assert(u->in_load_queue);
1690
1691 unit_load(u);
1692 n++;
1693 }
1694
1695 m->dispatching_load_queue = false;
1696 return n;
1697 }
1698
1699 int manager_load_unit_prepare(
1700 Manager *m,
1701 const char *name,
1702 const char *path,
1703 sd_bus_error *e,
1704 Unit **_ret) {
1705
1706 _cleanup_(unit_freep) Unit *cleanup_ret = NULL;
1707 Unit *ret;
1708 UnitType t;
1709 int r;
1710
1711 assert(m);
1712 assert(name || path);
1713 assert(_ret);
1714
1715 /* This will prepare the unit for loading, but not actually
1716 * load anything from disk. */
1717
1718 if (path && !is_path(path))
1719 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);
1720
1721 if (!name)
1722 name = basename(path);
1723
1724 t = unit_name_to_type(name);
1725
1726 if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
1727 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
1728 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);
1729
1730 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
1731 }
1732
1733 ret = manager_get_unit(m, name);
1734 if (ret) {
1735 *_ret = ret;
1736 return 1;
1737 }
1738
1739 ret = cleanup_ret = unit_new(m, unit_vtable[t]->object_size);
1740 if (!ret)
1741 return -ENOMEM;
1742
1743 if (path) {
1744 ret->fragment_path = strdup(path);
1745 if (!ret->fragment_path)
1746 return -ENOMEM;
1747 }
1748
1749 r = unit_add_name(ret, name);
1750 if (r < 0)
1751 return r;
1752
1753 unit_add_to_load_queue(ret);
1754 unit_add_to_dbus_queue(ret);
1755 unit_add_to_gc_queue(ret);
1756
1757 *_ret = ret;
1758 cleanup_ret = NULL;
1759
1760 return 0;
1761 }
1762
1763 int manager_load_unit(
1764 Manager *m,
1765 const char *name,
1766 const char *path,
1767 sd_bus_error *e,
1768 Unit **_ret) {
1769
1770 int r;
1771
1772 assert(m);
1773 assert(_ret);
1774
1775 /* This will load the service information files, but not actually
1776 * start any services or anything. */
1777
1778 r = manager_load_unit_prepare(m, name, path, e, _ret);
1779 if (r != 0)
1780 return r;
1781
1782 manager_dispatch_load_queue(m);
1783
1784 *_ret = unit_follow_merge(*_ret);
1785
1786 return 0;
1787 }
1788
1789 void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
1790 Iterator i;
1791 Job *j;
1792
1793 assert(s);
1794 assert(f);
1795
1796 HASHMAP_FOREACH(j, s->jobs, i)
1797 job_dump(j, f, prefix);
1798 }
1799
1800 void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
1801 Iterator i;
1802 Unit *u;
1803 const char *t;
1804
1805 assert(s);
1806 assert(f);
1807
1808 HASHMAP_FOREACH_KEY(u, t, s->units, i)
1809 if (u->id == t)
1810 unit_dump(u, f, prefix);
1811 }
1812
1813 void manager_dump(Manager *m, FILE *f, const char *prefix) {
1814 ManagerTimestamp q;
1815
1816 assert(m);
1817 assert(f);
1818
1819 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
1820 char buf[FORMAT_TIMESTAMP_MAX];
1821
1822 if (dual_timestamp_is_set(m->timestamps + q))
1823 fprintf(f, "%sTimestamp %s: %s\n",
1824 strempty(prefix),
1825 manager_timestamp_to_string(q),
1826 format_timestamp(buf, sizeof(buf), m->timestamps[q].realtime));
1827 }
1828
1829 manager_dump_units(m, f, prefix);
1830 manager_dump_jobs(m, f, prefix);
1831 }
1832
1833 int manager_get_dump_string(Manager *m, char **ret) {
1834 _cleanup_free_ char *dump = NULL;
1835 _cleanup_fclose_ FILE *f = NULL;
1836 size_t size;
1837 int r;
1838
1839 assert(m);
1840 assert(ret);
1841
1842 f = open_memstream(&dump, &size);
1843 if (!f)
1844 return -errno;
1845
1846 (void) __fsetlocking(f, FSETLOCKING_BYCALLER);
1847
1848 manager_dump(m, f, NULL);
1849
1850 r = fflush_and_check(f);
1851 if (r < 0)
1852 return r;
1853
1854 f = safe_fclose(f);
1855
1856 *ret = dump;
1857 dump = NULL;
1858
1859 return 0;
1860 }
1861
1862 void manager_clear_jobs(Manager *m) {
1863 Job *j;
1864
1865 assert(m);
1866
1867 while ((j = hashmap_first(m->jobs)))
1868 /* No need to recurse. We're cancelling all jobs. */
1869 job_finish_and_invalidate(j, JOB_CANCELED, false, false);
1870 }
1871
1872 static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) {
1873 Manager *m = userdata;
1874 Job *j;
1875
1876 assert(source);
1877 assert(m);
1878
1879 while ((j = m->run_queue)) {
1880 assert(j->installed);
1881 assert(j->in_run_queue);
1882
1883 job_run_and_invalidate(j);
1884 }
1885
1886 if (m->n_running_jobs > 0)
1887 manager_watch_jobs_in_progress(m);
1888
1889 if (m->n_on_console > 0)
1890 manager_watch_idle_pipe(m);
1891
1892 return 1;
1893 }
1894
1895 static unsigned manager_dispatch_dbus_queue(Manager *m) {
1896 unsigned n = 0, budget;
1897 Unit *u;
1898 Job *j;
1899
1900 assert(m);
1901
1902 if (m->dispatching_dbus_queue)
1903 return 0;
1904
1905 /* Anything to do at all? */
1906 if (!m->dbus_unit_queue && !m->dbus_job_queue && !m->send_reloading_done && !m->queued_message)
1907 return 0;
1908
1909 /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's sit this
1910 * cycle out, and process things in a later cycle when the queues have emptied a bit. */
1911 if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
1912 return 0;
1913
1914 /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't overly
1915 * full before this call we shouldn't increase it in size too wildly in one step, and we shouldn't monopolize
1916 * CPU time with generating these messages. Note the difference in counting of this "budget" and the
1917 * "threshold" above: the "budget" is decreased only once per generated message, regardless how many
1918 * "threshold" above: the "budget" is decreased only once per generated message, regardless of how many
1919 * message, i.e. if the same message is enqueued to five busses/direct connections it will be counted five
1920 * times. This difference in counting ("references" vs. "instances") is primarily a result of the fact that
1921 * it's easier to implement it this way, however it also reflects the thinking that the "threshold" should put
1922 * a limit on used queue memory, i.e. space, while the "budget" should put a limit on time. Also note that
1923 * the "threshold" is currently chosen much higher than the "budget". */
1924 budget = MANAGER_BUS_MESSAGE_BUDGET;
1925
1926 m->dispatching_dbus_queue = true;
1927
1928 while (budget > 0 && (u = m->dbus_unit_queue)) {
1929
1930 assert(u->in_dbus_queue);
1931
1932 bus_unit_send_change_signal(u);
1933 n++, budget--;
1934 }
1935
1936 while (budget > 0 && (j = m->dbus_job_queue)) {
1937 assert(j->in_dbus_queue);
1938
1939 bus_job_send_change_signal(j);
1940 n++, budget--;
1941 }
1942
1943 m->dispatching_dbus_queue = false;
1944
1945 if (budget > 0 && m->send_reloading_done) {
1946 m->send_reloading_done = false;
1947 bus_manager_send_reloading(m, false);
1948 n++, budget--;
1949 }
1950
1951 if (budget > 0 && m->queued_message) {
1952 bus_send_queued_message(m);
1953 n++;
1954 }
1955
1956 return n;
1957 }
1958
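/* Dispatcher for the cgroups agent socket: each datagram carries the path of a cgroup that just
 * became empty (e.g. "/system.slice/foo.service"). We hand it to manager_notify_cgroup_empty()
 * and also forward it over the bus via bus_forward_agent_released(). */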
1959 static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
1960 Manager *m = userdata;
1961 char buf[PATH_MAX+1];
1962 ssize_t n;
1963
1964 n = recv(fd, buf, sizeof(buf), 0);
1965 if (n < 0)
1966 return log_error_errno(errno, "Failed to read cgroups agent message: %m");
1967 if (n == 0) {
1968 log_error("Got zero-length cgroups agent message, ignoring.");
1969 return 0;
1970 }
1971 if ((size_t) n >= sizeof(buf)) {
1972 log_error("Got overly long cgroups agent message, ignoring.");
1973 return 0;
1974 }
1975
1976 if (memchr(buf, 0, n)) {
1977 log_error("Got cgroups agent message with embedded NUL byte, ignoring.");
1978 return 0;
1979 }
1980 buf[n] = 0;
1981
1982 manager_notify_cgroup_empty(m, buf);
1983 (void) bus_forward_agent_released(m, buf);
1984
1985 return 0;
1986 }
1987
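/* Delivers one notification datagram to one unit: the payload is a newline-separated list of
 * assignments (e.g. "READY=1", "STATUS=..."), which we split into a string vector and pass to
 * the unit type's notify_message() hook. The notifygen counter ensures each unit is invoked at
 * most once per datagram, even if it shows up multiple times in the dispatch below. */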
1988 static void manager_invoke_notify_message(
1989 Manager *m,
1990 Unit *u,
1991 const struct ucred *ucred,
1992 const char *buf,
1993 FDSet *fds) {
1994
1995 assert(m);
1996 assert(u);
1997 assert(ucred);
1998 assert(buf);
1999
2000 if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */
2001 return;
2002 u->notifygen = m->notifygen;
2003
2004 if (UNIT_VTABLE(u)->notify_message) {
2005 _cleanup_strv_free_ char **tags = NULL;
2006
2007 tags = strv_split(buf, NEWLINE);
2008 if (!tags) {
2009 log_oom();
2010 return;
2011 }
2012
2013 UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds);
2014
2015 } else if (DEBUG_LOGGING) {
2016 _cleanup_free_ char *x = NULL, *y = NULL;
2017
2018 x = ellipsize(buf, 20, 90);
2019 if (x)
2020 y = cescape(x);
2021
2022 log_unit_debug(u, "Got notification message \"%s\", ignoring.", strnull(y));
2023 }
2024 }
2025
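/* Dispatcher for the notification socket: receives a single sd_notify() datagram together with
 * its SCM_CREDENTIALS (and optional SCM_RIGHTS) control data, determines which unit(s) the
 * sending PID belongs to, and invokes manager_invoke_notify_message() on each of them. */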
2026 static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2027
2028 _cleanup_fdset_free_ FDSet *fds = NULL;
2029 Manager *m = userdata;
2030 char buf[NOTIFY_BUFFER_MAX+1];
2031 struct iovec iovec = {
2032 .iov_base = buf,
2033 .iov_len = sizeof(buf)-1,
2034 };
2035 union {
2036 struct cmsghdr cmsghdr;
2037 uint8_t buf[CMSG_SPACE(sizeof(struct ucred)) +
2038 CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)];
2039 } control = {};
2040 struct msghdr msghdr = {
2041 .msg_iov = &iovec,
2042 .msg_iovlen = 1,
2043 .msg_control = &control,
2044 .msg_controllen = sizeof(control),
2045 };
2046
2047 struct cmsghdr *cmsg;
2048 struct ucred *ucred = NULL;
2049 _cleanup_free_ Unit **array_copy = NULL;
2050 Unit *u1, *u2, **array;
2051 int r, *fd_array = NULL;
2052 unsigned n_fds = 0;
2053 bool found = false;
2054 ssize_t n;
2055
2056 assert(m);
2057 assert(m->notify_fd == fd);
2058
2059 if (revents != EPOLLIN) {
2060 log_warning("Got unexpected poll event for notify fd.");
2061 return 0;
2062 }
2063
2064 n = recvmsg(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
2065 if (n < 0) {
2066 if (IN_SET(errno, EAGAIN, EINTR))
2067 return 0; /* Spurious wakeup, try again */
2068
2069 /* If this is any other, real error, then let's stop processing this socket. This of course means we
2070 * won't take notification messages anymore, but that's still better than busy looping around this:
2071 * being woken up over and over again but being unable to actually read the message off the socket. */
2072 return log_error_errno(errno, "Failed to receive notification message: %m");
2073 }
2074
2075 CMSG_FOREACH(cmsg, &msghdr) {
2076 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
2077
2078 fd_array = (int*) CMSG_DATA(cmsg);
2079 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
2080
2081 } else if (cmsg->cmsg_level == SOL_SOCKET &&
2082 cmsg->cmsg_type == SCM_CREDENTIALS &&
2083 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
2084
2085 ucred = (struct ucred*) CMSG_DATA(cmsg);
2086 }
2087 }
2088
2089 if (n_fds > 0) {
2090 assert(fd_array);
2091
2092 r = fdset_new_array(&fds, fd_array, n_fds);
2093 if (r < 0) {
2094 close_many(fd_array, n_fds);
2095 log_oom();
2096 return 0;
2097 }
2098 }
2099
2100 if (!ucred || !pid_is_valid(ucred->pid)) {
2101 log_warning("Received notify message without valid credentials. Ignoring.");
2102 return 0;
2103 }
2104
2105 if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) {
2106 log_warning("Received notify message exceeded maximum size. Ignoring.");
2107 return 0;
2108 }
2109
2110 /* As an extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes. We permit one
2111 * trailing NUL byte in the message, but don't expect it. */
2112 if (n > 1 && memchr(buf, 0, n-1)) {
2113 log_warning("Received notify message with embedded NUL bytes. Ignoring.");
2114 return 0;
2115 }
2116
2117 /* Make sure it's NUL-terminated. */
2118 buf[n] = 0;
2119
2120 /* Increase the generation counter used for filtering out duplicate unit invocations. */
2121 m->notifygen++;
2122
2123 /* Notify every unit that might be interested, which might be multiple. */
2124 u1 = manager_get_unit_by_pid_cgroup(m, ucred->pid);
2125 u2 = hashmap_get(m->watch_pids, PID_TO_PTR(ucred->pid));
2126 array = hashmap_get(m->watch_pids, PID_TO_PTR(-ucred->pid));
2127 if (array) {
2128 size_t k = 0;
2129
2130 while (array[k])
2131 k++;
2132
2133 array_copy = newdup(Unit*, array, k+1);
2134 if (!array_copy)
2135 log_oom();
2136 }
2137 /* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle duplicate units
2138 * and make sure we only invoke each unit's handler once. */
2139 if (u1) {
2140 manager_invoke_notify_message(m, u1, ucred, buf, fds);
2141 found = true;
2142 }
2143 if (u2) {
2144 manager_invoke_notify_message(m, u2, ucred, buf, fds);
2145 found = true;
2146 }
2147 if (array_copy)
2148 for (size_t i = 0; array_copy[i]; i++) {
2149 manager_invoke_notify_message(m, array_copy[i], ucred, buf, fds);
2150 found = true;
2151 }
2152
2153 if (!found)
2154 log_warning("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid);
2155
2156 if (fdset_size(fds) > 0)
2157 log_warning("Got extra auxiliary fds with notification message, closing them.");
2158
2159 return 0;
2160 }
2161
2162 static void manager_invoke_sigchld_event(
2163 Manager *m,
2164 Unit *u,
2165 const siginfo_t *si) {
2166
2167 assert(m);
2168 assert(u);
2169 assert(si);
2170
2171 /* Already invoked the handler of this unit in this iteration? Then don't process this again */
2172 if (u->sigchldgen == m->sigchldgen)
2173 return;
2174 u->sigchldgen = m->sigchldgen;
2175
2176 log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id);
2177 unit_unwatch_pid(u, si->si_pid);
2178
2179 if (UNIT_VTABLE(u)->sigchld_event)
2180 UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status);
2181 }
2182
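/* Dispatcher for the SIGCHLD event source: peeks at one terminated child with
 * waitid(..., WNOWAIT), lets the owning unit(s) process the event and only then reaps the
 * zombie. When no further children are waiting the event source is disabled again until the
 * next SIGCHLD arrives. */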
2183 static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) {
2184 Manager *m = userdata;
2185 siginfo_t si = {};
2186 int r;
2187
2188 assert(source);
2189 assert(m);
2190
2191 /* First we call waitid() for a PID and do not reap the zombie. That way we can still access /proc/$PID for it
2192 * while it is a zombie. */
2193
2194 if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {
2195
2196 if (errno != ECHILD)
2197 log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m");
2198
2199 goto turn_off;
2200 }
2201
2202 if (si.si_pid <= 0)
2203 goto turn_off;
2204
2205 if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) {
2206 _cleanup_free_ Unit **array_copy = NULL;
2207 _cleanup_free_ char *name = NULL;
2208 Unit *u1, *u2, **array;
2209
2210 (void) get_process_comm(si.si_pid, &name);
2211
2212 log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)",
2213 si.si_pid, strna(name),
2214 sigchld_code_to_string(si.si_code),
2215 si.si_status,
2216 strna(si.si_code == CLD_EXITED
2217 ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
2218 : signal_to_string(si.si_status)));
2219
2220 /* Increase the generation counter used for filtering out duplicate unit invocations */
2221 m->sigchldgen++;
2222
2223 /* And now figure out the unit this belongs to, it might be multiple... */
2224 u1 = manager_get_unit_by_pid_cgroup(m, si.si_pid);
2225 u2 = hashmap_get(m->watch_pids, PID_TO_PTR(si.si_pid));
2226 array = hashmap_get(m->watch_pids, PID_TO_PTR(-si.si_pid));
2227 if (array) {
2228 size_t n = 0;
2229
2230 /* Count how many entries the array has */
2231 while (array[n])
2232 n++;
2233
2234 /* Make a copy of the array so that we don't trip up on the array changing beneath us */
2235 array_copy = newdup(Unit*, array, n+1);
2236 if (!array_copy)
2237 log_oom();
2238 }
2239
2240 /* Finally, execute them all. Note that u1, u2 and the array might contain duplicates, but
2241 * that's fine, manager_invoke_sigchld_event() will ensure we only invoke the handlers once for
2242 * each iteration. */
2243 if (u1)
2244 manager_invoke_sigchld_event(m, u1, &si);
2245 if (u2)
2246 manager_invoke_sigchld_event(m, u2, &si);
2247 if (array_copy)
2248 for (size_t i = 0; array_copy[i]; i++)
2249 manager_invoke_sigchld_event(m, array_copy[i], &si);
2250 }
2251
2252 /* And now, we actually reap the zombie. */
2253 if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
2254 log_error_errno(errno, "Failed to dequeue child, ignoring: %m");
2255 return 0;
2256 }
2257
2258 return 0;
2259
2260 turn_off:
2261 /* All children processed for now, turn off event source */
2262
2263 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
2264 if (r < 0)
2265 return log_error_errno(r, "Failed to disable SIGCHLD event source: %m");
2266
2267 return 0;
2268 }
2269
2270 static void manager_start_target(Manager *m, const char *name, JobMode mode) {
2271 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2272 int r;
2273
2274 log_debug("Activating special unit %s", name);
2275
2276 r = manager_add_job_by_name(m, JOB_START, name, mode, &error, NULL);
2277 if (r < 0)
2278 log_error("Failed to enqueue %s job: %s", name, bus_error_message(&error, r));
2279 }
2280
2281 static void manager_handle_ctrl_alt_del(Manager *m) {
2282 /* If the user presses C-A-D more than
2283 * 7 times within 2s, we reboot/shutdown immediately,
2284 * unless it was disabled in system.conf */
2285
2286 if (ratelimit_test(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE)
2287 manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY);
2288 else
2289 emergency_action(m, m->cad_burst_action, NULL,
2290 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
2291 }
2292
2293 static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2294 Manager *m = userdata;
2295 ssize_t n;
2296 struct signalfd_siginfo sfsi;
2297 int r;
2298
2299 assert(m);
2300 assert(m->signal_fd == fd);
2301
2302 if (revents != EPOLLIN) {
2303 log_warning("Got unexpected events from signal file descriptor.");
2304 return 0;
2305 }
2306
2307 n = read(m->signal_fd, &sfsi, sizeof(sfsi));
2308 if (n != sizeof(sfsi)) {
2309 if (n >= 0) {
2310 log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n);
2311 return 0;
2312 }
2313
2314 if (IN_SET(errno, EINTR, EAGAIN))
2315 return 0;
2316
2317 /* We return an error here, which will kill this handler,
2318 * to avoid a busy loop on read error. */
2319 return log_error_errno(errno, "Reading from signal fd failed: %m");
2320 }
2321
2322 log_received_signal(sfsi.ssi_signo == SIGCHLD ||
2323 (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m))
2324 ? LOG_DEBUG : LOG_INFO,
2325 &sfsi);
2326
2327 switch (sfsi.ssi_signo) {
2328
2329 case SIGCHLD:
2330 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2331 if (r < 0)
2332 log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m");
2333
2334 break;
2335
2336 case SIGTERM:
2337 if (MANAGER_IS_SYSTEM(m)) {
2338 /* This is for compatibility with the original sysvinit */
2339 r = verify_run_space_and_log("Refusing to reexecute");
2340 if (r >= 0)
2341 m->exit_code = MANAGER_REEXECUTE;
2342 break;
2343 }
2344
2345 _fallthrough_;
2346 case SIGINT:
2347 if (MANAGER_IS_SYSTEM(m))
2348 manager_handle_ctrl_alt_del(m);
2349 else
2350 manager_start_target(m, SPECIAL_EXIT_TARGET,
2351 JOB_REPLACE_IRREVERSIBLY);
2352 break;
2353
2354 case SIGWINCH:
2355 /* This is a nop on non-init */
2356 if (MANAGER_IS_SYSTEM(m))
2357 manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);
2358
2359 break;
2360
2361 case SIGPWR:
2362 /* This is a nop on non-init */
2363 if (MANAGER_IS_SYSTEM(m))
2364 manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);
2365
2366 break;
2367
2368 case SIGUSR1:
2369 if (manager_dbus_is_running(m, false)) {
2370 log_info("Trying to reconnect to bus...");
2371
2372 (void) bus_init_api(m);
2373
2374 if (MANAGER_IS_SYSTEM(m))
2375 (void) bus_init_system(m);
2376 } else {
2377 log_info("Starting D-Bus service...");
2378 manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
2379 }
2380
2381 break;
2382
2383 case SIGUSR2: {
2384 _cleanup_free_ char *dump = NULL;
2385
2386 r = manager_get_dump_string(m, &dump);
2387 if (r < 0) {
2388 log_warning_errno(r, "Failed to acquire manager dump: %m");
2389 break;
2390 }
2391
2392 log_dump(LOG_INFO, dump);
2393 break;
2394 }
2395
2396 case SIGHUP:
2397 r = verify_run_space_and_log("Refusing to reload");
2398 if (r >= 0)
2399 m->exit_code = MANAGER_RELOAD;
2400 break;
2401
2402 default: {
2403
2404 /* Starting SIGRTMIN+0 */
2405 static const struct {
2406 const char *target;
2407 JobMode mode;
2408 } target_table[] = {
2409 [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE },
2410 [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE },
2411 [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE },
2412 [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY },
2413 [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY },
2414 [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
2415 [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY },
2416 };
2417
2418 /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
2419 static const ManagerExitCode code_table[] = {
2420 [0] = MANAGER_HALT,
2421 [1] = MANAGER_POWEROFF,
2422 [2] = MANAGER_REBOOT,
2423 [3] = MANAGER_KEXEC,
2424 };
2425
2426 if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
2427 (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
2428 int idx = (int) sfsi.ssi_signo - SIGRTMIN;
2429 manager_start_target(m, target_table[idx].target,
2430 target_table[idx].mode);
2431 break;
2432 }
2433
2434 if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
2435 (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
2436 m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
2437 break;
2438 }
2439
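/* The remaining realtime signals (SIGRTMIN+20..SIGRTMIN+29) are runtime tweaks: status output
 * on/off, log level, user-manager exit, and log target. */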
2440 switch (sfsi.ssi_signo - SIGRTMIN) {
2441
2442 case 20:
2443 manager_set_show_status(m, SHOW_STATUS_YES);
2444 break;
2445
2446 case 21:
2447 manager_set_show_status(m, SHOW_STATUS_NO);
2448 break;
2449
2450 case 22:
2451 log_set_max_level(LOG_DEBUG);
2452 log_info("Setting log level to debug.");
2453 break;
2454
2455 case 23:
2456 log_set_max_level(LOG_INFO);
2457 log_info("Setting log level to info.");
2458 break;
2459
2460 case 24:
2461 if (MANAGER_IS_USER(m)) {
2462 m->exit_code = MANAGER_EXIT;
2463 return 0;
2464 }
2465
2466 /* This is a nop on init */
2467 break;
2468
2469 case 26:
2470 case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
2471 log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
2472 log_notice("Setting log target to journal-or-kmsg.");
2473 break;
2474
2475 case 27:
2476 log_set_target(LOG_TARGET_CONSOLE);
2477 log_notice("Setting log target to console.");
2478 break;
2479
2480 case 28:
2481 log_set_target(LOG_TARGET_KMSG);
2482 log_notice("Setting log target to kmsg.");
2483 break;
2484
2485 default:
2486 log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
2487 }
2488 }}
2489
2490 return 0;
2491 }
2492
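/* Invoked when the kernel notifies us that the system clock jumped: we re-arm the watch by
 * recreating the time change fd, and give every unit type that implements the time_change()
 * hook (e.g. timer units) a chance to react. */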
2493 static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2494 Manager *m = userdata;
2495 Iterator i;
2496 Unit *u;
2497
2498 assert(m);
2499 assert(m->time_change_fd == fd);
2500
2501 log_struct(LOG_DEBUG,
2502 "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR,
2503 LOG_MESSAGE("Time has been changed"),
2504 NULL);
2505
2506 /* Restart the watch */
2507 m->time_change_event_source = sd_event_source_unref(m->time_change_event_source);
2508 m->time_change_fd = safe_close(m->time_change_fd);
2509
2510 manager_setup_time_change(m);
2511
2512 HASHMAP_FOREACH(u, m->units, i)
2513 if (UNIT_VTABLE(u)->time_change)
2514 UNIT_VTABLE(u)->time_change(u);
2515
2516 return 0;
2517 }
2518
2519 static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
2520 Manager *m = userdata;
2521
2522 assert(m);
2523 assert(m->idle_pipe[2] == fd);
2524
2525 /* There's at least one Type=idle child that just gave up on us waiting for the boot process to complete. Let's
2526 * now turn off any further console output if there's at least one service that needs console access, so that
2527 * from now on our own output should not spill into that service's output anymore. After all, we support
2528 * Type=idle only to beautify console output and it generally is set on services that want to own the console
2529 * exclusively without our interference. */
2530 m->no_console_output = m->n_on_console > 0;
2531
2532 /* Acknowledge the child's request, and let all other children know too that they shouldn't wait any longer
2533 * by closing the pipes towards them, which is what they are waiting for. */
2534 manager_close_idle_pipe(m);
2535
2536 return 0;
2537 }
2538
2539 static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) {
2540 Manager *m = userdata;
2541 int r;
2542 uint64_t next;
2543
2544 assert(m);
2545 assert(source);
2546
2547 manager_print_jobs_in_progress(m);
2548
2549 next = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_PERIOD_USEC;
2550 r = sd_event_source_set_time(source, next);
2551 if (r < 0)
2552 return r;
2553
2554 return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT);
2555 }
2556
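/* The manager's main loop: before each sd_event_run() iteration we drain the internal work
 * queues (load, GC, cleanup, cgroup realization, D-Bus), and keep pinging the hardware
 * watchdog if one is configured for the system instance. */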
2557 int manager_loop(Manager *m) {
2558 int r;
2559
2560 RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);
2561
2562 assert(m);
2563 m->exit_code = MANAGER_OK;
2564
2565 /* Release the path cache */
2566 m->unit_path_cache = set_free_free(m->unit_path_cache);
2567
2568 manager_check_finished(m);
2569
2570 /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
2571 r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
2572 if (r < 0)
2573 return log_error_errno(r, "Failed to enable SIGCHLD event source: %m");
2574
2575 while (m->exit_code == MANAGER_OK) {
2576 usec_t wait_usec;
2577
2578 if (m->runtime_watchdog > 0 && m->runtime_watchdog != USEC_INFINITY && MANAGER_IS_SYSTEM(m))
2579 watchdog_ping();
2580
2581 if (!ratelimit_test(&rl)) {
2582 /* Yay, something is going seriously wrong, pause a little */
2583 log_warning("Looping too fast. Throttling execution a little.");
2584 sleep(1);
2585 }
2586
2587 if (manager_dispatch_load_queue(m) > 0)
2588 continue;
2589
2590 if (manager_dispatch_gc_job_queue(m) > 0)
2591 continue;
2592
2593 if (manager_dispatch_gc_unit_queue(m) > 0)
2594 continue;
2595
2596 if (manager_dispatch_cleanup_queue(m) > 0)
2597 continue;
2598
2599 if (manager_dispatch_cgroup_realize_queue(m) > 0)
2600 continue;
2601
2602 if (manager_dispatch_dbus_queue(m) > 0)
2603 continue;
2604
2605 /* Sleep for half the watchdog time */
2606 if (m->runtime_watchdog > 0 && m->runtime_watchdog != USEC_INFINITY && MANAGER_IS_SYSTEM(m)) {
2607 wait_usec = m->runtime_watchdog / 2;
2608 if (wait_usec <= 0)
2609 wait_usec = 1;
2610 } else
2611 wait_usec = USEC_INFINITY;
2612
2613 r = sd_event_run(m->event, wait_usec);
2614 if (r < 0)
2615 return log_error_errno(r, "Failed to run event loop: %m");
2616 }
2617
2618 return m->exit_code;
2619 }
2620
2621 int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
2622 _cleanup_free_ char *n = NULL;
2623 sd_id128_t invocation_id;
2624 Unit *u;
2625 int r;
2626
2627 assert(m);
2628 assert(s);
2629 assert(_u);
2630
2631 r = unit_name_from_dbus_path(s, &n);
2632 if (r < 0)
2633 return r;
2634
2635 /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128bit ID then we use it
2636 * as invocation ID. */
2637 r = sd_id128_from_string(n, &invocation_id);
2638 if (r >= 0) {
2639 u = hashmap_get(m->units_by_invocation_id, &invocation_id);
2640 if (u) {
2641 *_u = u;
2642 return 0;
2643 }
2644
2645 return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID, "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.", SD_ID128_FORMAT_VAL(invocation_id));
2646 }
2647
2648 /* If this didn't work, we check if this is a unit name */
2649 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
2650 return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is neither a valid invocation ID nor unit name.", n);
2651
2652 r = manager_load_unit(m, n, NULL, e, &u);
2653 if (r < 0)
2654 return r;
2655
2656 *_u = u;
2657 return 0;
2658 }
2659
2660 int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
2661 const char *p;
2662 unsigned id;
2663 Job *j;
2664 int r;
2665
2666 assert(m);
2667 assert(s);
2668 assert(_j);
2669
2670 p = startswith(s, "/org/freedesktop/systemd1/job/");
2671 if (!p)
2672 return -EINVAL;
2673
2674 r = safe_atou(p, &id);
2675 if (r < 0)
2676 return r;
2677
2678 j = manager_get_job(m, id);
2679 if (!j)
2680 return -ENOENT;
2681
2682 *_j = j;
2683
2684 return 0;
2685 }
2686
2687 void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {
2688
2689 #if HAVE_AUDIT
2690 _cleanup_free_ char *p = NULL;
2691 const char *msg;
2692 int audit_fd, r;
2693
2694 if (!MANAGER_IS_SYSTEM(m))
2695 return;
2696
2697 audit_fd = get_audit_fd();
2698 if (audit_fd < 0)
2699 return;
2700
2701 /* Don't generate audit events if the service was already
2702 * started and we're just deserializing */
2703 if (MANAGER_IS_RELOADING(m))
2704 return;
2705
2706 if (u->type != UNIT_SERVICE)
2707 return;
2708
2709 r = unit_name_to_prefix_and_instance(u->id, &p);
2710 if (r < 0) {
2711 log_error_errno(r, "Failed to extract prefix and instance of unit name: %m");
2712 return;
2713 }
2714
2715 msg = strjoina("unit=", p);
2716 if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
2717 if (errno == EPERM)
2718 /* We aren't allowed to send audit messages?
2719 * Then let's not retry again. */
2720 close_audit_fd();
2721 else
2722 log_warning_errno(errno, "Failed to send audit message: %m");
2723 }
2724 #endif
2725
2726 }
2727
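/* Tells Plymouth about the unit we are starting so it can update the splash screen. The
 * message is written fire-and-forget over Plymouth's socket; if Plymouth isn't around
 * (socket missing, connection refused, ...) we silently give up. */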
2728 void manager_send_unit_plymouth(Manager *m, Unit *u) {
2729 static const union sockaddr_union sa = PLYMOUTH_SOCKET;
2730 _cleanup_free_ char *message = NULL;
2731 _cleanup_close_ int fd = -1;
2732 int n = 0;
2733
2734 /* Don't generate plymouth events if the service was already
2735 * started and we're just deserializing */
2736 if (MANAGER_IS_RELOADING(m))
2737 return;
2738
2739 if (!MANAGER_IS_SYSTEM(m))
2740 return;
2741
2742 if (detect_container() > 0)
2743 return;
2744
2745 if (!IN_SET(u->type, UNIT_SERVICE, UNIT_MOUNT, UNIT_SWAP))
2746 return;
2747
2748 /* We set SOCK_NONBLOCK here so that we would rather drop the
2749 * message than wait for plymouth */
2750 fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
2751 if (fd < 0) {
2752 log_error_errno(errno, "socket() failed: %m");
2753 return;
2754 }
2755
2756 if (connect(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un)) < 0) {
2757 if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
2758 log_error_errno(errno, "connect() failed: %m");
2759 return;
2760 }
2761
2762 if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
2763 log_oom();
2764 return;
2765 }
2766
2767 errno = 0;
2768 if (write(fd, message, n + 1) != n + 1)
2769 if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
2770 log_error_errno(errno, "Failed to write Plymouth message: %m");
2771 }
2772
2773 int manager_open_serialization(Manager *m, FILE **_f) {
2774 int fd;
2775 FILE *f;
2776
2777 assert(_f);
2778
2779 fd = open_serialization_fd("systemd-state");
2780 if (fd < 0)
2781 return fd;
2782
2783 f = fdopen(fd, "w+");
2784 if (!f) {
2785 safe_close(fd);
2786 return -errno;
2787 }
2788
2789 *_f = f;
2790 return 0;
2791 }
2792
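/* Serializes the manager state for reexecution/reloading: first a block of simple "key=value"
 * lines (job counters, taint flags, timestamps, stashed fd numbers, ...) terminated by an
 * empty line, then one section per unit, each introduced by the unit's name on a line of its
 * own. */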
2793 int manager_serialize(Manager *m, FILE *f, FDSet *fds, bool switching_root) {
2794 ManagerTimestamp q;
2795 const char *t;
2796 Iterator i;
2797 Unit *u;
2798 int r;
2799
2800 assert(m);
2801 assert(f);
2802 assert(fds);
2803
2804 m->n_reloading++;
2805
2806 fprintf(f, "current-job-id=%"PRIu32"\n", m->current_job_id);
2807 fprintf(f, "n-installed-jobs=%u\n", m->n_installed_jobs);
2808 fprintf(f, "n-failed-jobs=%u\n", m->n_failed_jobs);
2809 fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
2810 fprintf(f, "ready-sent=%s\n", yes_no(m->ready_sent));
2811 fprintf(f, "taint-logged=%s\n", yes_no(m->taint_logged));
2812 fprintf(f, "service-watchdogs=%s\n", yes_no(m->service_watchdogs));
2813
2814 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
2815 /* The userspace and finish timestamps only apply to the host system, hence only serialize them there */
2816 if (in_initrd() && IN_SET(q, MANAGER_TIMESTAMP_USERSPACE, MANAGER_TIMESTAMP_FINISH))
2817 continue;
2818
2819 t = manager_timestamp_to_string(q);
2820 {
2821 char field[strlen(t) + STRLEN("-timestamp") + 1];
2822 strcpy(stpcpy(field, t), "-timestamp");
2823 dual_timestamp_serialize(f, field, m->timestamps + q);
2824 }
2825 }
2826
2827 if (!switching_root)
2828 (void) serialize_environment(f, m->environment);
2829
2830 if (m->notify_fd >= 0) {
2831 int copy;
2832
2833 copy = fdset_put_dup(fds, m->notify_fd);
2834 if (copy < 0)
2835 return copy;
2836
2837 fprintf(f, "notify-fd=%i\n", copy);
2838 fprintf(f, "notify-socket=%s\n", m->notify_socket);
2839 }
2840
2841 if (m->cgroups_agent_fd >= 0) {
2842 int copy;
2843
2844 copy = fdset_put_dup(fds, m->cgroups_agent_fd);
2845 if (copy < 0)
2846 return copy;
2847
2848 fprintf(f, "cgroups-agent-fd=%i\n", copy);
2849 }
2850
2851 if (m->user_lookup_fds[0] >= 0) {
2852 int copy0, copy1;
2853
2854 copy0 = fdset_put_dup(fds, m->user_lookup_fds[0]);
2855 if (copy0 < 0)
2856 return copy0;
2857
2858 copy1 = fdset_put_dup(fds, m->user_lookup_fds[1]);
2859 if (copy1 < 0)
2860 return copy1;
2861
2862 fprintf(f, "user-lookup=%i %i\n", copy0, copy1);
2863 }
2864
2865 bus_track_serialize(m->subscribed, f, "subscribed");
2866
2867 r = dynamic_user_serialize(m, f, fds);
2868 if (r < 0)
2869 return r;
2870
2871 manager_serialize_uid_refs(m, f);
2872 manager_serialize_gid_refs(m, f);
2873
2874 r = exec_runtime_serialize(m, f, fds);
2875 if (r < 0)
2876 return r;
2877
2878 (void) fputc('\n', f);
2879
2880 HASHMAP_FOREACH_KEY(u, t, m->units, i) {
2881 if (u->id != t)
2882 continue;
2883
2884 /* Start marker */
2885 fputs(u->id, f);
2886 fputc('\n', f);
2887
2888 r = unit_serialize(u, f, fds, !switching_root);
2889 if (r < 0) {
2890 m->n_reloading--;
2891 return r;
2892 }
2893 }
2894
2895 assert(m->n_reloading > 0);
2896 m->n_reloading--;
2897
2898 if (ferror(f))
2899 return -EIO;
2900
2901 r = bus_fdset_add_all(m, fds);
2902 if (r < 0)
2903 return r;
2904
2905 return 0;
2906 }
2907
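/* Inverse of manager_serialize(): parses the global "key=value" block up to the first empty
 * line, then loads each serialized unit and lets it deserialize its own section. Unknown keys
 * are logged and skipped. */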
2908 int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
2909 int r = 0;
2910
2911 assert(m);
2912 assert(f);
2913
2914 log_debug("Deserializing state...");
2915
2916 m->n_reloading++;
2917
2918 for (;;) {
2919 char line[LINE_MAX];
2920 const char *val, *l;
2921
2922 if (!fgets(line, sizeof(line), f)) {
2923 if (feof(f))
2924 r = 0;
2925 else
2926 r = -errno;
2927
2928 goto finish;
2929 }
2930
2931 char_array_0(line);
2932 l = strstrip(line);
2933
2934 if (l[0] == 0)
2935 break;
2936
2937 if ((val = startswith(l, "current-job-id="))) {
2938 uint32_t id;
2939
2940 if (safe_atou32(val, &id) < 0)
2941 log_notice("Failed to parse current job id value %s", val);
2942 else
2943 m->current_job_id = MAX(m->current_job_id, id);
2944
2945 } else if ((val = startswith(l, "n-installed-jobs="))) {
2946 uint32_t n;
2947
2948 if (safe_atou32(val, &n) < 0)
2949 log_notice("Failed to parse installed jobs counter %s", val);
2950 else
2951 m->n_installed_jobs += n;
2952
2953 } else if ((val = startswith(l, "n-failed-jobs="))) {
2954 uint32_t n;
2955
2956 if (safe_atou32(val, &n) < 0)
2957 log_notice("Failed to parse failed jobs counter %s", val);
2958 else
2959 m->n_failed_jobs += n;
2960
2961 } else if ((val = startswith(l, "taint-usr="))) {
2962 int b;
2963
2964 b = parse_boolean(val);
2965 if (b < 0)
2966 log_notice("Failed to parse taint /usr flag %s", val);
2967 else
2968 m->taint_usr = m->taint_usr || b;
2969
2970 } else if ((val = startswith(l, "ready-sent="))) {
2971 int b;
2972
2973 b = parse_boolean(val);
2974 if (b < 0)
2975 log_notice("Failed to parse ready-sent flag %s", val);
2976 else
2977 m->ready_sent = m->ready_sent || b;
2978
2979 } else if ((val = startswith(l, "taint-logged="))) {
2980 int b;
2981
2982 b = parse_boolean(val);
2983 if (b < 0)
2984 log_notice("Failed to parse taint-logged flag %s", val);
2985 else
2986 m->taint_logged = m->taint_logged || b;
2987
2988 } else if ((val = startswith(l, "service-watchdogs="))) {
2989 int b;
2990
2991 b = parse_boolean(val);
2992 if (b < 0)
2993 log_notice("Failed to parse service-watchdogs flag %s", val);
2994 else
2995 m->service_watchdogs = b;
2996
2997 } else if (startswith(l, "env=")) {
2998 r = deserialize_environment(&m->environment, l);
2999 if (r == -ENOMEM)
3000 goto finish;
3001 if (r < 0)
3002 log_notice_errno(r, "Failed to parse environment entry: \"%s\": %m", l);
3003
3004 } else if ((val = startswith(l, "notify-fd="))) {
3005 int fd;
3006
3007 if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
3008 log_notice("Failed to parse notify fd: \"%s\"", val);
3009 else {
3010 m->notify_event_source = sd_event_source_unref(m->notify_event_source);
3011 safe_close(m->notify_fd);
3012 m->notify_fd = fdset_remove(fds, fd);
3013 }
3014
3015 } else if ((val = startswith(l, "notify-socket="))) {
3016 char *n;
3017
3018 n = strdup(val);
3019 if (!n) {
3020 r = -ENOMEM;
3021 goto finish;
3022 }
3023
3024 free(m->notify_socket);
3025 m->notify_socket = n;
3026
3027 } else if ((val = startswith(l, "cgroups-agent-fd="))) {
3028 int fd;
3029
3030 if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
3031 log_notice("Failed to parse cgroups agent fd: %s", val);
3032 else {
3033 m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
3034 safe_close(m->cgroups_agent_fd);
3035 m->cgroups_agent_fd = fdset_remove(fds, fd);
3036 }
3037
3038 } else if ((val = startswith(l, "user-lookup="))) {
3039 int fd0, fd1;
3040
3041 if (sscanf(val, "%i %i", &fd0, &fd1) != 2 || fd0 < 0 || fd1 < 0 || fd0 == fd1 || !fdset_contains(fds, fd0) || !fdset_contains(fds, fd1))
3042 log_notice("Failed to parse user lookup fd: %s", val);
3043 else {
3044 m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
3045 safe_close_pair(m->user_lookup_fds);
3046 m->user_lookup_fds[0] = fdset_remove(fds, fd0);
3047 m->user_lookup_fds[1] = fdset_remove(fds, fd1);
3048 }
3049
3050 } else if ((val = startswith(l, "dynamic-user=")))
3051 dynamic_user_deserialize_one(m, val, fds);
3052 else if ((val = startswith(l, "destroy-ipc-uid=")))
3053 manager_deserialize_uid_refs_one(m, val);
3054 else if ((val = startswith(l, "destroy-ipc-gid=")))
3055 manager_deserialize_gid_refs_one(m, val);
3056 else if ((val = startswith(l, "exec-runtime=")))
3057 exec_runtime_deserialize_one(m, val, fds);
3058 else if ((val = startswith(l, "subscribed="))) {
3059
3060 if (strv_extend(&m->deserialized_subscribed, val) < 0)
3061 log_oom();
3062 } else {
3063 ManagerTimestamp q;
3064
3065 for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
3066 val = startswith(l, manager_timestamp_to_string(q));
3067 if (!val)
3068 continue;
3069
3070 val = startswith(val, "-timestamp=");
3071 if (val)
3072 break;
3073 }
3074
3075 if (q < _MANAGER_TIMESTAMP_MAX) /* found it */
3076 dual_timestamp_deserialize(val, m->timestamps + q);
3077 else if (!startswith(l, "kdbus-fd=")) /* ignore kdbus */
3078 log_notice("Unknown serialization item '%s'", l);
3079 }
3080 }
3081
3082 for (;;) {
3083 Unit *u;
3084 char name[UNIT_NAME_MAX+2];
3085 const char* unit_name;
3086
3087 /* Start marker */
3088 if (!fgets(name, sizeof(name), f)) {
3089 if (feof(f))
3090 r = 0;
3091 else
3092 r = -errno;
3093
3094 goto finish;
3095 }
3096
3097 char_array_0(name);
3098 unit_name = strstrip(name);
3099
3100 r = manager_load_unit(m, unit_name, NULL, NULL, &u);
3101 if (r < 0) {
3102 log_notice_errno(r, "Failed to load unit \"%s\", skipping deserialization: %m", unit_name);
3103 if (r == -ENOMEM)
3104 goto finish;
3105 unit_deserialize_skip(f);
3106 continue;
3107 }
3108
3109 r = unit_deserialize(u, f, fds);
3110 if (r < 0) {
3111 log_notice_errno(r, "Failed to deserialize unit \"%s\": %m", unit_name);
3112 if (r == -ENOMEM)
3113 goto finish;
3114 }
3115 }
3116
3117 finish:
3118 if (ferror(f))
3119 r = -EIO;
3120
3121 assert(m->n_reloading > 0);
3122 m->n_reloading--;
3123
3124 return r;
3125 }
3126
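/* Implements the reload operation: serialize the current state into a temporary serialization
 * file, flush all units, jobs and lookup paths, re-run the environment and unit generators,
 * re-enumerate units from disk, deserialize the saved state on top of that, and finally
 * coldplug everything back into its previous state. */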
3127 int manager_reload(Manager *m) {
3128 int r, q;
3129 _cleanup_fclose_ FILE *f = NULL;
3130 _cleanup_fdset_free_ FDSet *fds = NULL;
3131
3132 assert(m);
3133
3134 r = manager_open_serialization(m, &f);
3135 if (r < 0)
3136 return r;
3137
3138 m->n_reloading++;
3139 bus_manager_send_reloading(m, true);
3140
3141 fds = fdset_new();
3142 if (!fds) {
3143 m->n_reloading--;
3144 return -ENOMEM;
3145 }
3146
3147 r = manager_serialize(m, f, fds, false);
3148 if (r < 0) {
3149 m->n_reloading--;
3150 return r;
3151 }
3152
3153 if (fseeko(f, 0, SEEK_SET) < 0) {
3154 m->n_reloading--;
3155 return -errno;
3156 }
3157
3158 /* From here on there is no way back. */
3159 manager_clear_jobs_and_units(m);
3160 lookup_paths_flush_generator(&m->lookup_paths);
3161 lookup_paths_free(&m->lookup_paths);
3162 exec_runtime_vacuum(m);
3163 dynamic_user_vacuum(m, false);
3164 m->uid_refs = hashmap_free(m->uid_refs);
3165 m->gid_refs = hashmap_free(m->gid_refs);
3166
3167 q = lookup_paths_init(&m->lookup_paths, m->unit_file_scope, 0, NULL);
3168 if (q < 0 && r >= 0)
3169 r = q;
3170
3171 q = manager_run_environment_generators(m);
3172 if (q < 0 && r >= 0)
3173 r = q;
3174
3175 /* Find new unit paths */
3176 q = manager_run_generators(m);
3177 if (q < 0 && r >= 0)
3178 r = q;
3179
3180 lookup_paths_reduce(&m->lookup_paths);
3181 manager_build_unit_path_cache(m);
3182
3183 /* First, enumerate what we can from all config files */
3184 manager_enumerate(m);
3185
3186 /* Second, deserialize our stored data */
3187 q = manager_deserialize(m, f, fds);
3188 if (q < 0) {
3189 log_error_errno(q, "Deserialization failed: %m");
3190
3191 if (r >= 0)
3192 r = q;
3193 }
3194
3195 fclose(f);
3196 f = NULL;
3197
3198 /* Re-register notify_fd as event source */
3199 q = manager_setup_notify(m);
3200 if (q < 0 && r >= 0)
3201 r = q;
3202
3203 q = manager_setup_cgroups_agent(m);
3204 if (q < 0 && r >= 0)
3205 r = q;
3206
3207 q = manager_setup_user_lookup_fd(m);
3208 if (q < 0 && r >= 0)
3209 r = q;
3210
3211 /* Third, fire things up! */
3212 manager_coldplug(m);
3213
3214 /* Release any dynamic users no longer referenced */
3215 dynamic_user_vacuum(m, true);
3216
3217 /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
3218 manager_vacuum_uid_refs(m);
3219 manager_vacuum_gid_refs(m);
3220
3221 exec_runtime_vacuum(m);
3222
3223 assert(m->n_reloading > 0);
3224 m->n_reloading--;
3225
3226 /* It might be safe to log to the journal now and connect to dbus */
3227 manager_recheck_journal(m);
3228 manager_recheck_dbus(m);
3229
3230 /* Sync current state of bus names with our set of listening units */
3231 q = manager_enqueue_sync_bus_names(m);
3232 if (q < 0 && r >= 0)
3233 r = q;
3234
3235 m->send_reloading_done = true;
3236
3237 return r;
3238 }
3239
3240 void manager_reset_failed(Manager *m) {
3241 Unit *u;
3242 Iterator i;
3243
3244 assert(m);
3245
3246 HASHMAP_FOREACH(u, m->units, i)
3247 unit_reset_failed(u);
3248 }
3249
3250 bool manager_unit_inactive_or_pending(Manager *m, const char *name) {
3251 Unit *u;
3252
3253 assert(m);
3254 assert(name);
3255
3256 /* Returns true if the unit is inactive or going down */
3257 u = manager_get_unit(m, name);
3258 if (!u)
3259 return true;
3260
3261 return unit_inactive_or_pending(u);
3262 }
3263
3264 static void log_taint_string(Manager *m) {
3265 _cleanup_free_ char *taint = NULL;
3266
3267 assert(m);
3268
3269 if (MANAGER_IS_USER(m) || m->taint_logged)
3270 return;
3271
3272 m->taint_logged = true; /* only check for taint once */
3273
3274 taint = manager_taint_string(m);
3275 if (isempty(taint))
3276 return;
3277
3278 log_struct(LOG_NOTICE,
3279 LOG_MESSAGE("System is tainted: %s", taint),
3280 "TAINT=%s", taint,
3281 "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR,
3282 NULL);
3283 }
3284
3285 static void manager_notify_finished(Manager *m) {
3286 char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
3287 usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;
3288
3289 if (m->test_run_flags)
3290 return;
3291
3292 if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
3293 char ts[FORMAT_TIMESPAN_MAX];
3294 char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")]
3295 = {};
3296 char *p = buf;
3297 size_t size = sizeof buf;
3298
3299 /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
3300 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
3301 * negative values. */
3302
3303 firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
3304 loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3305 userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3306 total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;
3307
3308 if (firmware_usec > 0)
3309 size = strpcpyf(&p, size, "%s (firmware) + ", format_timespan(ts, sizeof(ts), firmware_usec, USEC_PER_MSEC));
3310 if (loader_usec > 0)
3311 size = strpcpyf(&p, size, "%s (loader) + ", format_timespan(ts, sizeof(ts), loader_usec, USEC_PER_MSEC));
3312
3313 if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {
3314
3315 /* The initrd case on bare metal */
3316 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3317 initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;
3318
3319 log_struct(LOG_INFO,
3320 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3321 "KERNEL_USEC="USEC_FMT, kernel_usec,
3322 "INITRD_USEC="USEC_FMT, initrd_usec,
3323 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3324 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.",
3325 buf,
3326 format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
3327 format_timespan(initrd, sizeof(initrd), initrd_usec, USEC_PER_MSEC),
3328 format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
3329 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3330 NULL);
3331 } else {
3332 /* The initrd-less case on bare metal */
3333
3334 kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
3335 initrd_usec = 0;
3336
3337 log_struct(LOG_INFO,
3338 "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
3339 "KERNEL_USEC="USEC_FMT, kernel_usec,
3340 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3341 LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.",
3342 buf,
3343 format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
3344 format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
3345 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3346 NULL);
3347 }
3348 } else {
3349 /* The container and --user case */
3350 firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
3351 total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
3352
3353 log_struct(LOG_INFO,
3354 "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR,
3355 "USERSPACE_USEC="USEC_FMT, userspace_usec,
3356 LOG_MESSAGE("Startup finished in %s.",
3357 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
3358 NULL);
3359 }
3360
3361 bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);
3362
3363 sd_notifyf(false,
3364 m->ready_sent ? "STATUS=Startup finished in %s."
3365 : "READY=1\n"
3366 "STATUS=Startup finished in %s.",
3367 format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC));
3368 m->ready_sent = true;
3369
3370 log_taint_string(m);
3371 }
3372
3373 static void manager_send_ready(Manager *m) {
3374 assert(m);
3375
3376 /* We send READY=1 on reaching basic.target only when running in --user mode. */
3377 if (!MANAGER_IS_USER(m) || m->ready_sent)
3378 return;
3379
3380 m->ready_sent = true;
3381
3382 sd_notifyf(false,
3383 "READY=1\n"
3384 "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
3385 }
3386
3387 static void manager_check_basic_target(Manager *m) {
3388 Unit *u;
3389
3390 assert(m);
3391
3392 /* Small shortcut */
3393 if (m->ready_sent && m->taint_logged)
3394 return;
3395
3396 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3397 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3398 return;
3399
3400 /* For user managers, send out READY=1 as soon as we reach basic.target */
3401 manager_send_ready(m);
3402
3403 /* Log the taint string as soon as we reach basic.target */
3404 log_taint_string(m);
3405 }
3406
3407 void manager_check_finished(Manager *m) {
3408 assert(m);
3409
3410 if (MANAGER_IS_RELOADING(m))
3411 return;
3412
3413 /* Verify that we have entered the event loop already, and not left it again. */
3414 if (!MANAGER_IS_RUNNING(m))
3415 return;
3416
3417 manager_check_basic_target(m);
3418
3419 if (hashmap_size(m->jobs) > 0) {
3420 if (m->jobs_in_progress_event_source)
3421 /* Ignore any failure, this is only for feedback */
3422 (void) sd_event_source_set_time(m->jobs_in_progress_event_source, now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC);
3423
3424 return;
3425 }
3426
3427 manager_flip_auto_status(m, false);
3428
3429 /* Notify Type=idle units that we are done now */
3430 manager_close_idle_pipe(m);
3431
3432 /* Turn off confirm spawn now */
3433 m->confirm_spawn = NULL;
3434
3435 /* No need to update ask password status when we're going non-interactive */
3436 manager_close_ask_password(m);
3437
3438 /* This is no longer the first boot */
3439 manager_set_first_boot(m, false);
3440
3441 if (MANAGER_IS_FINISHED(m))
3442 return;
3443
3444 dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_FINISH);
3445
3446 manager_notify_finished(m);
3447
3448 manager_invalidate_startup_units(m);
3449 }
3450
3451 static bool generator_path_any(const char* const* paths) {
3452 char **path;
3453 bool found = false;
3454
3455 /* Optimize by skipping the whole process, i.e. not creating the output directories,
3456 * if no generators are found. */
3457 STRV_FOREACH(path, (char**) paths)
3458 if (access(*path, F_OK) == 0)
3459 found = true;
3460 else if (errno != ENOENT)
3461 log_warning_errno(errno, "Failed to open generator directory %s: %m", *path);
3462
3463 return found;
3464 }
3465
3466 static const char* system_env_generator_binary_paths[] = {
3467 "/run/systemd/system-environment-generators",
3468 "/etc/systemd/system-environment-generators",
3469 "/usr/local/lib/systemd/system-environment-generators",
3470 SYSTEM_ENV_GENERATOR_PATH,
3471 NULL
3472 };
3473
3474 static const char* user_env_generator_binary_paths[] = {
3475 "/run/systemd/user-environment-generators",
3476 "/etc/systemd/user-environment-generators",
3477 "/usr/local/lib/systemd/user-environment-generators",
3478 USER_ENV_GENERATOR_PATH,
3479 NULL
3480 };
3481
3482 static int manager_run_environment_generators(Manager *m) {
3483 char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
3484 const char **paths;
3485 void* args[] = {&tmp, &tmp, &m->environment};
3486
3487 if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))
3488 return 0;
3489
3490 paths = MANAGER_IS_SYSTEM(m) ? system_env_generator_binary_paths : user_env_generator_binary_paths;
3491
3492 if (!generator_path_any(paths))
3493 return 0;
3494
3495 return execute_directories(paths, DEFAULT_TIMEOUT_USEC, gather_environment, args, NULL);
3496 }
3497
3498 static int manager_run_generators(Manager *m) {
3499 _cleanup_strv_free_ char **paths = NULL;
3500 const char *argv[5];
3501 int r;
3502
3503 assert(m);
3504
3505 if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))
3506 return 0;
3507
3508 paths = generator_binary_paths(m->unit_file_scope);
3509 if (!paths)
3510 return log_oom();
3511
3512 if (!generator_path_any((const char* const*) paths))
3513 return 0;
3514
3515 r = lookup_paths_mkdir_generator(&m->lookup_paths);
3516 if (r < 0)
3517 goto finish;
3518
3519 argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
3520 argv[1] = m->lookup_paths.generator;
3521 argv[2] = m->lookup_paths.generator_early;
3522 argv[3] = m->lookup_paths.generator_late;
3523 argv[4] = NULL;
3524
3525 RUN_WITH_UMASK(0022)
3526 execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC,
3527 NULL, NULL, (char**) argv);
3528
3529 finish:
3530 lookup_paths_trim_generator(&m->lookup_paths);
3531 return r;
3532 }
3533
3534 int manager_environment_add(Manager *m, char **minus, char **plus) {
3535 char **a = NULL, **b = NULL, **l;
3536 assert(m);
3537
3538 l = m->environment;
3539
3540 if (!strv_isempty(minus)) {
3541 a = strv_env_delete(l, 1, minus);
3542 if (!a)
3543 return -ENOMEM;
3544
3545 l = a;
3546 }
3547
3548 if (!strv_isempty(plus)) {
3549 b = strv_env_merge(2, l, plus);
3550 if (!b) {
3551 strv_free(a);
3552 return -ENOMEM;
3553 }
3554
3555 l = b;
3556 }
3557
3558 if (m->environment != l)
3559 strv_free(m->environment);
3560 if (a != l)
3561 strv_free(a);
3562 if (b != l)
3563 strv_free(b);
3564
3565 m->environment = l;
3566 manager_sanitize_environment(m);
3567
3568 return 0;
3569 }
3570
3571 int manager_set_default_rlimits(Manager *m, struct rlimit **default_rlimit) {
3572 int i;
3573
3574 assert(m);
3575
3576 for (i = 0; i < _RLIMIT_MAX; i++) {
3577 m->rlimit[i] = mfree(m->rlimit[i]);
3578
3579 if (!default_rlimit[i])
3580 continue;
3581
3582 m->rlimit[i] = newdup(struct rlimit, default_rlimit[i], 1);
3583 if (!m->rlimit[i])
3584 return log_oom();
3585 }
3586
3587 return 0;
3588 }
3589
3590 void manager_recheck_dbus(Manager *m) {
3591 assert(m);
3592
3593 /* Connects to the bus if the dbus service and socket are running. If we are running in user mode this is all
3594 * it does. In system mode we'll also connect to the system bus (which will most likely just reuse the
3595 * connection of the API bus). That's because the system bus after all runs as service of the system instance,
3596 * while in the user instance we can assume it's already there. */
3597
3598 if (MANAGER_IS_RELOADING(m))
3599 return; /* don't check while we are reloading… */
3600
3601 if (manager_dbus_is_running(m, false)) {
3602 (void) bus_init_api(m);
3603
3604 if (MANAGER_IS_SYSTEM(m))
3605 (void) bus_init_system(m);
3606 } else {
3607 (void) bus_done_api(m);
3608
3609 if (MANAGER_IS_SYSTEM(m))
3610 (void) bus_done_system(m);
3611 }
3612 }
3613
3614 static bool manager_journal_is_running(Manager *m) {
3615 Unit *u;
3616
3617 assert(m);
3618
3619 if (m->test_run_flags != 0)
3620 return false;
3621
3622 /* If we are the user manager we can safely assume that the journal is up */
3623 if (!MANAGER_IS_SYSTEM(m))
3624 return true;
3625
3626 /* Check that the socket is not only up, but in RUNNING state */
3627 u = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
3628 if (!u)
3629 return false;
3630 if (SOCKET(u)->state != SOCKET_RUNNING)
3631 return false;
3632
3633 /* Similar, check if the daemon itself is fully up, too */
3634 u = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
3635 if (!u)
3636 return false;
3637 if (!IN_SET(SERVICE(u)->state, SERVICE_RELOAD, SERVICE_RUNNING))
3638 return false;
3639
3640 return true;
3641 }
3642
3643 void manager_recheck_journal(Manager *m) {
3644
3645 assert(m);
3646
3647 /* Don't bother with this unless we are in the special situation of being PID 1 */
3648 if (getpid_cached() != 1)
3649 return;
3650
3651 /* Don't check this while we are reloading, things might still change */
3652 if (MANAGER_IS_RELOADING(m))
3653 return;
3654
3655 /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If the
3656 * journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we might trigger
3657 * an activation we cannot fulfill ourselves. */
3658 log_set_prohibit_ipc(!manager_journal_is_running(m));
3659 log_open();
3660 }
3661
3662 void manager_set_show_status(Manager *m, ShowStatus mode) {
3663 assert(m);
3664 assert(IN_SET(mode, SHOW_STATUS_AUTO, SHOW_STATUS_NO, SHOW_STATUS_YES, SHOW_STATUS_TEMPORARY));
3665
3666 if (!MANAGER_IS_SYSTEM(m))
3667 return;
3668
3669 if (m->show_status != mode)
3670 log_debug("%s showing of status.",
3671 mode == SHOW_STATUS_NO ? "Disabling" : "Enabling");
3672 m->show_status = mode;
3673
3674 if (mode > 0)
3675 (void) touch("/run/systemd/show-status");
3676 else
3677 (void) unlink("/run/systemd/show-status");
3678 }
3679
3680 static bool manager_get_show_status(Manager *m, StatusType type) {
3681 assert(m);
3682
3683 if (!MANAGER_IS_SYSTEM(m))
3684 return false;
3685
3686 if (m->no_console_output)
3687 return false;
3688
3689 if (!IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING))
3690 return false;
3691
3692 /* If we cannot find out the status properly, just proceed. */
3693 if (type != STATUS_TYPE_EMERGENCY && manager_check_ask_password(m) > 0)
3694 return false;
3695
3696 return m->show_status > 0;
3697 }
3698
3699 const char *manager_get_confirm_spawn(Manager *m) {
3700 static int last_errno = 0;
3701 const char *vc = m->confirm_spawn;
3702 struct stat st;
3703 int r;
3704
3705 /* Here's the deal: we want to test the validity of the console but don't want
3706 * PID1 to go through the whole console process which might block. But we also
3707 * want to warn the user only once if something is wrong with the console so we
3708 * cannot do the sanity checks after spawning our children. So here we simply do
3709 * really basic tests to hopefully trap common errors.
3710 *
3711 * If the console suddenly disappears at the time our children really need it,
3712 * they will simply fail to acquire it and a positive answer will be
3713 * assumed. New children will fall back to /dev/console though.
3714 *
3715 * Note: TTYs are devices that can come and go any time, and frequently aren't
3716 * available yet during early boot (consider a USB rs232 dongle...). If for any
3717 * reason the configured console is not ready, we fall back to the default
3718 * console. */
3719
3720 if (!vc || path_equal(vc, "/dev/console"))
3721 return vc;
3722
3723 r = stat(vc, &st);
3724 if (r < 0)
3725 goto fail;
3726
3727 if (!S_ISCHR(st.st_mode)) {
3728 errno = ENOTTY;
3729 goto fail;
3730 }
3731
3732 last_errno = 0;
3733 return vc;
3734 fail:
3735 if (last_errno != errno) {
3736 last_errno = errno;
3737 log_warning_errno(errno, "Failed to open %s: %m, using default console", vc);
3738 }
3739 return "/dev/console";
3740 }
3741
3742 void manager_set_first_boot(Manager *m, bool b) {
3743 assert(m);
3744
3745 if (!MANAGER_IS_SYSTEM(m))
3746 return;
3747
3748 if (m->first_boot != (int) b) {
3749 if (b)
3750 (void) touch("/run/systemd/first-boot");
3751 else
3752 (void) unlink("/run/systemd/first-boot");
3753 }
3754
3755 m->first_boot = b;
3756 }
3757
3758 void manager_disable_confirm_spawn(void) {
3759 (void) touch("/run/systemd/confirm_spawn_disabled");
3760 }
3761
3762 bool manager_is_confirm_spawn_disabled(Manager *m) {
3763 if (!m->confirm_spawn)
3764 return true;
3765
3766 return access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
3767 }
3768
3769 void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
3770 va_list ap;
3771
3772 /* If m is NULL, assume we're after shutdown and let the messages through. */
3773
3774 if (m && !manager_get_show_status(m, type))
3775 return;
3776
3777 /* XXX We should totally drop the check for ephemeral here
3778 * and thus effectively make 'Type=idle' pointless. */
3779 if (type == STATUS_TYPE_EPHEMERAL && m && m->n_on_console > 0)
3780 return;
3781
3782 va_start(ap, format);
3783 status_vprintf(status, true, type == STATUS_TYPE_EPHEMERAL, format, ap);
3784 va_end(ap);
3785 }
3786
3787 Set *manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
3788 char p[strlen(path)+1];
3789
3790 assert(m);
3791 assert(path);
3792
3793 strcpy(p, path);
3794 path_kill_slashes(p);
3795
3796 return hashmap_get(m->units_requiring_mounts_for, streq(p, "/") ? "" : p);
3797 }
3798
3799 int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
3800 unsigned size;
3801 int r;
3802
3803 assert(m);
3804 assert(u->manager == m);
3805
3806 size = set_size(m->failed_units);
3807
3808 if (failed) {
3809 r = set_ensure_allocated(&m->failed_units, NULL);
3810 if (r < 0)
3811 return log_oom();
3812
3813 if (set_put(m->failed_units, u) < 0)
3814 return log_oom();
3815 } else
3816 (void) set_remove(m->failed_units, u);
3817
3818 if (set_size(m->failed_units) != size)
3819 bus_manager_send_change_signal(m);
3820
3821 return 0;
3822 }
3823
3824 ManagerState manager_state(Manager *m) {
3825 Unit *u;
3826
3827 assert(m);
3828
3829 /* Did we ever finish booting? If not then we are still starting up */
3830 if (!MANAGER_IS_FINISHED(m)) {
3831
3832 u = manager_get_unit(m, SPECIAL_BASIC_TARGET);
3833 if (!u || !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
3834 return MANAGER_INITIALIZING;
3835
3836 return MANAGER_STARTING;
3837 }
3838
3839 /* Is the special shutdown target active or queued? If so, we are in shutdown state */
3840 u = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
3841 if (u && unit_active_or_pending(u))
3842 return MANAGER_STOPPING;
3843
3844 if (MANAGER_IS_SYSTEM(m)) {
3845 /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
3846 u = manager_get_unit(m, SPECIAL_RESCUE_TARGET);
3847 if (u && unit_active_or_pending(u))
3848 return MANAGER_MAINTENANCE;
3849
3850 u = manager_get_unit(m, SPECIAL_EMERGENCY_TARGET);
3851 if (u && unit_active_or_pending(u))
3852 return MANAGER_MAINTENANCE;
3853 }
3854
3855 /* Are there any failed units? If so, we are in degraded mode */
3856 if (set_size(m->failed_units) > 0)
3857 return MANAGER_DEGRADED;
3858
3859 return MANAGER_RUNNING;
3860 }
3861
3862 #define DESTROY_IPC_FLAG (UINT32_C(1) << 31)
3863
3864 static void manager_unref_uid_internal(
3865 Manager *m,
3866 Hashmap **uid_refs,
3867 uid_t uid,
3868 bool destroy_now,
3869 int (*_clean_ipc)(uid_t uid)) {
3870
3871 uint32_t c, n;
3872
3873 assert(m);
3874 assert(uid_refs);
3875 assert(uid_is_valid(uid));
3876 assert(_clean_ipc);
3877
3878 /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the assumption
3879 * that uid_t and gid_t are actually defined the same way, with the same validity rules.
3880 *
3881 * We store a hashmap where the UID/GID is the key and the value is a 32-bit reference counter, whose highest
3882 * bit is used as a flag marking UIDs/GIDs whose IPC objects should be removed when the last reference to the
3883 * UID/GID is dropped. The flag is set once at least one reference from a unit with RemoveIPC= set is added for
3884 * a UID/GID. It is reset when the UID's/GID's reference counter drops to 0 again; see the sketch after this function. */
3885
3886 assert_cc(sizeof(uid_t) == sizeof(gid_t));
3887 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
3888
3889 if (uid == 0) /* We don't keep track of root, and will never destroy it */
3890 return;
3891
3892 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
3893
3894 n = c & ~DESTROY_IPC_FLAG;
3895 assert(n > 0);
3896 n--;
3897
3898 if (destroy_now && n == 0) {
3899 hashmap_remove(*uid_refs, UID_TO_PTR(uid));
3900
3901 if (c & DESTROY_IPC_FLAG) {
3902 log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
3903 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
3904 uid);
3905 (void) _clean_ipc(uid);
3906 }
3907 } else {
3908 c = n | (c & DESTROY_IPC_FLAG);
3909 assert_se(hashmap_update(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
3910 }
3911 }
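
/* Illustrative sketch, not part of the original sources: how the packed 32-bit
 * value stored in the uid_refs/gid_refs hashmaps splits into the reference
 * counter and the DESTROY_IPC_FLAG bit described above. The helper names are
 * hypothetical and the block is disabled. */
#if 0
static uint32_t example_ref_count(uint32_t c) {
        return c & ~DESTROY_IPC_FLAG;            /* low 31 bits: reference counter */
}

static bool example_ref_wants_ipc_cleanup(uint32_t c) {
        return !!(c & DESTROY_IPC_FLAG);         /* high bit: clean up IPC on last unref */
}

static uint32_t example_ref_pack(uint32_t n, bool destroy_ipc) {
        return n | (destroy_ipc ? DESTROY_IPC_FLAG : 0);
}
#endif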
3912
3913 void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
3914 manager_unref_uid_internal(m, &m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
3915 }
3916
3917 void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
3918 manager_unref_uid_internal(m, &m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
3919 }
3920
3921 static int manager_ref_uid_internal(
3922 Manager *m,
3923 Hashmap **uid_refs,
3924 uid_t uid,
3925 bool clean_ipc) {
3926
3927 uint32_t c, n;
3928 int r;
3929
3930 assert(m);
3931 assert(uid_refs);
3932 assert(uid_is_valid(uid));
3933
3934 /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the assumption
3935 * that uid_t and gid_t are actually defined the same way, with the same validity rules. */
3936
3937 assert_cc(sizeof(uid_t) == sizeof(gid_t));
3938 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
3939
3940 if (uid == 0) /* We don't keep track of root, and will never destroy it */
3941 return 0;
3942
3943 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
3944 if (r < 0)
3945 return r;
3946
3947 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
3948
3949 n = c & ~DESTROY_IPC_FLAG;
3950 n++;
3951
3952 if (n & DESTROY_IPC_FLAG) /* check for overflow */
3953 return -EOVERFLOW;
3954
3955 c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);
3956
3957 return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
3958 }
3959
3960 int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
3961 return manager_ref_uid_internal(m, &m->uid_refs, uid, clean_ipc);
3962 }
3963
3964 int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
3965 return manager_ref_uid_internal(m, &m->gid_refs, (uid_t) gid, clean_ipc);
3966 }
3967
3968 static void manager_vacuum_uid_refs_internal(
3969 Manager *m,
3970 Hashmap **uid_refs,
3971 int (*_clean_ipc)(uid_t uid)) {
3972
3973 Iterator i;
3974 void *p, *k;
3975
3976 assert(m);
3977 assert(uid_refs);
3978 assert(_clean_ipc);
3979
3980 HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
3981 uint32_t c, n;
3982 uid_t uid;
3983
3984 uid = PTR_TO_UID(k);
3985 c = PTR_TO_UINT32(p);
3986
3987 n = c & ~DESTROY_IPC_FLAG;
3988 if (n > 0)
3989 continue;
3990
3991 if (c & DESTROY_IPC_FLAG) {
3992 log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
3993 _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
3994 uid);
3995 (void) _clean_ipc(uid);
3996 }
3997
3998 assert_se(hashmap_remove(*uid_refs, k) == p);
3999 }
4000 }
4001
4002 void manager_vacuum_uid_refs(Manager *m) {
4003 manager_vacuum_uid_refs_internal(m, &m->uid_refs, clean_ipc_by_uid);
4004 }
4005
4006 void manager_vacuum_gid_refs(Manager *m) {
4007 manager_vacuum_uid_refs_internal(m, &m->gid_refs, clean_ipc_by_gid);
4008 }
4009
4010 static void manager_serialize_uid_refs_internal(
4011 Manager *m,
4012 FILE *f,
4013 Hashmap **uid_refs,
4014 const char *field_name) {
4015
4016 Iterator i;
4017 void *p, *k;
4018
4019 assert(m);
4020 assert(f);
4021 assert(uid_refs);
4022 assert(field_name);
4023
4024 /* Serialize the UID reference table. Or actually, just its IPC destruction flag, as the actual counter is
4025 * better rebuilt after a reload/reexec; an example of the resulting lines follows this function. */
4026
4027 HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
4028 uint32_t c;
4029 uid_t uid;
4030
4031 uid = PTR_TO_UID(k);
4032 c = PTR_TO_UINT32(p);
4033
4034 if (!(c & DESTROY_IPC_FLAG))
4035 continue;
4036
4037 fprintf(f, "%s=" UID_FMT "\n", field_name, uid);
4038 }
4039 }
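
/* Example of the serialized format produced above (assumed values, for
 * illustration only): with UIDs 1000 and 1001 flagged for IPC removal the
 * serialization stream contains lines such as
 *
 *     destroy-ipc-uid=1000
 *     destroy-ipc-uid=1001
 *
 * The counters themselves are intentionally not written out; they are rebuilt
 * from the live units once the reload/reexec has completed. */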
4040
4041 void manager_serialize_uid_refs(Manager *m, FILE *f) {
4042 manager_serialize_uid_refs_internal(m, f, &m->uid_refs, "destroy-ipc-uid");
4043 }
4044
4045 void manager_serialize_gid_refs(Manager *m, FILE *f) {
4046 manager_serialize_uid_refs_internal(m, f, &m->gid_refs, "destroy-ipc-gid");
4047 }
4048
4049 static void manager_deserialize_uid_refs_one_internal(
4050 Manager *m,
4051 Hashmap** uid_refs,
4052 const char *value) {
4053
4054 uid_t uid;
4055 uint32_t c;
4056 int r;
4057
4058 assert(m);
4059 assert(uid_refs);
4060 assert(value);
4061
4062 r = parse_uid(value, &uid);
4063 if (r < 0 || uid == 0) {
4064 log_debug("Unable to parse UID reference serialization");
4065 return;
4066 }
4067
4068 r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
4069 if (r < 0) {
4070 log_oom();
4071 return;
4072 }
4073
4074 c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
4075 if (c & DESTROY_IPC_FLAG)
4076 return;
4077
4078 c |= DESTROY_IPC_FLAG;
4079
4080 r = hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
4081 if (r < 0) {
4082 log_debug("Failed to add UID reference entry");
4083 return;
4084 }
4085 }
4086
4087 void manager_deserialize_uid_refs_one(Manager *m, const char *value) {
4088 manager_deserialize_uid_refs_one_internal(m, &m->uid_refs, value);
4089 }
4090
4091 void manager_deserialize_gid_refs_one(Manager *m, const char *value) {
4092 manager_deserialize_uid_refs_one_internal(m, &m->gid_refs, value);
4093 }
4094
4095 int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
4096 struct buffer {
4097 uid_t uid;
4098 gid_t gid;
4099 char unit_name[UNIT_NAME_MAX+1];
4100 } _packed_ buffer;
4101
4102 Manager *m = userdata;
4103 ssize_t l;
4104 size_t n;
4105 Unit *u;
4106
4107 assert_se(source);
4108 assert_se(m);
4109
4110 /* Invoked whenever a child process succeeded in resolving the user/group to use and sent us the resulting
4111 * UID/GID in a datagram. We parse the datagram here and pass it off to the unit, so that it can add a reference
4112 * to the UID/GID and later destroy its IPC objects when the counter drops to 0; a sender-side sketch follows. */
4113
4114 l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
4115 if (l < 0) {
4116 if (IN_SET(errno, EINTR, EAGAIN))
4117 return 0;
4118
4119 return log_error_errno(errno, "Failed to read from user lookup fd: %m");
4120 }
4121
4122 if ((size_t) l <= offsetof(struct buffer, unit_name)) {
4123 log_warning("Received too short user lookup message, ignoring.");
4124 return 0;
4125 }
4126
4127 if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
4128 log_warning("Received too long user lookup message, ignoring.");
4129 return 0;
4130 }
4131
4132 if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
4133 log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
4134 return 0;
4135 }
4136
4137 n = (size_t) l - offsetof(struct buffer, unit_name);
4138 if (memchr(buffer.unit_name, 0, n)) {
4139 log_warning("Received lookup message with embedded NUL character, ignoring.");
4140 return 0;
4141 }
4142
4143 buffer.unit_name[n] = 0;
4144 u = manager_get_unit(m, buffer.unit_name);
4145 if (!u) {
4146 log_debug("Got user lookup message but unit doesn't exist, ignoring.");
4147 return 0;
4148 }
4149
4150 log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);
4151
4152 unit_notify_user_lookup(u, buffer.uid, buffer.gid);
4153 return 0;
4154 }
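
/* Illustrative sketch, not part of the original sources: what the sender side
 * of the user lookup datagram handled above could look like. The function name
 * and fd parameter are hypothetical; the wire layout mirrors the packed struct
 * parsed by manager_dispatch_user_lookup_fd(), with the unit name sent without
 * a trailing NUL. The block is disabled. */
#if 0
static int example_send_user_lookup(int user_lookup_fd, uid_t uid, gid_t gid, const char *unit_name) {
        struct lookup_buffer {
                uid_t uid;
                gid_t gid;
                char unit_name[UNIT_NAME_MAX+1];
        } _packed_ buffer;
        size_t n;

        n = strlen(unit_name);
        if (n > UNIT_NAME_MAX)
                return -EINVAL;

        buffer.uid = uid;
        buffer.gid = gid;
        memcpy(buffer.unit_name, unit_name, n);

        /* Send only the used part of the name, no trailing NUL. */
        if (send(user_lookup_fd, &buffer, offsetof(struct lookup_buffer, unit_name) + n, MSG_DONTWAIT) < 0)
                return -errno;

        return 0;
}
#endif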
4155
4156 char *manager_taint_string(Manager *m) {
4157 _cleanup_free_ char *destination = NULL, *overflowuid = NULL, *overflowgid = NULL;
4158 char *buf, *e;
4159 int r;
4160
4161 /* Returns a "taint string", e.g. "local-hwclock:var-run-bad".
4162 * Only things that are detected at runtime should be tagged
4163 * here. For stuff that is set during compilation, emit a warning
4164 * in the configuration phase. */
4165
4166 assert(m);
4167
4168 buf = new(char, sizeof("split-usr:"
4169 "cgroups-missing:"
4170 "local-hwclock:"
4171 "var-run-bad:"
4172 "overflowuid-not-65534:"
4173 "overflowgid-not-65534:"));
4174 if (!buf)
4175 return NULL;
4176
4177 e = buf;
4178 buf[0] = 0;
4179
4180 if (m->taint_usr)
4181 e = stpcpy(e, "split-usr:");
4182
4183 if (access("/proc/cgroups", F_OK) < 0)
4184 e = stpcpy(e, "cgroups-missing:");
4185
4186 if (clock_is_localtime(NULL) > 0)
4187 e = stpcpy(e, "local-hwclock:");
4188
4189 r = readlink_malloc("/var/run", &destination);
4190 if (r < 0 || !PATH_IN_SET(destination, "../run", "/run"))
4191 e = stpcpy(e, "var-run-bad:");
4192
4193 r = read_one_line_file("/proc/sys/kernel/overflowuid", &overflowuid);
4194 if (r >= 0 && !streq(overflowuid, "65534"))
4195 e = stpcpy(e, "overflowuid-not-65534:");
4196
4197 r = read_one_line_file("/proc/sys/kernel/overflowgid", &overflowgid);
4198 if (r >= 0 && !streq(overflowgid, "65534"))
4199 e = stpcpy(e, "overflowgid-not-65534:");
4200
4201 /* remove the last ':' */
4202 if (e != buf)
4203 e[-1] = 0;
4204
4205 return buf;
4206 }
4207
4208 void manager_ref_console(Manager *m) {
4209 assert(m);
4210
4211 m->n_on_console++;
4212 }
4213
4214 void manager_unref_console(Manager *m) {
4215
4216 assert(m->n_on_console > 0);
4217 m->n_on_console--;
4218
4219 if (m->n_on_console == 0)
4220 m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
4221 }
4222
4223 static const char *const manager_state_table[_MANAGER_STATE_MAX] = {
4224 [MANAGER_INITIALIZING] = "initializing",
4225 [MANAGER_STARTING] = "starting",
4226 [MANAGER_RUNNING] = "running",
4227 [MANAGER_DEGRADED] = "degraded",
4228 [MANAGER_MAINTENANCE] = "maintenance",
4229 [MANAGER_STOPPING] = "stopping",
4230 };
4231
4232 DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);
4233
4234 static const char *const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
4235 [MANAGER_TIMESTAMP_FIRMWARE] = "firmware",
4236 [MANAGER_TIMESTAMP_LOADER] = "loader",
4237 [MANAGER_TIMESTAMP_KERNEL] = "kernel",
4238 [MANAGER_TIMESTAMP_INITRD] = "initrd",
4239 [MANAGER_TIMESTAMP_USERSPACE] = "userspace",
4240 [MANAGER_TIMESTAMP_FINISH] = "finish",
4241 [MANAGER_TIMESTAMP_SECURITY_START] = "security-start",
4242 [MANAGER_TIMESTAMP_SECURITY_FINISH] = "security-finish",
4243 [MANAGER_TIMESTAMP_GENERATORS_START] = "generators-start",
4244 [MANAGER_TIMESTAMP_GENERATORS_FINISH] = "generators-finish",
4245 [MANAGER_TIMESTAMP_UNITS_LOAD_START] = "units-load-start",
4246 [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH] = "units-load-finish",
4247 };
4248
4249 DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);